# WebAssembly SIMD operations

The SIMD operations are grouped according to the interpretation of the input and output vectors. In the tables that follow, each WebAssembly operation is paired with the corresponding Portable SIMD operation.

| Shape | Int   | Float | Bool  |
|:------|:------|:------|:------|
| v8x16 | i8x16 | -     | b8x16 |
| v16x8 | i16x8 | -     | b16x8 |
| v32x4 | i32x4 | f32x4 | b32x4 |
| v64x2 | i64x2 | f64x2 | b64x2 |
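
The shapes are interpretations of the same 128-bit value rather than distinct storage types, so a `v128` produced under one shape can be consumed under another. A minimal sketch in WebAssembly text format (the folded syntax and immediate lane index are illustrative assumptions; this draft predates a finalized text format):

```wat
;; Splat 0x01020304 into all four 32-bit lanes, then read lane 0 back
;; out byte-wise. With little-endian lane ordering, byte lane 0 is 0x04.
(func $reinterpret (result i32)
  (i8x16.extract_lane_u 0
    (i32x4.splat (i32.const 0x01020304))))
```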

## v128 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `v128.and(a: v128, b: v128) -> v128` | `v128.and` |
| `v128.or(a: v128, b: v128) -> v128` | `v128.or` |
| `v128.xor(a: v128, b: v128) -> v128` | `v128.xor` |
| `v128.not(a: v128) -> v128` | `v128.not` |
| `v128.load(addr, offset) -> v128` | `v128.load` |
| `v128.store(addr, offset, data: v128)` | `v128.store` |
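
The bitwise and memory operations are shape-agnostic. A sketch that masks one vector in memory with the complement of another (the `offset=` immediate notation is borrowed from the later standard text format and is assumed here):

```wat
;; mem[addr] = mem[addr] & ~mem[addr+16]
(func $andnot_in_place (param $addr i32)
  (v128.store (local.get $addr)
    (v128.and
      (v128.load (local.get $addr))
      (v128.not (v128.load offset=16 (local.get $addr))))))
```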

## v8x16 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `v8x16.select(s: b8x16, t: v128, f: v128) -> v128` | `v8x16.select` |
| `v8x16.swizzle(a: v128, s: LaneIdx16[16]) -> v128` | `v8x16.swizzle` |
| `v8x16.shuffle(a: v128, b: v128, s: LaneIdx32[16]) -> v128` | `v8x16.shuffle` |
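
`swizzle` permutes the lanes of one vector with a constant pattern, while `shuffle` picks lanes from the concatenation of two vectors: indices 0-15 select from the first operand, 16-31 from the second. Passing the same vector twice makes `shuffle` act as a swizzle, as in this sketch:

```wat
;; Reverse the byte order of a vector with a constant shuffle pattern.
(func $reverse_bytes (param $a v128) (result v128)
  (v8x16.shuffle 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0
    (local.get $a) (local.get $a)))
```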

## i8x16 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `i8x16.build(x: i32[16]) -> v128` | `i8x16.build` |
| `i8x16.splat(x: i32) -> v128` | `i8x16.splat` |
| `i8x16.extract_lane_s(a: v128, i: LaneIdx16) -> i32` | `i8x16.extractLane` |
| `i8x16.extract_lane_u(a: v128, i: LaneIdx16) -> i32` | `i8x16.extractLane` |
| `i8x16.replace_lane(a: v128, i: LaneIdx16, x: i32) -> v128` | `i8x16.replaceLane` |
| `i8x16.add(a: v128, b: v128) -> v128` | `i8x16.add` |
| `i8x16.sub(a: v128, b: v128) -> v128` | `i8x16.sub` |
| `i8x16.mul(a: v128, b: v128) -> v128` | `i8x16.mul` |
| `i8x16.neg(a: v128) -> v128` | `i8x16.neg` |
| `i8x16.add_saturate_s(a: v128, b: v128) -> v128` | `s8x16.addSaturate` |
| `i8x16.add_saturate_u(a: v128, b: v128) -> v128` | `u8x16.addSaturate` |
| `i8x16.sub_saturate_s(a: v128, b: v128) -> v128` | `s8x16.subSaturate` |
| `i8x16.sub_saturate_u(a: v128, b: v128) -> v128` | `u8x16.subSaturate` |
| `i8x16.shl(a: v128, y: i32) -> v128` | `i8x16.shiftLeftByScalar` |
| `i8x16.shr_s(a: v128, y: i32) -> v128` | `s8x16.shiftRightByScalar` |
| `i8x16.shr_u(a: v128, y: i32) -> v128` | `u8x16.shiftRightByScalar` |
| `i8x16.eq(a: v128, b: v128) -> b8x16` | `i8x16.equal` |
| `i8x16.ne(a: v128, b: v128) -> b8x16` | `i8x16.notEqual` |
| `i8x16.lt_s(a: v128, b: v128) -> b8x16` | `s8x16.lessThan` |
| `i8x16.lt_u(a: v128, b: v128) -> b8x16` | `u8x16.lessThan` |
| `i8x16.le_s(a: v128, b: v128) -> b8x16` | `s8x16.lessThanOrEqual` |
| `i8x16.le_u(a: v128, b: v128) -> b8x16` | `u8x16.lessThanOrEqual` |
| `i8x16.gt_s(a: v128, b: v128) -> b8x16` | `s8x16.greaterThan` |
| `i8x16.gt_u(a: v128, b: v128) -> b8x16` | `u8x16.greaterThan` |
| `i8x16.ge_s(a: v128, b: v128) -> b8x16` | `s8x16.greaterThanOrEqual` |
| `i8x16.ge_u(a: v128, b: v128) -> b8x16` | `u8x16.greaterThanOrEqual` |
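
The `_saturate` forms clamp to the lane's value range instead of wrapping, which matters for 8-bit pixel and audio math. A sketch contrasting the two behaviors (the immediate lane index is assumed syntax):

```wat
;; 200 + 100 = 300: plain i8x16.add would wrap to 44, while the
;; unsigned saturating add clamps the result to 255.
(func $saturating (result i32)
  (i8x16.extract_lane_u 0
    (i8x16.add_saturate_u
      (i8x16.splat (i32.const 200))
      (i8x16.splat (i32.const 100)))))   ;; -> 255
```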

## v16x8 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `v16x8.select(s: b16x8, t: v128, f: v128) -> v128` | `v16x8.select` |
| `v16x8.swizzle(a: v128, s: LaneIdx8[8]) -> v128` | `v16x8.swizzle` |
| `v16x8.shuffle(a: v128, b: v128, s: LaneIdx16[8]) -> v128` | `v16x8.shuffle` |

## i16x8 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `i16x8.build(x: i32[8]) -> v128` | `i16x8.build` |
| `i16x8.splat(x: i32) -> v128` | `i16x8.splat` |
| `i16x8.extract_lane_s(a: v128, i: LaneIdx8) -> i32` | `i16x8.extractLane` |
| `i16x8.extract_lane_u(a: v128, i: LaneIdx8) -> i32` | `i16x8.extractLane` |
| `i16x8.replace_lane(a: v128, i: LaneIdx8, x: i32) -> v128` | `i16x8.replaceLane` |
| `i16x8.add(a: v128, b: v128) -> v128` | `i16x8.add` |
| `i16x8.sub(a: v128, b: v128) -> v128` | `i16x8.sub` |
| `i16x8.mul(a: v128, b: v128) -> v128` | `i16x8.mul` |
| `i16x8.neg(a: v128) -> v128` | `i16x8.neg` |
| `i16x8.add_saturate_s(a: v128, b: v128) -> v128` | `s16x8.addSaturate` |
| `i16x8.add_saturate_u(a: v128, b: v128) -> v128` | `u16x8.addSaturate` |
| `i16x8.sub_saturate_s(a: v128, b: v128) -> v128` | `s16x8.subSaturate` |
| `i16x8.sub_saturate_u(a: v128, b: v128) -> v128` | `u16x8.subSaturate` |
| `i16x8.shl(a: v128, y: i32) -> v128` | `i16x8.shiftLeftByScalar` |
| `i16x8.shr_s(a: v128, y: i32) -> v128` | `s16x8.shiftRightByScalar` |
| `i16x8.shr_u(a: v128, y: i32) -> v128` | `u16x8.shiftRightByScalar` |
| `i16x8.eq(a: v128, b: v128) -> b16x8` | `i16x8.equal` |
| `i16x8.ne(a: v128, b: v128) -> b16x8` | `i16x8.notEqual` |
| `i16x8.lt_s(a: v128, b: v128) -> b16x8` | `s16x8.lessThan` |
| `i16x8.lt_u(a: v128, b: v128) -> b16x8` | `u16x8.lessThan` |
| `i16x8.le_s(a: v128, b: v128) -> b16x8` | `s16x8.lessThanOrEqual` |
| `i16x8.le_u(a: v128, b: v128) -> b16x8` | `u16x8.lessThanOrEqual` |
| `i16x8.gt_s(a: v128, b: v128) -> b16x8` | `s16x8.greaterThan` |
| `i16x8.gt_u(a: v128, b: v128) -> b16x8` | `u16x8.greaterThan` |
| `i16x8.ge_s(a: v128, b: v128) -> b16x8` | `s16x8.greaterThanOrEqual` |
| `i16x8.ge_u(a: v128, b: v128) -> b16x8` | `u16x8.greaterThanOrEqual` |
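
Because the lanes are narrower than the `i32` result type, lane extraction must choose an extension: `_s` sign-extends and `_u` zero-extends, which is why both map to the same Portable SIMD `extractLane` on the signed and unsigned interpretations. A sketch:

```wat
;; A lane holding 0xFFFF reads back as -1 signed or 65535 unsigned.
(func $extract_signed (result i32)
  (i16x8.extract_lane_s 0 (i16x8.splat (i32.const 0xFFFF))))   ;; -> -1
(func $extract_unsigned (result i32)
  (i16x8.extract_lane_u 0 (i16x8.splat (i32.const 0xFFFF))))   ;; -> 65535
```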

## v32x4 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `v32x4.select(s: b32x4, t: v128, f: v128) -> v128` | `v32x4.select` |
| `v32x4.swizzle(a: v128, s: LaneIdx4[4]) -> v128` | `v32x4.swizzle` |
| `v32x4.shuffle(a: v128, b: v128, s: LaneIdx8[4]) -> v128` | `v32x4.shuffle` |
| `v32x4.load1(addr, offset) -> v128` | `v32x4.load1` |
| `v32x4.load2(addr, offset) -> v128` | `v32x4.load2` |
| `v32x4.load3(addr, offset) -> v128` | `v32x4.load3` |
| `v32x4.store1(addr, offset, data: v128)` | `v32x4.store1` |
| `v32x4.store2(addr, offset, data: v128)` | `v32x4.store2` |
| `v32x4.store3(addr, offset, data: v128)` | `v32x4.store3` |
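
The partial loads and stores transfer only the first one, two, or three 32-bit lanes, which suits packed three-component data that is not padded out to 16 bytes. A sketch (whether the untransferred lanes read back as zero is an assumption to check against the Portable SIMD text):

```wat
;; Load an xyz triple (12 bytes) without reading past the end of the
;; array; lane 3 is not loaded from memory.
(func $load_xyz (param $addr i32) (result v128)
  (v32x4.load3 (local.get $addr)))
```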

## i32x4 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `i32x4.build(x: i32[4]) -> v128` | `i32x4.build` |
| `i32x4.splat(x: i32) -> v128` | `i32x4.splat` |
| `i32x4.extract_lane(a: v128, i: LaneIdx4) -> i32` | `i32x4.extractLane` |
| `i32x4.replace_lane(a: v128, i: LaneIdx4, x: i32) -> v128` | `i32x4.replaceLane` |
| `i32x4.add(a: v128, b: v128) -> v128` | `i32x4.add` |
| `i32x4.sub(a: v128, b: v128) -> v128` | `i32x4.sub` |
| `i32x4.mul(a: v128, b: v128) -> v128` | `i32x4.mul` |
| `i32x4.neg(a: v128) -> v128` | `i32x4.neg` |
| `i32x4.shl(a: v128, y: i32) -> v128` | `i32x4.shiftLeftByScalar` |
| `i32x4.shr_s(a: v128, y: i32) -> v128` | `s32x4.shiftRightByScalar` |
| `i32x4.shr_u(a: v128, y: i32) -> v128` | `u32x4.shiftRightByScalar` |
| `i32x4.eq(a: v128, b: v128) -> b32x4` | `i32x4.equal` |
| `i32x4.ne(a: v128, b: v128) -> b32x4` | `i32x4.notEqual` |
| `i32x4.lt_s(a: v128, b: v128) -> b32x4` | `s32x4.lessThan` |
| `i32x4.lt_u(a: v128, b: v128) -> b32x4` | `u32x4.lessThan` |
| `i32x4.le_s(a: v128, b: v128) -> b32x4` | `s32x4.lessThanOrEqual` |
| `i32x4.le_u(a: v128, b: v128) -> b32x4` | `u32x4.lessThanOrEqual` |
| `i32x4.gt_s(a: v128, b: v128) -> b32x4` | `s32x4.greaterThan` |
| `i32x4.gt_u(a: v128, b: v128) -> b32x4` | `u32x4.greaterThan` |
| `i32x4.ge_s(a: v128, b: v128) -> b32x4` | `s32x4.greaterThanOrEqual` |
| `i32x4.ge_u(a: v128, b: v128) -> b32x4` | `u32x4.greaterThanOrEqual` |
| `i32x4.trunc_s/f32x4(a: v128) -> v128` | `s32x4.fromFloat` |
| `i32x4.trunc_u/f32x4(a: v128) -> v128` | `u32x4.fromFloat` |
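
The `trunc` conversions round each float lane toward zero, with `_s`/`_u` selecting the signed or unsigned integer interpretation; the handling of out-of-range lanes is defined by the Portable SIMD specification. A sketch using the draft's slash notation for the source shape:

```wat
;; Each lane of 2.9 truncates to 2.
(func $to_ints (result v128)
  (i32x4.trunc_s/f32x4 (f32x4.splat (f32.const 2.9))))
```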

## f32x4 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `f32x4.build(x: f32[4]) -> v128` | `f32x4.build` |
| `f32x4.splat(x: f32) -> v128` | `f32x4.splat` |
| `f32x4.extract_lane(a: v128, i: LaneIdx4) -> f32` | `f32x4.extractLane` |
| `f32x4.replace_lane(a: v128, i: LaneIdx4, x: f32) -> v128` | `f32x4.replaceLane` |
| `f32x4.add(a: v128, b: v128) -> v128` | `f32x4.add` |
| `f32x4.sub(a: v128, b: v128) -> v128` | `f32x4.sub` |
| `f32x4.mul(a: v128, b: v128) -> v128` | `f32x4.mul` |
| `f32x4.neg(a: v128) -> v128` | `f32x4.neg` |
| `f32x4.eq(a: v128, b: v128) -> b32x4` | `f32x4.equal` |
| `f32x4.ne(a: v128, b: v128) -> b32x4` | `f32x4.notEqual` |
| `f32x4.lt(a: v128, b: v128) -> b32x4` | `f32x4.lessThan` |
| `f32x4.le(a: v128, b: v128) -> b32x4` | `f32x4.lessThanOrEqual` |
| `f32x4.gt(a: v128, b: v128) -> b32x4` | `f32x4.greaterThan` |
| `f32x4.ge(a: v128, b: v128) -> b32x4` | `f32x4.greaterThanOrEqual` |
| `f32x4.abs(a: v128) -> v128` | `f32x4.abs` |
| `f32x4.min(a: v128, b: v128) -> v128` | `f32x4.min` |
| `f32x4.max(a: v128, b: v128) -> v128` | `f32x4.max` |
| `f32x4.div(a: v128, b: v128) -> v128` | `f32x4.div` |
| `f32x4.sqrt(a: v128) -> v128` | `f32x4.sqrt` |
| `f32x4.convert_s/i32x4(a: v128) -> v128` | `f32x4.fromSignedInt` |
| `f32x4.convert_u/i32x4(a: v128) -> v128` | `f32x4.fromUnsignedInt` |
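
`min` and `max` compose into a lane-wise clamp, a common pattern for color and audio normalization. A sketch (NaN propagation follows the draft's `min`/`max` semantics):

```wat
;; Clamp every lane to [0.0, 1.0].
(func $clamp01 (param $v v128) (result v128)
  (f32x4.min
    (f32x4.max (local.get $v) (f32x4.splat (f32.const 0)))
    (f32x4.splat (f32.const 1))))
```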

## v64x2 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `v64x2.select(s: b64x2, t: v128, f: v128) -> v128` | `v64x2.select` |
| `v64x2.swizzle(a: v128, s: LaneIdx2[2]) -> v128` | `v64x2.swizzle` |
| `v64x2.shuffle(a: v128, b: v128, s: LaneIdx4[2]) -> v128` | `v64x2.shuffle` |

## i64x2 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `i64x2.build(x: i64[2]) -> v128` | `i64x2.build` |
| `i64x2.splat(x: i64) -> v128` | `i64x2.splat` |
| `i64x2.extract_lane(a: v128, i: LaneIdx2) -> i64` | `i64x2.extractLane` |
| `i64x2.replace_lane(a: v128, i: LaneIdx2, x: i64) -> v128` | `i64x2.replaceLane` |
| `i64x2.add(a: v128, b: v128) -> v128` | `i64x2.add` |
| `i64x2.sub(a: v128, b: v128) -> v128` | `i64x2.sub` |
| `i64x2.mul(a: v128, b: v128) -> v128` | `i64x2.mul` |
| `i64x2.neg(a: v128) -> v128` | `i64x2.neg` |
| `i64x2.shl(a: v128, y: i32) -> v128` | `i64x2.shiftLeftByScalar` |
| `i64x2.shr_s(a: v128, y: i32) -> v128` | `s64x2.shiftRightByScalar` |
| `i64x2.shr_u(a: v128, y: i32) -> v128` | `u64x2.shiftRightByScalar` |
| `i64x2.eq(a: v128, b: v128) -> b64x2` | `i64x2.equal` |
| `i64x2.ne(a: v128, b: v128) -> b64x2` | `i64x2.notEqual` |
| `i64x2.lt_s(a: v128, b: v128) -> b64x2` | `s64x2.lessThan` |
| `i64x2.lt_u(a: v128, b: v128) -> b64x2` | `u64x2.lessThan` |
| `i64x2.le_s(a: v128, b: v128) -> b64x2` | `s64x2.lessThanOrEqual` |
| `i64x2.le_u(a: v128, b: v128) -> b64x2` | `u64x2.lessThanOrEqual` |
| `i64x2.gt_s(a: v128, b: v128) -> b64x2` | `s64x2.greaterThan` |
| `i64x2.gt_u(a: v128, b: v128) -> b64x2` | `u64x2.greaterThan` |
| `i64x2.ge_s(a: v128, b: v128) -> b64x2` | `s64x2.greaterThanOrEqual` |
| `i64x2.ge_u(a: v128, b: v128) -> b64x2` | `u64x2.greaterThanOrEqual` |
| `i64x2.trunc_s/f64x2(a: v128) -> v128` | `s64x2.fromFloat` |
| `i64x2.trunc_u/f64x2(a: v128) -> v128` | `u64x2.fromFloat` |

## f64x2 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `f64x2.build(x: f64[2]) -> v128` | `f64x2.build` |
| `f64x2.splat(x: f64) -> v128` | `f64x2.splat` |
| `f64x2.extract_lane(a: v128, i: LaneIdx2) -> f64` | `f64x2.extractLane` |
| `f64x2.replace_lane(a: v128, i: LaneIdx2, x: f64) -> v128` | `f64x2.replaceLane` |
| `f64x2.add(a: v128, b: v128) -> v128` | `f64x2.add` |
| `f64x2.sub(a: v128, b: v128) -> v128` | `f64x2.sub` |
| `f64x2.mul(a: v128, b: v128) -> v128` | `f64x2.mul` |
| `f64x2.neg(a: v128) -> v128` | `f64x2.neg` |
| `f64x2.eq(a: v128, b: v128) -> b64x2` | `f64x2.equal` |
| `f64x2.ne(a: v128, b: v128) -> b64x2` | `f64x2.notEqual` |
| `f64x2.lt(a: v128, b: v128) -> b64x2` | `f64x2.lessThan` |
| `f64x2.le(a: v128, b: v128) -> b64x2` | `f64x2.lessThanOrEqual` |
| `f64x2.gt(a: v128, b: v128) -> b64x2` | `f64x2.greaterThan` |
| `f64x2.ge(a: v128, b: v128) -> b64x2` | `f64x2.greaterThanOrEqual` |
| `f64x2.abs(a: v128) -> v128` | `f64x2.abs` |
| `f64x2.min(a: v128, b: v128) -> v128` | `f64x2.min` |
| `f64x2.max(a: v128, b: v128) -> v128` | `f64x2.max` |
| `f64x2.div(a: v128, b: v128) -> v128` | `f64x2.div` |
| `f64x2.sqrt(a: v128) -> v128` | `f64x2.sqrt` |
| `f64x2.convert_s/i64x2(a: v128) -> v128` | `f64x2.fromSignedInt` |
| `f64x2.convert_u/i64x2(a: v128) -> v128` | `f64x2.fromUnsignedInt` |

## b8x16 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `b8x16.build(x: i32[16]) -> b8x16` | `b8x16.build` |
| `b8x16.splat(x: i32) -> b8x16` | `b8x16.splat` |
| `b8x16.extract_lane(a: b8x16, i: LaneIdx16) -> i32` | `b8x16.extractLane` |
| `b8x16.replace_lane(a: b8x16, i: LaneIdx16, x: i32) -> b8x16` | `b8x16.replaceLane` |
| `b8x16.and(a: b8x16, b: b8x16) -> b8x16` | `b8x16.and` |
| `b8x16.or(a: b8x16, b: b8x16) -> b8x16` | `b8x16.or` |
| `b8x16.xor(a: b8x16, b: b8x16) -> b8x16` | `b8x16.xor` |
| `b8x16.not(a: b8x16) -> b8x16` | `b8x16.not` |
| `b8x16.any_true(a: b8x16) -> i32` | `b8x16.anyTrue` |
| `b8x16.all_true(a: b8x16) -> i32` | `b8x16.allTrue` |
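
`any_true` and `all_true` reduce a boolean vector to a scalar `i32`, so a loop can test all 16 lanes with a single branch. A sketch that checks a vector for a zero byte:

```wat
;; Returns nonzero iff at least one of the 16 bytes equals 0.
(func $contains_zero (param $v v128) (result i32)
  (b8x16.any_true
    (i8x16.eq (local.get $v) (i8x16.splat (i32.const 0)))))
```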

## b16x8 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `b16x8.build(x: i32[8]) -> b16x8` | `b16x8.build` |
| `b16x8.splat(x: i32) -> b16x8` | `b16x8.splat` |
| `b16x8.extract_lane(a: b16x8, i: LaneIdx8) -> i32` | `b16x8.extractLane` |
| `b16x8.replace_lane(a: b16x8, i: LaneIdx8, x: i32) -> b16x8` | `b16x8.replaceLane` |
| `b16x8.and(a: b16x8, b: b16x8) -> b16x8` | `b16x8.and` |
| `b16x8.or(a: b16x8, b: b16x8) -> b16x8` | `b16x8.or` |
| `b16x8.xor(a: b16x8, b: b16x8) -> b16x8` | `b16x8.xor` |
| `b16x8.not(a: b16x8) -> b16x8` | `b16x8.not` |
| `b16x8.any_true(a: b16x8) -> i32` | `b16x8.anyTrue` |
| `b16x8.all_true(a: b16x8) -> i32` | `b16x8.allTrue` |

## b32x4 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `b32x4.build(x: i32[4]) -> b32x4` | `b32x4.build` |
| `b32x4.splat(x: i32) -> b32x4` | `b32x4.splat` |
| `b32x4.extract_lane(a: b32x4, i: LaneIdx4) -> i32` | `b32x4.extractLane` |
| `b32x4.replace_lane(a: b32x4, i: LaneIdx4, x: i32) -> b32x4` | `b32x4.replaceLane` |
| `b32x4.and(a: b32x4, b: b32x4) -> b32x4` | `b32x4.and` |
| `b32x4.or(a: b32x4, b: b32x4) -> b32x4` | `b32x4.or` |
| `b32x4.xor(a: b32x4, b: b32x4) -> b32x4` | `b32x4.xor` |
| `b32x4.not(a: b32x4) -> b32x4` | `b32x4.not` |
| `b32x4.any_true(a: b32x4) -> i32` | `b32x4.anyTrue` |
| `b32x4.all_true(a: b32x4) -> i32` | `b32x4.allTrue` |
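
Comparison results feed directly into `select`, which is how lane-wise operations the instruction set lacks, such as an integer maximum, are composed. A sketch:

```wat
;; max_s(a, b): where a > b the mask lane is true and select takes a.
(func $max_s (param $a v128) (param $b v128) (result v128)
  (v32x4.select
    (i32x4.gt_s (local.get $a) (local.get $b))
    (local.get $a)
    (local.get $b)))
```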

## b64x2 operations

| WebAssembly | Portable SIMD |
|:------------|:--------------|
| `b64x2.build(x: i32[2]) -> b64x2` | `b64x2.build` |
| `b64x2.splat(x: i32) -> b64x2` | `b64x2.splat` |
| `b64x2.extract_lane(a: b64x2, i: LaneIdx2) -> i32` | `b64x2.extractLane` |
| `b64x2.replace_lane(a: b64x2, i: LaneIdx2, x: i32) -> b64x2` | `b64x2.replaceLane` |
| `b64x2.and(a: b64x2, b: b64x2) -> b64x2` | `b64x2.and` |
| `b64x2.or(a: b64x2, b: b64x2) -> b64x2` | `b64x2.or` |
| `b64x2.xor(a: b64x2, b: b64x2) -> b64x2` | `b64x2.xor` |
| `b64x2.not(a: b64x2) -> b64x2` | `b64x2.not` |
| `b64x2.any_true(a: b64x2) -> i32` | `b64x2.anyTrue` |
| `b64x2.all_true(a: b64x2) -> i32` | `b64x2.allTrue` |