-
Notifications
You must be signed in to change notification settings - Fork 1.1k
/
IR.h
1004 lines (796 loc) · 32.6 KB
/
IR.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#ifndef HALIDE_IR_H
#define HALIDE_IR_H
/** \file
* Subtypes for Halide expressions (\ref Halide::Expr) and statements (\ref Halide::Internal::Stmt)
*/
#include <string>
#include <vector>
#include "Buffer.h"
#include "Expr.h"
#include "FunctionPtr.h"
#include "LoopPartitioningDirective.h"
#include "ModulusRemainder.h"
#include "Parameter.h"
#include "PrefetchDirective.h"
#include "Reduction.h"
#include "Type.h"
namespace Halide {
namespace Internal {
class Function;
/** The actual IR nodes begin here. Remember that all the Expr
* nodes also have a public "type" property */
/** Cast a node from one type to another. Can't change vector widths. */
struct Cast : public ExprNode<Cast> {
Expr value;
static Expr make(Type t, Expr v);
static const IRNodeType _node_type = IRNodeType::Cast;
/** Check if the cast is equivalent to a reinterpret: both the source
 * and destination types are int or uint with the same bit width, so
 * the conversion cannot change any bits. */
bool is_reinterpret() const {
return (type.is_int_or_uint() &&
value.type().is_int_or_uint() &&
type.bits() == value.type().bits());
}
};
/** Reinterpret value as another type, without affecting any of the bits
 * (on little-endian systems).
 * NOTE(review): presumably the source and destination types must have the
 * same total number of bits — confirm that make() enforces this. */
struct Reinterpret : public ExprNode<Reinterpret> {
Expr value;
static Expr make(Type t, Expr v);
static const IRNodeType _node_type = IRNodeType::Reinterpret;
};
// Binary arithmetic nodes. Each stores its two operands and provides a
// static factory that constructs the corresponding Expr.
/** The sum of two expressions */
struct Add : public ExprNode<Add> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Add;
};
/** The difference of two expressions */
struct Sub : public ExprNode<Sub> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Sub;
};
/** The product of two expressions */
struct Mul : public ExprNode<Mul> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Mul;
};
/** The ratio of two expressions */
struct Div : public ExprNode<Div> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Div;
};
/** The remainder of a / b. Mostly equivalent to '%' in C, except that
 * the result here is always positive. For floats, this is equivalent
 * to calling fmod. */
struct Mod : public ExprNode<Mod> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Mod;
};
/** The lesser of two values. */
struct Min : public ExprNode<Min> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Min;
};
/** The greater of two values. */
struct Max : public ExprNode<Max> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Max;
};
// Comparison nodes. Each stores its two operands; the factory constructs
// the corresponding boolean-valued Expr.
/** Is the first expression equal to the second */
struct EQ : public ExprNode<EQ> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::EQ;
};
/** Is the first expression not equal to the second */
struct NE : public ExprNode<NE> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::NE;
};
/** Is the first expression less than the second. */
struct LT : public ExprNode<LT> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::LT;
};
/** Is the first expression less than or equal to the second. */
struct LE : public ExprNode<LE> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::LE;
};
/** Is the first expression greater than the second. */
struct GT : public ExprNode<GT> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::GT;
};
/** Is the first expression greater than or equal to the second. */
struct GE : public ExprNode<GE> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::GE;
};
/** Logical and - are both expressions true */
struct And : public ExprNode<And> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::And;
};
/** Logical or - is at least one of the expressions true */
struct Or : public ExprNode<Or> {
Expr a, b;
static Expr make(Expr a, Expr b);
static const IRNodeType _node_type = IRNodeType::Or;
};
/** Logical not - true if the expression is false */
struct Not : public ExprNode<Not> {
Expr a;
static Expr make(Expr a);
static const IRNodeType _node_type = IRNodeType::Not;
};
/** A ternary operator. Evaluates 'true_value' and 'false_value',
 * then selects between them based on 'condition'. Equivalent to
 * the ternary operator in C. */
struct Select : public ExprNode<Select> {
Expr condition, true_value, false_value;
static Expr make(Expr condition, Expr true_value, Expr false_value);
static const IRNodeType _node_type = IRNodeType::Select;
};
/** Load a value from a named symbol if predicate is true. The buffer
 * is treated as an array of the 'type' of this Load node. That is,
 * the buffer has no inherent type. The name may be the name of an
 * enclosing allocation, an input or output buffer, or any other
 * symbol of type Handle(). */
struct Load : public ExprNode<Load> {
std::string name;
Expr predicate, index;
// If it's a load from an image argument or compiled-in constant
// image, this will point to that
Buffer<> image;
// If it's a load from an image parameter, this points to that
Parameter param;
// The alignment of the index. If the index is a vector, this is
// the alignment of the first lane.
ModulusRemainder alignment;
// Note: the factory takes 'index' before 'predicate', which differs
// from the field declaration order above.
static Expr make(Type type, const std::string &name,
Expr index, Buffer<> image,
Parameter param,
Expr predicate,
ModulusRemainder alignment);
static const IRNodeType _node_type = IRNodeType::Load;
};
/** A linear ramp vector node. This is vector with 'lanes' elements,
 * where element i is 'base' + i*'stride'. This is a convenient way to
 * pass around vectors without busting them up into individual
 * elements. E.g. a dense vector load from a buffer can use a ramp
 * node with stride 1 as the index. */
struct Ramp : public ExprNode<Ramp> {
Expr base, stride;
int lanes;  // number of elements in the resulting vector
static Expr make(Expr base, Expr stride, int lanes);
static const IRNodeType _node_type = IRNodeType::Ramp;
};
/** A vector with 'lanes' elements, in which every element is
 * 'value'. This is a special case of the ramp node above, in which
 * the stride is zero. */
struct Broadcast : public ExprNode<Broadcast> {
Expr value;
int lanes;  // number of copies of 'value' in the resulting vector
static Expr make(Expr value, int lanes);
static const IRNodeType _node_type = IRNodeType::Broadcast;
};
/** A let expression, like you might find in a functional
 * language. Within the expression \ref Let::body, instances of the Var
 * node \ref Let::name refer to \ref Let::value. */
struct Let : public ExprNode<Let> {
std::string name;
Expr value, body;  // the whole Let evaluates to 'body' with 'name' bound to 'value'
static Expr make(const std::string &name, Expr value, Expr body);
static const IRNodeType _node_type = IRNodeType::Let;
};
/** The statement form of a let node. Within the statement 'body',
 * instances of the Var named 'name' refer to 'value' */
struct LetStmt : public StmtNode<LetStmt> {
std::string name;
Expr value;
Stmt body;
static Stmt make(const std::string &name, Expr value, Stmt body);
static const IRNodeType _node_type = IRNodeType::LetStmt;
};
/** If the 'condition' is false, then evaluate and return the message,
 * which should be a call to an error function. */
struct AssertStmt : public StmtNode<AssertStmt> {
// if condition then val else error out with message
Expr condition;
Expr message;  // typically a call to an error-reporting function
static Stmt make(Expr condition, Expr message);
static const IRNodeType _node_type = IRNodeType::AssertStmt;
};
/** This node is a helpful annotation to do with permissions. If 'is_producer' is
 * set to true, this represents a producer node which may also contain updates;
 * otherwise, this represents a consumer node. If the producer node contains
 * updates, the body of the node will be a block of 'produce' and 'update'
 * in that order. In a producer node, the access is read-write only (or write
 * only if it doesn't have updates). In a consumer node, the access is read-only.
 * None of this is actually enforced, the node is purely for informative purposes
 * to help out our analysis during lowering. For every unique ProducerConsumer,
 * there is an associated Realize node with the same name that creates the buffer
 * being read from or written to in the body of the ProducerConsumer.
 */
struct ProducerConsumer : public StmtNode<ProducerConsumer> {
std::string name;
bool is_producer;
Stmt body;
static Stmt make(const std::string &name, bool is_producer, Stmt body);
// Convenience factories that fix is_producer to true / false respectively.
static Stmt make_produce(const std::string &name, Stmt body);
static Stmt make_consume(const std::string &name, Stmt body);
static const IRNodeType _node_type = IRNodeType::ProducerConsumer;
};
/** Store a 'value' to the buffer called 'name' at a given 'index' if
 * 'predicate' is true. The buffer is interpreted as an array of the
 * same type as 'value'. The name may be the name of an enclosing
 * Allocate node, an output buffer, or any other symbol of type
 * Handle(). */
struct Store : public StmtNode<Store> {
std::string name;
Expr predicate, value, index;
// If it's a store to an output buffer, then this parameter points to it.
Parameter param;
// The alignment of the index. If the index is a vector, this is
// the alignment of the first lane.
ModulusRemainder alignment;
// Note: the factory takes 'value' and 'index' before 'predicate',
// which differs from the field declaration order above.
static Stmt make(const std::string &name, Expr value, Expr index,
Parameter param, Expr predicate, ModulusRemainder alignment);
static const IRNodeType _node_type = IRNodeType::Store;
};
/** This defines the value of a function at a multi-dimensional
 * location. You should think of it as a store to a multi-dimensional
 * array. It gets lowered to a conventional Store node. The name must
 * correspond to an output buffer or the name of an enclosing Realize
 * node. */
struct Provide : public StmtNode<Provide> {
std::string name;
std::vector<Expr> values;  // one value per tuple element of the function
std::vector<Expr> args;    // the multi-dimensional site being defined
Expr predicate;
static Stmt make(const std::string &name, const std::vector<Expr> &values, const std::vector<Expr> &args, const Expr &predicate);
static const IRNodeType _node_type = IRNodeType::Provide;
};
/** Allocate a scratch area with the given name, type, and
 * size. The buffer lives for at most the duration of the body
 * statement, within which it may or may not be freed explicitly with
 * a Free node with a matching name. Allocation only occurs if the
 * condition evaluates to true. Within the body of the allocation,
 * defines a symbol with the given name and the type Handle(). */
struct Allocate : public StmtNode<Allocate> {
std::string name;
Type type;
MemoryType memory_type;
std::vector<Expr> extents;
// A boolean condition that determines if the allocation needs to be made at all.
Expr condition;
// These override the code generator dependent malloc and free
// equivalents if provided. If the new_expr succeeds, that is, it
// returns non-nullptr, the function named by free_function is
// guaranteed to be called. The free function signature must match
// that of the code generator dependent free (typically
// halide_free). If free_function is left empty, code generator
// default will be called.
Expr new_expr;
std::string free_function;
// Extra padding elements to allow for overreads. Elements in the padding
// have undetermined values, but are guaranteed safe to load.
int padding;
Stmt body;
static Stmt make(const std::string &name, Type type, MemoryType memory_type,
const std::vector<Expr> &extents,
Expr condition, Stmt body,
Expr new_expr = Expr(), const std::string &free_function = std::string(), int padding = 0);
/** A routine to check if the extents are all constants, and if so verify
 * the total size is less than 2^31 - 1. If the result is constant, but
 * overflows, this routine asserts. This returns 0 if the extents are
 * not all constants; otherwise, it returns the total constant allocation
 * size. Does not include any padding bytes. */
static int32_t constant_allocation_size(const std::vector<Expr> &extents, const std::string &name);
int32_t constant_allocation_size() const;
static const IRNodeType _node_type = IRNodeType::Allocate;
};
/** Free the resources associated with the given buffer. The name must
 * match an enclosing Allocate node. */
struct Free : public StmtNode<Free> {
std::string name;
static Stmt make(const std::string &name);
static const IRNodeType _node_type = IRNodeType::Free;
};
/** Allocate a multi-dimensional buffer of the given type and
 * size. Create some scratch memory that will back the function 'name'
 * over the range specified in 'bounds'. The bounds are a vector of
 * (min, extent) pairs for each dimension. Allocation only occurs if
 * the condition evaluates to true.
 */
struct Realize : public StmtNode<Realize> {
std::string name;
std::vector<Type> types;  // one type per tuple element of the function
MemoryType memory_type;
Region bounds;
Expr condition;
Stmt body;
static Stmt make(const std::string &name, const std::vector<Type> &types, MemoryType memory_type, const Region &bounds, Expr condition, Stmt body);
static const IRNodeType _node_type = IRNodeType::Realize;
};
/** A sequence of statements to be executed in-order. 'first' is never
 * a Block, so this can be treated as a linked list. */
struct Block : public StmtNode<Block> {
Stmt first, rest;
static Stmt make(Stmt first, Stmt rest);
/** Construct zero or more Blocks to invoke a list of statements in order.
 * This method may not return a Block statement if stmts.size() <= 1. */
static Stmt make(const std::vector<Stmt> &stmts);
static const IRNodeType _node_type = IRNodeType::Block;
};
/** A pair of statements executed concurrently. Both statements are
 * joined before the Stmt ends. This is the parallel equivalent to
 * Block. */
struct Fork : public StmtNode<Fork> {
Stmt first, rest;
static Stmt make(Stmt first, Stmt rest);
static const IRNodeType _node_type = IRNodeType::Fork;
};
/** An if-then-else block. 'else' may be undefined (the factory
 * defaults else_case to an undefined Stmt). */
struct IfThenElse : public StmtNode<IfThenElse> {
Expr condition;
Stmt then_case, else_case;
static Stmt make(Expr condition, Stmt then_case, Stmt else_case = Stmt());
static const IRNodeType _node_type = IRNodeType::IfThenElse;
};
/** Evaluate and discard an expression, presumably because it has some side-effect. */
struct Evaluate : public StmtNode<Evaluate> {
Expr value;
static Stmt make(Expr v);
static const IRNodeType _node_type = IRNodeType::Evaluate;
};
/** A function call. This can represent a call to some extern function
 * (like sin), but it's also our multi-dimensional version of a Load,
 * so it can be a load from an input image, or a call to another
 * halide function. These two types of call nodes don't survive all
 * the way down to code generation - the lowering process converts
 * them to Load nodes. */
struct Call : public ExprNode<Call> {
std::string name;
std::vector<Expr> args;
typedef enum { Image, ///< A load from an input image
Extern, ///< A call to an external C-ABI function, possibly with side-effects
ExternCPlusPlus, ///< A call to an external C++-ABI function, possibly with side-effects
PureExtern, ///< A call to a guaranteed-side-effect-free external function
Halide, ///< A call to a Func
Intrinsic, ///< A possibly-side-effecty compiler intrinsic, which has special handling during codegen
PureIntrinsic ///< A side-effect-free version of the above.
} CallType;
CallType call_type;
// Halide uses calls internally to represent certain operations
// (instead of IR nodes). These are matched by name. Note that
// these are deliberately char* (rather than std::string) so that
// they can be referenced at static-initialization time without
// risking ambiguous initialization order; we use a typedef to simplify
// declaration.
typedef const char *const ConstString;
// enums for various well-known intrinsics. (It is not *required* that all
// intrinsics have an enum entry here, but as a matter of style, it is recommended.)
// Note that these are only used in the API; inside the node, they are translated
// into a name. (To recover the name, call get_intrinsic_name().)
//
// Please keep this list sorted alphabetically; the specific enum values
// are *not* guaranteed to be stable across time.
enum IntrinsicOp {
abs,
absd,
add_image_checks_marker,
alloca,
bitwise_and,
bitwise_not,
bitwise_or,
bitwise_xor,
bool_to_mask,
// Bundle multiple exprs together temporarily for analysis (e.g. CSE)
bundle,
call_cached_indirect_function,
cast_mask,
// Concatenate bits of the args, with least significant bits as the
// first arg (i.e. little-endian)
concat_bits,
count_leading_zeros,
count_trailing_zeros,
debug_to_file,
declare_box_touched,
div_round_to_zero,
dynamic_shuffle,
// Extract some contiguous slice of bits from the argument starting at
// the nth bit, counting from the least significant bit, with the number
// of bits determined by the return type.
extract_bits,
extract_mask_element,
get_user_context,
gpu_thread_barrier,
halving_add,
halving_sub,
hvx_gather,
hvx_scatter,
hvx_scatter_acc,
hvx_scatter_release,
if_then_else,
if_then_else_mask,
image_load,
image_store,
lerp,
likely,
likely_if_innermost,
load_typed_struct_member,
make_struct,
memoize_expr,
mod_round_to_zero,
mul_shift_right,
mux,
popcount,
prefetch,
profiling_enable_instance_marker,
promise_clamped,
random,
register_destructor,
require,
require_mask,
return_second,
rewrite_buffer,
// Round a floating point value to nearest integer, with ties going to even
round,
rounding_halving_add,
rounding_mul_shift_right,
rounding_shift_left,
rounding_shift_right,
saturating_add,
saturating_sub,
saturating_cast,
scatter_gather,
select_mask,
shift_left,
shift_right,
signed_integer_overflow,
size_of_halide_buffer_t,
// Marks the point in lowering where the outermost skip stages checks
// should be introduced.
skip_stages_marker,
// Takes a realization name and a loop variable. Declares that values of
// the realization that were stored on earlier loop iterations of the
// given loop are potentially loaded in this loop iteration somewhere
// after this point. Must occur inside a Realize node and For node of
// the given names but outside any corresponding ProducerConsumer
// nodes. Communicates to storage folding that sliding window took
// place.
sliding_window_marker,
// Compute (arg[0] + arg[1]) / 2, assuming arg[0] < arg[1].
sorted_avg,
strict_float,
stringify,
target_arch_is,
target_bits,
target_has_feature,
target_natural_vector_size,
target_os_is,
undef,
unreachable,
unsafe_promise_clamped,
// One-sided variants of widening_add, widening_mul, and widening_sub.
// arg[0] + widen(arg[1])
widen_right_add,
// arg[0] * widen(arg[1])
widen_right_mul,
// arg[0] - widen(arg[1])
widen_right_sub,
widening_add,
widening_mul,
widening_shift_left,
widening_shift_right,
widening_sub,
get_runtime_vscale,
IntrinsicOpCount // Sentinel: keep last.
};
static const char *get_intrinsic_name(IntrinsicOp op);
// We also declare some symbolic names for some of the runtime
// functions that we want to construct Call nodes to here to avoid
// magic string constants and the potential risk of typos.
HALIDE_EXPORT static ConstString
buffer_get_dimensions,
buffer_get_min,
buffer_get_extent,
buffer_get_stride,
buffer_get_max,
buffer_get_host,
buffer_get_device,
buffer_get_device_interface,
buffer_get_shape,
buffer_get_host_dirty,
buffer_get_device_dirty,
buffer_get_type,
buffer_set_host_dirty,
buffer_set_device_dirty,
buffer_is_bounds_query,
buffer_init,
buffer_init_from_buffer,
buffer_crop,
buffer_set_bounds,
trace;
// If it's a call to another halide function, this call node holds
// a possibly-weak reference to that function.
FunctionPtr func;
// If that function has multiple values, which value does this
// call node refer to?
int value_index;
// If it's a call to an image, this call node holds a
// pointer to that image's buffer
Buffer<> image;
// If it's a call to an image parameter, this call node holds a
// pointer to that
Parameter param;
static Expr make(Type type, IntrinsicOp op, const std::vector<Expr> &args, CallType call_type,
FunctionPtr func = FunctionPtr(), int value_index = 0,
const Buffer<> &image = Buffer<>(), Parameter param = Parameter());
static Expr make(Type type, const std::string &name, const std::vector<Expr> &args, CallType call_type,
FunctionPtr func = FunctionPtr(), int value_index = 0,
Buffer<> image = Buffer<>(), Parameter param = Parameter());
/** Convenience constructor for calls to other halide functions */
static Expr make(const Function &func, const std::vector<Expr> &args, int idx = 0);
/** Convenience constructor for loads from concrete images */
static Expr make(const Buffer<> &image, const std::vector<Expr> &args) {
return make(image.type(), image.name(), args, Image, FunctionPtr(), 0, image, Parameter());
}
/** Convenience constructor for loads from image parameters */
static Expr make(const Parameter &param, const std::vector<Expr> &args) {
return make(param.type(), param.name(), args, Image, FunctionPtr(), 0, Buffer<>(), param);
}
/** Check if a call node is pure within a pipeline, meaning that
 * the same args always give the same result, and the calls can be
 * reordered, duplicated, unified, etc without changing the
 * meaning of anything. Not transitive - doesn't guarantee the
 * args themselves are pure. An example of a pure Call node is
 * sqrt. If in doubt, don't mark a Call node as pure. */
bool is_pure() const {
return (call_type == PureExtern ||
call_type == Image ||
call_type == PureIntrinsic);
}
bool is_intrinsic() const {
return (call_type == Intrinsic ||
call_type == PureIntrinsic);
}
bool is_intrinsic(IntrinsicOp op) const {
return is_intrinsic() && this->name == get_intrinsic_name(op);
}
bool is_intrinsic(std::initializer_list<IntrinsicOp> intrinsics) const {
for (IntrinsicOp i : intrinsics) {
if (is_intrinsic(i)) {
return true;
}
}
return false;
}
bool is_tag() const {
return is_intrinsic({Call::likely, Call::likely_if_innermost, Call::strict_float});
}
/** Returns a pointer to a call node if the expression is a call to
 * one of the requested intrinsics. */
static const Call *as_intrinsic(const Expr &e, std::initializer_list<IntrinsicOp> intrinsics) {
if (const Call *c = e.as<Call>()) {
for (IntrinsicOp i : intrinsics) {
if (c->is_intrinsic(i)) {
return c;
}
}
}
return nullptr;
}
static const Call *as_tag(const Expr &e) {
return as_intrinsic(e, {Call::likely, Call::likely_if_innermost, Call::strict_float});
}
bool is_extern() const {
return (call_type == Extern ||
call_type == ExternCPlusPlus ||
call_type == PureExtern);
}
static const IRNodeType _node_type = IRNodeType::Call;
};
/** A named variable. Might be a loop variable, function argument,
 * parameter, reduction variable, or something defined by a Let or
 * LetStmt node. */
struct Variable : public ExprNode<Variable> {
std::string name;
/** References to scalar parameters, or to the dimensions of buffer
 * parameters hang onto those expressions. */
Parameter param;
/** References to properties of literal image parameters. */
Buffer<> image;
/** Reduction variables hang onto their domains */
ReductionDomain reduction_domain;
// The convenience overloads below all delegate to the 5-argument
// make() declared last, filling the unused slots with
// default-constructed values.
static Expr make(Type type, const std::string &name) {
return make(type, name, Buffer<>(), Parameter(), ReductionDomain());
}
static Expr make(Type type, const std::string &name, Parameter param) {
return make(type, name, Buffer<>(), std::move(param), ReductionDomain());
}
static Expr make(Type type, const std::string &name, const Buffer<> &image) {
return make(type, name, image, Parameter(), ReductionDomain());
}
static Expr make(Type type, const std::string &name, ReductionDomain reduction_domain) {
return make(type, name, Buffer<>(), Parameter(), std::move(reduction_domain));
}
static Expr make(Type type, const std::string &name, Buffer<> image,
Parameter param, ReductionDomain reduction_domain);
static const IRNodeType _node_type = IRNodeType::Variable;
};
/** A for loop. Execute the 'body' statement for all values of the
 * variable 'name' from 'min' to 'min + extent'. There are four
 * types of For nodes. A 'Serial' for loop is a conventional
 * one. In a 'Parallel' for loop, each iteration of the loop
 * happens in parallel or in some unspecified order. In a
 * 'Vectorized' for loop, each iteration maps to one SIMD lane,
 * and the whole loop is executed in one shot. For this case,
 * 'extent' must be some small integer constant (probably 4, 8, or
 * 16). An 'Unrolled' for loop compiles to a completely unrolled
 * version of the loop. Each iteration becomes its own
 * statement. Again in this case, 'extent' should be a small
 * integer constant. */
struct For : public StmtNode<For> {
std::string name;
Expr min, extent;
ForType for_type;
DeviceAPI device_api;
Stmt body;
Partition partition_policy;
static Stmt make(const std::string &name,
Expr min, Expr extent,
ForType for_type, Partition partition_policy,
DeviceAPI device_api,
Stmt body);
// Thin wrappers that classify for_type via the free functions of
// the same names in Halide::Internal.
bool is_unordered_parallel() const {
return Halide::Internal::is_unordered_parallel(for_type);
}
bool is_parallel() const {
return Halide::Internal::is_parallel(for_type);
}
static const IRNodeType _node_type = IRNodeType::For;
};
/** NOTE(review): undocumented upstream. From the fields, this appears to
 * acquire 'count' units of 'semaphore' before executing 'body' — confirm
 * the exact semantics (and when the units are released) against the
 * lowering/codegen that consumes this node. */
struct Acquire : public StmtNode<Acquire> {
Expr semaphore;
Expr count;
Stmt body;
static Stmt make(Expr semaphore, Expr count, Stmt body);
static const IRNodeType _node_type = IRNodeType::Acquire;
};
/** Construct a new vector by taking elements from another sequence of
 * vectors. */
struct Shuffle : public ExprNode<Shuffle> {
std::vector<Expr> vectors;
/** Indices indicating which vector element to place into the
 * result. The elements are numbered by their position in the
 * concatenation of the vector arguments. */
std::vector<int> indices;
static Expr make(const std::vector<Expr> &vectors,
const std::vector<int> &indices);
/** Convenience constructor for making a shuffle representing an
 * interleaving of vectors of the same length. */
static Expr make_interleave(const std::vector<Expr> &vectors);
/** Convenience constructor for making a shuffle representing a
 * concatenation of the vectors. */
static Expr make_concat(const std::vector<Expr> &vectors);
/** Convenience constructor for making a shuffle representing a
 * broadcast of a vector. */
static Expr make_broadcast(Expr vector, int factor);
/** Convenience constructor for making a shuffle representing a
 * contiguous subset of a vector. */
static Expr make_slice(Expr vector, int begin, int stride, int size);
/** Convenience constructor for making a shuffle representing
 * extracting a single element. */
static Expr make_extract_element(Expr vector, int i);
/** Check if this shuffle is an interleaving of the vector
 * arguments. */
bool is_interleave() const;
/** Check if this shuffle can be represented as a repeating pattern that
 * repeats the same shuffle of the single input vector some number of times.
 * For example: 0, 3, 1, 1, 0, 3, 1, 1, ....., 0, 3, 1, 1
 */
bool is_broadcast() const;
int broadcast_factor() const;
/** Check if this shuffle is a concatenation of the vector
 * arguments. */
bool is_concat() const;
/** Check if this shuffle is a contiguous strict subset of the
 * vector arguments, and if so, the offset and stride of the
 * slice. The two accessors below are only meaningful when
 * is_slice() returns true. */
///@{
bool is_slice() const;
int slice_begin() const {
return indices[0];
}
int slice_stride() const {
// A single-element slice has no second index; treat its stride as 1.
return indices.size() >= 2 ? indices[1] - indices[0] : 1;
}
///@}
/** Check if this shuffle is extracting a scalar from the vector
 * arguments. */
bool is_extract_element() const;
static const IRNodeType _node_type = IRNodeType::Shuffle;
};
/** Represent a multi-dimensional region of a Func or an ImageParam that
 * needs to be prefetched. */
struct Prefetch : public StmtNode<Prefetch> {
std::string name;
std::vector<Type> types;  // one type per tuple element of the prefetched Func
Region bounds;
PrefetchDirective prefetch;
Expr condition;
Stmt body;
static Stmt make(const std::string &name, const std::vector<Type> &types,
const Region &bounds,
const PrefetchDirective &prefetch,
Expr condition, Stmt body);
static const IRNodeType _node_type = IRNodeType::Prefetch;
};
/**
 * Represents a location where storage will be hoisted to for a Func / Realize
 * node with a given name.
 *
 */
struct HoistedStorage : public StmtNode<HoistedStorage> {
std::string name;  // matches the name of the corresponding Func / Realize node
Stmt body;
static Stmt make(const std::string &name,
Stmt body);
static const IRNodeType _node_type = IRNodeType::HoistedStorage;
};
/** Lock all the Store nodes in the body statement.
 * Typically the lock is implemented by an atomic operation
 * (e.g. atomic add or atomic compare-and-swap).
 * However, if necessary, the node can access a mutex buffer through
 * mutex_name and mutex_args, by lowering this node into
 * calls to acquire and release the lock. */
struct Atomic : public StmtNode<Atomic> {
std::string producer_name;
std::string mutex_name; // empty string if not using mutex
Stmt body;
static Stmt make(const std::string &producer_name,
const std::string &mutex_name,
Stmt body);
static const IRNodeType _node_type = IRNodeType::Atomic;
};
/** Horizontally reduce a vector to a scalar or narrower vector using
 * the given commutative and associative binary operator. The reduction
 * factor is dictated by the number of lanes in the input and output
 * types. Groups of adjacent lanes are combined. The number of lanes
 * in the output type must be a divisor of the number of lanes of the
 * input type. */
struct VectorReduce : public ExprNode<VectorReduce> {
// 99.9% of the time people will use this for horizontal addition,
// but these are all of our commutative and associative primitive
// operators.
typedef enum {
Add,
SaturatingAdd,
Mul,
Min,
Max,
And,
Or,
} Operator;
Expr value;
Operator op;
static Expr make(Operator op, Expr vec, int lanes);
static const IRNodeType _node_type = IRNodeType::VectorReduce;
};