From 06bda6b3f1d881ea1570277f0ca77f173bd8e498 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 14 Oct 2024 22:17:22 -0400 Subject: [PATCH 01/22] Add pcap parsing script --- frontends/queues/pcaps/parse_pcap | 183 ++++++++++++++++++++++++++++++ 1 file changed, 183 insertions(+) create mode 100755 frontends/queues/pcaps/parse_pcap diff --git a/frontends/queues/pcaps/parse_pcap b/frontends/queues/pcaps/parse_pcap new file mode 100755 index 000000000..f83fca34e --- /dev/null +++ b/frontends/queues/pcaps/parse_pcap @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# ========================================================================= +# Usage: ./parse_pcap [Option]... +# ========================================================================= +# Arguments: +# PCAP Packet Capture to parse +# JSON JSON mapping MAC addresses to integer flows +# +# Options: +# -h --help Display this message +# +# --num-packets N No. packets in PCAP to parse +# [default: 1000] +# +# --clock-period C Clock period of hardware in ns +# [default: 7] +# +# --line-rate L Target line rate for pop frequency calculation in Gbit/s +# [default: 1] +# +# --pop-tick P Time between consecutive pops in ns +# [default: calulated to achieve line rate] +# +# Example: +# ./parse_pcap example.pcap addr2flow.json --num-packets 500 + +import sys +import random +import json +import dpkt +import argparse + +CMD_PUSH = 1 +CMD_POP = 0 +DONTCARE = 0xFFFF_FFFF + +CLOCK_PERIOD = 7 # in ns +NUM_PKTS = 500 +POP_TICK = None # in ns +LINE_RATE = 1 # in Gbit/s + + +def parse_pcap(pcap_file, addr2flow_json): + global POP_TICK + + pcap = dpkt.pcap.Reader(pcap_file) + addr2flow = json.load(addr2flow_json) + + offset = None + total_size = 0 + for i, (ts, buf) in zip(range(NUM_PKTS), pcap): + if i == 0: + offset = ts + total_size += len(buf) + + if POP_TICK is None: + POP_TICK = int((total_size * 8) // (LINE_RATE * NUM_PKTS)) + + def mac_addr(addr): + return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) + + 
pcap_file.seek(0) + pcap = dpkt.pcap.Reader(pcap_file) + out = {"commands": [], "arrival_cycles": [], "flows": []} + prev_time = 0 + for i, (ts, buf) in zip(range(NUM_PKTS), pcap): + eth = dpkt.ethernet.Ethernet(buf) + flow = addr2flow[mac_addr(eth.src)] + time = (ts - offset) * 10**9 + cycle = int(time // CLOCK_PERIOD) + + num_pops = int((time - prev_time) // POP_TICK) + out["commands"].extend([CMD_POP] * num_pops) + out["arrival_cycles"].extend([DONTCARE] * num_pops) + out["flows"].extend([DONTCARE] * num_pops) + + out["commands"].append(CMD_PUSH) + out["arrival_cycles"].append(cycle) + out["flows"].append(flow) + + prev_time = time + return out + + +def gen_json(data): + commands = data["commands"] + arrival_cycles = data["arrival_cycles"] + flows = data["flows"] + values = [random.randint(0, 400) for _ in range(len(commands))] + departure_cycles = [0] * len(commands) + + def format_gen(width): + return {"is_signed": False, "numeric_type": "bitnum", "width": width} + + commands = {"commands": {"data": commands, "format": format_gen(1)}} + arrival_cycles = { + "arrival_cycles": {"data": arrival_cycles, "format": format_gen(32)} + } + flows = {"flows": {"data": flows, "format": format_gen(32)}} + values = {"values": {"data": values, "format": format_gen(32)}} + departure_cycles = { + "departure_cycles": {"data": departure_cycles, "format": format_gen(32)} + } + + return json.dumps( + commands | arrival_cycles | flows | values | departure_cycles, indent=2 + ) + + +class ArgumentParserWithCustomError(argparse.ArgumentParser): + def __init__(self): + super().__init__(add_help=False) + + def error(self, msg=None): + if msg: + print("ERROR: %s" % msg) + file = open(sys.argv[0]) + for i, line in enumerate(file): + if i == 0: + continue + elif line[0] == "#": + print(line[1:].rstrip("\n")) + else: + sys.exit(1 if msg else 0) + + +def parse_cmdline(): + parser = ArgumentParserWithCustomError() + + parser.add_argument("-h", "--help", action="store_true") + 
parser.add_argument("PCAP") + parser.add_argument("JSON") + + def check_positive_int(x): + try: + x = int(x) + if x <= 0: + raise argparse.ArgumentTypeError(f"{x} is not a positive integer") + except ValueError: + raise argparse.ArgumentTypeError(f"{x} is not an integer") + return x + + parser.add_argument( + "--num-packets", type=check_positive_int, action="store", default=NUM_PKTS + ) + parser.add_argument( + "--clock-period", type=check_positive_int, action="store", default=CLOCK_PERIOD + ) + parser.add_argument( + "--pop-tick", type=check_positive_int, action="store", default=POP_TICK + ) + + def check_positive_float(x): + try: + x = float(x) + if x <= 0: + raise argparse.ArgumentTypeError(f"{x} is not a positive float") + except ValueError: + raise argparse.ArgumentTypeError(f"{x} is not a float") + return x + + parser.add_argument( + "--line-rate", type=check_positive_float, action="store", default=LINE_RATE + ) + + if "-h" in sys.argv or "--help" in sys.argv: + parser.error() + + return parser.parse_args() + + +if __name__ == "__main__": + opts = parse_cmdline() + + CLOCK_PERIOD = opts.clock_period + NUM_PKTS = opts.num_packets + POP_TICK = opts.pop_tick + + with open(opts.PCAP, "rb") as pcap_file: + with open(opts.JSON) as addr2flow_json: + data = parse_pcap(pcap_file, addr2flow_json) + json = gen_json(data) + print(json) From 0213affd3ce9d1756be2989f98b638b4058a8d0d Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sat, 26 Oct 2024 16:53:53 -0400 Subject: [PATCH 02/22] Refactor queue_call --- frontends/queues/pcaps/parse_pcap | 2 +- frontends/queues/queues/queue_call.py | 232 ++++++++++++++------------ 2 files changed, 125 insertions(+), 109 deletions(-) diff --git a/frontends/queues/pcaps/parse_pcap b/frontends/queues/pcaps/parse_pcap index f83fca34e..301a27a45 100755 --- a/frontends/queues/pcaps/parse_pcap +++ b/frontends/queues/pcaps/parse_pcap @@ -119,7 +119,7 @@ class ArgumentParserWithCustomError(argparse.ArgumentParser): if i == 0: continue elif 
line[0] == "#": - print(line[1:].rstrip("\n")) + print(line[1:].strip()) else: sys.exit(1 if msg else 0) diff --git a/frontends/queues/queues/queue_call.py b/frontends/queues/queues/queue_call.py index 8fd6ad87a..11875757f 100644 --- a/frontends/queues/queues/queue_call.py +++ b/frontends/queues/queues/queue_call.py @@ -2,6 +2,9 @@ from calyx.py_ast import Empty import calyx.builder as cb +ERR_CODE = 2**32 - 1 +PUSH_CODE = 2**32 - 2 + def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): """Inserts the component `name` into the program. @@ -19,11 +22,15 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): - 2: `values`, a list of values. Where each value is a 32-bit unsigned integer. The value at `i` is pushed if the command at `i` is `2`. - - 3: `has_ans`, a 1-bit unsigned integer. + - 3: `ranks`, a list of ranks. [optional] + Where each rank is a 32-bit unsigned integer. + The value at `i` is pushed with the rank at `i` if the command at `i` is `2`. + - 4: `cycles`, a list of cycles. + - 5: `has_ans`, a 1-bit unsigned integer. We raise/lower this to indicate whether the queue had a reply to the command. - - 4: `component_ans`, a 32-bit unsigned integer. + - 6: `component_ans`, a 32-bit unsigned integer. We put in this register the answer, if any. - - 5: `component_err`, a 1-bit unsigned integer. + - 7: `component_err`, a 1-bit unsigned integer. We raise/lower it to indicate whether an error occurred. """ assert ( @@ -34,11 +41,9 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): # We take a stats component by reference, # but all we'll really do with it is pass it to the queue component. - stats_cell = ( - runner.cell("stats_runner", stats_component, is_ref=True) - if stats_component - else None - ) + stats_cell = None + if stats_component: + stats_cell = runner.cell("stats_runner", stats_component, is_ref=True) # We'll invoke the queue component. 
queue = runner.cell("myqueue", queue) @@ -49,15 +54,17 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): # `1`: push # - input `value` # which is a 32-bit unsigned integer. If `cmd` is `1`, push this value. + # - input `rank` [optional] + # which is a 32-bit unsigned integer. If `cmd` is `1`, push `value` with this rank. # - ref register `ans`, into which the result of a pop is written. # - ref register `err`, which is raised if an error occurs. # Our memories and registers, all of which are passed to us by reference. commands = runner.seq_mem_d1("commands", 1, num_cmds, 32, is_ref=True) values = runner.seq_mem_d1("values", 32, num_cmds, 32, is_ref=True) - ranks = ( - runner.seq_mem_d1("ranks", 32, num_cmds, 32, is_ref=True) if use_ranks else None - ) + ranks = None + if use_ranks: + ranks = runner.seq_mem_d1("ranks", 32, num_cmds, 32, is_ref=True) has_ans = runner.reg(1, "has_ans", is_ref=True) ans = runner.reg(32, "component_ans", is_ref=True) err = runner.reg(1, "component_err", is_ref=True) @@ -65,60 +72,64 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): i = runner.reg(32) # The index of the command we're currently processing cmd = runner.reg(1) # The command we're currently processing value = runner.reg(32) # The value we're currently processing - rank = runner.reg(32) # The rank we're currently processing - - load_data = ( - [ # `cmd := commands[i]`, `value := values[i]`, `rank := ranks[i]` - runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), - runner.mem_load_d1(values, i.out, value, "write_value"), - runner.mem_load_d1(ranks, i.out, rank, "write_rank"), - ] - if use_ranks - else [ # `cmd := commands[i]`, `value := values[i]` - runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), - runner.mem_load_d1(values, i.out, value, "write_value"), - ] + rank = None # The rank we're currently processing + if use_ranks: + rank = runner.reg(32) + + load_data = [ + # `cmd := commands[i]`, `value 
:= values[i]` + runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), + runner.mem_load_d1(values, i.out, value, "write_value"), + ] + if use_ranks: + # `cmd := commands[i]`, `value := values[i]`, `rank := ranks[i]` + load_data += [runner.mem_load_d1(ranks, i.out, rank, "write_rank")] + + # Invoke the queue without stats or ranks. + invoke_queue = cb.invoke( + queue, in_cmd=cmd.out, in_value=value.out, ref_ans=ans, ref_err=err ) + if stats_component and use_ranks: + # with ranks and a stats component. + invoke_queue = cb.invoke( + queue, + in_cmd=cmd.out, + in_value=value.out, + in_rank=rank.out, + ref_ans=ans, + ref_err=err, + ref_stats=stats_cell, + ) + elif stats_component: + # with only a stats component. + invoke_queue = cb.invoke( + queue, + in_cmd=cmd.out, + in_value=value.out, + ref_ans=ans, + ref_err=err, + ref_stats=stats_cell, + ) + elif use_ranks: + # with only ranks. + invoke_queue = cb.invoke( + queue, + in_cmd=cmd.out, + in_value=value.out, + in_rank=rank.out, + ref_ans=ans, + ref_err=err, + ) runner.control += [ load_data, - ( - cb.invoke( # Invoke the queue with a stats component. - queue, - in_cmd=cmd.out, - in_value=value.out, - ref_ans=ans, - ref_err=err, - ref_stats=stats_cell, - ) - if stats_component - else ( - cb.invoke( # Invoke the queue with ranks. - queue, - in_cmd=cmd.out, - in_value=value.out, - in_rank=rank.out, - ref_ans=ans, - ref_err=err, - ) - if use_ranks - else cb.invoke( # Invoke the queue without stats or ranks. - queue, - in_cmd=cmd.out, - in_value=value.out, - ref_ans=ans, - ref_err=err, - ) - ) - ), + invoke_queue, # We're back from the invoke, and it's time for some post-mortem analysis. cb.if_with( runner.not_use(err.out), # If there was no error - [ - # If cmd = 1, meaning cmd is pop, raise the `has_ans` flag. - # Otherwise, lower the `has_ans` flag. - runner.eq_store_in_reg(cmd.out, 0, has_ans)[0] - ], + # If cmd = 0, meaning cmd is pop, raise the `has_ans` flag. 
+ # has_ans := cmd == 0 + runner.eq_store_in_reg(cmd.out, 0, has_ans)[0], ), runner.incr(i), # i++ ] @@ -157,11 +168,9 @@ def insert_main( commands = main.seq_mem_d1("commands", 1, num_cmds, 32, is_external=True) values = main.seq_mem_d1("values", 32, num_cmds, 32, is_external=True) ans_mem = main.seq_mem_d1("ans_mem", 32, num_cmds, 32, is_external=True) - ranks = ( - main.seq_mem_d1("ranks", 32, num_cmds, 32, is_external=True) - if use_ranks - else None - ) + ranks = None + if use_ranks: + ranks = main.seq_mem_d1("ranks", 32, num_cmds, 32, is_external=True) i = main.reg(32) # A counter for how many times we have invoked the dataplane. keep_looping = main.and_(1) # If this is high, we keep going. Otherwise, we stop. lt = main.lt(32) @@ -178,6 +187,54 @@ def insert_main( keep_looping.left = lt.out keep_looping.right = cb.HI if keepgoing else not_err.out + # Invoke the dataplane component without stats or ranks. + invoke_dataplane = cb.invoke( + dataplane, + ref_commands=commands, + ref_values=values, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ) + if stats_component and use_ranks: + # with ranks and a stats component. + invoke_dataplane = cb.invoke( + dataplane, + ref_commands=commands, + ref_values=values, + ref_ranks=ranks, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ref_stats_runner=stats, + ) + elif stats_component: + # with only a stats component. + invoke_dataplane = cb.invoke( + dataplane, + ref_commands=commands, + ref_values=values, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ref_stats_runner=stats, + ) + elif use_ranks: + # with only ranks. 
+ invoke_dataplane = cb.invoke( + dataplane, + ref_commands=commands, + ref_values=values, + ref_ranks=ranks, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ) + + invoke_controller = Empty + if controller is not None: + invoke_controller = cb.invoke(controller, ref_stats_controller=stats) + main.control += cb.while_with( # We will run the dataplane and controller components in sequence, # in a while loop. The loop will terminate when `break_` has a value of `1`. @@ -185,41 +242,7 @@ def insert_main( [ main.reg_store(has_ans, 0, "lower_has_ans"), # Lower the has-ans flag. main.reg_store(dataplane_err, 0, "lower_err"), # Lower the has-err flag. - ( - cb.invoke( - # Invoke the dataplane component with a stats component. - dataplane, - ref_commands=commands, - ref_values=values, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ref_stats_runner=stats, - ) - if stats_component - else ( - cb.invoke( - # Invoke the dataplane component with ranks. - dataplane, - ref_commands=commands, - ref_values=values, - ref_ranks=ranks, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ) - if use_ranks - else cb.invoke( - # Invoke the dataplane component without stats or ranks. - dataplane, - ref_commands=commands, - ref_values=values, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ) - ) - ), + invoke_dataplane, # If the dataplane component has an answer, # write it to the answer-list and increment the index `i`. cb.if_( @@ -230,25 +253,18 @@ def insert_main( main.mem_store_d1( ans_mem, i.out, - cb.const(32, 4294967295), + cb.const(32, ERR_CODE), "write_err", # store the value 2^32 - 1 (code for error) to `ans_mem` ), main.mem_store_d1( # if we're here, we must be here because we were a successful push. 
ans_mem, i.out, - cb.const(32, 4294967294), + cb.const(32, PUSH_CODE), "write_push", # store the value 2^32 - 2 (code for push) to `ans_mem` ), ), ), - ( - cb.invoke( # Invoke the controller component. - controller, - ref_stats_controller=stats, - ) - if controller - else Empty - ), + invoke_controller, main.incr(i), # i++ ], ) From 39a8138b2ce0120894fd9a023e34166cf147e784 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sun, 27 Oct 2024 16:20:19 -0400 Subject: [PATCH 03/22] C -> c --- frontends/queues/queues/queue_call.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontends/queues/queues/queue_call.py b/frontends/queues/queues/queue_call.py index 11875757f..b1c4eee14 100644 --- a/frontends/queues/queues/queue_call.py +++ b/frontends/queues/queues/queue_call.py @@ -176,7 +176,7 @@ def insert_main( lt = main.lt(32) not_err = main.not_(1) - with main.comb_group("Compute_keep_looping") as compute_keep_looping: + with main.comb_group("compute_keep_looping") as compute_keep_looping: # The condition to keep looping is: # The index `i` is less than the number of commands `num_cmds` # AND From 61ec5da15f8e424e518854bab8f87674737daf2a Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sun, 27 Oct 2024 16:21:03 -0400 Subject: [PATCH 04/22] Make flow inference a separate component --- .../queues/queues/binheap/flow_inference.py | 20 ++++++++---- .../queues/queues/binheap/round_robin.py | 31 ++++++++----------- frontends/queues/queues/binheap/strict.py | 13 ++++---- 3 files changed, 34 insertions(+), 30 deletions(-) diff --git a/frontends/queues/queues/binheap/flow_inference.py b/frontends/queues/queues/binheap/flow_inference.py index 659e6dd69..a76fabfaa 100644 --- a/frontends/queues/queues/binheap/flow_inference.py +++ b/frontends/queues/queues/binheap/flow_inference.py @@ -1,11 +1,17 @@ # pylint: disable=import-error import calyx.builder as cb +from calyx.utils import bits_needed +def insert_boundary_flow_inference(prog, name, boundaries): + n = 
len(boundaries) + + comp = prog.component(name) -def insert_flow_inference(comp, value, flow, boundaries, name): - bound_checks = [] + value = comp.input("value", 32) + flow = comp.reg(bits_needed(n - 1), "flow", is_ref=True) - for b in range(len(boundaries)): + bound_checks = [] + for b in range(n): lt = comp.lt(32) le = comp.le(32) guard = comp.and_(1) @@ -22,9 +28,11 @@ def insert_flow_inference(comp, value, flow, boundaries, name): guard.left = le.out guard.right = lt.out - set_flow_b = comp.reg_store(flow, b, f"{name}_set_flow_{b}") - bound_check = cb.if_with(cb.CellAndGroup(guard, bound_check_b), set_flow_b) + set_flow_b = comp.reg_store(flow, b, groupname=f"set_flow_{b}") + bound_check = cb.if_with(cb.CellAndGroup(guard, bound_check_b), set_flow_b) bound_checks.append(bound_check) + + comp.control += [ cb.par(*bound_checks) ] - return cb.par(*bound_checks) + return comp diff --git a/frontends/queues/queues/binheap/round_robin.py b/frontends/queues/queues/binheap/round_robin.py index 312878dce..256f7fc1b 100644 --- a/frontends/queues/queues/binheap/round_robin.py +++ b/frontends/queues/queues/binheap/round_robin.py @@ -2,19 +2,19 @@ import calyx.builder as cb from calyx.utils import bits_needed from queues.binheap.stable_binheap import insert_stable_binheap -from queues.binheap.flow_inference import insert_flow_inference +from queues.binheap.flow_inference import insert_boundary_flow_inference FACTOR = 4 -def insert_binheap_rr(prog, name, boundaries, queue_size_factor=FACTOR): - n = len(boundaries) - +def insert_binheap_rr(prog, name, n, flow_infer, queue_size_factor=FACTOR): comp = prog.component(name) binheap = insert_stable_binheap(prog, "binheap", queue_size_factor) binheap = comp.cell("binheap", binheap) + flow_infer = comp.cell("flow_infer", flow_infer) + cmd = comp.input("cmd", 1) value = comp.input("value", 32) @@ -23,21 +23,15 @@ def insert_binheap_rr(prog, name, boundaries, queue_size_factor=FACTOR): err_eq_0 = comp.eq_use(err.out, 0) - flow_in = 
comp.reg(bits_needed(n - 1), "flow_in") - infer_flow_in = insert_flow_inference( - comp, value, flow_in, boundaries, "infer_flow_in" - ) - - flow_out = comp.reg(bits_needed(n - 1), "flow_out") - infer_flow_out = insert_flow_inference( - comp, ans.out, flow_out, boundaries, "infer_flow_out" - ) + flow = comp.reg(bits_needed(n - 1), "flow") + infer_flow_in = cb.invoke(flow_infer, in_value=value, ref_flow=flow) + infer_flow_out = cb.invoke(flow_infer, in_value=ans.out, ref_flow=flow) rank_ptrs = [comp.reg(32, f"r_{i}") for i in range(n)] rank_ptr_incrs = dict([(i, comp.incr(rank_ptrs[i], n)) for i in range(n)]) turn = comp.reg(bits_needed(n - 1), "turn") - turn_neq_flow_out = comp.neq_use(turn.out, flow_out.out) + turn_neq_flow = comp.neq_use(turn.out, flow.out) turn_incr_mod_n = cb.if_with( comp.eq_use(turn.out, n - 1), comp.reg_store(turn, 0), comp.incr(turn) ) @@ -66,16 +60,16 @@ def binheap_invoke(value, rank): update_state_pop = [ infer_flow_out, cb.while_with( - turn_neq_flow_out, [comp.case(turn.out, rank_ptr_incrs), turn_incr_mod_n] + turn_neq_flow, [comp.case(turn.out, rank_ptr_incrs), turn_incr_mod_n] ), turn_incr_mod_n, ] - update_state_push = comp.case(flow_in.out, rank_ptr_incrs) + update_state_push = comp.case(flow.out, rank_ptr_incrs) comp.control += [ init_state, infer_flow_in, - comp.case(flow_in.out, binheap_invokes), + comp.case(flow.out, binheap_invokes), cb.if_with( err_eq_0, comp.case(cmd, {0: update_state_pop, 1: update_state_push}) ), @@ -102,6 +96,7 @@ def generate(prog, numflows): else: raise ValueError("Unsupported number of flows") - pifo = insert_binheap_rr(prog, "pifo", boundaries) + flow_infer = insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = insert_binheap_rr(prog, "pifo", numflows, flow_infer) return pifo diff --git a/frontends/queues/queues/binheap/strict.py b/frontends/queues/queues/binheap/strict.py index 1dcc74c5d..f8848a915 100644 --- a/frontends/queues/queues/binheap/strict.py +++ 
b/frontends/queues/queues/binheap/strict.py @@ -2,19 +2,19 @@ import calyx.builder as cb from calyx.utils import bits_needed from queues.binheap.stable_binheap import insert_stable_binheap -from queues.binheap.flow_inference import insert_flow_inference +from queues.binheap.flow_inference import insert_boundary_flow_inference FACTOR = 4 -def insert_binheap_strict(prog, name, boundaries, order, queue_size_factor=FACTOR): - n = len(boundaries) - +def insert_binheap_strict(prog, name, n, order, flow_infer, queue_size_factor=FACTOR): comp = prog.component(name) binheap = insert_stable_binheap(prog, "binheap", queue_size_factor) binheap = comp.cell("binheap", binheap) + flow_infer = comp.cell("flow_infer", flow_infer) + cmd = comp.input("cmd", 1) value = comp.input("value", 32) @@ -22,7 +22,7 @@ def insert_binheap_strict(prog, name, boundaries, order, queue_size_factor=FACTO err = comp.reg(1, "err", is_ref=True) flow = comp.reg(bits_needed(n - 1), "flow") - infer_flow = insert_flow_inference(comp, value, flow, boundaries, "infer_flow") + infer_flow = cb.invoke(flow_infer, in_value=value, ref_flow=flow) def binheap_invoke(value, rank): return cb.invoke( @@ -67,6 +67,7 @@ def generate(prog, numflows): else: raise ValueError("Unsupported number of flows") - pifo = insert_binheap_strict(prog, "pifo", boundaries, order) + flow_infer = insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = insert_binheap_strict(prog, "pifo", numflows, order, flow_infer) return pifo From a41a15dbc18cdf27ef1f07255a2b38e4f1e3b585 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sun, 27 Oct 2024 23:36:36 -0400 Subject: [PATCH 05/22] Remove generate from binheap strict and RR - move this function to the test files - better seperation of testing and library --- frontends/queues/queues/__init__.py | 7 ---- .../queues/queues/binheap/round_robin.py | 25 --------------- frontends/queues/queues/binheap/strict.py | 32 +------------------ .../queues/{binheap => }/flow_inference.py | 
0 .../binheap/round_robin/rr_2flow_test.py | 17 ++++++++-- .../binheap/round_robin/rr_3flow_test.py | 17 ++++++++-- .../binheap/round_robin/rr_4flow_test.py | 17 ++++++++-- .../binheap/round_robin/rr_5flow_test.py | 17 ++++++++-- .../binheap/round_robin/rr_6flow_test.py | 17 ++++++++-- .../binheap/round_robin/rr_7flow_test.py | 17 ++++++++-- .../tests/binheap/strict/strict_2flow_test.py | 18 +++++++++-- .../tests/binheap/strict/strict_3flow_test.py | 18 +++++++++-- .../tests/binheap/strict/strict_4flow_test.py | 18 +++++++++-- .../tests/binheap/strict/strict_5flow_test.py | 18 +++++++++-- .../tests/binheap/strict/strict_6flow_test.py | 18 +++++++++-- 15 files changed, 160 insertions(+), 96 deletions(-) rename frontends/queues/queues/{binheap => }/flow_inference.py (100%) diff --git a/frontends/queues/queues/__init__.py b/frontends/queues/queues/__init__.py index 1f57ae076..6140fa302 100644 --- a/frontends/queues/queues/__init__.py +++ b/frontends/queues/queues/__init__.py @@ -1,10 +1,3 @@ """Queues for Packet Scheduling""" __version__ = "0.1.0" - -import queues.binheap.stable_binheap as stable_binheap -import queues.binheap.fifo as binheap_fifo -import queues.binheap.pifo as binheap_pifo -import queues.binheap.round_robin as binheap_rr -import queues.binheap.strict as binheap_strict -import queues.binheap.binheap as binheap diff --git a/frontends/queues/queues/binheap/round_robin.py b/frontends/queues/queues/binheap/round_robin.py index 256f7fc1b..75584aca3 100644 --- a/frontends/queues/queues/binheap/round_robin.py +++ b/frontends/queues/queues/binheap/round_robin.py @@ -2,7 +2,6 @@ import calyx.builder as cb from calyx.utils import bits_needed from queues.binheap.stable_binheap import insert_stable_binheap -from queues.binheap.flow_inference import insert_boundary_flow_inference FACTOR = 4 @@ -76,27 +75,3 @@ def binheap_invoke(value, rank): ] return comp - - -def generate(prog, numflows): - """Generates queue with specific `boundaries`""" - - if numflows == 2: - 
boundaries = [200, 400] - elif numflows == 3: - boundaries = [133, 266, 400] - elif numflows == 4: - boundaries = [100, 200, 300, 400] - elif numflows == 5: - boundaries = [80, 160, 240, 320, 400] - elif numflows == 6: - boundaries = [66, 100, 200, 220, 300, 400] - elif numflows == 7: - boundaries = [50, 100, 150, 200, 250, 300, 400] - else: - raise ValueError("Unsupported number of flows") - - flow_infer = insert_boundary_flow_inference(prog, "flow_inference", boundaries) - pifo = insert_binheap_rr(prog, "pifo", numflows, flow_infer) - - return pifo diff --git a/frontends/queues/queues/binheap/strict.py b/frontends/queues/queues/binheap/strict.py index f8848a915..a1e288c7d 100644 --- a/frontends/queues/queues/binheap/strict.py +++ b/frontends/queues/queues/binheap/strict.py @@ -2,7 +2,7 @@ import calyx.builder as cb from calyx.utils import bits_needed from queues.binheap.stable_binheap import insert_stable_binheap -from queues.binheap.flow_inference import insert_boundary_flow_inference +from queues.flow_inference import insert_boundary_flow_inference FACTOR = 4 @@ -41,33 +41,3 @@ def binheap_invoke(value, rank): comp.control += [infer_flow, comp.case(flow.out, binheap_invokes)] return comp - - -def generate(prog, numflows): - """Generates queue with specific `boundaries`""" - - if numflows == 2: - boundaries = [200, 400] - order = [1, 0] - elif numflows == 3: - boundaries = [133, 266, 400] - order = [1, 2, 0] - elif numflows == 4: - boundaries = [100, 200, 300, 400] - order = [3, 0, 2, 1] - elif numflows == 5: - boundaries = [80, 160, 240, 320, 400] - order = [0, 1, 2, 3, 4] - elif numflows == 6: - boundaries = [66, 100, 200, 220, 300, 400] - order = [3, 1, 5, 2, 4, 0] - elif numflows == 7: - boundaries = [50, 100, 150, 200, 250, 300, 400] - order = [0, 1, 2, 3, 4, 5, 6] - else: - raise ValueError("Unsupported number of flows") - - flow_infer = insert_boundary_flow_inference(prog, "flow_inference", boundaries) - pifo = insert_binheap_strict(prog, "pifo", 
numflows, order, flow_infer) - - return pifo diff --git a/frontends/queues/queues/binheap/flow_inference.py b/frontends/queues/queues/flow_inference.py similarity index 100% rename from frontends/queues/queues/binheap/flow_inference.py rename to frontends/queues/queues/flow_inference.py diff --git a/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py index 572f1dfb1..30edb2724 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py @@ -1,15 +1,26 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_rr +import queues.binheap.round_robin as rr +import queues.flow_inference as fi + +NUMFLOWS = 2 if __name__ == "__main__": """Invoke the top-level function to build the program, with 2 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_rr.generate(prog, 2) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [200, 400] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py index 2162b757f..96293ca43 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py @@ -1,15 +1,26 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_rr +import queues.binheap.round_robin as rr +import queues.flow_inference as fi + +NUMFLOWS = 3 if __name__ == "__main__": """Invoke the 
top-level function to build the program, with 3 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_rr.generate(prog, 3) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [133, 266, 400] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py index d326567ee..3259802d0 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py @@ -1,15 +1,26 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_rr +import queues.binheap.round_robin as rr +import queues.flow_inference as fi + +NUMFLOWS = 4 if __name__ == "__main__": """Invoke the top-level function to build the program, with 4 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_rr.generate(prog, 4) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [100, 200, 300, 400] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py index 44fd71e2c..0127aea31 100644 --- 
a/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py @@ -1,15 +1,26 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_rr +import queues.binheap.round_robin as rr +import queues.flow_inference as fi + +NUMFLOWS = 5 if __name__ == "__main__": """Invoke the top-level function to build the program, with 5 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_rr.generate(prog, 5) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [80, 160, 240, 320, 400] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py index 01596c534..78a9aabcf 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py @@ -1,15 +1,26 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_rr +import queues.binheap.round_robin as rr +import queues.flow_inference as fi + +NUMFLOWS = 6 if __name__ == "__main__": """Invoke the top-level function to build the program, with 6 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_rr.generate(prog, 6) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [66, 100, 200, 220, 300, 400] + flow_infer = fi.insert_boundary_flow_inference(prog, 
"flow_inference", boundaries) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py index 0c89de620..ab84f4d08 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py @@ -1,15 +1,26 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_rr +import queues.binheap.round_robin as rr +import queues.flow_inference as fi + +NUMFLOWS = 7 if __name__ == "__main__": """Invoke the top-level function to build the program, with 7 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_rr.generate(prog, 7) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [50, 100, 150, 200, 250, 300, 400] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/strict/strict_2flow_test.py b/frontends/queues/tests/binheap/strict/strict_2flow_test.py index dc66bb147..36fb2311f 100644 --- a/frontends/queues/tests/binheap/strict/strict_2flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_2flow_test.py @@ -1,15 +1,27 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_strict +import queues.binheap.strict as st +import queues.flow_inference as fi + +NUMFLOWS = 2 if __name__ == "__main__": """Invoke the top-level function to build the program, with 2 flows.""" num_cmds = int(sys.argv[1]) keepgoing 
= "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_strict.generate(prog, 2) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [200, 400] + order = [1, 0] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/strict/strict_3flow_test.py b/frontends/queues/tests/binheap/strict/strict_3flow_test.py index 8b2322c3f..1290c1a4f 100644 --- a/frontends/queues/tests/binheap/strict/strict_3flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_3flow_test.py @@ -1,15 +1,27 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_strict +import queues.binheap.strict as st +import queues.flow_inference as fi + +NUMFLOWS = 3 if __name__ == "__main__": """Invoke the top-level function to build the program, with 3 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_strict.generate(prog, 3) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [133, 266, 400] + order = [1, 2, 0] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/strict/strict_4flow_test.py b/frontends/queues/tests/binheap/strict/strict_4flow_test.py index 3e1227c8c..8f513dfb2 100644 --- a/frontends/queues/tests/binheap/strict/strict_4flow_test.py +++ 
b/frontends/queues/tests/binheap/strict/strict_4flow_test.py @@ -1,15 +1,27 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_strict +import queues.binheap.strict as st +import queues.flow_inference as fi + +NUMFLOWS = 4 if __name__ == "__main__": """Invoke the top-level function to build the program, with 4 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_strict.generate(prog, 4) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [100, 200, 300, 400] + order = [3, 0, 2, 1] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/strict/strict_5flow_test.py b/frontends/queues/tests/binheap/strict/strict_5flow_test.py index d20517110..27ba4357c 100644 --- a/frontends/queues/tests/binheap/strict/strict_5flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_5flow_test.py @@ -1,15 +1,27 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_strict +import queues.binheap.strict as st +import queues.flow_inference as fi + +NUMFLOWS = 5 if __name__ == "__main__": """Invoke the top-level function to build the program, with 5 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_strict.generate(prog, 5) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [80, 160, 240, 320, 400] + order = [0, 1, 2, 3, 4] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", 
boundaries) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/binheap/strict/strict_6flow_test.py b/frontends/queues/tests/binheap/strict/strict_6flow_test.py index 2d7692d3f..da2933319 100644 --- a/frontends/queues/tests/binheap/strict/strict_6flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_6flow_test.py @@ -1,15 +1,27 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_strict +import queues.binheap.strict as st +import queues.flow_inference as fi + +NUMFLOWS = 6 if __name__ == "__main__": """Invoke the top-level function to build the program, with 6 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = binheap_strict.generate(prog, 6) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + if sim_pcap: + raise Exception("Not Implemented") + else: + boundaries = [66, 100, 200, 220, 300, 400] + order = [3, 1, 5, 2, 4, 0] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() From 20352f939cfe7dc68ed59ba1501dc892e28ad17b Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sun, 27 Oct 2024 23:40:22 -0400 Subject: [PATCH 06/22] Add strict test for 7 flows --- .../queues/test_data_gen/gen_test_data.sh | 2 +- .../test_data_gen/strict_queue_oracle.py | 3 +++ .../tests/binheap/strict/strict_7flow_test.py | 27 +++++++++++++++++++ .../queues/tests/strict/strict_7flow_test.py | 15 +++++++++++ 4 files changed, 46 insertions(+), 1 deletion(-) create mode 100644 frontends/queues/tests/binheap/strict/strict_7flow_test.py create mode 100644 frontends/queues/tests/strict/strict_7flow_test.py diff 
--git a/frontends/queues/test_data_gen/gen_test_data.sh b/frontends/queues/test_data_gen/gen_test_data.sh index 1448f826d..31bdf3024 100755 --- a/frontends/queues/test_data_gen/gen_test_data.sh +++ b/frontends/queues/test_data_gen/gen_test_data.sh @@ -67,7 +67,7 @@ done # for queues with 2..6 flows, each with a different strict ordering. This generates 5 # expect file pairs. -for n in {2..6}; do +for n in {2..7}; do python3 $data_gen_dir/gen_oracle_data.py $num_cmds > $tests_dir/strict/strict_${n}flow_test.data [[ $? -eq 0 ]] && echo "Generated strict/strict_${n}flow_test.data" cat $tests_dir/strict/strict_${n}flow_test.data | python3 $data_gen_dir/strict_queue_oracle.py $num_cmds $queue_size $n --keepgoing > $tests_dir/strict/strict_${n}flow_test.expect diff --git a/frontends/queues/test_data_gen/strict_queue_oracle.py b/frontends/queues/test_data_gen/strict_queue_oracle.py index b7c6d0583..ef5ca216e 100644 --- a/frontends/queues/test_data_gen/strict_queue_oracle.py +++ b/frontends/queues/test_data_gen/strict_queue_oracle.py @@ -25,6 +25,9 @@ elif numflows == 6: boundaries = [66, 100, 200, 220, 300, 400] order = [3, 1, 5, 2, 4, 0] + elif numflows == 7: + boundaries = [50, 100, 150, 200, 250, 300, 400] + order = [0, 1, 2, 3, 4, 5, 6] else: raise ValueError("Unsupported number of flows") diff --git a/frontends/queues/tests/binheap/strict/strict_7flow_test.py b/frontends/queues/tests/binheap/strict/strict_7flow_test.py new file mode 100644 index 000000000..7eebd97e9 --- /dev/null +++ b/frontends/queues/tests/binheap/strict/strict_7flow_test.py @@ -0,0 +1,27 @@ +import sys +import calyx.builder as cb +import queues.queue_call as qc +import queues.binheap.strict as st +import queues.flow_inference as fi + +NUMFLOWS = 7 + + +if __name__ == "__main__": + """Invoke the top-level function to build the program, with 7 flows.""" + num_cmds = int(sys.argv[1]) + keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv + + prog = cb.Builder() + + if sim_pcap: 
+ raise Exception("Not Implemented") + else: + boundaries = [50, 100, 150, 200, 250, 300, 400] + order = [0, 1, 2, 3, 4, 5, 6] + flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + prog.program.emit() diff --git a/frontends/queues/tests/strict/strict_7flow_test.py b/frontends/queues/tests/strict/strict_7flow_test.py new file mode 100644 index 000000000..ec9d28ebe --- /dev/null +++ b/frontends/queues/tests/strict/strict_7flow_test.py @@ -0,0 +1,15 @@ +import sys +import calyx.builder as cb +import queues.queue_call as qc +from queues.strict_or_rr import generate + + +if __name__ == "__main__": + """Invoke the top-level function to build the program, with 7 flows.""" + num_cmds = int(sys.argv[1]) + keepgoing = "--keepgoing" in sys.argv + + prog = cb.Builder() + pifo = generate(prog, 7, False) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() From 5141884e0c200bc836b810ef4b00584323b47678 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sun, 27 Oct 2024 23:41:06 -0400 Subject: [PATCH 07/22] Fix imports in binheap tests --- frontends/queues/tests/binheap/binheap_test.py | 15 +++++---------- frontends/queues/tests/binheap/fifo_test.py | 13 ++++++------- frontends/queues/tests/binheap/pifo_test.py | 13 ++++++------- .../queues/tests/binheap/stable_binheap_test.py | 13 ++++++------- 4 files changed, 23 insertions(+), 31 deletions(-) diff --git a/frontends/queues/tests/binheap/binheap_test.py b/frontends/queues/tests/binheap/binheap_test.py index 6645248cb..aaf021316 100644 --- a/frontends/queues/tests/binheap/binheap_test.py +++ b/frontends/queues/tests/binheap/binheap_test.py @@ -1,6 +1,6 @@ # pylint: disable=import-error import calyx.builder as cb -from queues import binheap +import queues.binheap.binheap as bh def insert_main(prog): @@ -31,7 +31,7 @@ def 
insert_main(prog): queue_size_factor = 4 - heap = binheap.insert_binheap(prog, "heap", queue_size_factor, 64, 32) + heap = bh.insert_binheap(prog, "heap", queue_size_factor, 64, 32) heap = comp.cell("heap", heap) out = comp.seq_mem_d1("out", 32, 15, queue_size_factor, is_external=True) @@ -89,13 +89,8 @@ def pop_and_store(): ] -def build(): - """Top-level function to build the program.""" - +if __name__ == "__main__": + """Invoke the top-level function to build the program.""" prog = cb.Builder() insert_main(prog) - return prog.program - - -if __name__ == "__main__": - build().emit() + prog.program.emit() diff --git a/frontends/queues/tests/binheap/fifo_test.py b/frontends/queues/tests/binheap/fifo_test.py index 344204356..185a838de 100644 --- a/frontends/queues/tests/binheap/fifo_test.py +++ b/frontends/queues/tests/binheap/fifo_test.py @@ -2,18 +2,17 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_fifo +import queues.binheap.fifo as bhf -def build(): +if __name__ == "__main__": """Top-level function to build the program.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + prog = cb.Builder() - fifo = binheap_fifo.insert_binheap_fifo(prog, "fifo") - qc.insert_main(prog, fifo, num_cmds, keepgoing=keepgoing) - return prog.program + fifo = bhf.insert_binheap_fifo(prog, "fifo") + qc.insert_main(prog, fifo, num_cmds, keepgoing=keepgoing) -if __name__ == "__main__": - build().emit() + prog.program.emit() diff --git a/frontends/queues/tests/binheap/pifo_test.py b/frontends/queues/tests/binheap/pifo_test.py index 81955efa8..e33a7d16f 100644 --- a/frontends/queues/tests/binheap/pifo_test.py +++ b/frontends/queues/tests/binheap/pifo_test.py @@ -2,18 +2,17 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import binheap_pifo +import queues.binheap.pifo as bhp -def build(): +if __name__ == "__main__": """Top-level function to build the program.""" num_cmds = 
int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + prog = cb.Builder() - pifo = binheap_pifo.insert_binheap_pifo(prog, "pifo") - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) - return prog.program + pifo = bhp.insert_binheap_pifo(prog, "pifo") + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) -if __name__ == "__main__": - build().emit() + prog.program.emit() diff --git a/frontends/queues/tests/binheap/stable_binheap_test.py b/frontends/queues/tests/binheap/stable_binheap_test.py index cd3c8b49f..cd7b10d10 100644 --- a/frontends/queues/tests/binheap/stable_binheap_test.py +++ b/frontends/queues/tests/binheap/stable_binheap_test.py @@ -2,18 +2,17 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues import stable_binheap +import queues.binheap.stable_binheap as sbh -def build(): +if __name__ == "__main__": """Top-level function to build the program.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + prog = cb.Builder() - binheap = stable_binheap.insert_stable_binheap(prog, "stable_binheap") - qc.insert_main(prog, binheap, num_cmds, keepgoing=keepgoing, use_ranks=True) - return prog.program + binheap = sbh.insert_stable_binheap(prog, "stable_binheap") + qc.insert_main(prog, binheap, num_cmds, keepgoing=keepgoing, use_ranks=True) -if __name__ == "__main__": - build().emit() + prog.program.emit() From 126bb2d7e637c209aab098aa1b667b9fc430a651 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 28 Oct 2024 01:46:54 -0400 Subject: [PATCH 08/22] Rename RR and Strict oracles --- frontends/queues/test_data_gen/gen_test_data.sh | 8 ++++---- .../test_data_gen/{rr_queue_oracle.py => rr_oracle.py} | 0 .../{strict_queue_oracle.py => strict_oracle.py} | 0 3 files changed, 4 insertions(+), 4 deletions(-) rename frontends/queues/test_data_gen/{rr_queue_oracle.py => rr_oracle.py} (100%) rename frontends/queues/test_data_gen/{strict_queue_oracle.py => strict_oracle.py} (100%) diff --git 
a/frontends/queues/test_data_gen/gen_test_data.sh b/frontends/queues/test_data_gen/gen_test_data.sh index 31bdf3024..6234ed079 100755 --- a/frontends/queues/test_data_gen/gen_test_data.sh +++ b/frontends/queues/test_data_gen/gen_test_data.sh @@ -52,25 +52,25 @@ cat $tests_dir/binheap/stable_binheap_test.data | python3 $data_gen_dir/binheap_ [[ $? -eq 0 ]] && echo "Generated binheap/stable_binheap_test.expect" -# For the Round Robin queues, we drop piezo mode as well and use rrqueue_oracle to +# For the Round Robin queues, we drop piezo mode as well and use rr_oracle to # generate the expected output for queues with 2..7 flows. This generates 6 data expect file pairs. for n in {2..7}; do python3 $data_gen_dir/gen_oracle_data.py $num_cmds > $tests_dir/round_robin/rr_${n}flow_test.data [[ $? -eq 0 ]] && echo "Generated round_robin/rr_${n}flow_test.data" - cat $tests_dir/round_robin/rr_${n}flow_test.data | python3 $data_gen_dir/rr_queue_oracle.py $num_cmds $queue_size $n --keepgoing > $tests_dir/round_robin/rr_${n}flow_test.expect + cat $tests_dir/round_robin/rr_${n}flow_test.data | python3 $data_gen_dir/rr_oracle.py $num_cmds $queue_size $n --keepgoing > $tests_dir/round_robin/rr_${n}flow_test.expect [[ $? -eq 0 ]] && echo "Generated round_robin/rr_${n}flow_test.expect" done -# For Strict queues, we use strict_queue_oracle.py to generate the expected output +# For Strict queues, we use strict_oracle.py to generate the expected output # for queues with 2..6 flows, each with a different strict ordering. This generates 5 # expect file pairs. for n in {2..7}; do python3 $data_gen_dir/gen_oracle_data.py $num_cmds > $tests_dir/strict/strict_${n}flow_test.data [[ $? 
-eq 0 ]] && echo "Generated strict/strict_${n}flow_test.data" - cat $tests_dir/strict/strict_${n}flow_test.data | python3 $data_gen_dir/strict_queue_oracle.py $num_cmds $queue_size $n --keepgoing > $tests_dir/strict/strict_${n}flow_test.expect + cat $tests_dir/strict/strict_${n}flow_test.data | python3 $data_gen_dir/strict_oracle.py $num_cmds $queue_size $n --keepgoing > $tests_dir/strict/strict_${n}flow_test.expect [[ $? -eq 0 ]] && echo "Generated strict/strict_${n}flow_test.expect" done diff --git a/frontends/queues/test_data_gen/rr_queue_oracle.py b/frontends/queues/test_data_gen/rr_oracle.py similarity index 100% rename from frontends/queues/test_data_gen/rr_queue_oracle.py rename to frontends/queues/test_data_gen/rr_oracle.py diff --git a/frontends/queues/test_data_gen/strict_queue_oracle.py b/frontends/queues/test_data_gen/strict_oracle.py similarity index 100% rename from frontends/queues/test_data_gen/strict_queue_oracle.py rename to frontends/queues/test_data_gen/strict_oracle.py From 0ebd53390347a11ff9ce806556e78478cea8bece Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 28 Oct 2024 01:47:39 -0400 Subject: [PATCH 09/22] Rename binheap (un)tuplify components --- frontends/queues/queues/binheap/binheap.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/frontends/queues/queues/binheap/binheap.py b/frontends/queues/queues/binheap/binheap.py index 0d74f5f5e..52cffd46d 100644 --- a/frontends/queues/queues/binheap/binheap.py +++ b/frontends/queues/queues/binheap/binheap.py @@ -63,13 +63,13 @@ def insert_binheap(prog, name, queue_size_factor, rnk_w, val_w): rank = comp.input("rank", rnk_w) value = comp.input("value", val_w) - swap = comp.cell( - "swap", insert_swap(prog, "swap", rnk_w + val_w, max_queue_size, addr_size) - ) - tuplify = comp.cell("tuplify", insert_tuplify(prog, "tuplify", rnk_w, val_w)) - untuplify = comp.cell( - "untuplify", insert_untuplify(prog, "untuplify", rnk_w, val_w) - ) + swap = insert_swap(prog, 
f"{name}_swap", rnk_w + val_w, max_queue_size, addr_size) + swap = comp.cell("swap", swap) + + tuplify = insert_tuplify(prog, f"{name}_tuplify", rnk_w, val_w) + tuplify = comp.cell("tuplify", tuplify) + untuplify = insert_untuplify(prog, f"{name}_untuplify", rnk_w, val_w) + untuplify = comp.cell("untuplify", untuplify) mem = comp.seq_mem_d1("mem", 96, max_queue_size, addr_size) # The memory to store the heap, represented as an array. From 789dfccb5bff0a9c5eefd1abcc9f4445b71926d2 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 28 Oct 2024 06:15:44 -0400 Subject: [PATCH 10/22] Sketch sim_pcap.py --- frontends/queues/queues/flow_inference.py | 21 +++ frontends/queues/queues/sim_pcap.py | 178 ++++++++++++++++++ .../binheap/round_robin/rr_2flow_test.py | 5 +- 3 files changed, 203 insertions(+), 1 deletion(-) create mode 100644 frontends/queues/queues/sim_pcap.py diff --git a/frontends/queues/queues/flow_inference.py b/frontends/queues/queues/flow_inference.py index a76fabfaa..3bb08d7bd 100644 --- a/frontends/queues/queues/flow_inference.py +++ b/frontends/queues/queues/flow_inference.py @@ -1,6 +1,8 @@ # pylint: disable=import-error import calyx.builder as cb from calyx.utils import bits_needed +from calyx.tuple import insert_untuplify + def insert_boundary_flow_inference(prog, name, boundaries): n = len(boundaries) @@ -36,3 +38,22 @@ def insert_boundary_flow_inference(prog, name, boundaries): comp.control += [ cb.par(*bound_checks) ] return comp + + +def insert_tuple_flow_inference(prog, name, num_flows): + flow_bits = bits_needed(num_flows - 1) + + comp = prog.component(name) + + untuplify = insert_untuplify(prog, f"{name}_untuplify", flow_bits, 32 - flow_bits) + untuplify = comp.cell("untuplify", untuplify) + + value = comp.input("value", 32) + flow = comp.reg(flow_bits, "flow", is_ref=True) + + with comp.continuous: + untuplify.tup = value + + comp.control += [ comp.reg_store(flow, untuplify.fst) ] + + return comp diff --git 
a/frontends/queues/queues/sim_pcap.py b/frontends/queues/queues/sim_pcap.py new file mode 100644 index 000000000..e0fd0825c --- /dev/null +++ b/frontends/queues/queues/sim_pcap.py @@ -0,0 +1,178 @@ +# pylint: disable=import-error +import calyx.builder as cb +from calyx.utils import bits_needed +from calyx.tuple import insert_tuplify + +ERR_CODE = 2**32 - 1 +PUSH_CODE = 2**32 - 2 + + +def insert_runner(prog, queue, name, num_cmds, num_flows): + """Inserts the component `name` into the program. + This will be used to `invoke` the component `queue` and feed it _one command_. + """ + flow_bits = bits_needed(num_flows - 1) + + runner = prog.component(name) + + tuplify = insert_tuplify(prog, f"{name}_tuplify", flow_bits, 32 - flow_bits) + tuplify = runner.cell("tuplify", tuplify) + + queue = runner.cell("myqueue", queue) + # The user-facing interface of the `queue` component is assumed to be: + # - input `cmd` + # where each command is a 2-bit unsigned integer, with the following format: + # `0`: pop + # `1`: push + # - input `value` + # which is a 32-bit unsigned integer. If `cmd` is `1`, push this value. + # - ref register `ans`, into which the result of a pop is written. + # - ref register `err`, which is raised if an error occurs. 
+ + commands = runner.seq_mem_d1("commands", 1, num_cmds, 32, is_ref=True) + values = runner.seq_mem_d1("values", 32, num_cmds, 32, is_ref=True) + arrival_cycles = runner.seq_mem_d1("arrival_cycles", 32, num_cmds, 32, is_ref=True) + flows = runner.seq_mem_d1("flows", flow_bits, num_cmds, 32, is_ref=True) + + has_ans = runner.reg(1, "has_ans", is_ref=True) + ans = runner.reg(32, "ans", is_ref=True) + err = runner.reg(1, "err", is_ref=True) + + cycle_counter = runner.reg(32, "cycle_counter", is_ref=True) + i = runner.reg(32, "i", is_ref=True) # Index of the command we're currently processing + wait = runner.reg(1, "wait", is_ref=True) # Flag indicating if the `i`th packet has arrived + + cmd = runner.reg(1) + value = runner.reg(32) + arrival_cycle = runner.reg(32) + flow = runner.reg(flow_bits) + + load_data = [ + runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), + runner.mem_load_d1(values, i.out, value, "write_value"), + runner.mem_load_d1(arrival_cycles, i.out, arrival_cycle, "write_arrival_cycle"), + runner.mem_load_d1(flows, i.out, flow, "write_flow") + ] + + slice = runner.slice("slice", 32, 32 - flow_bits) + with runner.group("package_flow_and_value") as package_flow_and_value: + slice.in_ = value.out + tuplify.fst = flow.out + tuplify.snd = slice.out + + ge = runner.ge(32) + eq = runner.eq(1) + if_or = runner.and_(1) + with runner.comb_group("push_arrival_check") as push_arrival_check: + ge.left = cycle_counter.out + ge.right = arrival_cycles.out + + eq.left = cmd.out + eq.right = 0 + + if_or.left = ge.out + if_or.right = eq.out + + and_ = runner.and_(32) + with runner.group("zero_out_top") as zero_out_top: + and_.left = ans.out + and_.right = cb.const(2**(32 - flow_bits) - 1, 32) + ans.in_ = and_.out + ans.write_en = cb.HI + + runner.control += [ + load_data, + package_flow_and_value, + cb.if_with( + cb.CellAndGroup(if_or, push_arrival_check), + [ + cb.invoke(queue, in_cmd=cmd.out, in_value=tuplify.out, ref_ans=ans, ref_err=err), + cb.if_with( + 
runner.not_use(err.out), + [ + runner.eq_store_in_reg(cmd.out, 0, has_ans)[0], + zero_out_top + ] + ) + ], + runner.reg_store(wait, 1) + ) + ] + + return runner + + +def insert_main(prog, queue, num_cmds, num_flows): + flow_bits = bits_needed(num_flows - 1) + + main = prog.component("main") + + cycle_counter = main.reg(32, "cycle_counter") + cycle_adder = main.add(32) + with main.continuous: + cycle_adder.left = cycle_counter.out + cycle_adder.right = 1 + cycle_counter.in_ = cycle_adder.out + cycle_counter.write_en = cb.HI + + dataplane = insert_runner(prog, queue, "dataplane", num_cmds, num_flows) + dataplane = main.cell("dataplane", dataplane) + + has_ans = main.reg(1) + ans = main.reg(32) + err = main.reg(1) + + commands = main.seq_mem_d1("commands", 1, num_cmds, 32, is_external=True) + values = main.seq_mem_d1("values", 32, num_cmds, 32, is_external=True) + arrival_cycles = main.seq_mem_d1("arrival_cycles", 32, num_cmds, 32, is_external=True) + flows = main.seq_mem_d1("flows", flow_bits, num_cmds, 32, is_external=True) + ans_mem = main.seq_mem_d1("ans_mem", 32, num_cmds, 32, is_external=True) + departure_cycles = main.seq_mem_d1("departure_cycles", 32, num_cmds, 32, is_external=True) + + i = main.reg(32, "i") + wait = main.reg(1, "wait") + + # Lower the has-ans, err, and wait flags + lower_flags = [ + main.reg_store(has_ans, 0, "lower_has_ans"), + main.reg_store(err, 0, "lower_err"), + main.reg_store(wait, 0, "lower_wait") + ] + + main.control += cb.while_with( + main.lt_use(i.out, num_cmds), + [ + lower_flags, + cb.invoke(dataplane, + ref_commands=commands, + ref_values=values, + ref_arrival_cycles=arrival_cycles, + ref_flows=flows, + ref_has_ans=has_ans, + ref_ans=ans, + ref_err=err, + ref_cycle_counter=cycle_counter, + ref_i=i, + ref_wait=wait), + cb.if_( + has_ans.out, + main.mem_store_d1(ans_mem, i.out, ans.out, "write_ans"), + cb.if_( + err.out, + main.mem_store_d1( + ans_mem, + i.out, + cb.const(32, ERR_CODE), + "write_err", + ), + main.mem_store_d1( + 
ans_mem, + i.out, + cb.const(32, PUSH_CODE), + "write_push", + ), + ), + ), + cb.if_with(main.not_use(wait.out), main.incr(i)) # i++ if wait == 0 + ] + ) diff --git a/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py index 30edb2724..980bec738 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.round_robin as rr import queues.flow_inference as fi @@ -16,7 +17,9 @@ prog = cb.Builder() if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [200, 400] flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) From 7a6602901877023f2f6cc91758bc79a613b37ee3 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Wed, 30 Oct 2024 18:10:59 -0400 Subject: [PATCH 11/22] More sketching sim_pcap.py and flow_inference.py --- .../parse_pcap => pcap-sim/parse_pcap.py} | 207 +++++++++++------- frontends/queues/queues/flow_inference.py | 13 +- frontends/queues/queues/sim_pcap.py | 129 +++++------ .../binheap/round_robin/rr_2flow_test.py | 6 +- .../binheap/round_robin/rr_3flow_test.py | 8 +- .../binheap/round_robin/rr_4flow_test.py | 6 +- .../binheap/round_robin/rr_5flow_test.py | 6 +- .../binheap/round_robin/rr_6flow_test.py | 6 +- .../binheap/round_robin/rr_7flow_test.py | 6 +- .../tests/binheap/strict/strict_2flow_test.py | 6 +- .../tests/binheap/strict/strict_3flow_test.py | 6 +- .../tests/binheap/strict/strict_4flow_test.py | 6 +- .../tests/binheap/strict/strict_5flow_test.py | 6 +- .../tests/binheap/strict/strict_6flow_test.py | 6 +- 
.../tests/binheap/strict/strict_7flow_test.py | 6 +- 15 files changed, 250 insertions(+), 173 deletions(-) rename frontends/queues/{pcaps/parse_pcap => pcap-sim/parse_pcap.py} (57%) diff --git a/frontends/queues/pcaps/parse_pcap b/frontends/queues/pcap-sim/parse_pcap.py similarity index 57% rename from frontends/queues/pcaps/parse_pcap rename to frontends/queues/pcap-sim/parse_pcap.py index 301a27a45..68378f5d5 100755 --- a/frontends/queues/pcaps/parse_pcap +++ b/frontends/queues/pcap-sim/parse_pcap.py @@ -1,10 +1,10 @@ -#!/usr/bin/env python3 -# ========================================================================= -# Usage: ./parse_pcap [Option]... -# ========================================================================= +# ============================================================================= +# Usage: python3 parse_pcap.py [Option]... +# ============================================================================= # Arguments: # PCAP Packet Capture to parse -# JSON JSON mapping MAC addresses to integer flows +# Addr2Flow JSON mapping MAC addresses to integer flows +# Out Path to save generated .data file # # Options: # -h --help Display this message @@ -21,18 +21,23 @@ # --pop-tick P Time between consecutive pops in ns # [default: calulated to achieve line rate] # +# --num-flows F No. 
flows +# The flow of a packet is Addr2Flow[Packet Address] mod F +# [default: max value in Addr2Flow + 1] +# # Example: -# ./parse_pcap example.pcap addr2flow.json --num-packets 500 +# python3 parse_pcap.py example.pcap addr2flow.json example.data --num-packets 500 import sys import random import json import dpkt import argparse +from calyx.utils import bits_needed CMD_PUSH = 1 CMD_POP = 0 -DONTCARE = 0xFFFF_FFFF +DONTCARE = 0 CLOCK_PERIOD = 7 # in ns NUM_PKTS = 500 @@ -40,73 +45,6 @@ LINE_RATE = 1 # in Gbit/s -def parse_pcap(pcap_file, addr2flow_json): - global POP_TICK - - pcap = dpkt.pcap.Reader(pcap_file) - addr2flow = json.load(addr2flow_json) - - offset = None - total_size = 0 - for i, (ts, buf) in zip(range(NUM_PKTS), pcap): - if i == 0: - offset = ts - total_size += len(buf) - - if POP_TICK is None: - POP_TICK = int((total_size * 8) // (LINE_RATE * NUM_PKTS)) - - def mac_addr(addr): - return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) - - pcap_file.seek(0) - pcap = dpkt.pcap.Reader(pcap_file) - out = {"commands": [], "arrival_cycles": [], "flows": []} - prev_time = 0 - for i, (ts, buf) in zip(range(NUM_PKTS), pcap): - eth = dpkt.ethernet.Ethernet(buf) - flow = addr2flow[mac_addr(eth.src)] - time = (ts - offset) * 10**9 - cycle = int(time // CLOCK_PERIOD) - - num_pops = int((time - prev_time) // POP_TICK) - out["commands"].extend([CMD_POP] * num_pops) - out["arrival_cycles"].extend([DONTCARE] * num_pops) - out["flows"].extend([DONTCARE] * num_pops) - - out["commands"].append(CMD_PUSH) - out["arrival_cycles"].append(cycle) - out["flows"].append(flow) - - prev_time = time - return out - - -def gen_json(data): - commands = data["commands"] - arrival_cycles = data["arrival_cycles"] - flows = data["flows"] - values = [random.randint(0, 400) for _ in range(len(commands))] - departure_cycles = [0] * len(commands) - - def format_gen(width): - return {"is_signed": False, "numeric_type": "bitnum", "width": width} - - commands = {"commands": {"data": 
commands, "format": format_gen(1)}} - arrival_cycles = { - "arrival_cycles": {"data": arrival_cycles, "format": format_gen(32)} - } - flows = {"flows": {"data": flows, "format": format_gen(32)}} - values = {"values": {"data": values, "format": format_gen(32)}} - departure_cycles = { - "departure_cycles": {"data": departure_cycles, "format": format_gen(32)} - } - - return json.dumps( - commands | arrival_cycles | flows | values | departure_cycles, indent=2 - ) - - class ArgumentParserWithCustomError(argparse.ArgumentParser): def __init__(self): super().__init__(add_help=False) @@ -116,10 +54,8 @@ def error(self, msg=None): print("ERROR: %s" % msg) file = open(sys.argv[0]) for i, line in enumerate(file): - if i == 0: - continue - elif line[0] == "#": - print(line[1:].strip()) + if line[0] == "#": + print(line[2:].rstrip("\n")) else: sys.exit(1 if msg else 0) @@ -129,7 +65,8 @@ def parse_cmdline(): parser.add_argument("-h", "--help", action="store_true") parser.add_argument("PCAP") - parser.add_argument("JSON") + parser.add_argument("Addr2Flow") + parser.add_argument("Out") def check_positive_int(x): try: @@ -149,6 +86,7 @@ def check_positive_int(x): parser.add_argument( "--pop-tick", type=check_positive_int, action="store", default=POP_TICK ) + parser.add_argument("--num-flows", type=check_positive_int, action="store") def check_positive_float(x): try: @@ -169,6 +107,98 @@ def check_positive_float(x): return parser.parse_args() +def parse_pcap(pcap, addr2flow, num_flows): + global POP_TICK + + offset = None + total_size = 0 + for i, (ts, buf) in zip(range(NUM_PKTS), pcap): + if i == 0: + offset = ts + total_size += len(buf) + + if POP_TICK is None: + POP_TICK = int((total_size * 8) // (LINE_RATE * NUM_PKTS)) + + def mac_addr(addr): + return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) + + pcap_file.seek(0) + pcap = dpkt.pcap.Reader(pcap_file) + out = {"commands": [], "arrival_cycles": [], "flows": [], "pkt_ids": []} + prev_time = 0 + pkts_in_switch = 0 
+ for i, (ts, buf) in zip(range(NUM_PKTS), pcap): + time = (ts - offset) * 10**9 + + pop_time = (prev_time % POP_TICK) + prev_time + num_pops = int((time - pop_time) // POP_TICK) if time > pop_time else 0 + pkts_in_switch = 0 if pkts_in_switch < num_pops else pkts_in_switch - num_pops + for _ in range(num_pops): + out["commands"].append(CMD_POP) + + pop_cycle = int(pop_time // CLOCK_PERIOD) + out["arrival_cycles"].append(pop_cycle) + pop_time += POP_TICK + + out["flows"].append(DONTCARE) + out["pkt_ids"].append(DONTCARE) + + eth = dpkt.ethernet.Ethernet(buf) + flow = addr2flow[mac_addr(eth.src)] % num_flows + cycle = int(time // CLOCK_PERIOD) + pkts_in_switch += 1 + + out["commands"].append(CMD_PUSH) + out["arrival_cycles"].append(cycle) + out["flows"].append(flow) + out["pkt_ids"].append(i) + + prev_time = time + + pop_time = (prev_time % POP_TICK) + prev_time + for _ in range(pkts_in_switch): + out["commands"].append(CMD_POP) + + pop_cycle = int(pop_time // CLOCK_PERIOD) + out["arrival_cycles"].append(pop_cycle) + pop_time += POP_TICK + + out["flows"].append(DONTCARE) + out["pkt_ids"].append(DONTCARE) + + return out + + +def dump_json(data, flow_bits, data_file): + commands = data["commands"] + arrival_cycles = data["arrival_cycles"] + flows = data["flows"] + values = data["pkt_ids"] + ans_mem = [0] * len(commands) + departure_cycles = [0] * len(commands) + + def format_gen(width): + return {"is_signed": False, "numeric_type": "bitnum", "width": width} + + commands = {"commands": {"data": commands, "format": format_gen(1)}} + arrival_cycles = { + "arrival_cycles": {"data": arrival_cycles, "format": format_gen(32)} + } + flows = {"flows": {"data": flows, "format": format_gen(flow_bits)}} + values = {"values": {"data": values, "format": format_gen(32)}} + ans_mem = {"ans_mem": {"data": ans_mem, "format": format_gen(32)}} + departure_cycles = { + "departure_cycles": {"data": departure_cycles, "format": format_gen(32)} + } + + json.dump( + commands | values | 
arrival_cycles | flows | ans_mem | departure_cycles, + data_file, + indent=2, + ) + + if __name__ == "__main__": opts = parse_cmdline() @@ -177,7 +207,18 @@ def check_positive_float(x): POP_TICK = opts.pop_tick with open(opts.PCAP, "rb") as pcap_file: - with open(opts.JSON) as addr2flow_json: - data = parse_pcap(pcap_file, addr2flow_json) - json = gen_json(data) - print(json) + with open(opts.Addr2Flow) as addr2flow_json: + pcap = dpkt.pcap.Reader(pcap_file) + addr2flow = json.load(addr2flow_json) + if opts.num_flows is None: + num_flows = max(addr2flow[addr] for addr in addr2flow) + 1 + else: + num_flows = opts.num_flows + data = parse_pcap(pcap, addr2flow, num_flows) + + num_cmds = len(data["commands"]) + print(f'len(data["commands"] = {num_cmds}') + + with open(opts.Out, "w") as data_file: + flow_bits = bits_needed(num_flows - 1) + json = dump_json(data, flow_bits, data_file) diff --git a/frontends/queues/queues/flow_inference.py b/frontends/queues/queues/flow_inference.py index 3bb08d7bd..dc3ff07c2 100644 --- a/frontends/queues/queues/flow_inference.py +++ b/frontends/queues/queues/flow_inference.py @@ -6,7 +6,7 @@ def insert_boundary_flow_inference(prog, name, boundaries): n = len(boundaries) - + comp = prog.component(name) value = comp.input("value", 32) @@ -34,8 +34,8 @@ def insert_boundary_flow_inference(prog, name, boundaries): bound_check = cb.if_with(cb.CellAndGroup(guard, bound_check_b), set_flow_b) bound_checks.append(bound_check) - - comp.control += [ cb.par(*bound_checks) ] + + comp.control += [cb.par(*bound_checks)] return comp @@ -54,6 +54,11 @@ def insert_tuple_flow_inference(prog, name, num_flows): with comp.continuous: untuplify.tup = value - comp.control += [ comp.reg_store(flow, untuplify.fst) ] + comp.control += [ + comp.reg_store(flow, untuplify.fst), + comp.reg_store(flow, flow.out, "nop"), # temporary no-op to get it to run + # question about this on Zulip: + # 
https://calyx.zulipchat.com/#narrow/channel/423433-general/topic/Circular.20Combinational.20Logic/near/479748231 + ] return comp diff --git a/frontends/queues/queues/sim_pcap.py b/frontends/queues/queues/sim_pcap.py index e0fd0825c..9612116ff 100644 --- a/frontends/queues/queues/sim_pcap.py +++ b/frontends/queues/queues/sim_pcap.py @@ -11,7 +11,7 @@ def insert_runner(prog, queue, name, num_cmds, num_flows): """Inserts the component `name` into the program. This will be used to `invoke` the component `queue` and feed it _one command_. """ - flow_bits = bits_needed(num_flows - 1) + flow_bits = bits_needed(num_flows - 1) runner = prog.component(name) @@ -39,10 +39,14 @@ def insert_runner(prog, queue, name, num_cmds, num_flows): err = runner.reg(1, "err", is_ref=True) cycle_counter = runner.reg(32, "cycle_counter", is_ref=True) - i = runner.reg(32, "i", is_ref=True) # Index of the command we're currently processing - wait = runner.reg(1, "wait", is_ref=True) # Flag indicating if the `i`th packet has arrived - - cmd = runner.reg(1) + i = runner.reg( + 32, "i", is_ref=True + ) # Index of the command we're currently processing + try_again = runner.reg( + 1, "try_again", is_ref=True + ) # Flag indicating if the `i`th packet has arrived + + cmd = runner.reg(1) value = runner.reg(32) arrival_cycle = runner.reg(32) flow = runner.reg(flow_bits) @@ -51,62 +55,52 @@ def insert_runner(prog, queue, name, num_cmds, num_flows): runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), runner.mem_load_d1(values, i.out, value, "write_value"), runner.mem_load_d1(arrival_cycles, i.out, arrival_cycle, "write_arrival_cycle"), - runner.mem_load_d1(flows, i.out, flow, "write_flow") + runner.mem_load_d1(flows, i.out, flow, "write_flow"), ] - + slice = runner.slice("slice", 32, 32 - flow_bits) - with runner.group("package_flow_and_value") as package_flow_and_value: + with runner.continuous: slice.in_ = value.out tuplify.fst = flow.out tuplify.snd = slice.out - ge = runner.ge(32) - eq = 
runner.eq(1) - if_or = runner.and_(1) - with runner.comb_group("push_arrival_check") as push_arrival_check: - ge.left = cycle_counter.out - ge.right = arrival_cycles.out - - eq.left = cmd.out - eq.right = 0 - - if_or.left = ge.out - if_or.right = eq.out - and_ = runner.and_(32) with runner.group("zero_out_top") as zero_out_top: and_.left = ans.out - and_.right = cb.const(2**(32 - flow_bits) - 1, 32) + and_.right = cb.const(32, 2 ** (32 - flow_bits) - 1) ans.in_ = and_.out ans.write_en = cb.HI - + zero_out_top.done = ans.done + runner.control += [ load_data, - package_flow_and_value, cb.if_with( - cb.CellAndGroup(if_or, push_arrival_check), + runner.ge_use(cycle_counter.out, arrival_cycle.out), [ - cb.invoke(queue, in_cmd=cmd.out, in_value=tuplify.out, ref_ans=ans, ref_err=err), + cb.invoke( + queue, + in_cmd=cmd.out, + in_value=tuplify.tup, + ref_ans=ans, + ref_err=err, + ), cb.if_with( - runner.not_use(err.out), - [ - runner.eq_store_in_reg(cmd.out, 0, has_ans)[0], - zero_out_top - ] - ) + runner.not_use(err.out), + [runner.eq_store_in_reg(cmd.out, 0, has_ans)[0], zero_out_top], + ), ], - runner.reg_store(wait, 1) - ) + runner.reg_store(try_again, 1), + ), ] return runner def insert_main(prog, queue, num_cmds, num_flows): - flow_bits = bits_needed(num_flows - 1) + flow_bits = bits_needed(num_flows - 1) main = prog.component("main") - + cycle_counter = main.reg(32, "cycle_counter") cycle_adder = main.add(32) with main.continuous: @@ -124,55 +118,68 @@ def insert_main(prog, queue, num_cmds, num_flows): commands = main.seq_mem_d1("commands", 1, num_cmds, 32, is_external=True) values = main.seq_mem_d1("values", 32, num_cmds, 32, is_external=True) - arrival_cycles = main.seq_mem_d1("arrival_cycles", 32, num_cmds, 32, is_external=True) + arrival_cycles = main.seq_mem_d1( + "arrival_cycles", 32, num_cmds, 32, is_external=True + ) flows = main.seq_mem_d1("flows", flow_bits, num_cmds, 32, is_external=True) ans_mem = main.seq_mem_d1("ans_mem", 32, num_cmds, 32, 
is_external=True) - departure_cycles = main.seq_mem_d1("departure_cycles", 32, num_cmds, 32, is_external=True) + departure_cycles = main.seq_mem_d1( + "departure_cycles", 32, num_cmds, 32, is_external=True + ) i = main.reg(32, "i") - wait = main.reg(1, "wait") + try_again = main.reg(1, "try_again") - # Lower the has-ans, err, and wait flags + # Lower the has-ans, err, and try_again flags lower_flags = [ - main.reg_store(has_ans, 0, "lower_has_ans"), + main.reg_store(has_ans, 0, "lower_has_ans"), main.reg_store(err, 0, "lower_err"), - main.reg_store(wait, 0, "lower_wait") + main.reg_store(try_again, 0, "lower_try_again"), ] main.control += cb.while_with( main.lt_use(i.out, num_cmds), [ lower_flags, - cb.invoke(dataplane, - ref_commands=commands, - ref_values=values, - ref_arrival_cycles=arrival_cycles, - ref_flows=flows, - ref_has_ans=has_ans, - ref_ans=ans, - ref_err=err, - ref_cycle_counter=cycle_counter, - ref_i=i, - ref_wait=wait), + cb.invoke( + dataplane, + ref_commands=commands, + ref_values=values, + ref_arrival_cycles=arrival_cycles, + ref_flows=flows, + ref_has_ans=has_ans, + ref_ans=ans, + ref_err=err, + ref_cycle_counter=cycle_counter, + ref_i=i, + ref_try_again=try_again, + ), cb.if_( - has_ans.out, - main.mem_store_d1(ans_mem, i.out, ans.out, "write_ans"), + has_ans.out, + [ + main.mem_store_d1(ans_mem, i.out, ans.out, "write_ans"), + main.mem_store_d1( + departure_cycles, i.out, cycle_counter.out, "write_cycle" + ), + ], cb.if_( - err.out, + err.out, main.mem_store_d1( ans_mem, i.out, cb.const(32, ERR_CODE), - "write_err", + "write_err", ), - main.mem_store_d1( + main.mem_store_d1( ans_mem, i.out, cb.const(32, PUSH_CODE), - "write_push", + "write_push", ), ), ), - cb.if_with(main.not_use(wait.out), main.incr(i)) # i++ if wait == 0 - ] + cb.if_with( + main.not_use(try_again.out), main.incr(i) + ), # i++ if try_again == 0 + ], ) diff --git a/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py 
b/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py index 980bec738..a7e7b6c7d 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_2flow_test.py @@ -3,7 +3,7 @@ import queues.queue_call as qc import queues.sim_pcap as sp import queues.binheap.round_robin as rr -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 2 @@ -22,7 +22,9 @@ sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [200, 400] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py index 96293ca43..34faee6f8 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.round_robin as rr -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 3 @@ -14,12 +14,14 @@ sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - + if sim_pcap: raise Exception("Not Implemented") else: boundaries = [133, 266, 400] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py index 3259802d0..30b8c22ab 100644 --- 
a/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.round_robin as rr -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 4 @@ -19,7 +19,9 @@ raise Exception("Not Implemented") else: boundaries = [100, 200, 300, 400] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py index 0127aea31..969ee225b 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.round_robin as rr -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 5 @@ -19,7 +19,9 @@ raise Exception("Not Implemented") else: boundaries = [80, 160, 240, 320, 400] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py index 78a9aabcf..8e9fc0f14 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import 
queues.binheap.round_robin as rr -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 6 @@ -19,7 +19,9 @@ raise Exception("Not Implemented") else: boundaries = [66, 100, 200, 220, 300, 400] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py index ab84f4d08..fce4dfaa4 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.round_robin as rr -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 7 @@ -19,7 +19,9 @@ raise Exception("Not Implemented") else: boundaries = [50, 100, 150, 200, 250, 300, 400] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/strict/strict_2flow_test.py b/frontends/queues/tests/binheap/strict/strict_2flow_test.py index 36fb2311f..0d62c5be9 100644 --- a/frontends/queues/tests/binheap/strict/strict_2flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_2flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.strict as st -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 2 @@ -20,7 +20,9 @@ else: boundaries = [200, 400] order = [1, 0] - flow_infer = 
fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/strict/strict_3flow_test.py b/frontends/queues/tests/binheap/strict/strict_3flow_test.py index 1290c1a4f..0f976ce80 100644 --- a/frontends/queues/tests/binheap/strict/strict_3flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_3flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.strict as st -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 3 @@ -20,7 +20,9 @@ else: boundaries = [133, 266, 400] order = [1, 2, 0] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/strict/strict_4flow_test.py b/frontends/queues/tests/binheap/strict/strict_4flow_test.py index 8f513dfb2..44540a1d4 100644 --- a/frontends/queues/tests/binheap/strict/strict_4flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_4flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.strict as st -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 4 @@ -20,7 +20,9 @@ else: boundaries = [100, 200, 300, 400] order = [3, 0, 2, 1] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) 
qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/strict/strict_5flow_test.py b/frontends/queues/tests/binheap/strict/strict_5flow_test.py index 27ba4357c..7a6ae1cef 100644 --- a/frontends/queues/tests/binheap/strict/strict_5flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_5flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.strict as st -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 5 @@ -20,7 +20,9 @@ else: boundaries = [80, 160, 240, 320, 400] order = [0, 1, 2, 3, 4] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/strict/strict_6flow_test.py b/frontends/queues/tests/binheap/strict/strict_6flow_test.py index da2933319..df3fcbb2a 100644 --- a/frontends/queues/tests/binheap/strict/strict_6flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_6flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.strict as st -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 6 @@ -20,7 +20,9 @@ else: boundaries = [66, 100, 200, 220, 300, 400] order = [3, 1, 5, 2, 4, 0] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/binheap/strict/strict_7flow_test.py b/frontends/queues/tests/binheap/strict/strict_7flow_test.py index 7eebd97e9..886cca5b2 
100644 --- a/frontends/queues/tests/binheap/strict/strict_7flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_7flow_test.py @@ -2,7 +2,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.binheap.strict as st -import queues.flow_inference as fi +import queues.flow_inference as fi NUMFLOWS = 7 @@ -20,7 +20,9 @@ else: boundaries = [50, 100, 150, 200, 250, 300, 400] order = [0, 1, 2, 3, 4, 5, 6] - flow_infer = fi.insert_boundary_flow_inference(prog, "flow_inference", boundaries) + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) From 31fa876d5dfa44a7bcd71ecd682eac225af8c604 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 4 Nov 2024 15:38:50 -0500 Subject: [PATCH 12/22] Move evaluation scripts to separate directory --- frontends/queues/{ => evaluation}/cycles.sh | 2 +- frontends/queues/evaluation/plot_pcap_sim.py | 16 +++++ .../{plot.py => evaluation/plot_stats.py} | 59 +++++++++---------- .../queues/{ => evaluation}/resources.sh | 2 +- .../tests/binheap/strict/strict_2flow_test.py | 7 ++- 5 files changed, 50 insertions(+), 36 deletions(-) rename frontends/queues/{ => evaluation}/cycles.sh (94%) create mode 100644 frontends/queues/evaluation/plot_pcap_sim.py rename frontends/queues/{plot.py => evaluation/plot_stats.py} (71%) rename frontends/queues/{ => evaluation}/resources.sh (94%) diff --git a/frontends/queues/cycles.sh b/frontends/queues/evaluation/cycles.sh similarity index 94% rename from frontends/queues/cycles.sh rename to frontends/queues/evaluation/cycles.sh index 63d362947..5ea18748d 100755 --- a/frontends/queues/cycles.sh +++ b/frontends/queues/evaluation/cycles.sh @@ -2,7 +2,7 @@ shopt -s globstar -cd "$(dirname "$0")/../.." # move to root +cd "$(dirname "$0")/../../.." 
# move to root declare -a files=(frontends/queues/tests/**/*.py) num_files=${#files[@]} diff --git a/frontends/queues/evaluation/plot_pcap_sim.py b/frontends/queues/evaluation/plot_pcap_sim.py new file mode 100644 index 000000000..1ba8702ab --- /dev/null +++ b/frontends/queues/evaluation/plot_pcap_sim.py @@ -0,0 +1,16 @@ +import os +import json + + +def append_path_prefix(file): + path_to_script = os.path.dirname(__file__) + path_to_file = os.path.join(path_to_script, file) + return path_to_file + + +def parse(file): + out[] = + + +if __name__ == "__main__": + diff --git a/frontends/queues/plot.py b/frontends/queues/evaluation/plot_stats.py similarity index 71% rename from frontends/queues/plot.py rename to frontends/queues/evaluation/plot_stats.py index 44775a209..49c56f7ab 100644 --- a/frontends/queues/plot.py +++ b/frontends/queues/evaluation/plot_stats.py @@ -53,8 +53,6 @@ def draw(data, stat, logic, unit): else: ax.set_ylabel(f"{stat} ({unit})", fontsize=20) - file = "" - if logic == Logic.RR: specialized = ax.scatter( data["specialized"]["round_robin"].keys(), @@ -83,39 +81,36 @@ def draw(data, stat, logic, unit): ax.set_title("Strict Queues", fontweight="bold", fontsize=20) file = append_path_prefix(f"{stat}_strict") - plt.legend( - (specialized, binheap), - ("Specialized (i.e. 
Cassandra style)", "Binary Heap"), - fontsize=12, - ) + plt.legend((specialized, binheap), ("Specialized", "Binary Heap"), fontsize=12) plt.savefig(file) print(f"Generated {file}.png") -# Parse data for round_robin and strict queues -stat = sys.argv[1] -data = {} -if stat == "total_time": - file1 = sys.argv[2] - file2 = sys.argv[3] - - cycle_data = parse("cycles", file1) - slack_data = parse("worst_slack", file2) - - data = cycle_data.copy() - for impl in data.keys(): - for logic in data[impl].keys(): - for flow_no in data[impl][logic].keys(): - cycles = cycle_data[impl][logic][flow_no] - slack = slack_data[impl][logic][flow_no] - data[impl][logic][flow_no] = (1000 * cycles) / (7 - slack) -else: - file = sys.argv[2] - data = parse(stat, file) - -# Draw results -unit = "μs" if stat == "total_time" else None -draw(data, stat, Logic.RR, unit) -draw(data, stat, Logic.STRICT, unit) +if __name__ == "__main__": + # Parse data for round_robin and strict queues + stat = sys.argv[1] + data = {} + if stat == "total_time": + file1 = sys.argv[2] + file2 = sys.argv[3] + + cycle_data = parse("cycles", file1) + slack_data = parse("worst_slack", file2) + + data = cycle_data.copy() + for impl in data.keys(): + for logic in data[impl].keys(): + for flow_no in data[impl][logic].keys(): + cycles = cycle_data[impl][logic][flow_no] + slack = slack_data[impl][logic][flow_no] + data[impl][logic][flow_no] = (1000 * cycles) / (7 - slack) + else: + file = sys.argv[2] + data = parse(stat, file) + + # Draw results + unit = "μs" if stat == "total_time" else None + draw(data, stat, Logic.RR, unit) + draw(data, stat, Logic.STRICT, unit) diff --git a/frontends/queues/resources.sh b/frontends/queues/evaluation/resources.sh similarity index 94% rename from frontends/queues/resources.sh rename to frontends/queues/evaluation/resources.sh index c79dc08fa..661e095d9 100755 --- a/frontends/queues/resources.sh +++ b/frontends/queues/evaluation/resources.sh @@ -7,7 +7,7 @@ if [ "$#" -gt 1 ]; then exit 1 fi 
-cd "$(dirname "$0")/../.." # move to root +cd "$(dirname "$0")/../../.." # move to root declare -a files=(frontends/queues/tests/**/*.py) num_files=${#files[@]} diff --git a/frontends/queues/tests/binheap/strict/strict_2flow_test.py b/frontends/queues/tests/binheap/strict/strict_2flow_test.py index 0d62c5be9..031ae069e 100644 --- a/frontends/queues/tests/binheap/strict/strict_2flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_2flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.strict as st import queues.flow_inference as fi @@ -15,11 +16,13 @@ prog = cb.Builder() + order = [1, 0] if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [200, 400] - order = [1, 0] flow_infer = fi.insert_boundary_flow_inference( prog, "flow_inference", boundaries ) From 2dd295ffd0520e5329ea02acb5f2309b162caf71 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 4 Nov 2024 18:39:44 -0500 Subject: [PATCH 13/22] Pass open files instead of names --- frontends/queues/evaluation/plot_stats.py | 61 ++++++++++++----------- 1 file changed, 31 insertions(+), 30 deletions(-) diff --git a/frontends/queues/evaluation/plot_stats.py b/frontends/queues/evaluation/plot_stats.py index 49c56f7ab..bbf421813 100644 --- a/frontends/queues/evaluation/plot_stats.py +++ b/frontends/queues/evaluation/plot_stats.py @@ -22,24 +22,23 @@ def parse(stat, file): "specialized": {"round_robin": {}, "strict": {}}, } - with open(file) as file: - data = json.load(file) - for file, data in data.items(): - if isinstance(data, dict): - data = data[stat] - - flow_no = file.split("flow")[0][-1] - - if "round_robin" in file: - if "binheap" in file: - out["binheap"]["round_robin"][flow_no] = data - 
else: - out["specialized"]["round_robin"][flow_no] = data - if "strict" in file: - if "binheap" in file: - out["binheap"]["strict"][flow_no] = data - else: - out["specialized"]["strict"][flow_no] = data + data = json.load(file) + for file, data in data.items(): + if isinstance(data, dict): + data = data[stat] + + flow_no = file.split("flow")[0][-1] + + if "round_robin" in file: + if "binheap" in file: + out["binheap"]["round_robin"][flow_no] = data + else: + out["specialized"]["round_robin"][flow_no] = data + if "strict" in file: + if "binheap" in file: + out["binheap"]["strict"][flow_no] = data + else: + out["specialized"]["strict"][flow_no] = data return out @@ -95,20 +94,22 @@ def draw(data, stat, logic, unit): if stat == "total_time": file1 = sys.argv[2] file2 = sys.argv[3] - - cycle_data = parse("cycles", file1) - slack_data = parse("worst_slack", file2) - - data = cycle_data.copy() - for impl in data.keys(): - for logic in data[impl].keys(): - for flow_no in data[impl][logic].keys(): - cycles = cycle_data[impl][logic][flow_no] - slack = slack_data[impl][logic][flow_no] - data[impl][logic][flow_no] = (1000 * cycles) / (7 - slack) + with open(file1) as file1: + with open(file2) as file2: + cycle_data = parse("cycles", file1) + slack_data = parse("worst_slack", file2) + + data = cycle_data.copy() + for impl in data.keys(): + for logic in data[impl].keys(): + for flow_no in data[impl][logic].keys(): + cycles = cycle_data[impl][logic][flow_no] + slack = slack_data[impl][logic][flow_no] + data[impl][logic][flow_no] = (1000 * cycles) / (7 - slack) else: file = sys.argv[2] - data = parse(stat, file) + with open(file) as file: + data = parse(stat, file) # Draw results unit = "μs" if stat == "total_time" else None From 614b028ed025ce7a2316a6229d40b9cbf9e827b5 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Sun, 10 Nov 2024 21:55:43 -0500 Subject: [PATCH 14/22] Various tweaks - finish up plot_pcap_sim.py - move parse_pcap.py to evaluation/ - revert most changes to 
queue_call.py --- .../{pcap-sim => evaluation}/parse_pcap.py | 107 ++++++--- frontends/queues/evaluation/plot_pcap_sim.py | 106 +++++++- frontends/queues/queues/queue_call.py | 226 +++++++++--------- 3 files changed, 282 insertions(+), 157 deletions(-) rename frontends/queues/{pcap-sim => evaluation}/parse_pcap.py (67%) diff --git a/frontends/queues/pcap-sim/parse_pcap.py b/frontends/queues/evaluation/parse_pcap.py similarity index 67% rename from frontends/queues/pcap-sim/parse_pcap.py rename to frontends/queues/evaluation/parse_pcap.py index 68378f5d5..394b38f37 100755 --- a/frontends/queues/pcap-sim/parse_pcap.py +++ b/frontends/queues/evaluation/parse_pcap.py @@ -1,7 +1,8 @@ -# ============================================================================= -# Usage: python3 parse_pcap.py [Option]... -# ============================================================================= -# Arguments: +# Usage: python3 parse_pcap.py [Options]... +# +# Parses PCAP files to generate data files +# +# Positional Arguments: # PCAP Packet Capture to parse # Addr2Flow JSON mapping MAC addresses to integer flows # Out Path to save generated .data file @@ -10,7 +11,7 @@ # -h --help Display this message # # --num-packets N No. packets in PCAP to parse -# [default: 1000] +# [default: 500] # # --clock-period C Clock period of hardware in ns # [default: 7] @@ -21,18 +22,22 @@ # --pop-tick P Time between consecutive pops in ns # [default: calulated to achieve line rate] # +# --addr2int A JSON mapping MAC addresses to non-negative integers +# [default: ith encountered address -> i] +# # --num-flows F No. 
flows -# The flow of a packet is Addr2Flow[Packet Address] mod F -# [default: max value in Addr2Flow + 1] +# The flow of a packet is Addr2Int[Packet Address] mod F +# [default: max value in Addr2Int + 1] # # Example: -# python3 parse_pcap.py example.pcap addr2flow.json example.data --num-packets 500 +# python3 parse_pcap.py example.pcap example.data --addr2int addr2int.json --num-packets 250 import sys import random import json import dpkt import argparse +from contextlib import nullcontext from calyx.utils import bits_needed CMD_PUSH = 1 @@ -45,6 +50,10 @@ LINE_RATE = 1 # in Gbit/s +class UnknownAddress(Exception): + pass + + class ArgumentParserWithCustomError(argparse.ArgumentParser): def __init__(self): super().__init__(add_help=False) @@ -65,7 +74,6 @@ def parse_cmdline(): parser.add_argument("-h", "--help", action="store_true") parser.add_argument("PCAP") - parser.add_argument("Addr2Flow") parser.add_argument("Out") def check_positive_int(x): @@ -86,6 +94,7 @@ def check_positive_int(x): parser.add_argument( "--pop-tick", type=check_positive_int, action="store", default=POP_TICK ) + parser.add_argument("--addr2int", action="store") parser.add_argument("--num-flows", type=check_positive_int, action="store") def check_positive_float(x): @@ -107,25 +116,44 @@ def check_positive_float(x): return parser.parse_args() -def parse_pcap(pcap, addr2flow, num_flows): +def parse_pcap(pcap, addr2int, num_flows): global POP_TICK + def mac_addr(addr): + return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) + offset = None total_size = 0 + make_addr_map = addr2int == None + if make_addr_map: + addr2int = {} + addr_count = 0 for i, (ts, buf) in zip(range(NUM_PKTS), pcap): if i == 0: offset = ts + + eth = dpkt.ethernet.Ethernet(buf) + addr = mac_addr(eth.src) + if addr not in addr2int: + if make_addr_map: + addr2int[addr] = addr_count + addr_count += 1 + else: + raise UnknownAddress( + f"MAC address {addr} for packet {i} not found in Addr2Flow map:\n {addr2int}" + ) 
+ total_size += len(buf) + if num_flows is None: + num_flows = max(addr2int[addr] for addr in addr2int) + 1 + if POP_TICK is None: POP_TICK = int((total_size * 8) // (LINE_RATE * NUM_PKTS)) - def mac_addr(addr): - return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) - pcap_file.seek(0) pcap = dpkt.pcap.Reader(pcap_file) - out = {"commands": [], "arrival_cycles": [], "flows": [], "pkt_ids": []} + data = {"commands": [], "arrival_cycles": [], "flows": [], "pkt_ids": []} prev_time = 0 pkts_in_switch = 0 for i, (ts, buf) in zip(range(NUM_PKTS), pcap): @@ -135,39 +163,41 @@ def mac_addr(addr): num_pops = int((time - pop_time) // POP_TICK) if time > pop_time else 0 pkts_in_switch = 0 if pkts_in_switch < num_pops else pkts_in_switch - num_pops for _ in range(num_pops): - out["commands"].append(CMD_POP) + data["commands"].append(CMD_POP) pop_cycle = int(pop_time // CLOCK_PERIOD) - out["arrival_cycles"].append(pop_cycle) + data["arrival_cycles"].append(pop_cycle) pop_time += POP_TICK - out["flows"].append(DONTCARE) - out["pkt_ids"].append(DONTCARE) + data["flows"].append(DONTCARE) + data["pkt_ids"].append(DONTCARE) eth = dpkt.ethernet.Ethernet(buf) - flow = addr2flow[mac_addr(eth.src)] % num_flows + addr = mac_addr(eth.src) + flow = addr2int[addr] % num_flows cycle = int(time // CLOCK_PERIOD) + pkt_id = i + 1 pkts_in_switch += 1 - out["commands"].append(CMD_PUSH) - out["arrival_cycles"].append(cycle) - out["flows"].append(flow) - out["pkt_ids"].append(i) + data["commands"].append(CMD_PUSH) + data["arrival_cycles"].append(cycle) + data["flows"].append(flow) + data["pkt_ids"].append(pkt_id) prev_time = time pop_time = (prev_time % POP_TICK) + prev_time for _ in range(pkts_in_switch): - out["commands"].append(CMD_POP) + data["commands"].append(CMD_POP) pop_cycle = int(pop_time // CLOCK_PERIOD) - out["arrival_cycles"].append(pop_cycle) + data["arrival_cycles"].append(pop_cycle) pop_time += POP_TICK - out["flows"].append(DONTCARE) - out["pkt_ids"].append(DONTCARE) + 
data["flows"].append(DONTCARE) + data["pkt_ids"].append(DONTCARE) - return out + return data, num_flows, addr2int def dump_json(data, flow_bits, data_file): @@ -207,18 +237,21 @@ def format_gen(width): POP_TICK = opts.pop_tick with open(opts.PCAP, "rb") as pcap_file: - with open(opts.Addr2Flow) as addr2flow_json: + with ( + nullcontext() if opts.addr2int is None else open(opts.addr2int) + ) as addr2int_json: pcap = dpkt.pcap.Reader(pcap_file) - addr2flow = json.load(addr2flow_json) - if opts.num_flows is None: - num_flows = max(addr2flow[addr] for addr in addr2flow) + 1 - else: - num_flows = opts.num_flows - data = parse_pcap(pcap, addr2flow, num_flows) + addr2int = None if addr2int_json is None else json.load(addr2int_json) + num_flows = opts.num_flows - num_cmds = len(data["commands"]) - print(f'len(data["commands"] = {num_cmds}') + data, num_flows, addr2int = parse_pcap(pcap, addr2int, num_flows) with open(opts.Out, "w") as data_file: flow_bits = bits_needed(num_flows - 1) json = dump_json(data, flow_bits, data_file) + + print(f"Number of commands = {len(data['commands'])}") + print("Addresses to flows:") + for addr in addr2int: + print(f"\t{addr} -> {addr2int[addr] % num_flows}") + print(f"Pop tick = {POP_TICK} ns or {POP_TICK / CLOCK_PERIOD} cycles") diff --git a/frontends/queues/evaluation/plot_pcap_sim.py b/frontends/queues/evaluation/plot_pcap_sim.py index 1ba8702ab..48a2e6ab2 100644 --- a/frontends/queues/evaluation/plot_pcap_sim.py +++ b/frontends/queues/evaluation/plot_pcap_sim.py @@ -1,5 +1,42 @@ import os +import sys import json +import numpy as np +import matplotlib.pyplot as plt +from matplotlib.patches import Patch + +CLOCK_PERIOD = 7 # in ns +NUM_FLOWS = 2 + + +class OutOfColors(Exception): + pass + + +class Packet: + def __init__(self, id, flow, punch_in, punch_out=None): + self.id = id + self.flow = flow + self.punch_in = punch_in + self.punch_out = punch_out + + def color(self): + colors = [ + "red", + "skyblue", + "forestgreen", + 
"lightsalmon", + "dodgerblue", + "darkseagreen", + "orchid", + ] + if self.flow < len(colors): + return colors[self.flow] + + raise OutOfColors(f"No color for flow {self.flow}; Extend the colors list!") + + def __str__(self): + return f"({self.id}, {self.flow}, {self.punch_in}, {self.punch_out})" def append_path_prefix(file): @@ -9,8 +46,73 @@ def append_path_prefix(file): def parse(file): - out[] = + data = json.load(file) + packets = [] + for i, cmd in enumerate(data["commands"]): + if cmd == 0: + continue -if __name__ == "__main__": + id = data["values"][i] + flow = data["flows"][i] + punch_in = data["arrival_cycles"][i] * CLOCK_PERIOD + if id in data["ans_mem"]: + j = data["ans_mem"].index(id) + punch_out = data["departure_cycles"][j] * CLOCK_PERIOD + pkt = Packet(id, flow, punch_in, punch_out) + else: + pkt = Packet(id, flow, punch_in) + packets += [pkt] + packets.sort( + key=lambda p: float("inf") if p.punch_out is None else float(p.punch_out) + ) + + return packets + + +def draw(packets, name): + fig, ax = plt.subplots(1, 1) + fig.set_size_inches(20, 10, forward=True) + ax.set_ylim(0, len(packets)) + ax.axes.yaxis.set_visible(False) + + patches = [] + labels = [] + for i, pkt in enumerate(packets): + color = pkt.color() + if pkt.punch_out is not None: + treetime = pkt.punch_out - pkt.punch_in + handle = ax.broken_barh( + [(pkt.punch_in, treetime)], (i, 1), facecolors=color + ) + + label = f"Flow {pkt.flow}" + if label not in labels: + patches += [Patch(color=color)] + labels += [label] + else: + treetime = 0 + ax.broken_barh([(pkt.punch_in, treetime)], (i, 1), facecolors=color) + ax.text( + x=pkt.punch_in + 0.2, + y=i + 0.7, + s="OVERFLOW", + color="black", + fontsize="x-small", + ) + ax.invert_yaxis() + ax.legend(handles=patches, labels=labels) + + file = append_path_prefix(name) + plt.savefig(file) + print(f"Generated {file}") + + +if __name__ == "__main__": + file = sys.argv[1] + basename = os.path.basename(file) + with open(file) as file: + packets = 
parse(file) + name = basename.split(".")[0] + ".png" + draw(packets, name) diff --git a/frontends/queues/queues/queue_call.py b/frontends/queues/queues/queue_call.py index b1c4eee14..2a8b71f86 100644 --- a/frontends/queues/queues/queue_call.py +++ b/frontends/queues/queues/queue_call.py @@ -21,16 +21,15 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): `1`: push - 2: `values`, a list of values. Where each value is a 32-bit unsigned integer. - The value at `i` is pushed if the command at `i` is `2`. + The value at `i` is pushed if the command at `i` is `1`. - 3: `ranks`, a list of ranks. [optional] Where each rank is a 32-bit unsigned integer. - The value at `i` is pushed with the rank at `i` if the command at `i` is `2`. - - 4: `cycles`, a list of cycles. - - 5: `has_ans`, a 1-bit unsigned integer. + The value at `i` is pushed with the rank at `i` if the command at `i` is `1`. + - 4: `has_ans`, a 1-bit unsigned integer. We raise/lower this to indicate whether the queue had a reply to the command. - - 6: `component_ans`, a 32-bit unsigned integer. + - 5: `component_ans`, a 32-bit unsigned integer. We put in this register the answer, if any. - - 7: `component_err`, a 1-bit unsigned integer. + - 6: `component_err`, a 1-bit unsigned integer. We raise/lower it to indicate whether an error occurred. """ assert ( @@ -41,9 +40,11 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): # We take a stats component by reference, # but all we'll really do with it is pass it to the queue component. - stats_cell = None - if stats_component: - stats_cell = runner.cell("stats_runner", stats_component, is_ref=True) + stats_cell = ( + runner.cell("stats_runner", stats_component, is_ref=True) + if stats_component + else None + ) # We'll invoke the queue component. 
queue = runner.cell("myqueue", queue) @@ -54,17 +55,15 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): # `1`: push # - input `value` # which is a 32-bit unsigned integer. If `cmd` is `1`, push this value. - # - input `rank` [optional] - # which is a 32-bit unsigned integer. If `cmd` is `1`, push `value` with this rank. # - ref register `ans`, into which the result of a pop is written. # - ref register `err`, which is raised if an error occurs. # Our memories and registers, all of which are passed to us by reference. commands = runner.seq_mem_d1("commands", 1, num_cmds, 32, is_ref=True) values = runner.seq_mem_d1("values", 32, num_cmds, 32, is_ref=True) - ranks = None - if use_ranks: - ranks = runner.seq_mem_d1("ranks", 32, num_cmds, 32, is_ref=True) + ranks = ( + runner.seq_mem_d1("ranks", 32, num_cmds, 32, is_ref=True) if use_ranks else None + ) has_ans = runner.reg(1, "has_ans", is_ref=True) ans = runner.reg(32, "component_ans", is_ref=True) err = runner.reg(1, "component_err", is_ref=True) @@ -72,64 +71,60 @@ def insert_runner(prog, queue, name, num_cmds, use_ranks, stats_component=None): i = runner.reg(32) # The index of the command we're currently processing cmd = runner.reg(1) # The command we're currently processing value = runner.reg(32) # The value we're currently processing - rank = None # The rank we're currently processing - if use_ranks: - rank = runner.reg(32) - - load_data = [ - # `cmd := commands[i]`, `value := values[i]` - runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), - runner.mem_load_d1(values, i.out, value, "write_value"), - ] - if use_ranks: - # `cmd := commands[i]`, `value := values[i]`, `rank := ranks[i]` - load_data += [runner.mem_load_d1(ranks, i.out, rank, "write_rank")] + rank = runner.reg(32) # The rank we're currently processing - # Invoke the queue without stats or ranks. 
- invoke_queue = cb.invoke( - queue, in_cmd=cmd.out, in_value=value.out, ref_ans=ans, ref_err=err + load_data = ( + [ # `cmd := commands[i]`, `value := values[i]`, `rank := ranks[i]` + runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), + runner.mem_load_d1(values, i.out, value, "write_value"), + runner.mem_load_d1(ranks, i.out, rank, "write_rank"), + ] + if use_ranks + else [ # `cmd := commands[i]`, `value := values[i]` + runner.mem_load_d1(commands, i.out, cmd, "write_cmd"), + runner.mem_load_d1(values, i.out, value, "write_value"), + ] ) - if stats_component and use_ranks: - # with ranks and a stats component. - invoke_queue = cb.invoke( - queue, - in_cmd=cmd.out, - in_value=value.out, - in_rank=rank.out, - ref_ans=ans, - ref_err=err, - ref_stats=stats_cell, - ) - elif stats_component: - # with only a stats component. - invoke_queue = cb.invoke( - queue, - in_cmd=cmd.out, - in_value=value.out, - ref_ans=ans, - ref_err=err, - ref_stats=stats_cell, - ) - elif use_ranks: - # with only ranks. - invoke_queue = cb.invoke( - queue, - in_cmd=cmd.out, - in_value=value.out, - in_rank=rank.out, - ref_ans=ans, - ref_err=err, - ) runner.control += [ load_data, - invoke_queue, + ( + cb.invoke( # Invoke the queue with a stats component. + queue, + in_cmd=cmd.out, + in_value=value.out, + ref_ans=ans, + ref_err=err, + ref_stats=stats_cell, + ) + if stats_component + else ( + cb.invoke( # Invoke the queue with ranks. + queue, + in_cmd=cmd.out, + in_value=value.out, + in_rank=rank.out, + ref_ans=ans, + ref_err=err, + ) + if use_ranks + else cb.invoke( # Invoke the queue without stats or ranks. + queue, + in_cmd=cmd.out, + in_value=value.out, + ref_ans=ans, + ref_err=err, + ) + ) + ), # We're back from the invoke, and it's time for some post-mortem analysis. cb.if_with( runner.not_use(err.out), # If there was no error - # If cmd = 0, meaning cmd is pop, raise the `has_ans` flag. 
- # has_ans := cmd == 0 - runner.eq_store_in_reg(cmd.out, 0, has_ans)[0], + [ + # If cmd = 1, meaning cmd is pop, raise the `has_ans` flag. + # Otherwise, lower the `has_ans` flag. + runner.eq_store_in_reg(cmd.out, 0, has_ans)[0] + ], ), runner.incr(i), # i++ ] @@ -168,15 +163,17 @@ def insert_main( commands = main.seq_mem_d1("commands", 1, num_cmds, 32, is_external=True) values = main.seq_mem_d1("values", 32, num_cmds, 32, is_external=True) ans_mem = main.seq_mem_d1("ans_mem", 32, num_cmds, 32, is_external=True) - ranks = None - if use_ranks: - ranks = main.seq_mem_d1("ranks", 32, num_cmds, 32, is_external=True) + ranks = ( + main.seq_mem_d1("ranks", 32, num_cmds, 32, is_external=True) + if use_ranks + else None + ) i = main.reg(32) # A counter for how many times we have invoked the dataplane. keep_looping = main.and_(1) # If this is high, we keep going. Otherwise, we stop. lt = main.lt(32) not_err = main.not_(1) - with main.comb_group("compute_keep_looping") as compute_keep_looping: + with main.comb_group("Compute_keep_looping") as compute_keep_looping: # The condition to keep looping is: # The index `i` is less than the number of commands `num_cmds` # AND @@ -187,54 +184,6 @@ def insert_main( keep_looping.left = lt.out keep_looping.right = cb.HI if keepgoing else not_err.out - # Invoke the dataplane component without stats or ranks. - invoke_dataplane = cb.invoke( - dataplane, - ref_commands=commands, - ref_values=values, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ) - if stats_component and use_ranks: - # with ranks and a stats component. - invoke_dataplane = cb.invoke( - dataplane, - ref_commands=commands, - ref_values=values, - ref_ranks=ranks, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ref_stats_runner=stats, - ) - elif stats_component: - # with only a stats component. 
- invoke_dataplane = cb.invoke( - dataplane, - ref_commands=commands, - ref_values=values, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ref_stats_runner=stats, - ) - elif use_ranks: - # with only ranks. - invoke_dataplane = cb.invoke( - dataplane, - ref_commands=commands, - ref_values=values, - ref_ranks=ranks, - ref_has_ans=has_ans, - ref_component_ans=dataplane_ans, - ref_component_err=dataplane_err, - ) - - invoke_controller = Empty - if controller is not None: - invoke_controller = cb.invoke(controller, ref_stats_controller=stats) - main.control += cb.while_with( # We will run the dataplane and controller components in sequence, # in a while loop. The loop will terminate when `break_` has a value of `1`. @@ -242,7 +191,41 @@ def insert_main( [ main.reg_store(has_ans, 0, "lower_has_ans"), # Lower the has-ans flag. main.reg_store(dataplane_err, 0, "lower_err"), # Lower the has-err flag. - invoke_dataplane, + ( + cb.invoke( + # Invoke the dataplane component with a stats component. + dataplane, + ref_commands=commands, + ref_values=values, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ref_stats_runner=stats, + ) + if stats_component + else ( + cb.invoke( + # Invoke the dataplane component with ranks. + dataplane, + ref_commands=commands, + ref_values=values, + ref_ranks=ranks, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ) + if use_ranks + else cb.invoke( + # Invoke the dataplane component without stats or ranks. + dataplane, + ref_commands=commands, + ref_values=values, + ref_has_ans=has_ans, + ref_component_ans=dataplane_ans, + ref_component_err=dataplane_err, + ) + ) + ), # If the dataplane component has an answer, # write it to the answer-list and increment the index `i`. cb.if_( @@ -264,7 +247,14 @@ def insert_main( ), ), ), - invoke_controller, + ( + cb.invoke( # Invoke the controller component. 
+ controller, + ref_stats_controller=stats, + ) + if controller + else Empty + ), main.incr(i), # i++ ], ) From 4b84482249541443b323dae8e8284915db7bc70e Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 11 Nov 2024 18:51:21 -0500 Subject: [PATCH 15/22] Add --start and --end options to parse_pcap.py --- frontends/queues/evaluation/parse_pcap.py | 149 ++++++++++++++-------- 1 file changed, 99 insertions(+), 50 deletions(-) diff --git a/frontends/queues/evaluation/parse_pcap.py b/frontends/queues/evaluation/parse_pcap.py index 394b38f37..dbdcc2aef 100755 --- a/frontends/queues/evaluation/parse_pcap.py +++ b/frontends/queues/evaluation/parse_pcap.py @@ -4,19 +4,21 @@ # # Positional Arguments: # PCAP Packet Capture to parse -# Addr2Flow JSON mapping MAC addresses to integer flows # Out Path to save generated .data file # # Options: # -h --help Display this message # -# --num-packets N No. packets in PCAP to parse -# [default: 500] +# --start S Start parsing from packet number S (0-indexed) +# [default: 0] # -# --clock-period C Clock period of hardware in ns +# --end E Stop parsing at packet number E - 1 (0-indexed) +# [default: last packet in PCAP] +# +# --clock-period C Clock period of the hardware in ns # [default: 7] # -# --line-rate L Target line rate for pop frequency calculation in Gbit/s +# --line-rate L Target line rate for the pop frequency calculation in Gbit/s # [default: 1] # # --pop-tick P Time between consecutive pops in ns @@ -30,7 +32,7 @@ # [default: max value in Addr2Int + 1] # # Example: -# python3 parse_pcap.py example.pcap example.data --addr2int addr2int.json --num-packets 250 +# python3 parse_pcap.py example.pcap example.data --start 10 --end 20 --num-flows 3 import sys import random @@ -45,15 +47,30 @@ DONTCARE = 0 CLOCK_PERIOD = 7 # in ns -NUM_PKTS = 500 -POP_TICK = None # in ns LINE_RATE = 1 # in Gbit/s +START = 0 + +POP_TICK = None # in ns +ADDR2INT = None +NUM_FLOWS = None +END = None + +PKTS_PER_SEC = None +BITS_PER_SEC = None class 
UnknownAddress(Exception): pass +class OutOfBounds(Exception): + pass + + +class InvalidRange(Exception): + pass + + class ArgumentParserWithCustomError(argparse.ArgumentParser): def __init__(self): super().__init__(add_help=False) @@ -86,8 +103,9 @@ def check_positive_int(x): return x parser.add_argument( - "--num-packets", type=check_positive_int, action="store", default=NUM_PKTS + "--start", type=check_positive_int, action="store", default=START ) + parser.add_argument("--end", type=check_positive_int, action="store") parser.add_argument( "--clock-period", type=check_positive_int, action="store", default=CLOCK_PERIOD ) @@ -116,48 +134,78 @@ def check_positive_float(x): return parser.parse_args() -def parse_pcap(pcap, addr2int, num_flows): +def parse_pcap(pcap_file): global POP_TICK + global ADDR2INT + global NUM_FLOWS + global END + + global PKTS_PER_SEC + global BITS_PER_SEC def mac_addr(addr): return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) - offset = None + pcap = dpkt.pcap.Reader(pcap_file) + + star_ts = None + end_ts = None total_size = 0 - make_addr_map = addr2int == None - if make_addr_map: - addr2int = {} - addr_count = 0 - for i, (ts, buf) in zip(range(NUM_PKTS), pcap): - if i == 0: - offset = ts + make_addr_map = ADDR2INT is None + ADDR2INT = {} if ADDR2INT is None else ADDR2INT + addr_count, pkt_count = 0, 0 + for i, (ts, buf) in enumerate(pcap): + if i < START: + continue + elif i == START: + start_ts = ts + elif END is not None and i >= END: + break eth = dpkt.ethernet.Ethernet(buf) addr = mac_addr(eth.src) - if addr not in addr2int: + if addr not in ADDR2INT: if make_addr_map: - addr2int[addr] = addr_count + ADDR2INT[addr] = addr_count addr_count += 1 else: raise UnknownAddress( - f"MAC address {addr} for packet {i} not found in Addr2Flow map:\n {addr2int}" + f"MAC address {addr} for packet {i} not found in Addr2Int map" ) total_size += len(buf) + pkt_count += 1 + end_ts = ts + END = START + pkt_count if END is None else END - if 
num_flows is None: - num_flows = max(addr2int[addr] for addr in addr2int) + 1 + if start_ts is None: + raise OutOfBounds(f"Index {START} out of bounds for PCAP {pcap_file.name}") + elif START >= END: + raise InvalidRange(f"Start index {START} >= end index {END}") + + total_time = end_ts - start_ts + PKTS_PER_SEC = (END - START) / total_time + BITS_PER_SEC = (total_size * 8) / total_time + + if NUM_FLOWS is None: + NUM_FLOWS = max(ADDR2INT[addr] for addr in ADDR2INT) + 1 if POP_TICK is None: - POP_TICK = int((total_size * 8) // (LINE_RATE * NUM_PKTS)) + POP_TICK = int((total_size * 8) // (LINE_RATE * (END - START))) pcap_file.seek(0) pcap = dpkt.pcap.Reader(pcap_file) + data = {"commands": [], "arrival_cycles": [], "flows": [], "pkt_ids": []} prev_time = 0 pkts_in_switch = 0 - for i, (ts, buf) in zip(range(NUM_PKTS), pcap): - time = (ts - offset) * 10**9 + for i, (ts, buf) in enumerate(pcap): + if i < START: + continue + elif i >= END: + break + + time = (ts - start_ts) * 10**9 pop_time = (prev_time % POP_TICK) + prev_time num_pops = int((time - pop_time) // POP_TICK) if time > pop_time else 0 @@ -174,7 +222,7 @@ def mac_addr(addr): eth = dpkt.ethernet.Ethernet(buf) addr = mac_addr(eth.src) - flow = addr2int[addr] % num_flows + flow = ADDR2INT[addr] % NUM_FLOWS cycle = int(time // CLOCK_PERIOD) pkt_id = i + 1 pkts_in_switch += 1 @@ -197,10 +245,10 @@ def mac_addr(addr): data["flows"].append(DONTCARE) data["pkt_ids"].append(DONTCARE) - return data, num_flows, addr2int + return data -def dump_json(data, flow_bits, data_file): +def dump_json(data, data_file): commands = data["commands"] arrival_cycles = data["arrival_cycles"] flows = data["flows"] @@ -215,7 +263,7 @@ def format_gen(width): arrival_cycles = { "arrival_cycles": {"data": arrival_cycles, "format": format_gen(32)} } - flows = {"flows": {"data": flows, "format": format_gen(flow_bits)}} + flows = {"flows": {"data": flows, "format": format_gen(bits_needed(NUM_FLOWS - 1))}} values = {"values": {"data": values, 
"format": format_gen(32)}} ans_mem = {"ans_mem": {"data": ans_mem, "format": format_gen(32)}} departure_cycles = { @@ -232,26 +280,27 @@ def format_gen(width): if __name__ == "__main__": opts = parse_cmdline() + pcap_file = open(opts.PCAP, "rb") + addr2int_json = None if opts.addr2int is None else open(opts.addr2int) + + ADDR2INT = None if addr2int_json is None else json.load(addr2int_json) CLOCK_PERIOD = opts.clock_period - NUM_PKTS = opts.num_packets POP_TICK = opts.pop_tick + NUM_FLOWS = opts.num_flows + START = opts.start + END = opts.end + + data = parse_pcap(pcap_file) + + data_file = open(opts.Out, "w") + dump_json(data, data_file) + + stats = {} + stats["num_cmds"] = len(data["commands"]) + stats["num_flows"] = NUM_FLOWS + stats["addr2flow"] = {addr: ADDR2INT[addr] % NUM_FLOWS for addr in ADDR2INT} + stats["pop_tick_ns"] = POP_TICK + stats["pkts_per_sec"] = PKTS_PER_SEC + stats["bits_per_sec"] = BITS_PER_SEC - with open(opts.PCAP, "rb") as pcap_file: - with ( - nullcontext() if opts.addr2int is None else open(opts.addr2int) - ) as addr2int_json: - pcap = dpkt.pcap.Reader(pcap_file) - addr2int = None if addr2int_json is None else json.load(addr2int_json) - num_flows = opts.num_flows - - data, num_flows, addr2int = parse_pcap(pcap, addr2int, num_flows) - - with open(opts.Out, "w") as data_file: - flow_bits = bits_needed(num_flows - 1) - json = dump_json(data, flow_bits, data_file) - - print(f"Number of commands = {len(data['commands'])}") - print("Addresses to flows:") - for addr in addr2int: - print(f"\t{addr} -> {addr2int[addr] % num_flows}") - print(f"Pop tick = {POP_TICK} ns or {POP_TICK / CLOCK_PERIOD} cycles") + print(json.dumps(stats, indent=2)) From b3c8941a95023f6be3924891452bbd032d9f4cd0 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 11 Nov 2024 18:51:56 -0500 Subject: [PATCH 16/22] Add CalyxPy state to fud2 --- fud2/scripts/calyx_py-to-calyx.rhai | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 
fud2/scripts/calyx_py-to-calyx.rhai diff --git a/fud2/scripts/calyx_py-to-calyx.rhai b/fud2/scripts/calyx_py-to-calyx.rhai new file mode 100644 index 000000000..ba4416b19 --- /dev/null +++ b/fud2/scripts/calyx_py-to-calyx.rhai @@ -0,0 +1,10 @@ +import "calyx" as c; + +export const calyx_py = state("calyx_py", ["py"]); + +fn calyx_py_setup(e) { + e.config_var_or("py_args", "py.args", ""); + e.rule("calyx_py-to-calyx", "python3 $in $py_args > $out"); +} + +rule([calyx_py_setup], calyx_py, c::calyx_state, "calyx_py-to-calyx"); From 33a93d705b88d56557c89b96323e86f2aea628ed Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Mon, 11 Nov 2024 19:43:50 -0500 Subject: [PATCH 17/22] Fix fud2 tests --- fud2/tests/snapshots/tests__list_ops.snap | 6 ++++++ fud2/tests/snapshots/tests__list_states.snap | 2 ++ .../tests__test@plan_calyx_py-to-calyx.snap | 16 ++++++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 fud2/tests/snapshots/tests__test@plan_calyx_py-to-calyx.snap diff --git a/fud2/tests/snapshots/tests__list_ops.snap b/fud2/tests/snapshots/tests__list_ops.snap index ed1652224..8f9003535 100644 --- a/fud2/tests/snapshots/tests__list_ops.snap +++ b/fud2/tests/snapshots/tests__list_ops.snap @@ -1,5 +1,6 @@ --- source: fud2/tests/tests.rs +assertion_line: 251 --- [ ( @@ -37,6 +38,11 @@ source: fud2/tests/tests.rs "calyx", "yxi", ), + ( + "calyx_py-to-calyx", + "calyx_py", + "calyx", + ), ( "cider", "cider", diff --git a/fud2/tests/snapshots/tests__list_states.snap b/fud2/tests/snapshots/tests__list_states.snap index 8872d6c33..de09a7c58 100644 --- a/fud2/tests/snapshots/tests__list_states.snap +++ b/fud2/tests/snapshots/tests__list_states.snap @@ -1,8 +1,10 @@ --- source: fud2/tests/tests.rs +assertion_line: 229 --- [ "calyx", + "calyx_py", "cider", "cider-debug", "cocotb-axi", diff --git a/fud2/tests/snapshots/tests__test@plan_calyx_py-to-calyx.snap b/fud2/tests/snapshots/tests__test@plan_calyx_py-to-calyx.snap new file mode 100644 index 000000000..a2879431e 
--- /dev/null +++ b/fud2/tests/snapshots/tests__test@plan_calyx_py-to-calyx.snap @@ -0,0 +1,16 @@ +--- +source: fud2/tests/tests.rs +assertion_line: 66 +description: "emit plan: calyx_py-to-calyx" +--- +build-tool = fud2 +rule get-rsrc + command = $build-tool get-rsrc $out + +py_args = +rule calyx_py-to-calyx + command = python3 $in $py_args > $out + +build /output.ext: calyx_py-to-calyx /input.ext + +default /output.ext From 90a59af71a54ff48fac5845f4d5fe23a58995e85 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Tue, 12 Nov 2024 12:47:53 -0500 Subject: [PATCH 18/22] Add pkts/bits per sec stats to parse_pcap.py --- frontends/queues/evaluation/parse_pcap.py | 53 +++++++++++++---------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/frontends/queues/evaluation/parse_pcap.py b/frontends/queues/evaluation/parse_pcap.py index dbdcc2aef..64104413f 100755 --- a/frontends/queues/evaluation/parse_pcap.py +++ b/frontends/queues/evaluation/parse_pcap.py @@ -93,39 +93,48 @@ def parse_cmdline(): parser.add_argument("PCAP") parser.add_argument("Out") - def check_positive_int(x): + def error(s): + raise argparse.ArgumentTypeError(s) + + def nonnegative_int(x): try: x = int(x) - if x <= 0: - raise argparse.ArgumentTypeError(f"{x} is not a positive integer") + if x < 0: + raise error(f"{x} is not a non-negative integer") except ValueError: - raise argparse.ArgumentTypeError(f"{x} is not an integer") + raise error(f"{x} is not an integer") return x - parser.add_argument( - "--start", type=check_positive_int, action="store", default=START - ) - parser.add_argument("--end", type=check_positive_int, action="store") - parser.add_argument( - "--clock-period", type=check_positive_int, action="store", default=CLOCK_PERIOD - ) - parser.add_argument( - "--pop-tick", type=check_positive_int, action="store", default=POP_TICK - ) - parser.add_argument("--addr2int", action="store") - parser.add_argument("--num-flows", type=check_positive_int, action="store") + def 
positive_int(x): + try: + x = int(x) + if x <= 0: + raise error(f"{x} is not a positive integer") + except ValueError: + raise error(f"{x} is not an integer") + return x - def check_positive_float(x): + def positive_float(x): try: x = float(x) if x <= 0: - raise argparse.ArgumentTypeError(f"{x} is not a positive float") + raise error(f"{x} is not a positive float") except ValueError: - raise argparse.ArgumentTypeError(f"{x} is not a float") + raise error(f"{x} is not a float") return x + parser.add_argument("--start", type=nonnegative_int, action="store", default=START) + parser.add_argument("--end", type=positive_int, action="store") + parser.add_argument( + "--clock-period", type=positive_int, action="store", default=CLOCK_PERIOD + ) + parser.add_argument( + "--pop-tick", type=positive_int, action="store", default=POP_TICK + ) + parser.add_argument("--addr2int", action="store") + parser.add_argument("--num-flows", type=positive_int, action="store") parser.add_argument( - "--line-rate", type=check_positive_float, action="store", default=LINE_RATE + "--line-rate", type=positive_float, action="store", default=LINE_RATE ) if "-h" in sys.argv or "--help" in sys.argv: @@ -184,8 +193,8 @@ def mac_addr(addr): raise InvalidRange(f"Start index {START} >= end index {END}") total_time = end_ts - start_ts - PKTS_PER_SEC = (END - START) / total_time - BITS_PER_SEC = (total_size * 8) / total_time + PKTS_PER_SEC = float("inf") if total_time == 0 else (END - START) / total_time + BITS_PER_SEC = float("inf") if total_time == 0 else (total_size * 8) / total_time if NUM_FLOWS is None: NUM_FLOWS = max(ADDR2INT[addr] for addr in ADDR2INT) + 1 From f3300852968f07bec066773cde6357f66e6a9234 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Tue, 12 Nov 2024 16:47:07 -0500 Subject: [PATCH 19/22] Add more PCAP sim logic for binheap rr and strict --- .../queues/tests/binheap/round_robin/rr_3flow_test.py | 5 ++++- .../queues/tests/binheap/round_robin/rr_4flow_test.py | 5 ++++- 
.../queues/tests/binheap/round_robin/rr_5flow_test.py | 5 ++++- .../queues/tests/binheap/round_robin/rr_6flow_test.py | 5 ++++- .../queues/tests/binheap/round_robin/rr_7flow_test.py | 5 ++++- frontends/queues/tests/binheap/strict/strict_3flow_test.py | 7 +++++-- frontends/queues/tests/binheap/strict/strict_4flow_test.py | 7 +++++-- frontends/queues/tests/binheap/strict/strict_5flow_test.py | 7 +++++-- frontends/queues/tests/binheap/strict/strict_6flow_test.py | 7 +++++-- frontends/queues/tests/binheap/strict/strict_7flow_test.py | 7 +++++-- 10 files changed, 45 insertions(+), 15 deletions(-) diff --git a/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py index 34faee6f8..36e66a943 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_3flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.round_robin as rr import queues.flow_inference as fi @@ -16,7 +17,9 @@ prog = cb.Builder() if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [133, 266, 400] flow_infer = fi.insert_boundary_flow_inference( diff --git a/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py index 30b8c22ab..b940d0064 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_4flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.round_robin as rr import queues.flow_inference as fi @@ -16,7 +17,9 @@ prog = cb.Builder() if sim_pcap: - 
raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [100, 200, 300, 400] flow_infer = fi.insert_boundary_flow_inference( diff --git a/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py index 969ee225b..4ca894d53 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_5flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.round_robin as rr import queues.flow_inference as fi @@ -16,7 +17,9 @@ prog = cb.Builder() if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [80, 160, 240, 320, 400] flow_infer = fi.insert_boundary_flow_inference( diff --git a/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py index 8e9fc0f14..ee140cdbf 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_6flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.round_robin as rr import queues.flow_inference as fi @@ -16,7 +17,9 @@ prog = cb.Builder() if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [66, 100, 200, 220, 
300, 400] flow_infer = fi.insert_boundary_flow_inference( diff --git a/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py b/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py index fce4dfaa4..2d51d6015 100644 --- a/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py +++ b/frontends/queues/tests/binheap/round_robin/rr_7flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.round_robin as rr import queues.flow_inference as fi @@ -16,7 +17,9 @@ prog = cb.Builder() if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = rr.insert_binheap_rr(prog, "pifo", NUMFLOWS, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [50, 100, 150, 200, 250, 300, 400] flow_infer = fi.insert_boundary_flow_inference( diff --git a/frontends/queues/tests/binheap/strict/strict_3flow_test.py b/frontends/queues/tests/binheap/strict/strict_3flow_test.py index 0f976ce80..30ab49b6f 100644 --- a/frontends/queues/tests/binheap/strict/strict_3flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_3flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.strict as st import queues.flow_inference as fi @@ -15,11 +16,13 @@ prog = cb.Builder() + order = [1, 2, 0] if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [133, 266, 400] - order = [1, 2, 0] flow_infer = fi.insert_boundary_flow_inference( prog, "flow_inference", boundaries ) diff --git a/frontends/queues/tests/binheap/strict/strict_4flow_test.py 
b/frontends/queues/tests/binheap/strict/strict_4flow_test.py index 44540a1d4..d9d4f8c18 100644 --- a/frontends/queues/tests/binheap/strict/strict_4flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_4flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.strict as st import queues.flow_inference as fi @@ -15,11 +16,13 @@ prog = cb.Builder() + order = [3, 0, 2, 1] if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [100, 200, 300, 400] - order = [3, 0, 2, 1] flow_infer = fi.insert_boundary_flow_inference( prog, "flow_inference", boundaries ) diff --git a/frontends/queues/tests/binheap/strict/strict_5flow_test.py b/frontends/queues/tests/binheap/strict/strict_5flow_test.py index 7a6ae1cef..62deb88a5 100644 --- a/frontends/queues/tests/binheap/strict/strict_5flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_5flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.strict as st import queues.flow_inference as fi @@ -15,11 +16,13 @@ prog = cb.Builder() + order = [0, 1, 2, 3, 4] if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [80, 160, 240, 320, 400] - order = [0, 1, 2, 3, 4] flow_infer = fi.insert_boundary_flow_inference( prog, "flow_inference", boundaries ) diff --git a/frontends/queues/tests/binheap/strict/strict_6flow_test.py b/frontends/queues/tests/binheap/strict/strict_6flow_test.py index df3fcbb2a..154022a23 
100644 --- a/frontends/queues/tests/binheap/strict/strict_6flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_6flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.strict as st import queues.flow_inference as fi @@ -15,11 +16,13 @@ prog = cb.Builder() + order = [3, 1, 5, 2, 4, 0] if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [66, 100, 200, 220, 300, 400] - order = [3, 1, 5, 2, 4, 0] flow_infer = fi.insert_boundary_flow_inference( prog, "flow_inference", boundaries ) diff --git a/frontends/queues/tests/binheap/strict/strict_7flow_test.py b/frontends/queues/tests/binheap/strict/strict_7flow_test.py index 886cca5b2..8deefe446 100644 --- a/frontends/queues/tests/binheap/strict/strict_7flow_test.py +++ b/frontends/queues/tests/binheap/strict/strict_7flow_test.py @@ -1,6 +1,7 @@ import sys import calyx.builder as cb import queues.queue_call as qc +import queues.sim_pcap as sp import queues.binheap.strict as st import queues.flow_inference as fi @@ -15,11 +16,13 @@ prog = cb.Builder() + order = [0, 1, 2, 3, 4, 5, 6] if sim_pcap: - raise Exception("Not Implemented") + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st.insert_binheap_strict(prog, "pifo", NUMFLOWS, order, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) else: boundaries = [50, 100, 150, 200, 250, 300, 400] - order = [0, 1, 2, 3, 4, 5, 6] flow_infer = fi.insert_boundary_flow_inference( prog, "flow_inference", boundaries ) From 5b7ad8bcf05fbec612c8571046f0627a6181e7f6 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Tue, 19 Nov 2024 15:26:41 -0500 Subject: [PATCH 20/22] Factor out flow_inference for strict_or_rr.py --- 
frontends/queues/queues/strict_or_rr.py | 118 ++++++------------ frontends/queues/tests/complex_tree_test.py | 39 +++--- frontends/queues/tests/pifo_tree_test.py | 15 ++- .../queues/tests/round_robin/rr_2flow_test.py | 25 +++- .../queues/tests/round_robin/rr_3flow_test.py | 25 +++- .../queues/tests/round_robin/rr_4flow_test.py | 25 +++- .../queues/tests/round_robin/rr_5flow_test.py | 25 +++- .../queues/tests/round_robin/rr_6flow_test.py | 25 +++- .../queues/tests/round_robin/rr_7flow_test.py | 25 +++- frontends/queues/tests/sdn_test.py | 21 +++- .../queues/tests/strict/strict_2flow_test.py | 30 ++++- .../queues/tests/strict/strict_3flow_test.py | 30 ++++- .../queues/tests/strict/strict_4flow_test.py | 30 ++++- .../queues/tests/strict/strict_5flow_test.py | 30 ++++- .../queues/tests/strict/strict_6flow_test.py | 30 ++++- .../queues/tests/strict/strict_7flow_test.py | 32 ++++- 16 files changed, 376 insertions(+), 149 deletions(-) diff --git a/frontends/queues/queues/strict_or_rr.py b/frontends/queues/queues/strict_or_rr.py index 5f279711d..ac48b0660 100644 --- a/frontends/queues/queues/strict_or_rr.py +++ b/frontends/queues/queues/strict_or_rr.py @@ -1,7 +1,9 @@ # pylint: disable=import-error import calyx.builder as cb import calyx.py_ast as ast +from calyx.utils import bits_needed import queues.fifo as fifo +import queues.flow_inference as fi # This determines the maximum possible length of the queue: # The max length of the queue will be 2^QUEUE_LEN_FACTOR. @@ -27,22 +29,23 @@ def invoke_subqueue(queue_cell, cmd, value, ans, err) -> cb.invoke: def insert_queue( prog, name, + is_round_robin, subqueues, - boundaries, - numflows, - order, - round_robin, + flow_infer, + order=None, queue_len_factor=QUEUE_LEN_FACTOR, ): """ - Inserts the component `pifo` into the program. If round_robin is true, it - inserts a round robin queue, otherwise it inserts a strict queue. `numflows` - is the number of flows, which must be an integer greater than 0. 
Boundaries - must be of length `numflows` + 1, where the first boundary is the smallest - number a value can take (eg. 0). `order` is used for strict queues to determine - the order of priority of the subqueues. `order` must be a list of length - `numflows`. + Inserts the component `pifo` into the program, operating over n flows (where n is `len(subqueues)`). + If `is_round_robin` is true, it inserts a round robin queue, otherwise it inserts a strict queue. + `flow_infer` is the component used for flow inference; it must be invoked with an input `value` + and reference register `flow` of size floor(log_2(n)). + `order` is used for strict queues to determine the priority of the subqueues. + `order` must be a permutation of {0, ..., n - 1}. """ + numflows = len(subqueues) + + assert is_round_robin or sorted(order) == list(range(numflows)) pifo: cb.ComponentBuilder = prog.component(name) cmd = pifo.input("cmd", 1) # the size in bits is 1 @@ -53,6 +56,10 @@ def insert_queue( pifo.cell(f"queue_{i}", queue_i) for i, queue_i in enumerate(subqueues) ] + flow = pifo.reg(bits_needed(numflows - 1), "flow") + flow_infer = pifo.cell("flow_infer", flow_infer) + infer_flow = cb.invoke(flow_infer, in_value=value, ref_flow=flow) + # If the user wants to pop, we will write the popped value to `ans`. ans = pifo.reg(32, "ans", is_ref=True) # We'll raise this as a general error flag for overflow and underflow. @@ -88,45 +95,21 @@ def insert_queue( ) # We create a list of invoke-statement handles. - # Each invoke is guarded by a pair of inequality checks on the value register, - # and each pair of guards is unique to the subqueue it is associated with. + # Each invoke is uniquely guarded by an equality check on the flow register. # This means we can eventually execute all of these invokes in parallel. 
- invoke_subqueues_value_guard_seq = [ - cb.if_with( - pifo.le_use(value, boundaries[b + 1]), # value <= boundaries[b+1] - ( - invoke_subqueue(subqueue_cells[b], cmd, value, ans, err) - # In the special case when b = 0, - # we don't need to check the lower bound and we can just `invoke`. - if b == 0 and round_robin - else ( - invoke_subqueue( - subqueue_cells[order.index(b)], cmd, value, ans, err - ) - if b == 0 and not round_robin - else ( - cb.if_with( - pifo.gt_use(value, boundaries[b]), # value > boundaries[b] - invoke_subqueue( - subqueue_cells[order.index(b)], cmd, value, ans, err - ), - ) - if not round_robin - # Otherwise, we need to check the lower bound and `invoke` - # only if the value is in the interval. - else cb.if_with( - pifo.gt_use(value, boundaries[b]), # value > boundaries[b] - invoke_subqueue(subqueue_cells[b], cmd, value, ans, err), - ) - ) + invoke_subqueues_flow_guard = pifo.case( + flow.out, + { + n: ( + invoke_subqueue(subqueue_cells[n], cmd, value, ans, err) + if is_round_robin + else invoke_subqueue( + subqueue_cells[order.index(n)], cmd, value, ans, err ) - ), - ) - for b in range(numflows) - ] - invoke_subqueues_value_guard = cb.par( - *invoke_subqueues_value_guard_seq - ) # Execute in parallel. + ) + for n in range(numflows) + }, + ) incr_hot_wraparound = cb.if_with( # If hot = numflows - 1, we need to wrap around to 0. Otherwise, we increment. @@ -159,7 +142,7 @@ def insert_queue( len_decr, ( pifo.reg_store(hot, og_hot.out) - if not round_robin + if not is_round_robin else ast.Empty # If we are not generating a round-robin PIFO, # we are generating a strict PIFO. @@ -173,8 +156,10 @@ def insert_queue( raise_err, # The queue is full: overflow. [ # The queue is not full. Proceed. lower_err, + # flow := flow of incoming packet + infer_flow, # We'll push to the subqueue that the value belongs to. - invoke_subqueues_value_guard, + invoke_subqueues_flow_guard, # If all went well, we'll increment the length of the queue. 
cb.if_with(err_is_low, len_incr), ], @@ -188,36 +173,3 @@ def insert_queue( ) return pifo - - -def generate(prog, numflows, roundrobin): - """Top-level function to build the program.""" - - if numflows == 2: - boundaries = [0, 200, 400] - order = [1, 0] - elif numflows == 3: - boundaries = [0, 133, 266, 400] - order = [1, 2, 0] - elif numflows == 4: - boundaries = [0, 100, 200, 300, 400] - order = [3, 0, 2, 1] - elif numflows == 5: - boundaries = [0, 80, 160, 240, 320, 400] - order = [0, 1, 2, 3, 4] - elif numflows == 6: - boundaries = [0, 66, 100, 200, 220, 300, 400] - order = [3, 1, 5, 2, 4, 0] - elif numflows == 7: - boundaries = [0, 50, 100, 150, 200, 250, 300, 400] - order = [0, 1, 2, 3, 4, 5, 6] - else: - raise ValueError("Unsupported number of flows") - - fifo_queue = fifo.insert_fifo(prog, "fifo", QUEUE_LEN_FACTOR) - subqueues = [fifo_queue] * numflows - pifo = insert_queue( - prog, "pifo", subqueues, boundaries, numflows, order, roundrobin - ) - - return pifo diff --git a/frontends/queues/tests/complex_tree_test.py b/frontends/queues/tests/complex_tree_test.py index ede422196..c31ae57de 100644 --- a/frontends/queues/tests/complex_tree_test.py +++ b/frontends/queues/tests/complex_tree_test.py @@ -4,6 +4,7 @@ import queues.queue_call as qc import queues.strict_or_rr as strict_or_rr import queues.fifo as fifo +import queues.flow_inference as fi # This complex tree has the shape rr(strict(A, B, C), rr(D, E, F), strict(G, H)) @@ -16,41 +17,29 @@ def build(): fifo_queue = fifo.insert_fifo(prog, "fifo") + fi_strict1 = fi.insert_boundary_flow_inference(prog, "fi_strict1", [44, 88, 133]) pifo_strict1 = strict_or_rr.insert_queue( prog, "pifo_strict1", - [fifo_queue, fifo_queue, fifo_queue], - [0, 44, 88, 133], - 3, - [0, 1, 2], False, + [fifo_queue, fifo_queue, fifo_queue], + fi_strict1, + order=[0, 1, 2], ) + + fi_rr = fi.insert_boundary_flow_inference(prog, "fi_rr", [177, 221, 266]) pifo_rr = strict_or_rr.insert_queue( - prog, - "pifo_rr", - [fifo_queue, 
fifo_queue, fifo_queue], - [133, 177, 221, 266], - 3, - [0, 1, 2], - True, + prog, "pifo_rr", True, [fifo_queue, fifo_queue, fifo_queue], fi_rr ) + + fi_strict2 = fi.insert_boundary_flow_inference(prog, "fi_strict2", [333, 400]) pifo_strict2 = strict_or_rr.insert_queue( - prog, - "pifo_strict2", - [fifo_queue, fifo_queue], - [266, 333, 400], - 2, - [0, 1], - False, + prog, "pifo_strict2", False, [fifo_queue, fifo_queue], fi_strict2, order=[0, 1] ) + + fi_root = fi.insert_boundary_flow_inference(prog, "fi_root", [133, 266, 400]) pifo_root = strict_or_rr.insert_queue( - prog, - "pifo_root", - [pifo_strict1, pifo_rr, pifo_strict2], - [0, 133, 266, 400], - 3, - [], - True, + prog, "pifo_root", True, [pifo_strict1, pifo_rr, pifo_strict2], fi_root ) qc.insert_main(prog, pifo_root, num_cmds, keepgoing=keepgoing) diff --git a/frontends/queues/tests/pifo_tree_test.py b/frontends/queues/tests/pifo_tree_test.py index ac07e9737..d27b3a564 100644 --- a/frontends/queues/tests/pifo_tree_test.py +++ b/frontends/queues/tests/pifo_tree_test.py @@ -4,6 +4,7 @@ import calyx.builder as cb import queues.queue_call as qc import queues.strict_or_rr as strict_or_rr +import queues.flow_inference as fi def build(): @@ -11,13 +12,23 @@ def build(): num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv prog = cb.Builder() + fifo_queue = fifo.insert_fifo(prog, "fifo") + + flow_infer_red = fi.insert_boundary_flow_inference( + prog, "flow_infer_red", [100, 200] + ) pifo_red = strict_or_rr.insert_queue( - prog, "pifo_red", [fifo_queue, fifo_queue], [0, 100, 200], 2, [], True + prog, "pifo_red", True, [fifo_queue, fifo_queue], flow_infer_red + ) + + flow_infer_root = fi.insert_boundary_flow_inference( + prog, "flow_infer_root", [200, 400] ) pifo_root = strict_or_rr.insert_queue( - prog, "pifo_root", [pifo_red, fifo_queue], [0, 200, 400], 2, [], True + prog, "pifo_root", True, [pifo_red, fifo_queue], flow_infer_root ) + qc.insert_main(prog, pifo_root, num_cmds, keepgoing=keepgoing) 
return prog.program diff --git a/frontends/queues/tests/round_robin/rr_2flow_test.py b/frontends/queues/tests/round_robin/rr_2flow_test.py index 1c199aff7..d3b73d023 100644 --- a/frontends/queues/tests/round_robin/rr_2flow_test.py +++ b/frontends/queues/tests/round_robin/rr_2flow_test.py @@ -1,15 +1,34 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 2 if __name__ == "__main__": """Invoke the top-level function to build the program, with 2 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 2, True) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [200, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/round_robin/rr_3flow_test.py b/frontends/queues/tests/round_robin/rr_3flow_test.py index 3c5e86382..d97dc15cc 100644 --- a/frontends/queues/tests/round_robin/rr_3flow_test.py +++ b/frontends/queues/tests/round_robin/rr_3flow_test.py @@ -1,15 +1,34 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as 
fi + +NUMFLOWS = 3 if __name__ == "__main__": """Invoke the top-level function to build the program, with 3 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 3, True) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [133, 266, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/round_robin/rr_4flow_test.py b/frontends/queues/tests/round_robin/rr_4flow_test.py index 1ba1c5c1f..8b20dfa8b 100644 --- a/frontends/queues/tests/round_robin/rr_4flow_test.py +++ b/frontends/queues/tests/round_robin/rr_4flow_test.py @@ -1,15 +1,34 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 4 if __name__ == "__main__": """Invoke the top-level function to build the program, with 4 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 4, True) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = 
st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [100, 200, 300, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/round_robin/rr_5flow_test.py b/frontends/queues/tests/round_robin/rr_5flow_test.py index 19ae11846..4515b279a 100644 --- a/frontends/queues/tests/round_robin/rr_5flow_test.py +++ b/frontends/queues/tests/round_robin/rr_5flow_test.py @@ -1,15 +1,34 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 5 if __name__ == "__main__": """Invoke the top-level function to build the program, with 5 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 5, True) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [80, 160, 240, 320, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/round_robin/rr_6flow_test.py b/frontends/queues/tests/round_robin/rr_6flow_test.py index 
b8a8d2ffd..f817c5d80 100644 --- a/frontends/queues/tests/round_robin/rr_6flow_test.py +++ b/frontends/queues/tests/round_robin/rr_6flow_test.py @@ -1,15 +1,34 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 6 if __name__ == "__main__": """Invoke the top-level function to build the program, with 6 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 6, True) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [66, 100, 200, 220, 300, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/round_robin/rr_7flow_test.py b/frontends/queues/tests/round_robin/rr_7flow_test.py index 3c1bca03c..f414b030e 100644 --- a/frontends/queues/tests/round_robin/rr_7flow_test.py +++ b/frontends/queues/tests/round_robin/rr_7flow_test.py @@ -1,15 +1,34 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 7 if __name__ == "__main__": """Invoke the top-level function to build the program, with 7 flows.""" num_cmds 
= int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 7, True) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [50, 100, 150, 200, 250, 300, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue(prog, "pifo", True, subqueues, flow_infer) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/sdn_test.py b/frontends/queues/tests/sdn_test.py index 37ffa0f15..0d1f3a868 100644 --- a/frontends/queues/tests/sdn_test.py +++ b/frontends/queues/tests/sdn_test.py @@ -4,6 +4,7 @@ import queues.queue_call as qc import queues.strict_or_rr as strict_or_rr import queues.fifo as fifo +import queues.flow_inference as fi def insert_stats(prog, name, static=False): @@ -109,11 +110,27 @@ def build(static=False): controller = insert_controller(prog, "controller", stats_component) fifo_queue = fifo.insert_fifo(prog, "fifo") + + flow_infer_red = fi.insert_boundary_flow_inference( + prog, "flow_infer_red", [100, 200] + ) pifo_red = strict_or_rr.insert_queue( - prog, "pifo_red", [fifo_queue, fifo_queue], [0, 100, 200], 2, [], True + prog, + "pifo_red", + True, + [fifo_queue, fifo_queue], + flow_infer_red, + ) + + flow_infer_root = fi.insert_boundary_flow_inference( + prog, "flow_infer_root", [200, 400] ) pifo_root = strict_or_rr.insert_queue( - prog, "pifo_root", [pifo_red, fifo_queue], [0, 200, 400], 2, [], True + prog, + "pifo_root", + True, + [pifo_red, fifo_queue], + flow_infer_root, ) qc.insert_main(prog, pifo_root, num_cmds, 
keepgoing=keepgoing) diff --git a/frontends/queues/tests/strict/strict_2flow_test.py b/frontends/queues/tests/strict/strict_2flow_test.py index e819a4801..e5c6840d4 100644 --- a/frontends/queues/tests/strict/strict_2flow_test.py +++ b/frontends/queues/tests/strict/strict_2flow_test.py @@ -1,15 +1,39 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 2 if __name__ == "__main__": """Invoke the top-level function to build the program, with 2 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 2, False) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + order = [1, 0] + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [200, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/strict/strict_3flow_test.py b/frontends/queues/tests/strict/strict_3flow_test.py index 6f02afcec..ad25950ec 100644 --- a/frontends/queues/tests/strict/strict_3flow_test.py +++ b/frontends/queues/tests/strict/strict_3flow_test.py @@ -1,15 +1,39 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import 
queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 3 if __name__ == "__main__": """Invoke the top-level function to build the program, with 3 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 3, False) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + order = [1, 2, 0] + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [133, 266, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/strict/strict_4flow_test.py b/frontends/queues/tests/strict/strict_4flow_test.py index b5a068d11..f7fc7a0e0 100644 --- a/frontends/queues/tests/strict/strict_4flow_test.py +++ b/frontends/queues/tests/strict/strict_4flow_test.py @@ -1,15 +1,39 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 4 if __name__ == "__main__": """Invoke the top-level function to build the program, with 4 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 4, False) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + order = [3, 
0, 2, 1] + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [100, 200, 300, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/strict/strict_5flow_test.py b/frontends/queues/tests/strict/strict_5flow_test.py index 8c6e177ea..6313e2290 100644 --- a/frontends/queues/tests/strict/strict_5flow_test.py +++ b/frontends/queues/tests/strict/strict_5flow_test.py @@ -1,15 +1,39 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 5 if __name__ == "__main__": """Invoke the top-level function to build the program, with 5 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 5, False) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + order = [0, 1, 2, 3, 4] + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [80, 160, 240, 320, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + 
) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/strict/strict_6flow_test.py b/frontends/queues/tests/strict/strict_6flow_test.py index d1065b5b0..29c5ff865 100644 --- a/frontends/queues/tests/strict/strict_6flow_test.py +++ b/frontends/queues/tests/strict/strict_6flow_test.py @@ -1,15 +1,39 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 6 if __name__ == "__main__": """Invoke the top-level function to build the program, with 6 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 6, False) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + order = [3, 1, 5, 2, 4, 0] + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [66, 100, 200, 220, 300, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() diff --git a/frontends/queues/tests/strict/strict_7flow_test.py b/frontends/queues/tests/strict/strict_7flow_test.py index ec9d28ebe..ae8a5ea78 100644 --- a/frontends/queues/tests/strict/strict_7flow_test.py +++ b/frontends/queues/tests/strict/strict_7flow_test.py @@ -1,15 +1,39 @@ import sys import calyx.builder as cb import queues.queue_call as qc -from queues.strict_or_rr 
import generate +import queues.sim_pcap as sp +import queues.strict_or_rr as st_or_rr +import queues.fifo as fifo +import queues.flow_inference as fi + +NUMFLOWS = 7 if __name__ == "__main__": - """Invoke the top-level function to build the program, with 6 flows.""" + """Invoke the top-level function to build the program, with 7 flows.""" num_cmds = int(sys.argv[1]) keepgoing = "--keepgoing" in sys.argv + sim_pcap = "--sim-pcap" in sys.argv prog = cb.Builder() - pifo = generate(prog, 7, False) - qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + + fifo_queue = fifo.insert_fifo(prog, "fifo") + subqueues = [fifo_queue] * NUMFLOWS + order = [0, 1, 2, 3, 4, 5, 6] + if sim_pcap: + flow_infer = fi.insert_tuple_flow_inference(prog, "flow_inference", NUMFLOWS) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + sp.insert_main(prog, pifo, num_cmds, NUMFLOWS) + else: + boundaries = [50, 100, 150, 200, 250, 300, 400] + flow_infer = fi.insert_boundary_flow_inference( + prog, "flow_inference", boundaries + ) + pifo = st_or_rr.insert_queue( + prog, "pifo", False, subqueues, flow_infer, order=order + ) + qc.insert_main(prog, pifo, num_cmds, keepgoing=keepgoing) + prog.program.emit() From 80b0ef49fc5ce0b7277bffd12a7596e588b72d53 Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Tue, 19 Nov 2024 20:11:34 -0500 Subject: [PATCH 21/22] Comment parse_pcap.py --- frontends/queues/evaluation/parse_pcap.py | 27 +++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/frontends/queues/evaluation/parse_pcap.py b/frontends/queues/evaluation/parse_pcap.py index 64104413f..df80fa3aa 100755 --- a/frontends/queues/evaluation/parse_pcap.py +++ b/frontends/queues/evaluation/parse_pcap.py @@ -152,11 +152,13 @@ def parse_pcap(pcap_file): global PKTS_PER_SEC global BITS_PER_SEC + # converts numeric address to hexadecimal strings def mac_addr(addr): return ":".join("%02x" % dpkt.compat.compat_ord(b) for b in addr) pcap = 
dpkt.pcap.Reader(pcap_file) + # first pass over PCAP star_ts = None end_ts = None total_size = 0 @@ -164,6 +166,7 @@ def mac_addr(addr): ADDR2INT = {} if ADDR2INT is None else ADDR2INT addr_count, pkt_count = 0, 0 for i, (ts, buf) in enumerate(pcap): + # only process packet indices >= `START` and < `END` if i < START: continue elif i == START: @@ -171,20 +174,25 @@ def mac_addr(addr): elif END is not None and i >= END: break + # if ADDR2INT JSON isn't supplied (i.e. `make_addr_map = True`), make one eth = dpkt.ethernet.Ethernet(buf) addr = mac_addr(eth.src) if addr not in ADDR2INT: if make_addr_map: + # `i`th encountered address maps to integer `i` ADDR2INT[addr] = addr_count addr_count += 1 else: + # complain if ADDR2INT JSON is supplied but `addr` isn't a key raise UnknownAddress( f"MAC address {addr} for packet {i} not found in Addr2Int map" ) + # keep track of number of packets and number of bytes processed total_size += len(buf) pkt_count += 1 end_ts = ts + # set `END` to the last packet if unspecified END = START + pkt_count if END is None else END if start_ts is None: @@ -192,33 +200,46 @@ def mac_addr(addr): elif START >= END: raise InvalidRange(f"Start index {START} >= end index {END}") + # compute PCAP stats total_time = end_ts - start_ts PKTS_PER_SEC = float("inf") if total_time == 0 else (END - START) / total_time BITS_PER_SEC = float("inf") if total_time == 0 else (total_size * 8) / total_time + # set `NUM_FLOWS` to the max value in ADDR2INT + 1 if unspecified if NUM_FLOWS is None: NUM_FLOWS = max(ADDR2INT[addr] for addr in ADDR2INT) + 1 + # set `POP_TICK` so `LINE_RATE` Gbits are processed every second if unspecified + # avg_pkt_size := total_size * 8 / (END - START) (in bits) + # num_pops_per_sec := (LINE_RATE * 10^9) / avg_pkt_size + # Therefore, + # POP_TICK := 10^9 / num_pops_per_sec = avg_pkt_size / LINE_RATE (in 1/ns) if POP_TICK is None: POP_TICK = int((total_size * 8) // (LINE_RATE * (END - START))) pcap_file.seek(0) pcap = 
dpkt.pcap.Reader(pcap_file) + # second pass over PCAP data = {"commands": [], "arrival_cycles": [], "flows": [], "pkt_ids": []} prev_time = 0 pkts_in_switch = 0 for i, (ts, buf) in enumerate(pcap): + # only process packet indices >= `START` and < `END` if i < START: continue elif i >= END: break + # compute time since first packet's arrival in ns time = (ts - start_ts) * 10**9 + # compute number of pops between current and previous packet's arrival pop_time = (prev_time % POP_TICK) + prev_time num_pops = int((time - pop_time) // POP_TICK) if time > pop_time else 0 + # keep track of number of unpopped packet's pkts_in_switch = 0 if pkts_in_switch < num_pops else pkts_in_switch - num_pops + # insert `num_pops` pops for _ in range(num_pops): data["commands"].append(CMD_POP) @@ -229,6 +250,7 @@ def mac_addr(addr): data["flows"].append(DONTCARE) data["pkt_ids"].append(DONTCARE) + # push current packet eth = dpkt.ethernet.Ethernet(buf) addr = mac_addr(eth.src) flow = ADDR2INT[addr] % NUM_FLOWS @@ -243,6 +265,7 @@ def mac_addr(addr): prev_time = time + # pop all unpopped packets pop_time = (prev_time % POP_TICK) + prev_time for _ in range(pkts_in_switch): data["commands"].append(CMD_POP) @@ -292,6 +315,7 @@ def format_gen(width): pcap_file = open(opts.PCAP, "rb") addr2int_json = None if opts.addr2int is None else open(opts.addr2int) + # unpack command line arguments ADDR2INT = None if addr2int_json is None else json.load(addr2int_json) CLOCK_PERIOD = opts.clock_period POP_TICK = opts.pop_tick @@ -299,11 +323,14 @@ def format_gen(width): START = opts.start END = opts.end + # construct memories for data file: commands, arrival_cycles, flows, pkt_ids data = parse_pcap(pcap_file) + # construct data file in `data_file` data_file = open(opts.Out, "w") dump_json(data, data_file) + # report stats stats = {} stats["num_cmds"] = len(data["commands"]) stats["num_flows"] = NUM_FLOWS From d2e4aa57cad050216be216c9c398ab887ea44c4f Mon Sep 17 00:00:00 2001 From: Akash Dhiraj Date: Wed, 
11 Dec 2024 15:49:16 -0500 Subject: [PATCH 22/22] Typo Co-authored-by: Anshuman Mohan <10830208+anshumanmohan@users.noreply.github.com> --- frontends/queues/evaluation/parse_pcap.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/frontends/queues/evaluation/parse_pcap.py b/frontends/queues/evaluation/parse_pcap.py index df80fa3aa..87e9fa4b6 100755 --- a/frontends/queues/evaluation/parse_pcap.py +++ b/frontends/queues/evaluation/parse_pcap.py @@ -237,7 +237,7 @@ def mac_addr(addr): # compute number of pops between current and previous packet's arrival pop_time = (prev_time % POP_TICK) + prev_time num_pops = int((time - pop_time) // POP_TICK) if time > pop_time else 0 - # keep track of number of unpopped packet's + # keep track of number of unpopped packets pkts_in_switch = 0 if pkts_in_switch < num_pops else pkts_in_switch - num_pops # insert `num_pops` pops for _ in range(num_pops):