Merge pull request #12 from ai-for-decision-making-tue/reformat_fjsp_drl
Reformatting
RobbertReijnen authored Apr 5, 2024
2 parents 595118a + 606ec45 commit a31349c
Showing 36 changed files with 402 additions and 483 deletions.
10 changes: 5 additions & 5 deletions README.md
@@ -10,14 +10,14 @@ We aim to include a wide range of solution methods capable of solving machine sc



| Solution methods | Job Shop | Flow Shop | Flexible Job Shop | SDST | Assembly | Online Arrivals |
| Solution methods | Job Shop (JSP) | Flow Shop (FSP) | Flexible Job Shop (FJSP) | FJSP SDST | FAJSP | Online (F)JSP |
| :---: | :---:| :---: | :---: | :---: | :---: | :---: |
| Load Balancing Heuristics |||||| |
| Dispatching Rules |||| ||* |
| Genetic Algorithm |||||| |
| Load Balancing Heuristics |||||| |
| Genetic Algorithm |||||| |
| MILP ||||| | |
| CP-SAT |||| | | |
| FJSP-DRL |||| | | |
| MILP | | ||| | |
| OR-Tools CP-SAT |||| | | |

*Capable of handling online arrivals of FJSP problems

23 changes: 12 additions & 11 deletions configs/FJSP_DRL.toml
@@ -1,16 +1,16 @@
[env_parameters]
num_jobs = 10 # Number of jobs in the environment
num_mas = 5 # Number of machine agents
batch_size = 20 # Batch size for training
ope_feat_dim = 6 # Dimension of operation features
ma_feat_dim = 3 # Dimension of machine agent features
valid_batch_size = 5 # Batch size for validation
num_jobs = 10 # Number of jobs in the environment
num_mas = 5 # Number of machine agents
batch_size = 20 # Batch size for training
ope_feat_dim = 6 # Dimension of operation features
ma_feat_dim = 3 # Dimension of machine agent features
valid_batch_size = 5 # Batch size for validation
device = "cpu" # Device for training ("cpu" or "cuda")

[model_parameters]
in_size_ma = 3 # Input size for machine agent
out_size_ma = 8 # Output size for machine agent
in_size_ope = 6 # Input size for operation
in_size_ope = 6 # Input size for operation
out_size_ope = 8 # Output size for operation
hidden_size_ope = 128 # Hidden size for operation model
num_heads = [1, 1] # Number of attention heads
@@ -44,9 +44,10 @@ device = "cpu" # Device for training ("cpu" or "cuda")

# Configuration for test parameters
[test_parameters]
problem_instance = "/fjsp/1_Brandimarte/Mk02.fjs" # Problem instance for testing
seed = 1
problem_instance = "/fjsp/1_Brandimarte/Mk02.fjs" # Problem instance for testing
trained_policy = "/solution_methods/FJSP_DRL/save/train_20240314_192906/song_10_5.pt" # Load pretrained policy
sample = true # Sampling flag for testing
num_sample = 1 # Number of samples for testing (nr )
sample = false # Sampling flag for testing
num_sample = 1 # Number of samples for testing (nr )
plotting = true # plot instance representation and ganttchart
device = "cpu" # Device for testing ("cpu" or "cuda")
device = "cpu" # Device for testing ("cpu" or "cuda")
4 changes: 2 additions & 2 deletions data_parsers/parser_fajsp.py
@@ -1,8 +1,8 @@
from pathlib import Path
import re
from pathlib import Path

from scheduling_environment.machine import Machine
from scheduling_environment.job import Job
from scheduling_environment.machine import Machine
from scheduling_environment.operation import Operation


4 changes: 2 additions & 2 deletions data_parsers/parser_fjsp.py
@@ -1,8 +1,8 @@
from pathlib import Path
import re
from pathlib import Path

from scheduling_environment.machine import Machine
from scheduling_environment.job import Job
from scheduling_environment.machine import Machine
from scheduling_environment.operation import Operation


4 changes: 2 additions & 2 deletions data_parsers/parser_fjsp_sdst.py
@@ -1,8 +1,8 @@
from pathlib import Path

from scheduling_environment.operation import Operation
from scheduling_environment.machine import Machine
from scheduling_environment.job import Job
from scheduling_environment.machine import Machine
from scheduling_environment.operation import Operation


def parse(JobShop, instance, from_absolute_path=False):
6 changes: 3 additions & 3 deletions data_parsers/parser_jsp_fsp.py
@@ -1,9 +1,9 @@
from pathlib import Path
import re
from pathlib import Path

from scheduling_environment.operation import Operation
from scheduling_environment.machine import Machine
from scheduling_environment.job import Job
from scheduling_environment.machine import Machine
from scheduling_environment.operation import Operation


def parse(JobShop, instance, from_absolute_path=False):
12 changes: 7 additions & 5 deletions plotting/drawer.py
@@ -1,12 +1,14 @@
import copy
import random
import os
import networkx as nx
import random
from statistics import mean
from scheduling_environment.jobShop import JobShop
import numpy as np
import matplotlib.pyplot as plt

import matplotlib.colors as mcolors
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np

from scheduling_environment.jobShop import JobShop


def create_colormap():
Binary file modified requirements.txt
Binary file not shown.
92 changes: 75 additions & 17 deletions run_FJSP_DRL.py
@@ -1,15 +1,82 @@
import argparse
from solution_methods.FJSP_DRL import training
from solution_methods.FJSP_DRL import test
import logging
import os
import time

import torch

from plotting.drawer import draw_gantt_chart
from solution_methods.FJSP_DRL.env_test import FJSPEnv_test
from solution_methods.FJSP_DRL.PPO import HGNNScheduler
from solution_methods.helper_functions import load_job_shop_env, load_parameters

PARAM_FILE = "configs/FJSP_DRL.toml"


def main(execute_mode, param_file: str):
if execute_mode == 'train':
training.train_FJSP_DRL(param_file)
if execute_mode == 'test':
test.test_instance(param_file)
def initialize_device(parameters: dict) -> torch.device:
device_str = "cpu"
if parameters['test_parameters']['device'] == "cuda":
device_str = "cuda:0" if torch.cuda.is_available() else "cpu"
return torch.device(device_str)


def run_method(**parameters):
# Extract parameters
device = initialize_device(parameters)
model_parameters = parameters["model_parameters"]
test_parameters = parameters["test_parameters"]

# Configure default device
torch.set_default_tensor_type('torch.cuda.FloatTensor' if device.type == 'cuda' else 'torch.FloatTensor')
if device.type == 'cuda':
torch.cuda.set_device(device)

# Load trained policy
trained_policy = os.getcwd() + test_parameters['trained_policy']
if trained_policy.endswith('.pt'):
if device.type == 'cuda':
policy = torch.load(trained_policy)
else:
policy = torch.load(trained_policy, map_location='cpu')

model_parameters["actor_in_dim"] = model_parameters["out_size_ma"] * 2 + model_parameters["out_size_ope"] * 2
model_parameters["critic_in_dim"] = model_parameters["out_size_ma"] + model_parameters["out_size_ope"]

hgnn_model = HGNNScheduler(model_parameters).to(device)
print('\nloading saved model:', trained_policy)
hgnn_model.load_state_dict(policy)

# Configure environment and load instance
instance_path = test_parameters['problem_instance']
JSMEnv = load_job_shop_env(instance_path)
env_test = FJSPEnv_test(JSMEnv, test_parameters)

# Get state and completion signal
state = env_test.state
done = False
last_time = time.time()

# Generate schedule for instance
while ~done:
with torch.no_grad():
actions = hgnn_model.act(state, [], done, flag_train=False, flag_sample=test_parameters['sample'])
state, _, done = env_test.step(actions)

print("spend_time:", time.time() - last_time)
print("makespan(s):", env_test.JSP_instance.makespan)

if test_parameters['plotting']:
draw_gantt_chart(env_test.JSP_instance)


def main(param_file=PARAM_FILE):
try:
parameters = load_parameters(param_file)
except FileNotFoundError:
logging.error(f"Parameter file {param_file} not found.")
return

run_method(**parameters)


if __name__ == "__main__":
@@ -24,13 +91,4 @@ def main(execute_mode, param_file: str)
)

args = parser.parse_args()

# if train the FJSP_DRL model with a tensor env, please specify the mode as 'train'
# mode = 'train'
# the size of training cases (number of jobs and machines) can be modified in /configs/FJSP_DRL.toml

# if test a saved model using a simulation env, please specify the mode as 'test'
# the test instance can be specified in /configs/FJSP_DRL.toml
mode = 'test'

main(mode, param_file=args.config_file)
main(param_file=args.config_file)
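
The rewritten entry point separates device selection (initialize_device), checkpoint loading plus the scheduling rollout (run_method), and parameter loading (main). A condensed sketch of the rollout pattern used above, assuming objects with the same interfaces as FJSPEnv_test and HGNNScheduler; illustrative only, not a drop-in replacement:

import time
import torch

def rollout(env, policy, sample=False):
    # Step the environment with the trained policy until the schedule is complete.
    state, done = env.state, False
    start = time.time()
    while not done:
        with torch.no_grad():  # inference only, no gradient tracking
            actions = policy.act(state, [], done, flag_train=False, flag_sample=sample)
        state, _, done = env.step(actions)
    # Wall-clock time of the rollout; the makespan itself is read from env.JSP_instance.
    return time.time() - start
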
4 changes: 2 additions & 2 deletions run_basic_heuristics.py
@@ -1,9 +1,9 @@
import argparse
import time
import logging
import time

from solution_methods.helper_functions import *
from plotting.drawer import draw_gantt_chart, draw_precedence_relations
from solution_methods.helper_functions import *

logging.basicConfig(level=logging.INFO)
PARAM_FILE = "configs/basic_heuristics.toml"
4 changes: 2 additions & 2 deletions run_dispatching_rules.py
@@ -1,11 +1,11 @@
import argparse
import logging

from scheduling_environment.simulationEnv import SimulationEnv
from data_parsers import parser_fajsp, parser_fjsp, parser_jsp_fsp
from plotting.drawer import draw_gantt_chart, draw_precedence_relations
from scheduling_environment.simulationEnv import SimulationEnv
from solution_methods.dispatching_rules.helper_functions import *
from solution_methods.helper_functions import load_parameters
from plotting.drawer import draw_gantt_chart, draw_precedence_relations

PARAM_FILE = "configs/dispatching_rules.toml"

18 changes: 9 additions & 9 deletions run_genetic_algorithm.py
@@ -1,16 +1,16 @@
import argparse
import logging
import multiprocessing
import numpy as np
from deap import base, creator, tools
from multiprocessing.pool import Pool

import numpy as np
from deap import base, creator, tools

from plotting.drawer import draw_precedence_relations, draw_gantt_chart
from solution_methods.helper_functions import record_stats, load_parameters, load_job_shop_env, dict_to_excel
from solution_methods.genetic_algorithm.operators import (evaluate_population, evaluate_individual, variation,
init_individual, init_population, mutate_shortest_proc_time,
mutate_sequence_exchange, pox_crossover, repair_precedence_constraints)
from plotting.drawer import draw_gantt_chart, draw_precedence_relations
from solution_methods.genetic_algorithm.operators import (
evaluate_individual, evaluate_population, init_individual, init_population, mutate_sequence_exchange,
mutate_shortest_proc_time, pox_crossover, repair_precedence_constraints, variation)
from solution_methods.helper_functions import dict_to_excel, load_job_shop_env, load_parameters, record_stats

logging.basicConfig(level=logging.INFO)

@@ -70,7 +70,7 @@ def initialize_run(pool: Pool, **kwargs):
return initial_population, toolbox, stats, hof, jobShopEnv


def algo_run(jobShopEnv, population, toolbox, folder, exp_name, stats=None, hof=None, **kwargs):
def run_method(jobShopEnv, population, toolbox, folder, exp_name, stats=None, hof=None, **kwargs):
"""Executes the genetic algorithm and returns the best individual.
Args:
@@ -161,7 +161,7 @@ def main(param_file=PARAM_FILE):

exp_name = ("/rseed" + str(parameters['algorithm']["rseed"]) + "/")
population, toolbox, stats, hof, jobShopEnv = initialize_run(pool, **parameters)
best_individual = algo_run(jobShopEnv, population, toolbox, folder, exp_name, stats, hof, **parameters)
best_individual = run_method(jobShopEnv, population, toolbox, folder, exp_name, stats, hof, **parameters)
return best_individual


59 changes: 33 additions & 26 deletions run_milp.py
@@ -1,17 +1,19 @@
import argparse
from gurobipy import GRB
from solution_methods.MILP import FJSPSDSTmodel, FJSPmodel, JSPmodel
from solution_methods.helper_functions import load_parameters
import logging
import json
import logging
import os

from gurobipy import GRB

from solution_methods.helper_functions import load_parameters
from solution_methods.MILP import FJSPmodel, FJSPSDSTmodel, JSPmodel

logging.basicConfig(level=logging.INFO)
DEFAULT_RESULTS_ROOT = "./results/milp"
PARAM_FILE = "configs/milp.toml"


def main(param_file=PARAM_FILE):
def run_method(folder, exp_name, **kwargs):
"""
Solve the FJSP problem for the provided input file.
@@ -21,26 +23,16 @@ Returns:
Returns:
None. Prints the optimization result.
"""
try:
parameters = load_parameters(param_file)
except FileNotFoundError:
logging.error(f"Parameter file {param_file} not found.")
return

folder = DEFAULT_RESULTS_ROOT

exp_name = "gurobi_" + str(parameters['solver']["time_limit"]) + "/" + \
str(parameters['instance']['problem_instance'])

if 'fjsp_sdst' in str(parameters['instance']['problem_instance']):
data = FJSPSDSTmodel.parse_file(parameters['instance']['problem_instance'])
model = FJSPSDSTmodel.fjsp_sdst_milp(data, parameters['solver']['time_limit'])
elif 'fjsp' in str(parameters['instance']['problem_instance']):
data = FJSPmodel.parse_file(parameters['instance']['problem_instance'])
model = FJSPmodel.fjsp_milp(data, parameters['solver']['time_limit'])
elif 'jsp' in str(parameters['instance']['problem_instance']):
data = JSPmodel.parse_file(parameters['instance']['problem_instance'])
model = JSPmodel.jsp_milp(data, parameters['solver']['time_limit'])
if 'fjsp_sdst' in str(kwargs['instance']['problem_instance']):
data = FJSPSDSTmodel.parse_file(kwargs['instance']['problem_instance'])
model = FJSPSDSTmodel.fjsp_sdst_milp(data, kwargs['solver']['time_limit'])
elif 'fjsp' in str(kwargs['instance']['problem_instance']):
data = FJSPmodel.parse_file(kwargs['instance']['problem_instance'])
model = FJSPmodel.fjsp_milp(data, kwargs['solver']['time_limit'])
elif 'jsp' in str(kwargs['instance']['problem_instance']):
data = JSPmodel.parse_file(kwargs['instance']['problem_instance'])
model = JSPmodel.jsp_milp(data, kwargs['solver']['time_limit'])

model.optimize()

@@ -63,7 +55,7 @@ }
}

results = {
'time_limit': str(parameters['solver']["time_limit"]),
'time_limit': str(kwargs['solver']["time_limit"]),
'status': model.status,
'statusString': status_dict.get(model.status, 'UNKNOWN'),
'objValue': model.objVal if model.status == GRB.OPTIMAL else None,
@@ -90,6 +82,21 @@ json.dump(results, outfile, indent=4)
json.dump(results, outfile, indent=4)


def main(param_file=PARAM_FILE):

try:
parameters = load_parameters(param_file)
except FileNotFoundError:
logging.error(f"Parameter file {param_file} not found.")
return

folder = DEFAULT_RESULTS_ROOT

exp_name = "gurobi_" + str(parameters['solver']["time_limit"]) + "/" + str(parameters['instance']['problem_instance'])

run_method(folder, exp_name, **parameters)


if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run MILP")
parser.add_argument("config_file",
help="path to config file",
)
args = parser.parse_args()
main(param_file=args.config_file)
main(param_file=args.config_file)
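
Inside run_method, the problem_instance path selects the model family by substring matching. The order of the checks matters: 'jsp' is contained in 'fjsp', which is in turn contained in 'fjsp_sdst', so the most specific token has to be tested first. A small hypothetical helper (not part of the commit) that makes the routing explicit:

def select_model_family(problem_instance: str) -> str:
    # Most specific token first: reversing this order would misroute FJSP and
    # FJSP-SDST instances to the plain JSP model.
    if "fjsp_sdst" in problem_instance:
        return "FJSPSDSTmodel"
    elif "fjsp" in problem_instance:
        return "FJSPmodel"
    elif "jsp" in problem_instance:
        return "JSPmodel"
    raise ValueError(f"Unrecognised instance path: {problem_instance}")

print(select_model_family("/fjsp/1_Brandimarte/Mk02.fjs"))  # FJSPmodel
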