Merge branch 'idea-fasoc:main' into main
chetanyagoyal authored Dec 5, 2023
2 parents b869fe2 + a545970 commit f3abf5e
Showing 11 changed files with 649 additions and 25 deletions.
6 changes: 3 additions & 3 deletions README.rst
@@ -46,7 +46,7 @@ For more info on getting-started, please refer to ["Getting Started" section ](h

Below are the tool requirements along with their currently supported versions, which are updated regularly upon testing against the generators.

1. `Magic <https://github.com/RTimothyEdwards/magic>`_ (version:8.3.451)
1. `Magic <https://github.com/RTimothyEdwards/magic>`_ (version:8.3.452)

2. `Netgen <https://github.com/RTimothyEdwards/netgen>`_ (version:1.5.263)

@@ -55,12 +55,12 @@ Below are the tool requirements along with their currently support versions that
- Preferably, use this command to build: `./build.sh -option '-j8' -noruby -without-qt-multimedia -without-qt-xml -without-qt-svg`


4. `Yosys <https://github.com/The-OpenROAD-Project/yosys>`_ (version:0.35+39)
4. `Yosys <https://github.com/The-OpenROAD-Project/yosys>`_ (version:0.35+56)


5. `OpenROAD <https://github.com/The-OpenROAD-Project/OpenROAD>`_ (version:2.0_10905)

6. `Open_pdks <https://github.com/RTimothyEdwards/open_pdks>`_ (version:1.0.286)
6. `Open_pdks <https://github.com/RTimothyEdwards/open_pdks>`_ (version:1.0.459)

- open_pdks is required to run the DRC/LVS checks and the simulations
- After open_pdks is installed, please update the **open_pdks** key in `common/platform_config.json` with the installed path, down to the sky130A folder
6 changes: 3 additions & 3 deletions conda_versions.txt
@@ -1,5 +1,5 @@
magic==8.3.451_0_g84af801
magic==8.3.452_0_g83ed73a
netgen==1.5.263_0_g4250525
open_pdks.sky130a==1.0.286_0_g52af776
open_pdks.sky130a==1.0.459_0_g369e64e
openroad==2.0_10905_ge89829335
yosys==0.35_40_g031ad38b5
yosys==0.35_57_g8614d9b32
46 changes: 46 additions & 0 deletions openfasoc/MLoptimization/README.md
@@ -0,0 +1,46 @@
# Machine Learning Optimization
Code for a reinforcement learning loop that uses the OpenFASOC generators to optimize circuit metrics.

## Code Setup
The code is set up as follows:

The top-level directory contains the following scripts:
* model.py: top-level RL script, used to set hyperparameters and run training
* run_training.py: contains the OpenAI Gym environments used in the RL loop; each environment defines the parameter space, the valid action steps, and the reward (a minimal sketch of such an environment is shown after this list)
* eval.py: contains all of the code for evaluation
* gen_spec.py: contains all of the random specification generation
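
For orientation, below is a minimal sketch of the kind of Gym environment that `run_training.py` provides. The class name, spaces, step size, and reward here are illustrative stand-ins, not the actual `Envir` implementation:

```
import numpy as np
import gymnasium as gym
from gymnasium import spaces

class SimpleCircuitEnv(gym.Env):
    """Illustrative only: the agent nudges circuit parameters toward a
    target spec and is rewarded for closing the gap."""

    def __init__(self, env_config=None):
        self.target = np.array([1.0, 0.5], dtype=np.float32)   # hypothetical target spec
        self.params = np.zeros(2, dtype=np.float32)            # tunable parameters
        self.action_space = spaces.Discrete(4)                 # +/- step for each parameter
        self.observation_space = spaces.Box(-10.0, 10.0, shape=(2,), dtype=np.float32)
        self.horizon = 25
        self.steps = 0

    def reset(self, *, seed=None, options=None):
        super().reset(seed=seed)
        self.params = np.zeros(2, dtype=np.float32)
        self.steps = 0
        return self.params.copy(), {}

    def step(self, action):
        idx, sign = divmod(int(action), 2)        # which parameter, which direction
        self.params[idx] += 0.1 if sign else -0.1
        self.steps += 1
        error = float(np.abs(self.params - self.target).sum())
        reward = -error                           # closer to the spec -> higher reward
        done = error < 0.05                       # spec reached
        truncated = self.steps >= self.horizon
        return self.params.copy(), reward, done, truncated, {}
```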

## Training
Make sure that you have OpenAI Gym (Gymnasium) and Ray RLlib installed; an example install command is shown below.
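
A minimal install sketch (the exact package names and versions are an assumption; match them to the Ray and Gymnasium versions used in your environment):
```
pip install "ray[rllib]" gymnasium numpy pyyaml
```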

To generate the design specifications that the agent trains on, run:
```
python3.10 gen_spec.py
```
The result is a YAML file of design specifications dumped to `../generators/gdsfactory-gen/`; an example of its contents is shown below.
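
For reference, the dumped file is a flattened Python-dict text that `yaml.safe_load` can read back; a truncated example with illustrative values (the `gain_min` and `FOM` keys come from the ranges in `gen_spec.py`) looks like:
```
{'gain_min': [23448947.1,
 41906583.4],
 'FOM': [400000000000.0,
 400000000000.0]}
```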

To train the agent, run the following from the top-level directory:
```
python3.10 model.py
```
The training checkpoints will be saved in your home directory under `ray_results`. TensorBoard can be used to load the reward and loss plots using the command:

```
tensorboard --logdir path/to/checkpoint
```

## Validation
The evaluation script takes the trained agent and gives it new specs that the agent has never seen before. To generate new design specs, run gen_spec.py again with your desired number of specs to validate on. To run validation:

```
python3.10 eval.py
```
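
If the trained checkpoint is not in the default `./last_checkpoint` location written by `model.py`, the `--checkpoint_dir`/`-cpd` flag defined in `eval.py` can point to it (the path below is illustrative):
```
python3.10 eval.py --checkpoint_dir ~/ray_results/new_train_with_new_params_3/<trial_name>/checkpoint_000012
```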

The evaluation results will be saved to `../generators/gdsfactory-gen/`.

## Results
Please note that results vary greatly based on the random seed and spec generation (both for testing and validation). An example spec file, which was used to generate the results below, is provided.

<p float="left">
<img src="mean_reward_versus_step.png" width="400" />
</p>
122 changes: 122 additions & 0 deletions openfasoc/MLoptimization/eval.py
@@ -0,0 +1,122 @@
# Add glayout to path
import sys
sys.path.append('../generators/gdsfactory-gen')
sys.path.append('../generators/gdsfactory-gen/tapeout_and_RL')

# training imports
import numpy as np
from ray.rllib.algorithms.ppo import PPO
from run_training import Envir
import pickle
import yaml
from pathlib import Path
import argparse

def unlookup(norm_spec, goal_spec):
    # Inverse of the normalization norm = (spec - goal) / (spec + goal):
    # recovers the raw spec value from its normalized form.
    spec = -1 * np.multiply((norm_spec + 1), goal_spec) / (norm_spec - 1)
    return spec

def evaluate_model(checkpoint_dir: str = "./last_checkpoint"):
    specs = yaml.safe_load(Path('newnew_eval_3.yaml').read_text())

    # evaluation set up
    env_config = {
        "generalize": True,
        "num_valid": 2,
        "save_specs": False,
        "inputspec": specs,
        "run_valid": True,
        "horizon": 25,
    }

    # full algorithm config (kept for reference; only env_config is used below)
    config_eval = {
        #"sample_batch_size": 200,
        "env": Envir,
        "env_config": {
            "generalize": True,
            "num_valid": 2,
            "save_specs": False,
            "inputspec": specs,
            "run_valid": True,
            "horizon": 25,
        },
    }

    # allow --checkpoint_dir / -cpd to override the default checkpoint location
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_dir', '-cpd', type=str)
    args = parser.parse_args()
    if args.checkpoint_dir:
        checkpoint_dir = args.checkpoint_dir
    env = Envir(env_config=env_config)

    agent = PPO.from_checkpoint(checkpoint_dir)

    norm_spec_ref = env.global_g
    spec_num = len(env.specs)

    rollouts = []
    next_states = []
    obs_reached = []
    obs_nreached = []
    action_array = []
    action_arr_comp = []
    rollout_steps = 0
    reached_spec = 0
    f = open("newnewnew_eval__3.txt", "a")

    while rollout_steps < 100:
        rollout_num = []
        state, info = env.reset()

        done = False
        truncated = False
        reward_total = 0.0
        steps = 0
        f.write('new----------------------------------------')
        while not done and not truncated:
            action = agent.compute_single_action(state)
            action_array.append(action)

            next_state, reward, done, truncated, info = env.step(action)
            f.write(str(action) + '\n')
            f.write(str(reward) + '\n')
            f.write(str(done) + '\n')
            print(next_state)
            print(action)
            print(reward)
            print(done)
            reward_total += reward

            rollout_num.append(reward)
            next_states.append(next_state)

            state = next_state

        # the observation carries the normalized target specs; convert them back
        norm_ideal_spec = state[spec_num:spec_num + spec_num]
        ideal_spec = unlookup(norm_ideal_spec, norm_spec_ref)
        if done:
            reached_spec += 1
            obs_reached.append(ideal_spec)
            action_arr_comp.append(action_array)
            action_array = []
            pickle.dump(action_arr_comp, open("action_arr_test", "wb"))
        else:
            obs_nreached.append(ideal_spec)  # save unreached observation
            action_array = []

        f.write('done----------------------------------------')
        rollouts.append(rollout_num)

        print("Episode reward", reward_total)
        rollout_steps += 1

    #if out is not None:
    #    pickle.dump(rollouts, open(str(out)+'reward', "wb"))
    pickle.dump(obs_reached, open("opamp_obs_reached_test", "wb"))
    pickle.dump(obs_nreached, open("opamp_obs_nreached_test", "wb"))

    f.write("Specs reached: " + str(reached_spec) + "/" + str(len(obs_nreached)))
    print("Specs reached: " + str(reached_spec) + "/" + str(len(obs_nreached)))
    f.close()

    print("Num specs reached: " + str(reached_spec) + "/" + str(1))

if __name__ == "__main__":
    evaluate_model()
37 changes: 37 additions & 0 deletions openfasoc/MLoptimization/gen_spec.py
@@ -0,0 +1,37 @@
#!/usr/bin/env python3
## Generate the design specifications and then save them to a YAML file

import random
import argparse

def generate_random_specs(filename, num_specs):
    # [min, max] sampling range for each spec
    specs_range = {
        "gain_min": [float(14003380.0), float(50003380.0)],
        "FOM": [float(4e11), float(4e11)]
    }
    specs_range_vals = list(specs_range.values())
    specs_valid = []
    for spec in specs_range_vals:
        # sample integers for integer bounds, floats otherwise
        if isinstance(spec[0], int):
            list_val = [random.randint(int(spec[0]), int(spec[1])) for x in range(0, num_specs)]
        else:
            list_val = [random.uniform(float(spec[0]), float(spec[1])) for x in range(0, num_specs)]
        specs_valid.append(tuple(list_val))
    i = 0
    for key, value in specs_range.items():
        specs_range[key] = specs_valid[i]
        i += 1

    # dump the dict as YAML-parsable text (tuples rewritten as lists, one value per line)
    output = str(specs_range)
    with open(filename, 'w') as f:
        f.write(output.replace('(', '[').replace(')', ']').replace(',', ',\n'))

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_specs', type=int, default=100)
    args = parser.parse_args()

    generate_random_specs("train.yaml", args.num_specs)

if __name__ == "__main__":
    main()
42 changes: 42 additions & 0 deletions openfasoc/MLoptimization/model.py
@@ -0,0 +1,42 @@
# Add glayout to path
import sys
sys.path.append('../generators/gdsfactory-gen')
sys.path.append('../generators/gdsfactory-gen/tapeout_and_RL')

# training imports
import ray
import ray.tune as tune
from ray.rllib.algorithms.ppo import PPO
from run_training import Envir

import argparse

def train_model(save_checkpoint_dir: str = "./last_checkpoint"):
    ray.init(num_cpus=33, num_gpus=0, include_dashboard=True, ignore_reinit_error=True)

    # configures training of the agent with the associated hyperparameters
    config_train = {
        "env": Envir,
        "train_batch_size": 1000,
        "model": {"fcnet_hiddens": [64, 64]},
        "num_workers": 32,
        "env_config": {"generalize": True, "run_valid": False, "horizon": 20},
    }

    # Runs training and saves the results in ~/ray_results/new_train_with_new_params_3
    # If a run fails for any reason, training can be restored from the last checkpoint
    trials = tune.run(
        "PPO",  # You can replace this string with ppo.PPOTrainer if you want / have customized it
        name="new_train_with_new_params_3",  # The name can be different.
        stop={"episode_reward_mean": 12, "training_iteration": 12},
        checkpoint_freq=1,
        config=config_train,
    )
    # copy the final checkpoint to a fixed location so eval.py can find it
    trials.get_last_checkpoint().to_directory(save_checkpoint_dir)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--checkpoint_dir', '-cpd', type=str)
    args = parser.parse_args()

    train_model(args.checkpoint_dir or "./last_checkpoint")