Add trust_remote_code Parameter and Set Default to False #5819

Open · wants to merge 2 commits into base: main

1 change: 1 addition & 0 deletions examples/extras/nlg_eval/llama3_lora_predict.yaml
@@ -4,6 +4,7 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/inference/llama3.yaml
@@ -1,3 +1,4 @@
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
infer_backend: huggingface # choices: [huggingface, vllm]
trust_remote_code: true
1 change: 1 addition & 0 deletions examples/inference/llama3_lora_sft.yaml
@@ -3,3 +3,4 @@ adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
finetuning_type: lora
infer_backend: huggingface # choices: [huggingface, vllm]
trust_remote_code: true
1 change: 1 addition & 0 deletions examples/inference/llama3_vllm.yaml
@@ -2,3 +2,4 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
infer_backend: vllm
vllm_enforce_eager: true
trust_remote_code: true
1 change: 1 addition & 0 deletions examples/inference/llava1_5.yaml
@@ -1,3 +1,4 @@
model_name_or_path: llava-hf/llava-1.5-7b-hf
template: llava
infer_backend: huggingface # choices: [huggingface, vllm]
trust_remote_code: true
1 change: 1 addition & 0 deletions examples/inference/qwen2_vl.yaml
@@ -1,3 +1,4 @@
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
template: qwen2_vl
infer_backend: huggingface # choices: [huggingface, vllm]
trust_remote_code: true
1 change: 1 addition & 0 deletions examples/merge_lora/llama3_gptq.yaml
@@ -1,6 +1,7 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
template: llama3
trust_remote_code: true

### export
export_dir: models/llama3_gptq
1 change: 1 addition & 0 deletions examples/merge_lora/llama3_lora_sft.yaml
@@ -5,6 +5,7 @@ model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
template: llama3
finetuning_type: lora
trust_remote_code: true

### export
export_dir: models/llama3_lora_sft
1 change: 1 addition & 0 deletions examples/merge_lora/qwen2vl_lora_sft.yaml
@@ -5,6 +5,7 @@ model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
adapter_name_or_path: saves/qwen2_vl-7b/lora/sft
template: qwen2_vl
finetuning_type: lora
trust_remote_code: true

### export
export_dir: models/qwen2_vl_lora_sft
1 change: 1 addition & 0 deletions examples/train_full/llama3_full_sft.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_full/qwen2vl_full_sft.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_dpo.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: dpo
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_eval.yaml
@@ -1,6 +1,7 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
adapter_name_or_path: saves/llama3-8b/lora/sft
trust_remote_code: true

### method
finetuning_type: lora
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_kto.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: kto
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_ppo.yaml
@@ -1,6 +1,7 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
reward_model: saves/llama3-8b/lora/reward
trust_remote_code: true

### method
stage: ppo
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_pretrain.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: pt
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_reward.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: rm
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_sft.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_lora/llama3_lora_sft_ds3.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_lora/llama3_preprocess.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_lora/llava1_5_lora_sft.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: llava-hf/llava-1.5-7b-hf
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_lora/qwen2vl_lora_dpo.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
trust_remote_code: true

### method
stage: dpo
1 change: 1 addition & 0 deletions examples/train_lora/qwen2vl_lora_sft.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: Qwen/Qwen2-VL-7B-Instruct
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_qlora/llama3_lora_sft_aqlm.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: ISTA-DASLab/Meta-Llama-3-8B-Instruct-AQLM-2Bit-1x16
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_qlora/llama3_lora_sft_awq.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-AWQ
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_qlora/llama3_lora_sft_gptq.yaml
@@ -1,5 +1,6 @@
### model
model_name_or_path: TechxGenus/Meta-Llama-3-8B-Instruct-GPTQ
trust_remote_code: true

### method
stage: sft
1 change: 1 addition & 0 deletions examples/train_qlora/llama3_lora_sft_otfq.yaml
@@ -2,6 +2,7 @@
model_name_or_path: meta-llama/Meta-Llama-3-8B-Instruct
quantization_bit: 4
quantization_method: bitsandbytes # choices: [bitsandbytes (4/8), hqq (2/3/4/5/6/8), eetq (8)]
trust_remote_code: true

### method
stage: sft
2 changes: 1 addition & 1 deletion src/llamafactory/chat/vllm_engine.py
@@ -72,7 +72,7 @@ def __init__(

engine_args = {
"model": model_args.model_name_or_path,
"trust_remote_code": True,
"trust_remote_code": model_args.trust_remote_code,
"download_dir": model_args.cache_dir,
"dtype": model_args.infer_dtype,
"max_model_len": model_args.vllm_maxlen,
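For context, a minimal sketch of the vLLM side of this change (not part of this diff; it assumes vllm is installed and uses a standalone LLM call rather than the repository's async engine). vLLM's entry points accept trust_remote_code directly, so the user-facing flag can be forwarded as-is:

from vllm import LLM

# Hypothetical standalone usage; in LLaMA-Factory the value now comes from
# model_args.trust_remote_code instead of a hard-coded True.
llm = LLM(
    model="meta-llama/Meta-Llama-3-8B-Instruct",
    trust_remote_code=False,  # refuse to execute custom modeling code from the Hub
    dtype="auto",
)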
2 changes: 1 addition & 1 deletion src/llamafactory/data/loader.py
@@ -129,7 +129,7 @@ def _load_single_dataset(
token=model_args.hf_hub_token,
streaming=data_args.streaming,
num_proc=data_args.preprocessing_num_workers,
- trust_remote_code=True,
+ trust_remote_code=model_args.trust_remote_code,
)

if dataset_attr.num_samples is not None and not data_args.streaming:
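A hedged illustration of the datasets API this flag is forwarded to (not the repository's loader; assumes datasets >= 2.16, and the dataset name is a placeholder for illustration only):

from datasets import load_dataset

# With the new default, the flag stays False unless the user opts in via
# model_args.trust_remote_code.
dataset = load_dataset(
    "imdb",
    split="train",
    trust_remote_code=False,
)
print(len(dataset))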
2 changes: 1 addition & 1 deletion src/llamafactory/eval/evaluator.py
@@ -100,7 +100,7 @@ def eval(self) -> None:
cache_dir=self.model_args.cache_dir,
download_mode=self.eval_args.download_mode,
token=self.model_args.hf_hub_token,
- trust_remote_code=True,
+ trust_remote_code=self.model_args.trust_remote_code,
)
pbar.set_postfix_str(categorys[subject]["name"])
inputs, outputs, labels = [], [], []
10 changes: 10 additions & 0 deletions src/llamafactory/hparams/model_args.py
@@ -301,6 +301,16 @@ class ModelArguments(QuantizationArguments, ProcessorArguments, ExportArguments,
init=False,
metadata={"help": "Whether use block diag attention or not, derived from `neat_packing`. Do not specify it."},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub. "
"This option should only be set to `True` for repositories you trust and in which "
"you have read the code, as it will execute code present on the Hub on your local machine."
)
},
)

def __post_init__(self):
if self.model_name_or_path is None:
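To see how a boolean dataclass field like the one added above becomes a user-facing option, here is a minimal, self-contained sketch (not LLaMA-Factory's actual argument plumbing) using transformers' HfArgumentParser, which is how such fields are typically exposed on the CLI or via a YAML config:

from dataclasses import dataclass, field

from transformers import HfArgumentParser


@dataclass
class DemoModelArguments:
    # Trimmed-down stand-in for ModelArguments, for illustration only.
    model_name_or_path: str = field(default="meta-llama/Meta-Llama-3-8B-Instruct")
    trust_remote_code: bool = field(default=False)


parser = HfArgumentParser(DemoModelArguments)
# Passing `--trust_remote_code True` flips the new, safer default of False.
(demo_args,) = parser.parse_args_into_dataclasses(["--trust_remote_code", "True"])
print(demo_args.trust_remote_code)  # True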
5 changes: 3 additions & 2 deletions src/llamafactory/model/loader.py
@@ -52,7 +52,7 @@ def _get_init_kwargs(model_args: "ModelArguments") -> Dict[str, Any]:
skip_check_imports()
model_args.model_name_or_path = try_download_model_from_other_hub(model_args)
return {
"trust_remote_code": True,
"trust_remote_code": model_args.trust_remote_code,
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"token": model_args.hf_hub_token,
@@ -155,8 +155,9 @@ def load_model(
load_class = AutoModelForCausalLM

if model_args.train_from_scratch:
- model = load_class.from_config(config, trust_remote_code=True)
+ model = load_class.from_config(config, trust_remote_code=model_args.trust_remote_code)
else:
+ init_kwargs["trust_remote_code"] = model_args.trust_remote_code
model = load_class.from_pretrained(**init_kwargs)

if model_args.mixture_of_depths == "convert":
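A hedged sketch (not the loader itself) of how the value ends up in the transformers Auto classes once it is part of init_kwargs; the checkpoint name is just the example model used in the configs above:

from transformers import AutoConfig, AutoModelForCausalLM

repo = "meta-llama/Meta-Llama-3-8B-Instruct"
config = AutoConfig.from_pretrained(repo, trust_remote_code=False)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    config=config,
    trust_remote_code=False,  # previously hard-coded to True in _get_init_kwargs
)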
2 changes: 1 addition & 1 deletion src/llamafactory/model/model_utils/unsloth.py
@@ -39,7 +39,7 @@ def _get_unsloth_kwargs(
"device_map": {"": get_current_device()},
"rope_scaling": getattr(config, "rope_scaling", None),
"fix_tokenizer": False,
"trust_remote_code": True,
"trust_remote_code": model_args.trust_remote_code,
"use_gradient_checkpointing": "unsloth",
}

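Finally, a small sketch of the behavioral consequence of the new default (an assumption about transformers' error handling, not code from this PR): with trust_remote_code left at False, checkpoints that ship custom modeling code fail fast instead of silently executing it.

from transformers import AutoConfig

try:
    AutoConfig.from_pretrained(
        "some-org/model-with-custom-code",  # hypothetical placeholder repo
        trust_remote_code=False,
    )
except (OSError, ValueError) as err:
    # ValueError if the repo exists but requires executing its custom code;
    # OSError if the placeholder repo simply does not exist.
    print(f"Refused or failed to load: {err}")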