Merge pull request #35 from thunlp/parallel-adapter

Parallel adapter
This commit is contained in:
DingDing 2022-10-17 16:46:25 +08:00 committed by GitHub
commit f944683087
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 1703 additions and 109 deletions

View File

@@ -7,3 +7,8 @@
 2. **Available Models with default configurations are ..., please manually add the delta models by specifying 'modified_modules' based on the visualization of your model structure.**
 Although most pre-trained models (PTMs) use the transformer architecture, they are implemented differently. For example, the attention module in GPT2 and BERT is not only named differently, but also implemented in different ways. Common structure mapping maps the different name conventions of different PTMs into a unified name convention. But there are many PTMs that we do not currently cover. Don't worry! For these models, you can figure out which modules you should modify by simply [visualizing the PTMs](visualization), and then specify the `modified_modules` manually (see [name-based addressing](namebasedaddr)).
+3. **Requires a dummy_inputs to be passed through the model to understand the dimensionality of each tensor in the computation graph. The {module.__class__.__name__} Class has no dummy_inputs, and automatically created dummy_inputs failed.**
+The `dummy_inputs` can be any data that make `backbone_model.forward(**dummy_inputs)` succeed. Only the form and shape of the `dummy_inputs` matter. To set dummy_inputs for your model, please use: `setattr(backbone_model, 'dummy_inputs', some_dummy_inputs)` before initializing `{self.__class__.__name__}`.
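For instance, a minimal sketch of setting `dummy_inputs` (the backbone and input shape here are only illustrative):

import torch
from transformers import AutoModel
from opendelta import AdapterModel

backbone = AutoModel.from_pretrained("roberta-base")
# Any inputs that make backbone(**dummy_inputs) run are fine; only their
# form and shape matter, not their values.
setattr(backbone, "dummy_inputs", {"input_ids": torch.zeros(1, 16, dtype=torch.long)})
delta_model = AdapterModel(backbone_model=backbone)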

View File

@@ -71,8 +71,21 @@ AllConfigs['adapter_bart-base'].update({
     "output_dir": "outputs/adapter/bart-base/",
 })

-AllConfigs['lora_bart-base'] = copy.deepcopy(BaseConfigs['bart-base'])
-AllConfigs['lora_bart-base'].update({
+AllConfigs['parallel_adapter_t5-base'] = copy.deepcopy(BaseConfigs['t5-base'])
+AllConfigs['parallel_adapter_t5-base'].update({
+    "delta_type": "parallel_adapter",
+    "learning_rate": 3e-4,
+    "unfrozen_modules": [
+        "deltas",
+        "layer_norm",
+        "final_layer_norm"
+    ],
+    "bottleneck_dim": 24,
+    "output_dir": "outputs/parallel_adapter/t5-base/",
+})
+
+AllConfigs['lora_t5-base'] = copy.deepcopy(BaseConfigs['t5-base'])
+AllConfigs['lora_t5-base'].update({
     "delta_type": "lora",
     "learning_rate": 3e-4,
     "unfrozen_modules": [

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "cola",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/cola",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "cola",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "cola",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}
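These JSON files follow the HuggingFace TrainingArguments naming, so a runner script can parse one directly; a minimal sketch (the script invocation is illustrative, allow_extra_keys requires a recent transformers release, and the repo's example runners define their own extra dataclasses for the model/data/delta keys):

import sys
from transformers import HfArgumentParser, Seq2SeqTrainingArguments

# e.g. python run_glue.py configs/adapter_roberta-base/cola.json
parser = HfArgumentParser(Seq2SeqTrainingArguments)
(training_args,) = parser.parse_json_file(
    json_file=sys.argv[-1],
    allow_extra_keys=True,  # tolerate keys handled by other dataclasses
)
print(training_args.learning_rate, training_args.num_train_epochs)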

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "mnli",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/adapter/roberta-base/mnli",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "mnli",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "mnli",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "mrpc",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/mrpc",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "mrpc",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "mrpc",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "qnli",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/adapter/roberta-base/qnli",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "qnli",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "qnli",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "qqp",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/adapter/roberta-base/qqp",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "qqp",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "qqp",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "rte",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/rte",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": false,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "rte",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "rte",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "sst2",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/adapter/roberta-base/sst2",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "sst2",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "sst2",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "stsb",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/stsb",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "stsb",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "stsb",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-boolq",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/superglue-boolq",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-boolq",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-boolq",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-cb",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/superglue-cb",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-cb",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-cb",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-copa",
"eval_steps": 50,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 40,
"output_dir": "outputs/adapter/roberta-base/superglue-copa",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 50,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-copa",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-copa",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-multirc",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/adapter/roberta-base/superglue-multirc",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-multirc",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-multirc",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-record",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 512,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/adapter/roberta-base/superglue-record",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 16,
"per_device_train_batch_size": 16,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-record",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-record",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-wic",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/superglue-wic",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-wic",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-wic",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-wsc.fixed",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/adapter/roberta-base/superglue-wsc.fixed",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-wsc.fixed",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-wsc.fixed",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "cola",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/cola",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "cola",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "cola",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}
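The delta-specific keys above ("delta_type", "bottleneck_dim", "unfrozen_modules") map directly onto OpenDelta's API; a minimal sketch of applying them programmatically (the backbone choice is illustrative):

from transformers import AutoModelForSequenceClassification
from opendelta import AutoDeltaConfig, AutoDeltaModel

backbone = AutoModelForSequenceClassification.from_pretrained("roberta-base")

# mirrors "delta_type" and "bottleneck_dim" from the config above
delta_config = AutoDeltaConfig.from_dict({
    "delta_type": "parallel_adapter",
    "bottleneck_dim": 24,
})
delta_model = AutoDeltaModel.from_config(delta_config, backbone_model=backbone)

# corresponds to "unfrozen_modules" above
delta_model.freeze_module(
    exclude=["deltas", "layer_norm", "final_layer_norm", "classifier"],
    set_state_dict=True,
)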

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "mnli",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/parallel_adapter/roberta-base/mnli",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "mnli",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "mnli",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "mrpc",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/mrpc",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "mrpc",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "mrpc",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "qnli",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/parallel_adapter/roberta-base/qnli",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "qnli",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "qnli",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "qqp",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/parallel_adapter/roberta-base/qqp",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "qqp",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "qqp",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "rte",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/rte",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "rte",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "rte",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "sst2",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/parallel_adapter/roberta-base/sst2",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "sst2",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "sst2",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "stsb",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 128,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/stsb",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "stsb",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "stsb",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-boolq",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-boolq",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-boolq",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-boolq",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-cb",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-cb",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-cb",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-cb",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-copa",
"eval_steps": 50,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 40,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-copa",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 50,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-copa",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-copa",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-multirc",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-multirc",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-multirc",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-multirc",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-record",
"eval_steps": 200,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 512,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 3,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-record",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 16,
"per_device_train_batch_size": 16,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 200,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-record",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-record",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-wic",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-wic",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-wic",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-wic",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -0,0 +1,46 @@
{
"bottleneck_dim": 24,
"dataset_config_name": [
"en"
],
"delta_type": "parallel_adapter",
"do_eval": true,
"do_test": true,
"do_train": true,
"eval_dataset_config_name": [
"en"
],
"eval_dataset_name": "superglue-wsc.fixed",
"eval_steps": 100,
"evaluation_strategy": "steps",
"greater_is_better": true,
"learning_rate": 0.0003,
"load_best_model_at_end": true,
"max_source_length": 256,
"metric_for_best_model": "eval_accuracy",
"model_name_or_path": "roberta-base",
"num_train_epochs": 20,
"output_dir": "outputs/parallel_adapter/roberta-base/superglue-wsc.fixed",
"overwrite_output_dir": true,
"per_device_eval_batch_size": 32,
"per_device_train_batch_size": 32,
"predict_with_generate": true,
"push_to_hub": true,
"save_steps": 100,
"save_strategy": "steps",
"save_total_limit": 1,
"seed": 42,
"task_name": "superglue-wsc.fixed",
"test_dataset_config_name": [
"en"
],
"test_dataset_name": "superglue-wsc.fixed",
"tokenizer_name": "roberta-base",
"unfrozen_modules": [
"deltas",
"layer_norm",
"final_layer_norm",
"classifier"
],
"warmup_steps": 0
}

View File

@@ -161,6 +161,20 @@ AllConfigs['adapter_roberta-base'].update({
     "output_dir": "outputs/adapter/roberta-base/",
 })

+AllConfigs['parallel_adapter_roberta-base'] = copy.deepcopy(BaseConfigs['roberta-base'])
+AllConfigs['parallel_adapter_roberta-base'].update({
+    "delta_type": "parallel_adapter",
+    "learning_rate": 3e-4,
+    "unfrozen_modules": [
+        "deltas",
+        "layer_norm",
+        "final_layer_norm",
+        "classifier",
+    ],
+    "bottleneck_dim": 24,
+    "output_dir": "outputs/parallel_adapter/roberta-base/",
+})
+
 AllConfigs['lora_roberta-base'] = copy.deepcopy(BaseConfigs['roberta-base'])
 AllConfigs['lora_roberta-base'].update({
     "delta_type": "lora",

View File

@@ -17,6 +17,7 @@ DELTA_CONFIG_MAPPING = {
     "compacter": "CompacterConfig",
     "prefix": "PrefixConfig",
     "soft_prompt": "SoftPromptConfig",
+    "parallel_adapter": "ParallelAdapterConfig",
 }

 DELTA_MODEL_MAPPING = {
@@ -27,6 +28,7 @@ DELTA_MODEL_MAPPING = {
     "compacter": "CompacterModel",
     "prefix": "PrefixModel",
     "soft_prompt": "SoftPromptModel",
+    "parallel_adapter": "ParallelAdapterModel",
 }

 class _LazyConfigMapping(OrderedDict):
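These two entries are all the auto classes need to resolve the new delta type from its string name; a minimal sketch of the lookup:

from opendelta import AutoDeltaConfig

# "parallel_adapter" is resolved through DELTA_CONFIG_MAPPING and lazily
# imported, yielding a ParallelAdapterConfig instance.
delta_config = AutoDeltaConfig.from_dict({"delta_type": "parallel_adapter", "bottleneck_dim": 24})
print(type(delta_config).__name__)  # ParallelAdapterConfig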

View File

@@ -26,6 +26,7 @@ from opendelta.utils.data_parallel import new_replicate_for_data_parallel
 from opendelta.utils.cuda import move_dict_to_cuda
 import sys
+from opendelta.utils.data_parallel import caller_map

 logger = logging.get_logger(__name__)

 def is_leaf_module(module):
@@ -371,7 +372,7 @@ class DeltaBase(nn.Module, SaveLoadMixin):
             except:
                 _auto_dummy_fail = True
             if _auto_dummy_fail:
-                raise AttributeError(f"\nThe {self.__class__.__name__} requires a pseudo-data to be passed through the model to understand the dimensionality of each tensor in the computation graph. \nThe automatically created dummy inputs failed.\nThe `dummy_inputs` can be any data that make `backbone_model.forward(**dummy_inputs)` succeed. Only the form and shape of the `dummy_inputs` matter.\n\tTo set dummy_inputs for your model, please use: `setattr(backbone_model, 'dummy_inputs', some_dummy_inputs)` before initializing `{self.__class__.__name__}` ")
+                raise AttributeError(f"\n\tThe {self.__class__.__name__} requires a dummy_inputs to be passed through the model to understand the dimensionality of each tensor in the computation graph. \n\t The {module.__class__.__name__} Class has no dummy_inputs, and automatically created dummy_inputs failed.\n\t Refer to `https://opendelta.readthedocs.io/en/latest/notes/faq.html` for detail.")
@@ -531,7 +532,7 @@ class DeltaBase(nn.Module, SaveLoadMixin):
         """
         raise NotImplementedError

-    def insert_sequential_module(self, module, delta_module=None, delta_name='delta', strict=False, _delta_info=None):
+    def insert_module(self, module, method='sequential', delta_module=None, delta_name='delta', strict=False, _delta_info=None):
         r"""insert a module (previous not exists in the code base) before/after a module. Specifically, it modifies the forward
         function of the original module to firstly pass the arguments into the new module's forward function and then pass
         it into the original ones. The new module can also be inserted after the original module with similar mechanism.
@@ -547,15 +548,6 @@ class DeltaBase(nn.Module, SaveLoadMixin):
             original delta is passed through ``_delta_info``.
         """
-        def _caller(_org_func, org_module, delta_name, *args, **kwargs):
-            args = args[1:] # the first argument here is ``self``
-            delta_module = getattr(org_module, delta_name)
-            if hasattr(delta_module, "pre_forward"):# is not None:
-                args, kwargs = delta_module.pre_forward(*args, **kwargs)
-            ret = _org_func(*args, **kwargs)
-            if hasattr(delta_module, "post_forward"):# is not None:
-                ret = delta_module.post_forward(ret)
-            return ret
         if strict:
@@ -566,9 +558,9 @@ class DeltaBase(nn.Module, SaveLoadMixin):
         if _delta_info is None:
             if delta_module is None:
                 raise RuntimeError("delta module can't be none to ensure successful replicate of the parent module.")
-            _delta_info = {"method": "insert_sequential",
+            _delta_info = {"method": method,
                            "delta_module": delta_module,
                            "delta_name": delta_name,
                            "delta_belong": self,
                            "state": "on"}
@@ -580,12 +572,36 @@ class DeltaBase(nn.Module, SaveLoadMixin):
         setattr(module, _delta_info['delta_name'], _delta_info["delta_module"])

-        new_forward = decorate(module.forward, _caller, extras=(module, _delta_info['delta_name']), kwsyntax=True) # decorator.decorate helps preserving the functions metadata (signature, etc.).
-        module.forward = new_forward.__get__(module, type(module))  # func.__get__(object, type(object)) register a function as an object's method
-        # for DataParallel's copy behavior. Experimental:
-        # may have bugs when module.forward is nestedly wrapped.
-        module._replicate_for_data_parallel = new_replicate_for_data_parallel.__get__(module, type(module))
+        if _delta_info["method"] in caller_map.keys():
+            caller = caller_map[_delta_info["method"]]
+            new_forward = decorate(module.forward, caller, extras=(module, _delta_info['delta_name']), kwsyntax=True) # decorator.decorate helps preserving the functions metadata (signature, etc.).
+            module.forward = new_forward.__get__(module, type(module))  # func.__get__(object, type(object)) register a function as an object's method
+            # for DataParallel's copy behavior. Experimental:
+            # may have bugs when module.forward is nestedly wrapped.
+            module._replicate_for_data_parallel = new_replicate_for_data_parallel.__get__(module, type(module))
+        else:
+            raise NotImplementedError(f"_delta_info['method']=='{_delta_info['method']}' is not supported")
+
+    def insert_sequential_module(self, module, delta_module=None, delta_name='delta', strict=False, _delta_info=None):
+        r"""insert a module (previous not exists in the code base) before/after a module. Specifically, it modifies the forward
+        function of the original module to firstly pass the arguments into the new module's forward function and then pass
+        it into the original ones. The new module can also be inserted after the original module with similar mechanism.
+
+        When implementing the new module, researchers should be aware of the components of arguments of the original module's forward function.
+
+        Args:
+            module: (:obj:`nn.Module`): The (sub)module to inserted a delta module.
+            delta_module: (:obj:`DeltaBase`): The delta module to be inserted.
+            name: (:obj:`str`, *optional*): The name of the delta in the backbone module.
+            strict: (:obj:`bool`, *optional*): Whether to prohibit modify a modified module.
+            _delta_info (:obj:`Dict`, *optional*): Used in attach(), reattach a delta module to backbone. The info of
+                original delta is passed through ``_delta_info``.
+        """
+        self.insert_module(module, "sequential", delta_module, delta_name, strict, _delta_info)

     def insert_parallel_module(self, module, delta_module=None, delta_name='delta', strict=False, _delta_info=None):
         """insert a module (previous not exists in the code base) across a module. Specifically, it modifies the forward
@@ -604,41 +620,8 @@ class DeltaBase(nn.Module, SaveLoadMixin):
         """
-        def _caller(_org_func, org_module, delta_name, *args, **kwargs):
-            args = args[1:] # the first argument here is ``self``
-            delta_module = getattr(org_module, delta_name)
-            ret_1 = _org_func(*args, **kwargs)
-            ret_2 = delta_module.forward(*args, **kwargs)
-            return ret_1 + ret_2
-
-        if strict:
-            if hasattr(module.forward, "__wrapped__"):
-                raise RuntimeWarning("The forward function might have been wrapped by a decorator, is it intended?")
-
-        # record info for plug and unplug and nested wrap
-        if _delta_info is None:
-            if delta_module is None:
-                raise RuntimeError("delta module can't be none to ensure successful replicate of the parent module.")
-
-            _delta_info = {"method": "insert_parallel",
-                           "delta_module": delta_module,
-                           "delta_name": delta_name,
-                           "delta_belong": self,
-                           "state": "on"}
-            self._register_delta_infos(parent_module=module,
-                                       _delta_info=_delta_info)
-        else:
-            delta_module = _delta_info["delta_module"]
-            delta_name = _delta_info["delta_name"]
-
-        setattr(module, _delta_info['delta_name'], _delta_info["delta_module"])
-
-        new_forward = decorate(module.forward, _caller, extras=(module, _delta_info['delta_name']), kwsyntax=True) # decorator.decorate helps preserving the functions metadata (signature, etc.).
-        module.forward = new_forward.__get__(module, type(module))  # func.__get__(object, type(object)) register a function as an object's method
-        # for DataParallel's copy behavior. Experimental:
-        # may have bugs when module.forward is nestedly wrapped.
-        module._replicate_for_data_parallel = new_replicate_for_data_parallel.__get__(module, type(module))
+        self.insert_module(module, "parallel", delta_module, delta_name, strict, _delta_info)

     def set_active_state_dict(self, module: nn.Module):
         r"""modify the state_dict function of the model (by default, the backbone model) to return only the tunable part.
@@ -821,13 +804,7 @@ class DeltaBase(nn.Module, SaveLoadMixin):
                 if _delta_info['method'] == "replace":
                     setattr(submodule, _delta_info["child_name"], _delta_info['org_module'])
-                elif _delta_info['method'] == "insert_sequential":
-                    if hasattr(submodule.forward, "__wrapped__"):
-                        submodule.forward = submodule.forward.__wrapped__
-                        delattr(submodule, _delta_info["delta_name"])
-                    else:
-                        raise AttributeError("submodule {}'s forward has no attribute __wrapped__. It's not a wrapped function.".format(name))
-                elif _delta_info['method'] == "insert_parallel":
+                elif _delta_info['method'] in ["sequential", "before", "after", "parallel"]:
                     if hasattr(submodule.forward, "__wrapped__"):
                         submodule.forward = submodule.forward.__wrapped__
                         delattr(submodule, _delta_info["delta_name"])
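The refactor above funnels all four wrapping modes through one code path keyed by ``method``. A self-contained toy sketch of that dispatch idea (plain PyTorch, heavily simplified; not the library's actual API):

import torch
import torch.nn as nn

def make_caller(method):
    def caller(org_forward, delta, *args, **kwargs):
        if method in ("sequential", "before") and hasattr(delta, "pre_forward"):
            args, kwargs = delta.pre_forward(*args, **kwargs)
        ret = org_forward(*args, **kwargs)
        if method == "parallel":
            return ret + delta(*args, **kwargs)  # add a parallel branch
        if method in ("sequential", "after") and hasattr(delta, "post_forward"):
            ret = delta.post_forward(ret)
        return ret
    return caller

def insert_module(module, method, delta_module, delta_name="delta"):
    module.add_module(delta_name, delta_module)
    org_forward = module.forward
    caller = make_caller(method)
    module.forward = lambda *a, **kw: caller(org_forward, getattr(module, delta_name), *a, **kw)

class Bottleneck(nn.Module):
    def __init__(self, dim, rank=4):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim, rank), nn.ReLU(), nn.Linear(rank, dim))
    def forward(self, x):
        return self.net(x)

layer = nn.Linear(8, 8)
insert_module(layer, "parallel", Bottleneck(8))  # y = W x + b + adapter(x)
print(layer(torch.randn(2, 8)).shape)  # torch.Size([2, 8])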

View File

@@ -151,7 +151,6 @@ class AdapterConfig(BaseDeltaConfig):
         self,
         bottleneck_dim: Optional[int]=24,
         non_linearity: Optional[str]='gelu_new',
-        sequential: Optional[bool] = True,
         **kwargs
     ):
         super().__init__(**kwargs)
@@ -182,14 +181,9 @@ class AdapterModel(DeltaBase):
         backbone_model (:obj:`transformers.PretrainedModels`): The backbone model to be modified.
         bottleneck_dim (:obj:`int`): The dimension of the adapter's bottleneck.
         non_linearity (:obj:`str`): The non linearity of the adapter.
-        sequential (:obj:`str`): Whether insert the adapter in a sequential manner, as opposed to a parallel manner.
-            See `Towards a Unified View of Parameter-Efficient Transfer Learning <https://arxiv.org/abs/2110.04366>`_
-            for detail.
-        modified_modules (:obj:`List[str]`): For prefix tuning, the it must refer to an attention layer (Currently, only
-            the implemented ones)
-        unfrozen_modules (:obj:`List[str]`, *optional*, default to :obj:`None`): The modules that should be unfrozen
-            together with the prefix parameters.
-        common_structure (:obj:`bool`): whether using name-based addressing with a common structure mapping.
+        modified_modules (:obj:`List[str]`): modules to add adapter after them.
+        unfrozen_modules (:obj:`List[str]`, *optional*, default to :obj:`None`): The modules that should be unfrozen together with the adapter parameters.
+        common_structure (:obj:`bool`): whether using name-based addressing with a common structure mapping.
     """
     config_class = AdapterConfig
@@ -200,10 +194,8 @@ class AdapterModel(DeltaBase):
                  backbone_model: nn.Module,
                  bottleneck_dim: Optional[int]=24,
                  non_linearity: Optional[str]='gelu_new',
-                 sequential: Optional[str] = True,
-                 modified_modules: Optional[List[str]] = None,
-                 exclude_modules: Optional[List[str]] = None,
-                 unfrozen_modules: Optional[List[str]] = None,
+                 modified_modules: Optional[bool] = None,
+                 unfrozen_modules: Optional[bool] = None,
                  common_structure: Optional[bool] = None,
                  interactive_modify: Optional[Union[bool, int]] = False,
                  ):
@@ -225,19 +217,8 @@ class AdapterModel(DeltaBase):
         self.add_all_delta_to_backbone(self.backbone_model,
                                        self.modified_modules,
                                        )
-
-    # def add_all_delta_to_backbone(self,
-    #                               module: nn.Module,
-    #                               modified_modules: List[str],
-    #                               ) -> nn.Module:
-    #     for key, _ in module.named_modules():
-    #         if self.find_key(key, modified_modules):
-    #             self.update_module(module, key)
-    #     self._pseudo_data_to_instantiate(module)
-    #     self.mark_as_delta()
-    #     return module

     def update_module(self, module: nn.Module, key: str):
         _, _, ref = self.find_module(module, key)
         adapterlayer = self.new_module_like(ref)

View File

@@ -0,0 +1,199 @@
from functools import partial
from random import random
from typing import List, Optional, Union
from opendelta.utils.signature import get_arg_names_inside_func
from opendelta.utils.name_based_addressing import *
from opendelta.utils.cuda import get_device
from opendelta.basemodel import DeltaBase
import torch.nn as nn
import torch
from opendelta.delta_models.layers.activations import Activations
from opendelta import BaseDeltaConfig
import opendelta.utils.logging as logging

logger = logging.get_logger(__name__)


class ParallelAdapterLayer(nn.Module):
    r"""A layer of the parallel adapter tuning module.
    """
    layer_count = 0

    @classmethod
    def count_layer(cls):
        cls.layer_count += 1

    @classmethod
    def get_layer_count(cls):
        return cls.layer_count

    def __init__(self, bottleneck_dim=24, non_linearity='gelu_new', scaled=1, device=None):
        super().__init__()
        self.bottleneck_dim = bottleneck_dim
        self.device = device
        self.instantiated = False
        self.non_linearity = non_linearity
        self.scaled = scaled
        self.layer_id = ParallelAdapterLayer.get_layer_count()
        ParallelAdapterLayer.count_layer()

    def instantiate(self, hidden_dim):
        self.modulelist = nn.Sequential()
        self.modulelist.add_module("down_proj", nn.Linear(hidden_dim, self.bottleneck_dim, device=self.device))

        # select non-linearity
        self.modulelist.add_module("non_linear", Activations(self.non_linearity.lower()))

        self.modulelist.add_module("up_proj", nn.Linear(self.bottleneck_dim, self.hidden_dim, device=self.device))

        self.instantiated = True
        # initialize the weight, which is important for fast convergence and better performance.
        self.apply(self._init_weight)

    def _init_weight(self, module):
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=0.01)
            if module.bias is not None:
                module.bias.data.zero_()

    def pre_forward(self, *args, **kwargs):
        r""" Get the hidden_states from the PLM's layer output, pass them into the parallel adapter,
        then combine the adapter output with the main hidden_states in ``post_forward``. Finally pass the result into the subsequent layer.
        """
        if isinstance(args, tuple):
            hiddens = args[0]
        elif isinstance(args, torch.Tensor):
            hiddens = args
        else:
            raise TypeError

        if not self.instantiated:
            self.hidden_dim = hiddens.shape[-1]
            logger.debug(f"Got hidden_dim {self.hidden_dim}")
            self.instantiate(hidden_dim=self.hidden_dim)

        self.adapter_output = self.modulelist(hiddens) * self.scaled
        return args, kwargs

    def post_forward(self, output, **kwargs):
        if isinstance(output, tuple):
            hidden = output[0]
        elif isinstance(output, torch.Tensor):
            hidden = output
        else:
            raise TypeError

        modified_output = self.adapter_output + hidden
        if isinstance(output, tuple):
            output = (modified_output,) + output[1:]
        elif isinstance(output, torch.Tensor):
            output = modified_output
        else:
            raise TypeError
        return output


class ParallelAdapterConfig(BaseDeltaConfig):
    r"""
    This is the configuration class to store the configuration of a :py:class:`~ParallelAdapterModel`
    """
    def __init__(
        self,
        bottleneck_dim: Optional[int]=24,
        non_linearity: Optional[str]='gelu_new',
        scaled: Optional[float]=1.,
        **kwargs
    ):
        super().__init__(**kwargs)
        arg_names = get_arg_names_inside_func(self.__init__)
        for arg_name in arg_names:
            if not hasattr(self, arg_name): # the arg has not been registered in parent config
                setattr(self, arg_name, locals()[arg_name])


class ParallelAdapterModel(DeltaBase):
    r""" The implementation of Parallel Adapter (`Towards a Unified View of Parameter-Efficient Transfer Learning <https://arxiv.org/abs/2110.04366>`_).
    Add adapters to the designated ``modified_modules``. In the parallel paradigm, the modified modules' output is
    combined with the adapter's output in its ``post_forward``.

    .. note::
        We **assume** the output of the modified module is the hidden state or a tuple where the hidden state is the
        first element. This is true for most PLMs. However, we admit that currently it's not rigorous; we will improve
        it in the next version. Currently, if you encounter an error here for your backbone, you can modify the code to
        get the hidden state.

    class attributes:
        - default_modified_modules = ["attn@", "attn@", "ff@.w1@", "ff@.w2@"] We add the parallel adapter across the attention layer
          and the feed forward layer, in pairs of (input, output) anchors.
        - delta_type = "parallel_adapter"

    Args:
        backbone_model (:obj:`transformers.PretrainedModels`): The backbone model to be modified.
        bottleneck_dim (:obj:`int`): The dimension of the adapter's bottleneck.
        non_linearity (:obj:`str`): The non linearity of the adapter.
        modified_modules (:obj:`List[str]`): modules to add the parallel adapter to. Must be paired and in the same order as in the layer. For example, ["attn", "attn", "ff.w1", "ff.w2"] adds one parallel adapter from attn's input to attn's output, and another one from ff.w1's input to ff.w2's output.
        unfrozen_modules (:obj:`List[str]`, *optional*, default to :obj:`None`): The modules that should be unfrozen together with the parallel adapter parameters.
        common_structure (:obj:`bool`): whether to use name-based addressing with a common structure mapping.

    """
    config_class = ParallelAdapterConfig
    delta_type = "parallel_adapter"
    default_modified_modules = ["attn@", "attn@", "ff@.w1@", "ff@.w2@"]
    # default_modified_modules = ["attn", "attn", "ff.w1", "ff.w2"]
    _need_pseudo_data = True

    def __init__(self,
                 backbone_model: nn.Module,
                 bottleneck_dim: Optional[int]=24,
                 non_linearity: Optional[str]='gelu_new',
                 modified_modules: Optional[List[str]] = None,
                 exclude_modules: Optional[List[str]] = None,
                 unfrozen_modules: Optional[List[str]] = None,
                 common_structure: Optional[bool] = None,
                 interactive_modify: Optional[Union[bool, int]] = False,
                 ):
        DeltaBase.__init__(self,
                           backbone_model,
                           modified_modules=modified_modules,
                           exclude_modules=exclude_modules,
                           unfrozen_modules=unfrozen_modules,
                           common_structure=common_structure,
                           interactive_modify=interactive_modify,
                           )
        arg_names = get_arg_names_inside_func(self.__init__)
        for arg_name in arg_names:
            if not hasattr(self, arg_name): # not registered in parent class
                setattr(self, arg_name, locals()[arg_name])

        self.delta_modules = nn.ModuleList()
        self.ith = 0
        self.add_all_delta_to_backbone(self.backbone_model,
                                       self.modified_modules,
                                       )

    def update_module(self, module: nn.Module, key: str):
        _, _, ref = self.find_module(module, key)
        if self.ith % 2 == 0:
            adapterlayer = self.new_module_like(ref)
            self.insert_module(ref, "before", delta_module=adapterlayer, delta_name="parallel_adapter")
        if self.ith % 2 == 1 or self.modified_modules[self.ith] == self.modified_modules[self.ith + 1]:
            adapterlayer = self.delta_modules[-1]
            self.insert_module(ref, "after", delta_module=adapterlayer, delta_name="parallel_adapter")
            self.ith |= 1
        self.ith += 1
        self.ith %= len(self.modified_modules)

    def new_module_like(self, module):
        module_device = get_device(module)
        adapterlayer = ParallelAdapterLayer(bottleneck_dim=self.bottleneck_dim, non_linearity=self.non_linearity, device=module_device)
        self.delta_modules.append(adapterlayer)
        return adapterlayer
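A minimal usage sketch (the backbone and the paired modified_modules are illustrative; this also assumes ParallelAdapterModel is exported at the package top level like the other delta models):

from transformers import AutoModelForSequenceClassification
from opendelta import ParallelAdapterModel

backbone = AutoModelForSequenceClassification.from_pretrained("roberta-base")
# Paired entries: each parallel adapter runs from the first module's input to
# the second module's output; repeating the same name spans a single module.
delta_model = ParallelAdapterModel(
    backbone_model=backbone,
    bottleneck_dim=24,
    modified_modules=["attention", "attention"],
)
delta_model.freeze_module(exclude=["deltas", "classifier"], set_state_dict=True)
delta_model.log()  # inspect what was wrapped and what stays trainable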

View File

@@ -4,26 +4,50 @@
 from opendelta.utils.decorate import decorate
 from collections import OrderedDict

+def sequential_caller(_org_func, org_module, delta_name, *args, **kwargs):
+    args = args[1:] # the first argument here is ``self``
+    delta_module = getattr(org_module, delta_name)
+    if hasattr(delta_module, "pre_forward"):
+        args, kwargs = delta_module.pre_forward(*args, **kwargs)
+    ret = _org_func(*args, **kwargs)
+    if hasattr(delta_module, "post_forward"):
+        ret = delta_module.post_forward(ret)
+    return ret
+
+def before_caller(_org_func, org_module, delta_name, *args, **kwargs):
+    args = args[1:] # the first argument here is ``self``
+    delta_module = getattr(org_module, delta_name)
+    if hasattr(delta_module, "pre_forward"):
+        args, kwargs = delta_module.pre_forward(*args, **kwargs)
+    ret = _org_func(*args, **kwargs)
+    return ret
+
+def after_caller(_org_func, org_module, delta_name, *args, **kwargs):
+    args = args[1:] # the first argument here is ``self``
+    delta_module = getattr(org_module, delta_name)
+    ret = _org_func(*args, **kwargs)
+    if hasattr(delta_module, "post_forward"):
+        ret = delta_module.post_forward(ret)
+    return ret
+
+def parallel_caller(_org_func, org_module, delta_name, *args, **kwargs):
+    args = args[1:] # the first argument here is ``self``
+    delta_module = getattr(org_module, delta_name)
+    ret_1 = _org_func(*args, **kwargs)
+    ret_2 = delta_module.forward(*args, **kwargs)
+    return ret_1 + ret_2
+
+caller_map = {
+    "sequential": sequential_caller,
+    "parallel": parallel_caller,
+    "before": before_caller,
+    "after": after_caller,
+}

 def new_replicate_for_data_parallel(self):
     r""" self is the parent module.
     """
     # rewrite the replicate in DataParallel.
-    def _sequential_caller(_org_func, org_module, delta_name, *args, **kwargs):
-        args = args[1:] # the first argument here is ``self``
-        delta_module = getattr(org_module, delta_name)
-        if hasattr(delta_module, "pre_forward"):
-            args, kwargs = delta_module.pre_forward(*args, **kwargs)
-        ret = _org_func(*args, **kwargs)
-        if hasattr(delta_module, "post_forward"):
-            ret = delta_module.post_forward(ret)
-        return ret
-    def _parallel_caller(_org_func, org_module, delta_name, *args, **kwargs):
-        args = args[1:] # the first argument here is ``self``
-        delta_module = getattr(org_module, delta_name)
-        ret_1 = _org_func(*args, **kwargs)
-        ret_2 = delta_module.forward(*args, **kwargs)
-        return ret_1 + ret_2
     replica = self.__new__(type(self))
     org_forward = replica.forward
     replica.__dict__ = self.__dict__.copy()
@@ -33,10 +57,9 @@ def new_replicate_for_data_parallel(self):
     for _delta_info in self._delta_infos:
         if _delta_info['state'] == 'on':
-            if _delta_info['method'] == "insert_sequential":
-                new_forward = decorate(replica.forward, _sequential_caller, extras=(replica, _delta_info['delta_name']), kwsyntax=True)
-            elif _delta_info['method'] == "insert_parallel":
-                new_forward = decorate(replica.forward, _parallel_caller, extras=(replica, _delta_info['delta_name']), kwsyntax=True)
+            if _delta_info['method'] in caller_map.keys():
+                caller = caller_map[_delta_info['method']]
+                new_forward = decorate(replica.forward, caller, extras=(replica, _delta_info['delta_name']), kwsyntax=True)
             else:
                 raise NotImplementedError(f"data_parallel for _delta_info['method']=='{_delta_info['method']}' is not supported")
             replica.__dict__['forward'] = new_forward.__get__(replica, type(replica))
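For reference, a minimal self-contained demo of sequential_caller's effect on a toy module (functools.partial stands in for the decorate-based wrapper here, which is also why the ``args[1:]`` self-stripping from the code above is not needed):

from functools import partial
import torch
import torch.nn as nn

def sequential_caller(_org_func, org_module, delta_name, *args, **kwargs):
    delta_module = getattr(org_module, delta_name)
    if hasattr(delta_module, "pre_forward"):
        args, kwargs = delta_module.pre_forward(*args, **kwargs)
    ret = _org_func(*args, **kwargs)
    if hasattr(delta_module, "post_forward"):
        ret = delta_module.post_forward(ret)
    return ret

class Scale(nn.Module):
    # toy delta: pre_forward doubles the input, post_forward adds 1
    def pre_forward(self, x):
        return (2 * x,), {}
    def post_forward(self, ret):
        return ret + 1

module = nn.Identity()
module.delta = Scale()
org_forward = module.forward
module.forward = partial(sequential_caller, org_forward, module, "delta")
print(module(torch.tensor(3.0)))  # tensor(7.) == 2 * 3 + 1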