Merge pull request #33 from thunlp/check_pr_33

merge main into delta_center_dev
DingDing 2022-10-10 13:06:58 +08:00 committed by GitHub
commit e8b349f1e1
7 changed files with 21 additions and 17 deletions


@@ -24,7 +24,7 @@
## Overview
OpenDelta is a toolkit for parameter efficient methods (we dub it *delta tuning*), by which users could flexibly assign (or add) a small amount of parameters to update while keeping most parameters frozen. By using OpenDelta, users could easily implement prefix-tuning, adapters, Lora, or any other types of delta tuning with preferred PTMs.
OpenDelta is a toolkit for parameter-efficient tuning methods (we dub it *delta tuning*), by which users could flexibly assign (or add) a small amount of parameters to update while keeping most parameters frozen. By using OpenDelta, users could easily implement prefix-tuning, adapters, Lora, or any other types of delta tuning with preferred PTMs.
- Our repo is tested on Python 3.8 and PyTorch 1.9.0. Lower versions may also be supported.
@@ -33,7 +33,7 @@ OpenDelta is a toolkit for parameter efficient methods (we dub it as *delta tuni
## Updates
- 2022.03.24 We noticed several bugs in Soft Prompt Tuning and Prefix Tuning, mainly due to their need to customize attention ids and token_type_ids; we are fixing them! For now, please use the other methods, which are more stable and perform better.
- 2022.03.20 Add a [colab example](https://colab.research.google.com/drive/1hM_zu11s6plpK-YQSkz3CrowJyxtHneY?usp=sharing) to illustrate efficient training and space-saving multitask-serving.
- 2022.03.20 Add a [colab example](https://colab.research.google.com/drive/1uAhgAdc8Qr42UKYDlgUv0f7W1-gAFwGo?usp=sharing) to illustrate efficient training and space-saving multitask-serving.
- 2022.03.20 A new pip version released.
- 2022.02.16 Support [regular expression](https://opendelta.readthedocs.io/en/latest/notes/namebasedaddr.html#regexexpr) in named-based addressing.
@@ -81,9 +81,9 @@ pip install -r requirements.txt && python setup.py develop
```python
from transformers import AutoModelForSeq2SeqLM
t5 = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
t5 = AutoModelForSeq2SeqLM.from_pretrained("t5-large")
from opendelta import AutoDeltaModel
delta = AutoDeltaModel.from_finetuned("DeltaHub/lora_t5-base_mrpc", backbone_model=t5)
delta = AutoDeltaModel.from_finetuned("thunlp/FactQA_T5-large_Adapter", backbone_model=t5)
delta.log()
```
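The snippet above loads a delta checkpoint that has already been trained. A delta can also be attached from scratch; the sketch below is a hedged illustration that only reuses calls appearing elsewhere in this diff (`LoraModel`, `freeze_module`, `log`, and the save/load mixin), with an assumed checkpoint name and save path.

```python
# Hedged sketch: attach a fresh LoRA delta instead of loading a published one.
from transformers import AutoModelForSeq2SeqLM
from opendelta import LoraModel

t5 = AutoModelForSeq2SeqLM.from_pretrained("t5-base")
delta = LoraModel(backbone_model=t5)                            # defaults to modifying attn.q and attn.v
delta.freeze_module(exclude=["deltas"], set_state_dict=True)    # keep only the delta parameters trainable
delta.log()                                                     # inspect the modified backbone and parameter counts
# ... train t5 as usual ...
delta.save_finetuned("./lora_t5-base_custom")                   # hypothetical path; stores only the delta weights
```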
@@ -109,13 +109,6 @@ used models that OpenDelta are sure to support.
| T5-3b(parallel)| ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ |
| Deberta-v2 | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | |
| CTRL | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | ✅ | | |
| ViT | ✅ | | | | | | | | |
## Performance Checked Combination
Google sheet [here](https://docs.google.com/spreadsheets/d/1BIVa8ocAPga-u7rBOXLYaTfaJSjI1dWfwohmLjmFDrY/edit?usp=sharing)
Subject to change at any moment.


@@ -2,8 +2,15 @@ from transformers import BertForMaskedLM
model = BertForMaskedLM.from_pretrained("bert-base-cased")
# suppose we load BERT
import sys
if len(sys.argv) == 1:
    port=True
else:
    port=int(sys.argv[1])
from opendelta import LoraModel
delta_model = LoraModel(backbone_model=model, interactive_modify=True)
delta_model = LoraModel(backbone_model=model, interactive_modify=port)
# This will visualize the backbone after modification and other information.
delta_model.freeze_module(exclude=["deltas", "layernorm_embedding"], set_state_dict=True)
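If the browser-based selection is not available, the same delta can be configured non-interactively by naming the target modules directly. A minimal sketch, assuming `bert-base-cased` and name-based addressing of the `query`/`value` sub-layers (the module names are assumptions; verify them with `delta_model.log()`):

```python
# Hedged sketch: a non-interactive counterpart of the example above.
from transformers import BertForMaskedLM
from opendelta import LoraModel

model = BertForMaskedLM.from_pretrained("bert-base-cased")
delta_model = LoraModel(backbone_model=model,
                        modified_modules=["query", "value"])   # assumed targets, matched by name suffix
delta_model.freeze_module(exclude=["deltas"], set_state_dict=True)
delta_model.log()                                              # confirm which modules were actually modified
```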


@@ -92,6 +92,7 @@ class DeltaBase(nn.Module, SaveLoadMixin):
    default_exclude_modules = ["lm_head"]
    config_class = BaseDeltaConfig
    default_unfrozen_modules = ["deltas"]
    pass_pseudo_data = True
    def __init__(self,
                 backbone_model: nn.Module,
                 modified_modules: Optional[List[str]] = None,
@@ -200,6 +201,7 @@ class DeltaBase(nn.Module, SaveLoadMixin):
            if self.find_key(key, modified_modules): # TODO: may have bugs when the common structure has a virtual node and it's referred to
                logger.debug("find key: {}".format(key))
                self.update_module(backbone, key)
        if self.pass_pseudo_data:
            self._pseudo_data_to_instantiate(backbone)
        # mark the parameters that are the delta parameters for easily displaying the delta parameters.
        self.mark_as_delta()


@@ -102,6 +102,7 @@ class LoraModel(DeltaBase):
    config_class = LoraConfig
    delta_type = "lora"
    default_modified_modules = ['attn.q', 'attn.v']
    pass_pseudo_data = False
    def __init__(self,
                 backbone_model: nn.Module,
                 lora_r=8,
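Taken together, the two hunks above introduce a class-level switch: `DeltaBase` defaults `pass_pseudo_data` to `True` and feeds pseudo data through the backbone after attaching deltas, while `LoraModel` opts out with `False`. Below is a self-contained, hedged sketch of that pattern (illustrative classes, not OpenDelta's code): a base class gates an optional post-processing step behind a class attribute that subclasses override.

```python
# Hedged sketch of the class-attribute switch: subclasses opt out of an optional step.
class Base:
    pass_pseudo_data = True              # default: run the extra instantiation step

    def attach(self, backbone):
        self.modify(backbone)
        if self.pass_pseudo_data:        # gate the step on the class attribute
            self.pseudo_forward(backbone)

    def modify(self, backbone):
        print("modifying", backbone)

    def pseudo_forward(self, backbone):
        print("pseudo forward through", backbone)

class LoraLike(Base):
    pass_pseudo_data = False             # LoRA-style deltas skip the pseudo forward

Base().attach("t5")        # modifies, then runs the pseudo forward
LoraLike().attach("t5")    # modifies only
```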


@@ -165,7 +165,7 @@ for (i = 0; i < coll.length; i++) {
var submit = document.getElementById("submit");
submit.addEventListener("click", function() {
    const Http = new XMLHttpRequest();
    const url='/submit/?name='+array.join(";");
    const url='/submit/?name='+array.join("&name=");
    Http.open("GET", url);
    Http.send();
    alert("Now go back to your console")
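This JavaScript change stops joining the selected module names with ";" and instead repeats the `name` query parameter, which pairs with the `web.input(name=[])` change in the server code below: repeated parameters arrive as a real list rather than one string that still has to be split. A hedged illustration with the standard library (not code from this PR):

```python
# Hedged illustration: repeated query parameters parse directly into a list.
from urllib.parse import parse_qs

names = ["root.encoder.block.0", "root.decoder.block.0"]
query = "name=" + "&name=".join(names)   # what the updated JS appends after '/submit/?'
print(query)                             # name=root.encoder.block.0&name=root.decoder.block.0
print(parse_qs(query)["name"])           # ['root.encoder.block.0', 'root.decoder.block.0']
```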


@@ -107,7 +107,7 @@ class hello:
class submit:
    def GET(self, _):
        global names
        names = [name.strip("root.") for name in web.input().name.split(";")]
        names = [name[5:] for name in web.input(name=[]).name]
        app.stop()

def interactive(model, port=8888):
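The server-side change fixes a classic `str.strip` pitfall: `strip("root.")` removes any run of the characters `r`, `o`, `t`, `.` from both ends of the string, not the literal `"root."` prefix, so module names that begin or end with those characters get mangled; slicing with `[5:]` drops exactly the five-character prefix. `web.input(name=[])` additionally tells web.py to return the repeated `name` parameters as a list. A small illustration of the pitfall:

```python
# Hedged illustration: strip() takes a character set, not a prefix.
name = "root.roberta.encoder"
print(name.strip("root."))   # 'berta.encode' (leading r/o/t/. and the trailing 'r' are all stripped)
print(name[5:])              # 'roberta.encoder' (removes exactly the 'root.' prefix)
```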
@@ -120,7 +120,7 @@ def interactive(model, port=8888):
    print("If on your machine, open the link below for interactive modification.\n "
          "If on remote host, you could use port mapping, "
          "or run in vscode terminal, which automatically do port mapping for you.")
    app.run()
    app.run(port)
    global names
    print("modified_modules:")
    print(names)


@@ -280,7 +280,7 @@ def mapping_for_SequenceClassification(mapping, type):
            "out_proj": {"__name__":"out_proj"}
        }
    elif type == "bert":
        mapping.pop("lm_head")
        mapping.pop("cls.predictions")
        mapping["classifier"] = {"__name__": "classifier"}
    elif type == "deberta":
        mapping.pop("lm_predictions.lm_head")
@@ -394,3 +394,4 @@ if __name__ == "__main__":
    for name, _ in plm.named_modules():
        transform(name, t5_mapping, strict=True, warning=False)