Fine-Tuning BERT with PyTorch: Clear and Concise BERT Fine-Tuning Code with pytorch-transformers


Competition details: DataFountain (www.datafountain)
import time
import numpy as np
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
from tqdm import tqdm_notebook
from transformers import AdamW, BertModel, BertTokenizer  # newer transformers: use torch.optim.AdamW instead
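The original post never shows the globals the snippets below rely on (MAX_LEN, BERT_MODEL_PATH, DEVICE, Target_nums, tokenizer). A minimal setup sketch; the checkpoint name, sequence length, and label column are assumptions, not taken from the source:

# --- hedged setup: concrete values below are assumptions, not from the original post ---
MAX_LEN = 128                            # maximum sequence length (assumed)
BERT_MODEL_PATH = 'bert-base-chinese'    # any BERT checkpoint path works here (assumed)
DEVICE = 0                               # GPU index used by .cuda(DEVICE)
Target_nums = ['label']                  # list of label column names in the DataFrame (assumed)
tokenizer = BertTokenizer.from_pretrained(BERT_MODEL_PATH)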
SEP_TOKEN_ID = 102  # id of the [SEP] token in the BERT vocabulary

class EmotionDataSet(torch.utils.data.Dataset):
    def __init__(self, df):
        self.df = df

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        ids, g_ids = self.get_token_ids(row)
        label = self.get_label(row)
        return ids, g_ids, label

    def get_g_ids(self, ids):
        # Assign a segment id to every token: tokens up to and including the
        # first [SEP] get 1, the next segment gets 2, and so on.
        g_ids = torch.zeros_like(ids)
        g_idx = 1
        for i, e in enumerate(ids):
            g_ids[i] = g_idx
            if e == SEP_TOKEN_ID:
                g_idx += 1
        # Everything after the last [SEP] is padding: reset it to 0.
        max_idx = (g_ids == g_idx)
        g_ids[max_idx] = 0
        return g_ids
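To make the segment-id logic concrete, here is a tiny worked example; the token ids are illustrative (101/102 are the standard [CLS]/[SEP] ids, the rest are arbitrary):

ds = EmotionDataSet(df=None)  # df is not touched by get_g_ids
ids = torch.tensor([101, 2342, 3453, 102, 876, 102, 0, 0])
print(ds.get_g_ids(ids))      # tensor([1, 1, 1, 1, 2, 2, 0, 0]) -- padding tail reset to 0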
    def get_label(self, row):
        return torch.tensor(row[Target_nums].values.astype(np.float32))

    def get_token_ids(self, row):
        # [CLS] + truncated text tokens + [SEP]; 'text' as the column name is an assumption
        tokens = ['[CLS]'] + tokenizer.tokenize(row['text'])[:MAX_LEN - 2] + ['[SEP]']
        token_ids = tokenizer.convert_tokens_to_ids(tokens)
        # padding
        if len(token_ids) < MAX_LEN:
            token_ids += [0] * (MAX_LEN - len(token_ids))
        # to tensor
        ids = torch.tensor(token_ids)[:MAX_LEN]
        # segment ids
        g_ids = self.get_g_ids(ids)
        return ids, g_ids
    def collate_fn(self, batch):
        token_ids = torch.stack([x[0] for x in batch])
        g_ids = torch.stack([x[1] for x in batch])
        labels = torch.stack([x[2] for x in batch])
        return token_ids, g_ids, labels.squeeze()
# DataLoader
def get_loader(df, batch_size=8, is_train=True):
    ds_df = EmotionDataSet(df)
    loader = torch.utils.data.DataLoader(ds_df, batch_size=batch_size, shuffle=is_train,
                                         num_workers=0, collate_fn=ds_df.collate_fn,
                                         drop_last=is_train)
    loader.num = len(ds_df)
    return loader
def test_train_loader(train):
    loader = get_loader(train, 2)
    for token_ids, g_ids, labels in loader:
        print(token_ids)
        print(g_ids)
        print(labels)
        break

def test_test_loader(test):
    # the test set has no labels, so add a dummy label column first
    if Target_nums[0] not in test.columns:
        test[Target_nums[0]] = 0
    loader = get_loader(test, 2)
    for token_ids, g_ids, labels in loader:
        print(token_ids)
        print(g_ids)
        break
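A quick way to exercise the whole data pipeline is a two-row toy DataFrame. The 'text' column name and the Target_nums label column carry over from the setup sketch above; the sample sentences and labels are made up:

import pandas as pd

toy = pd.DataFrame({'text': ['今天心情很好', '这部电影太糟糕了'],
                    Target_nums[0]: [1, 0]})   # hypothetical labels
test_train_loader(toy)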
Build the fine-tuning BERT model; here we read the representation directly off BERT's [CLS] position:
class EmotionModel(nn.Module):
    def __init__(self, n_class=1):
        super(EmotionModel, self).__init__()
        self.bert_model = BertModel.from_pretrained(BERT_MODEL_PATH, cache_dir=None)
        self.fcc = nn.Sequential(nn.Linear(768, n_class))

    def forward(self, ids, g_ids):
        # padding ids are 0, so this mask covers exactly the real tokens
        attention_mask = (ids > 0)
        # last_q is the last layer's sequence output
        # (tuple unpacking assumes transformers < 4.x; newer versions return a ModelOutput)
        last_q, pooled_output = self.bert_model(input_ids=ids, attention_mask=attention_mask)
        # take the [CLS] position and squash it to a probability
        out = self.fcc(last_q[:, 0, :]).sigmoid()
        return out
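On a recent transformers release (4.x) the BERT forward call returns a ModelOutput object rather than a tuple, so the unpacking above fails. A hedged variant of the same forward pass for that case (the subclass name is mine, not from the source):

class EmotionModelV4(EmotionModel):
    # same head, but unpacks the ModelOutput returned by transformers >= 4.x
    def forward(self, ids, g_ids):
        attention_mask = (ids > 0)
        outputs = self.bert_model(input_ids=ids, attention_mask=attention_mask,
                                  return_dict=True)
        last_q = outputs.last_hidden_state   # same tensor as last_q above
        return self.fcc(last_q[:, 0, :]).sigmoid()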
def test_model():
    x = torch.tensor([[1, 2, 3, 4, 5, 0, 0], [1, 2, 3, 4, 5, 0, 0]])
    g_ids = torch.tensor([[1, 0, 0, 0, 0, 0, 0], [1, 0, 0, 0, 0, 0, 0]])
    model = EmotionModel()
    y = model(x, g_ids)
    print(y)

netG = EmotionModel()
# total trainable parameter count
print('# generator parameters:', sum(param.numel() for param in netG.parameters()))
Training-related function code:
train_model - the main training loop
validation_fn - the validation routine
metric_fn - the evaluation metric
logs='- epoch - {0:2d} - train_loss - {1:.4f} train_score - {2:.3f} - val_loss - {3:.4f} - val_score - {4:.3f} - best_loss - {5:.3f}'
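The template is meant to be filled with .format at the end of each epoch; for example (the numbers here are illustrative only):

print(logs.format(1, 0.4213, 0.801, 0.3876, 0.823, 0.3876))
# - epoch -  1 - train_loss - 0.4213 train_score - 0.801 - val_loss - 0.3876 - val_score - 0.823 - best_loss - 0.388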
# validation
def metric_fn(p, t):
    # flatten, binarize the sigmoid outputs at 0.5, then score with F1
    p = (np.asarray(p).reshape(-1) > 0.5) * 1
    return f1_score(np.asarray(t).reshape(-1), p)

def validation_fn(model, loader, loss_fn):
    model.eval()
    y_pred, y_true, tloss = [], [], []
    with torch.no_grad():
        for ids, g_ids, target in loader:
            outputs = model(ids.cuda(DEVICE), g_ids.cuda(DEVICE))
            loss = loss_fn(outputs.squeeze(), target.cuda(DEVICE))
            tloss.append(loss.item())
            y_true.append(target.detach().cpu().numpy())
            y_pred.append(outputs.detach().cpu().numpy())
    tloss = np.array(tloss).mean()
    y_pred = np.concatenate(y_pred)
    y_true = np.concatenate(y_true)
    metric = metric_fn(y_pred, y_true)
    return tloss, metric
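A hedged usage sketch; val_df (a held-out split) and a trained model are assumed to exist, and the loss matches the BCE loss used in training below:

val_loader = get_loader(val_df, batch_size=8, is_train=False)   # val_df: assumed held-out split
val_loss, val_f1 = validation_fn(model, val_loader, nn.BCELoss().cuda(DEVICE))
print('val_loss {0:.4f} - val_f1 {1:.3f}'.format(val_loss, val_f1))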
def predict_model_test(model, loader):
    model.eval()
    y_pred = []
    with torch.no_grad():
        for ids, g_ids, _ in loader:
            outputs = model(ids.cuda(DEVICE), g_ids.cuda(DEVICE))
            y_pred.append(outputs.detach().cpu().numpy())
    y_pred = np.concatenate(y_pred)
    return y_pred
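A hedged sketch of turning the predictions into a submission file; the test DataFrame, its 'id' column, and the output file name are all assumptions, not from the source:

import pandas as pd

# test must already contain the dummy label column (see test_test_loader above)
test_loader = get_loader(test, batch_size=8, is_train=False)  # keeps order, no drop_last
preds = predict_model_test(model, test_loader)
sub = pd.DataFrame({'id': test['id'],                          # 'id' column assumed
                    Target_nums[0]: (preds.reshape(-1) > 0.5) * 1})
sub.to_csv('submission.csv', index=False)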
def train_model(model, train_loader, val_loader, early_stop_epochs=2,
                accumulation_steps=2, epochs=4,
                model_save_path='pytorch_Emotion_model.pkl'):
    no_improve_epochs = 0
    # optimizer learning-rate setup: no weight decay on bias / LayerNorm parameters
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)], 'weight_decay': 0.8},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
    ]
    optimizer = AdamW(optimizer_grouped_parameters, lr=2e-5, eps=1e-8)
    train_len = len(train_loader)
    # loss function
    loss_fn = nn.BCELoss().cuda(DEVICE)
    best_vmetric = 1.
    logss = []
    for epoch in range(1, epochs + 1):
        model.train()
        y_pred, y_true = [], []
        start_time = time.time()
        tloss = []
        bar = tqdm_notebook(train_loader)
        for i, (ids, g_ids, labels) in enumerate(bar):
            outputs = model(ids.cuda(DEVICE), g_ids.cuda(DEVICE))
            loss = loss_fn(outputs.squeeze(), labels.cuda(DEVICE))
            tloss.append(loss.item())
            loss.backward()
            # gradient accumulation: only step the optimizer every accumulation_steps batches
            if (i + 1) % accumulation_steps == 0 or (i + 1) == train_len:
                optimizer.step()
                optimizer.zero_grad()
            y_true.append(labels.detach().cpu().numpy())
            y_pred.append(outputs.detach().cpu().numpy())
            y_p = np.concatenate(y_pred)   # predictions so far
            y_t = np.concatenate(y_true)   # targets so far
            metric = metric_fn(y_p, y_t)
            bar.set_postfix(loss=np.array(tloss).mean(), score=metric)
        tloss = np.array(tloss).mean()
        y_pred = np.concatenate(y_pred)
        y_true = np.concatenate(y_true)
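The scraped article breaks off mid-function here. The variables it prepares (logss, best_vmetric, no_improve_epochs, early_stop_epochs, model_save_path) imply the usual epoch-end bookkeeping; the following is a hedged sketch of how the tail plausibly continues (indented to sit inside the epoch loop above), not the author's original code:

        # --- hedged continuation; the original post is truncated above this point ---
        train_metric = metric_fn(y_pred, y_true)
        val_loss, val_metric = validation_fn(model, val_loader, loss_fn)
        logss.append(logs.format(epoch, tloss, train_metric, val_loss, val_metric, best_vmetric))
        print(logss[-1])
        # checkpoint when validation loss improves; stop after early_stop_epochs stale epochs
        if val_loss < best_vmetric:
            best_vmetric = val_loss
            no_improve_epochs = 0
            torch.save(model.state_dict(), model_save_path)
        else:
            no_improve_epochs += 1
            if no_improve_epochs >= early_stop_epochs:
                break
    return logss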
