PaddlePaddle / PaddleHub
PaddleHub 1.8 raises "ZeroDivisionError: float division by zero" during finetune
Status: To do
#I4QBWY
PaddlePaddle-Gardener
Created on 2022-01-11 14:54
[<b>From GitHub user rela0426</b>](https://github.com/PaddlePaddle/PaddleHub/issues/1203): This is similar to the problem in #904; I made the changes described in that thread, but the error still occurs. @Steffy-zxf Since PaddleHub 1.8 changed how finetune is invoked, I updated my code accordingly. With the automatically downloaded dataset, `dataset = hub.dataset.MSRA_NER(tokenizer=tokenizer, max_seq_len=128)` works fine, but as soon as I switch to a custom dataset it raises `ZeroDivisionError: float division by zero`. My sequence-labeling finetune code for the custom dataset is below. Is something wrong with it? Could you take a look? Could the team also publish a complete demo of sequence labeling with a custom dataset?

```python
import argparse
import ast
import json
import numpy as np
import paddle.fluid as fluid
import paddlehub as hub
from paddlehub.dataset.base_nlp_dataset import BaseNLPDataset
from paddlehub.dataset import InputExample
from data_process import data_process
from data_process import schema_process
from data_process import write_by_lines
import sys
import csv
import codecs

csv.field_size_limit(sys.maxsize)

# yapf: disable
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--num_epoch", type=int, default=3, help="Number of epoches for fine-tuning.")
parser.add_argument("--use_gpu", type=ast.literal_eval, default=True, help="Whether use GPU for finetuning, input should be True or False")
parser.add_argument("--learning_rate", type=float, default=5e-5, help="Learning rate used to train with warmup.")
parser.add_argument("--data_dir", type=str, default=None, help="data save dir")
parser.add_argument("--schema_path", type=str, default=None, help="schema path")
parser.add_argument("--train_data", type=str, default=None, help="train data")
parser.add_argument("--dev_data", type=str, default=None, help="dev data")
parser.add_argument("--test_data", type=str, default=None, help="test data")
parser.add_argument("--predict_data", type=str, default=None, help="predict data")
parser.add_argument("--do_train", type=ast.literal_eval, default=False, help="do train")
parser.add_argument("--do_predict", type=ast.literal_eval, default=True, help="do predict")
parser.add_argument("--do_model", type=str, default="trigger", choices=["trigger", "role"], help="trigger or role")
parser.add_argument("--weight_decay", type=float, default=0.01, help="Weight decay rate for L2 regularizer.")
parser.add_argument("--warmup_proportion", type=float, default=0.1, help="Warmup proportion params for warmup strategy")
parser.add_argument("--max_seq_len", type=int, default=512, help="Number of words of the longest seqence.")
parser.add_argument("--eval_step", type=int, default=200, help="eval step")
parser.add_argument("--model_save_step", type=int, default=3000, help="model save step")
parser.add_argument("--batch_size", type=int, default=32, help="Total examples' number in batch for training.")
parser.add_argument("--add_crf", type=ast.literal_eval, default=True, help="add crf")
parser.add_argument("--checkpoint_dir", type=str, default=None, help="Directory to model checkpoint")
parser.add_argument("--use_data_parallel", type=ast.literal_eval, default=False, help="Whether use data parallel.")
args = parser.parse_args()
# yapf: enable.

# Preprocess the data and save it to disk first
train_data = data_process(args.train_data, args.do_model)  # process training data
dev_data = data_process(args.dev_data, args.do_model)  # process dev data
test_data = data_process(args.test_data, args.do_model)
predict_sents, predict_data = data_process(args.predict_data, args.do_model, is_predict=True)

write_by_lines("{}/{}_train.tsv".format(args.data_dir, args.do_model), train_data)
write_by_lines("{}/{}_dev.tsv".format(args.data_dir, args.do_model), dev_data)
write_by_lines("{}/{}_test.tsv".format(args.data_dir, args.do_model), test_data)
write_by_lines("{}/{}_predict.tsv".format(args.data_dir, args.do_model), predict_data)

schema_labels = schema_process(args.schema_path, args.do_model)


class EEDataset(BaseNLPDataset):
    """EEDataset"""

    def __init__(self, data_dir, labels, tokenizer=None, max_seq_len=None, model="trigger"):
        # where the dataset files are stored
        base_path = data_dir
        super(EEDataset, self).__init__(
            base_path=base_path,
            train_file="{}_train.tsv".format(model),
            dev_file="{}_dev.tsv".format(model),
            test_file="{}_test.tsv".format(model),
            tokenizer=tokenizer,
            max_seq_len=max_seq_len,
            # prediction data (which needs no labels) can go in predict.tsv
            predict_file="{}_predict.tsv".format(model),
            train_file_with_header=True,
            dev_file_with_header=True,
            test_file_with_header=True,
            predict_file_with_header=True,
            # the set of labels for the dataset
            label_list=labels)


def main():
    # Load PaddleHub pretrained model
    # More pretrained models: https://www.paddlepaddle.org.cn/hublist?filter=en_category&value=SemanticModel
    # model_name = "ernie_tiny"
    model_name = "chinese-roberta-wwm-ext-large"
    module = hub.Module(name=model_name)
    inputs, outputs, program = module.context(
        trainable=True, max_seq_len=args.max_seq_len)

    # Download dataset and use SequenceLabelReader to read dataset
    tokenizer = hub.BertTokenizer(vocab_file=module.get_vocab_path())
    dataset = EEDataset(data_dir=args.data_dir, labels=schema_labels, tokenizer=tokenizer,
                        max_seq_len=args.max_seq_len, model=args.do_model)
    reader = hub.reader.SequenceLabelReader(
        # dataset=dataset,
        vocab_path=module.get_vocab_path(),
        max_seq_len=args.max_seq_len,
        sp_model_path=module.get_spm_path(),
        word_dict_path=module.get_word_dict_path())

    # Construct transfer learning network
    # Use "sequence_output" for token-level output.
    sequence_output = outputs["sequence_output"]

    # Setup feed list for data feeder
    # Must feed all the tensor of module need
    feed_list = [
        inputs["input_ids"].name,
        inputs["position_ids"].name,
        inputs["segment_ids"].name,
        inputs["input_mask"].name
    ]

    # Select a finetune strategy
    strategy = hub.AdamWeightDecayStrategy(
        warmup_proportion=args.warmup_proportion,
        weight_decay=args.weight_decay,
        learning_rate=args.learning_rate)

    # Setup running config for PaddleHub Finetune API
    config = hub.RunConfig(
        eval_interval=args.eval_step,
        save_ckpt_interval=args.model_save_step,
        use_data_parallel=args.use_data_parallel,
        use_cuda=args.use_gpu,
        num_epoch=args.num_epoch,
        batch_size=args.batch_size,
        checkpoint_dir=args.checkpoint_dir,
        strategy=strategy)

    # Define a sequence labeling finetune task by PaddleHub's API
    # If add crf, the network use crf as decoder
    seq_label_task = hub.SequenceLabelTask(
        dataset=dataset,
        feature=sequence_output,
        feed_list=feed_list,
        max_seq_len=args.max_seq_len,
        num_classes=dataset.num_labels,
        config=config,
        add_crf=args.add_crf)

    # Finetune and evaluate model by PaddleHub's API
    # will finish training, evaluation, testing, save model automatically
    if args.do_train:
        print("start finetune and eval process")
        seq_label_task.finetune_and_eval()

    if args.do_predict:
        print("start predict process")
        ret = []
        id2label = {val: key for key, val in reader.label_map.items()}
        input_data = [[d] for d in predict_data]
        run_states = seq_label_task.predict(data=input_data[1:])
        results = []
        for batch_states in run_states:
            batch_results = batch_states.run_results
            batch_infers = batch_results[0].reshape([-1]).astype(np.int32).tolist()
            seq_lens = batch_results[1].reshape([-1]).astype(np.int32).tolist()
            current_id = 0
            for length in seq_lens:
                seq_infers = batch_infers[current_id:current_id + length]
                seq_result = list(map(id2label.get, seq_infers[1:-1]))
                current_id += length if args.add_crf else args.max_seq_len
                results.append(seq_result)
        ret = []
        for sent, r_label in zip(predict_sents, results):
            sent["labels"] = r_label
            ret.append(json.dumps(sent, ensure_ascii=False))
        write_by_lines("{}.{}.pred".format(args.predict_data, args.do_model), ret)


if __name__ == "__main__":
    main()
```
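One check worth making before digging into the finetune API itself: in PaddleHub 1.x a "float division by zero" during finetune commonly appears when the task ends up with zero training examples, for instance when the generated `*_train.tsv` is empty or contains only its header row, so the steps-per-epoch arithmetic divides by zero. The following is a minimal diagnostic sketch under that assumption; the `count_data_rows` helper and the example `data_dir`/`do_model` values are hypothetical and only mirror the file-naming scheme of the script above.

```python
import os


def count_data_rows(path, has_header=True):
    """Count non-empty data rows in a TSV file (hypothetical helper for this check)."""
    if not os.path.exists(path):
        return 0
    with open(path, "r", encoding="utf-8") as f:
        rows = [line for line in f if line.strip()]
    return max(0, len(rows) - 1) if has_header else len(rows)


# Same naming scheme as the script above: {data_dir}/{do_model}_{split}.tsv
data_dir, do_model = "data", "trigger"  # example values, adjust to your run
for split in ("train", "dev", "test", "predict"):
    path = "{}/{}_{}.tsv".format(data_dir, do_model, split)
    n = count_data_rows(path)
    print("{}: {} data rows".format(path, n))
    if split == "train" and n == 0:
        # An empty training split leaves the task with no examples, which can
        # surface later as "ZeroDivisionError: float division by zero".
        raise ValueError("No training examples found in {}".format(path))
```

If the train split does contain data, running the same check on the dev split may also be worth doing in case the error only appears at the first evaluation step.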
Comments (7)