Reading the problem
- Competition goal
The theme is AI-assisted construction of a diabetes knowledge graph: mine diabetes-related textbooks and research papers and build a knowledge graph for diabetes.
- Competition schedule
The competition has three stages: Season 1, Season 2, and the grand final.
Season 1: entity extraction
Season 2: relation extraction
Season 3: the grand final
- Competition data
The data comes from relevant textbooks and research papers.
Training data: annotated text;
along with the test data and the required submission format.
Solution approach
Entity extraction here is simply named entity recognition (NER) on natural language text. There are many NER algorithms,
for example hidden Markov models, conditional random fields (CRF), RNNs, LSTMs, and so on.
Train the model parameters on the annotated data, tune, and then predict.
- Model selection
Go with the widely used LSTM+CRF, and also try a CNN (the IDCNN layer shown later) for feature extraction.
- Data processing
Data processing takes most of the time and involves many steps: converting characters to numbers, reformatting the annotated files, and converting the final output into the required submission format. A rough sketch of the character/tag-to-id step follows below.
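As an illustration of that character-to-id and tag-to-id step (a minimal sketch; build_maps, char_to_id and tag_to_id are only illustrative names, the real pipeline builds these maps from the training corpus and pickles them into the map_file used later):
def build_maps(sentences):
    # sentences: list of sentences, each a list of (character, BIO-tag) pairs
    char_to_id, tag_to_id = {"<PAD>": 0, "<UNK>": 1}, {}
    for sent in sentences:
        for char, tag in sent:
            char_to_id.setdefault(char, len(char_to_id))
            tag_to_id.setdefault(tag, len(tag_to_id))
    return char_to_id, tag_to_id

def to_ids(sent, char_to_id, tag_to_id):
    # map one annotated sentence to parallel id sequences
    chars = [char_to_id.get(c, char_to_id["<UNK>"]) for c, _ in sent]
    tags = [tag_to_id[t] for _, t in sent]
    return chars, tags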
Solution
- embedding
Each character is converted to a numeric representation, so the input can be turned directly into a vector.
word2vec is used here; the code is as follows:
# -*- coding: utf-8 -*-
from gensim.models import word2vec
import random

# Train a 100-dimensional character-level word2vec model on the
# pre-segmented corpus fenzi.txt.
sentences = word2vec.Text8Corpus("fenzi.txt")
model = word2vec.Word2Vec(sentences, size=100)
model.save("yixue.model")
print("模型训练完成")  # model training finished
# model = word2vec.Word2Vec.load("yixue.model")

# Collect every distinct character in the corpus and write its vector
# (a random vector for characters missing from the model) to vec.txt.
zidian = []
fp = open('fenzi.txt', 'r', encoding='utf8')
fc = open('vec.txt', 'w', encoding='utf8')
for line in fp:
    for k in line:
        if k != " " and k not in zidian:
            zidian.append(k)

for i in zidian:
    try:
        vect = model[i].tolist()
    except KeyError:
        vect = [random.random() for _ in range(100)]
    fc.write(i + " ")
    # One character per line: the character followed by its 100 values.
    for m, k in enumerate(vect, 1):
        if m != 100:
            fc.write(str(k) + " ")
        else:
            fc.write(str(k) + "\n")
fp.close()
fc.close()
print(zidian)
The result is vec.txt, where each line holds one character followed by its 100-dimensional vector.
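This vec.txt is what the embedding lookup is later built from (via the load_word2vec helper referenced in the prediction code below). A minimal sketch of reading it back, assuming exactly the format written above (load_char_vectors and build_embedding_matrix are illustrative names, not the project's helpers):
import numpy as np

def load_char_vectors(path="vec.txt", dim=100):
    # each line: a character, a space, then `dim` float values
    vectors = {}
    with open(path, encoding="utf8") as f:
        for line in f:
            parts = line.rstrip("\n").split(" ")
            if len(parts) == dim + 1:
                vectors[parts[0]] = np.asarray(parts[1:], dtype=np.float32)
    return vectors

def build_embedding_matrix(char_to_id, vectors, dim=100):
    # rows for characters without a pretrained vector stay randomly initialized
    emb = np.random.uniform(-0.25, 0.25, (len(char_to_id), dim)).astype(np.float32)
    for ch, idx in char_to_id.items():
        if ch in vectors:
            emb[idx] = vectors[ch]
    return emb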
- Processing the annotations
Entities are tagged with the BIO scheme, which yields character-level tags of the following form (a small illustrative example is given below).
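For instance, a disease mention tagged character by character might look like this (the label name Disease is only illustrative; the competition data defines its own entity types):
高 B-Disease
血 I-Disease
糖 I-Disease
患 O
者 O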
An entity dictionary is then built from the .ann annotation files; the code is as follows:
import os
import pandas as pd

# Walk the annotation (.ann) files, collect every entity mention and its
# label, then de-duplicate the mentions to get an entity dictionary.
c_root = os.getcwd() + os.sep + "data_source" + os.sep
li01 = []  # entity text
li02 = []  # entity label
k = 0
for file in os.listdir(c_root):
    if ".ann" in file:
        fp = open(c_root + file, 'r', encoding='utf8')
        k += 1
        for i in fp:
            a = i.strip("\n").split("\t")[-1]                # entity text
            b = i.strip("\n").split("\t")[-2].split(" ")[0]  # entity label
            li01.append(a)
            li02.append(b)

data = pd.DataFrame({'a': li01, 'b': li02})
da = data.drop_duplicates(subset="a", keep='first', inplace=False)
# da.to_csv("./DICT_NOW.csv", index=False, header=False, encoding='utf8')
print(data["b"].value_counts())
That completes the up-front data processing. One more step is still needed before training:
- Collect the full set of entity classes (the label categories seen in the value_counts output above).
Writing the training model
The training program is written with TensorFlow; the core code is as follows:
def project_layer_bilstm(self, lstm_outputs, name=None):
    """
    hidden layer between lstm layer and logits
    :param lstm_outputs: [batch_size, num_steps, emb_size]
    :return: [batch_size, num_steps, num_tags]
    """
    with tf.variable_scope("project" if not name else name):
        with tf.variable_scope("hidden"):
            W = tf.get_variable("W", shape=[self.lstm_dim * 2, self.lstm_dim],
                                dtype=tf.float32, initializer=self.initializer)
            b = tf.get_variable("b", shape=[self.lstm_dim], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            output = tf.reshape(lstm_outputs, shape=[-1, self.lstm_dim * 2])
            hidden = tf.tanh(tf.nn.xw_plus_b(output, W, b))

        # project to score of tags
        with tf.variable_scope("logits"):
            W = tf.get_variable("W", shape=[self.lstm_dim, self.num_tags],
                                dtype=tf.float32, initializer=self.initializer)
            b = tf.get_variable("b", shape=[self.num_tags], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
            pred = tf.nn.xw_plus_b(hidden, W, b)
        return tf.reshape(pred, [-1, self.num_steps, self.num_tags])
# IDCNN layer
def IDCNN_layer(self, model_inputs, name=None):
    """
    :param idcnn_inputs: [batch_size, num_steps, emb_size]
    :return: [batch_size, num_steps, cnn_output_width]
    """
    # tf.expand_dims inserts a new dimension into the tensor at the given
    # position (dimensions are 0-indexed), turning the input into a 4-D tensor.
    model_inputs = tf.expand_dims(model_inputs, 1)
    reuse = False
    if self.dropout == 1.0:
        reuse = True
    with tf.variable_scope("idcnn" if not name else name):
        shape = [1, self.filter_width, self.embedding_dim, self.num_filter]
        print(shape)
        filter_weights = tf.get_variable(
            "idcnn_filter",
            shape=[1, self.filter_width, self.embedding_dim, self.num_filter],
            initializer=self.initializer)
        """
        shape of input  = [batch, in_height, in_width, in_channels]
        shape of filter = [filter_height, filter_width, in_channels, out_channels]
        in_height defaults to 1, in_width is the sentence length,
        and the channel dimension is the 120-dim embedding.
        """
        layerInput = tf.nn.conv2d(model_inputs,
                                  filter_weights,
                                  strides=[1, 1, 1, 1],
                                  padding="SAME",
                                  name="init_layer", use_cudnn_on_gpu=True)
        finalOutFromLayers = []
        totalWidthForLastDim = 0
        # Repeat the stack of convolutions several times so positions skipped
        # by the dilation in one pass are covered in later passes.
        for j in range(self.repeat_times):
            for i in range(len(self.layers)):
                dilation = self.layers[i]['dilation']
                isLast = True if i == (len(self.layers) - 1) else False
                with tf.variable_scope("atrous-conv-layer-%d" % i,
                                       reuse=True
                                       if (reuse or j > 0) else False):
                    w = tf.get_variable(
                        "filterW",
                        shape=[1, self.filter_width, self.num_filter,
                               self.num_filter],
                        initializer=tf.contrib.layers.xavier_initializer())
                    b = tf.get_variable("filterB", shape=[self.num_filter])
                    # Dilated (atrous) convolution: rate-1 zeros are inserted
                    # between filter taps; with the three layers {1, 1, 2} the
                    # first two are effectively undilated.
                    conv = tf.nn.atrous_conv2d(layerInput,
                                               w,
                                               rate=dilation,
                                               padding="SAME")
                    conv = tf.nn.bias_add(conv, b)
                    conv = tf.nn.relu(conv)
                    if isLast:
                        finalOutFromLayers.append(conv)
                        totalWidthForLastDim += self.num_filter
                    layerInput = conv
        finalOut = tf.concat(axis=3, values=finalOutFromLayers)
        keepProb = 1.0 if reuse else 0.5
        finalOut = tf.nn.dropout(finalOut, keepProb)
        # Squeeze out the height dimension; the values are unchanged.
        finalOut = tf.squeeze(finalOut, [1])
        finalOut = tf.reshape(finalOut, [-1, totalWidthForLastDim])
        self.cnn_output_width = totalWidthForLastDim
        return finalOut
def project_layer_idcnn(self, idcnn_outputs, name=None):
    """
    :param idcnn_outputs: [batch_size * num_steps, cnn_output_width]
    :return: [batch_size, num_steps, num_tags]
    """
    with tf.variable_scope("project" if not name else name):
        # project to score of tags
        with tf.variable_scope("logits"):
            W = tf.get_variable("W", shape=[self.cnn_output_width, self.num_tags],
                                dtype=tf.float32, initializer=self.initializer)
            b = tf.get_variable("b", initializer=tf.constant(0.001, shape=[self.num_tags]))
            pred = tf.nn.xw_plus_b(idcnn_outputs, W, b)
        return tf.reshape(pred, [-1, self.num_steps, self.num_tags])
def loss_layer(self, project_logits, lengths, name=None):
    """
    calculate crf loss
    :param project_logits: [1, num_steps, num_tags]
    :return: scalar loss
    """
    # num_steps is the sentence length; project_logits is the output of the
    # feature extractor followed by the fully connected projection layer.
    with tf.variable_scope("crf_loss" if not name else name):
        small = -1000.0
        # pad logits for crf loss; start_logits = [batch_size, 1, num_tags+1]
        start_logits = tf.concat(
            [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]),
             tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1)
        # pad_logits = [batch_size, num_steps, 1]
        pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32)
        # logits = [batch_size, num_steps, num_tags+1]
        logits = tf.concat([project_logits, pad_logits], axis=-1)
        # logits = [batch_size, num_steps+1, num_tags+1]
        logits = tf.concat([start_logits, logits], axis=1)
        targets = tf.concat(
            [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1)
        # targets = [batch_size, 1 + num_steps]
        self.trans = tf.get_variable(
            "transitions",
            shape=[self.num_tags + 1, self.num_tags + 1],
            initializer=self.initializer)
        # logits are the model's emission scores; targets are the gold labels;
        # trans is the CRF transition matrix.
        # crf_log_likelihood computes the log-likelihood of tag sequences under
        # a linear-chain CRF:
        #   inputs: a [batch_size, max_seq_len, num_tags] tensor, usually the
        #           (reshaped) BiLSTM output fed into the CRF layer
        #   tag_indices: a [batch_size, max_seq_len] matrix of gold tag indices
        #   sequence_lengths: a [batch_size] vector of sequence lengths
        #   transition_params: a [num_tags, num_tags] transition matrix
        #   log_likelihood: scalar log-likelihood
        # Note: an extra "start" tag is added, so the real dimension is num_tags + 1.
        # inputs = [char_inputs, seg_inputs]
        # e.g. 高:3 血:22 糖:23 和:24 高:3 血:22 压:25 -> char_inputs = [3,22,23,24,3,22,25]
        # segmentation 高血糖 / 和 / 高血压 -> 高血糖=[1,2,3] 和=[0] 高血压=[1,2,3],
        # so seg_inputs = [1,2,3,0,1,2,3]
        log_likelihood, self.trans = crf_log_likelihood(
            inputs=logits,
            tag_indices=targets,
            transition_params=self.trans,
            sequence_lengths=lengths + 1)
        return tf.reduce_mean(-log_likelihood)
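For completeness: at prediction time the learned transition matrix is used to Viterbi-decode the best tag path. The project's own decode helper is not shown in this post; a minimal sketch that mirrors the padding done in loss_layer (the function name decode_one and its arguments are illustrative only):
import numpy as np
from tensorflow.contrib.crf import viterbi_decode

def decode_one(scores, length, trans, num_tags):
    # scores: [num_steps, num_tags] emission scores for one sentence
    # trans:  [num_tags + 1, num_tags + 1] learned transitions (with the extra "start" tag)
    small = -1000.0
    start = np.asarray([[small] * num_tags + [0]])   # artificial start step
    pad = small * np.ones([length, 1])               # extra column for the start tag
    logits = np.concatenate([scores[:length], pad], axis=1)
    logits = np.concatenate([start, logits], axis=0)
    path, _ = viterbi_decode(logits, trans)
    return path[1:]  # drop the artificial start position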
- Training
The loss values during training look like this:
2018-10-31 14:41:39,399 - log\train.log - INFO - iteration:77 step:4/996, NER loss:10.520254
2018-10-31 14:43:19,310 - log\train.log - INFO - iteration:77 step:104/996, NER loss:12.477299
2018-10-31 14:44:43,748 - log\train.log - INFO - iteration:77 step:204/996, NER loss:12.602566
2018-10-31 14:45:48,943 - log\train.log - INFO - iteration:77 step:304/996, NER loss: 9.900908
2018-10-31 14:47:19,396 - log\train.log - INFO - iteration:77 step:404/996, NER loss:12.695493
2018-10-31 14:49:51,545 - log\train.log - INFO - iteration:77 step:504/996, NER loss:14.701593
- Saving the model
Predicting the results
def evaluate_line():
    config = load_config(FLAGS.config_file)
    logger = get_logger(FLAGS.log_file)
    # limit GPU memory
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True
    with open(FLAGS.map_file, "rb") as f:
        char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)
    with tf.Session(config=tf_config) as sess:
        model = create_model(sess, Model, FLAGS.ckpt_path, load_word2vec, config, id_to_char, logger)
        c_root = os.getcwd() + os.sep + "data_test" + os.sep
        c_root01 = os.getcwd() + os.sep + "data_finall" + os.sep
        for file in os.listdir(c_root):
            f = open(c_root + file, 'r', encoding='utf8')
            # use os.path.splitext so only the ".txt" extension is replaced
            k = open(c_root01 + os.path.splitext(file)[0] + ".ann", 'w', encoding='utf-8')
            lines = [i.strip('\n') for i in f.readlines()]
            line = ''.join(lines)
            print(line)
            result = model.evaluate_line(sess, input_from_line(line, char_to_id), id_to_tag)
            result = result['entities']
            # each entity looks like {'word': '基于胰高血糖', 'start': 0, 'end': 6, 'type': 'Treatment'}
            for i in range(len(result)):
                k.write("T" + str(i + 1) + '\t' + str(result[i]["type"]) + ' ' +
                        str(result[i]['start']) + " " + str(result[i]['end']) + "\t" +
                        str(result[i]['word']) + "\n")

def main(_):
    # the training branch is disabled here (if 0), so running main always predicts
    if 0:
        if FLAGS.clean:
            clean(FLAGS)
        train()
    else:
        evaluate_line()
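Each predicted entity is written out as one brat-style standoff line; for the example entity shown in the comment above, the corresponding line in the .ann file would be (fields are tab-separated):
T1	Treatment 0 6	基于胰高血糖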
Looking at the generated output, the predictions seem fairly accurate to me. The results have been submitted; now it's just a matter of waiting for tomorrow's leaderboard.