# Keras BERT
Implementation of BERT. Official pre-trained models can be loaded for feature extraction and prediction.
## Install

```bash
pip install keras-bert
```
## Demo

- In the feature extraction demo, you should be able to get the same extraction results as the official model `chinese_L-12_H-768_A-12`.
- In the prediction demo, the missing word in a sentence can be predicted.
- The extraction demo shows how to convert the model to run on TPU.
- The classification demo shows how to apply the model to a simple classification task.
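All of the demos above build on loading an official checkpoint. As a quick orientation, here is a minimal sketch of that step with `load_trained_model_from_checkpoint`; the directory and file names follow the layout of an official BERT release and are assumptions here:

```python
from keras_bert import load_trained_model_from_checkpoint

# Paths assume the standard layout of an official BERT release (assumption).
config_path = 'uncased_L-12_H-768_A-12/bert_config.json'
checkpoint_path = 'uncased_L-12_H-768_A-12/bert_model.ckpt'

# With `training=False` the returned model is ready for feature extraction
# instead of continuing the pre-training objectives.
model = load_trained_model_from_checkpoint(config_path, checkpoint_path, training=False)
model.summary()
```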
## Tokenizer

The `Tokenizer` class is used for splitting texts and generating indices:
```python
from keras_bert import Tokenizer

token_dict = {
    '[CLS]': 0,
    '[SEP]': 1,
    'un': 2,
    '##aff': 3,
    '##able': 4,
    '[UNK]': 5,
}
tokenizer = Tokenizer(token_dict)
print(tokenizer.tokenize('unaffable'))  # The result should be `['[CLS]', 'un', '##aff', '##able', '[SEP]']`
indices, segments = tokenizer.encode('unaffable')
print(indices)   # Should be `[0, 2, 3, 4, 1]`
print(segments)  # Should be `[0, 0, 0, 0, 0]`

print(tokenizer.tokenize(first='unaffable', second='钢'))
# The result should be `['[CLS]', 'un', '##aff', '##able', '[SEP]', '钢', '[SEP]']`
indices, segments = tokenizer.encode(first='unaffable', second='钢', max_len=10)
print(indices)   # Should be `[0, 2, 3, 4, 1, 5, 1, 0, 0, 0]`
print(segments)  # Should be `[0, 0, 0, 0, 0, 1, 1, 0, 0, 0]`
```
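Note that `'钢'` does not appear in `token_dict`, so it is mapped to `[UNK]` (index 5), and with `max_len=10` both outputs are padded with zeros to the requested length.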
## Train & Use

```python
from tensorflow import keras
from keras_bert import get_base_dict, get_model, compile_model, gen_batch_inputs


# A toy input example
sentence_pairs = [
    [['all', 'work', 'and', 'no', 'play'], ['makes', 'jack', 'a', 'dull', 'boy']],
    [['from', 'the', 'day', 'forth'], ['my', 'arm', 'changed']],
    [['and', 'a', 'voice', 'echoed'], ['power', 'give', 'me', 'more', 'power']],
]


# Build token dictionary
token_dict = get_base_dict()  # A dict that contains some special tokens
for pairs in sentence_pairs:
    for token in pairs[0] + pairs[1]:
        if token not in token_dict:
            token_dict[token] = len(token_dict)
token_list = list(token_dict.keys())  # Used for selecting a random word


# Build & train the model
model = get_model(
    token_num=len(token_dict),
    head_num=5,
    transformer_num=12,
    embed_dim=25,
    feed_forward_dim=100,
    seq_len=20,
    pos_num=20,
    dropout_rate=0.05,
)
compile_model(model)
model.summary()

def _generator():
    while True:
        yield gen_batch_inputs(
            sentence_pairs,
            token_dict,
            token_list,
            seq_len=20,
            mask_rate=0.3,
            swap_sentence_rate=1.0,
        )

model.fit_generator(
    generator=_generator(),
    steps_per_epoch=1000,
    epochs=100,
    validation_data=_generator(),
    validation_steps=100,
    callbacks=[
        keras.callbacks.EarlyStopping(monitor='val_loss', patience=5)
    ],
)


# Use the trained model
inputs, output_layer = get_model(
    token_num=len(token_dict),
    head_num=5,
    transformer_num=12,
    embed_dim=25,
    feed_forward_dim=100,
    seq_len=20,
    pos_num=20,
    dropout_rate=0.05,
    training=False,      # The input layers and output layer will be returned if `training` is `False`
    trainable=False,     # Whether the model is trainable. The default value is the same with `training`
    output_layer_num=4,  # The number of layers whose outputs will be concatenated as a single output.
                         # Only available when `training` is `False`.
)
```
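The returned `inputs` and `output_layer` can then be wrapped into a plain Keras model for feature extraction. The dummy batch below is purely illustrative:

```python
import numpy as np
from tensorflow import keras

# Wrap the returned input layers and output tensor in a functional model.
feature_model = keras.models.Model(inputs=inputs, outputs=output_layer)

# An all-padding dummy batch, just to illustrate the expected shapes (assumption).
token_input = np.zeros((1, 20))    # token indices, padded to `seq_len`
segment_input = np.zeros((1, 20))  # segment ids
features = feature_model.predict([token_input, segment_input])
print(features.shape)  # (1, 20, 100): `embed_dim` * `output_layer_num` = 25 * 4
```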
## Use Warmup

The `AdamWarmup` optimizer is provided for warmup and decay. The learning rate reaches `lr` in `warmup_steps` steps, and decays to `min_lr` in `decay_steps` steps. There is a helper function `calc_train_steps` for calculating the two steps:
```python
import numpy as np
from keras_bert import AdamWarmup, calc_train_steps

train_x = np.random.standard_normal((1024, 100))

total_steps, warmup_steps = calc_train_steps(
    num_example=train_x.shape[0],
    batch_size=32,
    epochs=10,
    warmup_proportion=0.1,
)

optimizer = AdamWarmup(total_steps, warmup_steps, lr=1e-3, min_lr=1e-5)
```
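The optimizer is then passed to `compile` like any other Keras optimizer; `model` and the loss below are placeholders for illustration:

```python
# `model` stands for any Keras model; the loss is a placeholder (assumption).
model.compile(
    optimizer=optimizer,
    loss='sparse_categorical_crossentropy',
)
```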
## Download Pretrained Checkpoints

Several download URLs have been added. You can get the download and uncompressed path of a checkpoint with:
```python
from keras_bert import get_pretrained, PretrainedList, get_checkpoint_paths

model_path = get_pretrained(PretrainedList.multi_cased_base)
paths = get_checkpoint_paths(model_path)
print(paths.config, paths.checkpoint, paths.vocab)
```
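The returned paths plug directly into the loading helpers. A minimal sketch, assuming the `paths` object from above:

```python
from keras_bert import Tokenizer, load_trained_model_from_checkpoint, load_vocabulary

# Build the model and a matching tokenizer from the downloaded checkpoint.
model = load_trained_model_from_checkpoint(paths.config, paths.checkpoint, training=False)
token_dict = load_vocabulary(paths.vocab)
tokenizer = Tokenizer(token_dict)
```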
## Extract Features

If you need the features of tokens or sentences (without further tuning), you can use the helper function `extract_embeddings`. To extract the features of all tokens:
```python
from keras_bert import extract_embeddings

model_path = 'xxx/yyy/uncased_L-12_H-768_A-12'
texts = ['all work and no play', 'makes jack a dull boy~']

embeddings = extract_embeddings(model_path, texts)
```
The returned result is a list with the same length as the input texts. Each item in the list is a NumPy array truncated to the length of the corresponding input. The shapes of the outputs in this example are `(7, 768)` and `(8, 768)`.
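Continuing the snippet above, the structure of the result can be checked directly:

```python
for text, embedding in zip(texts, embeddings):
    print(text, embedding.shape)
# all work and no play (7, 768)
# makes jack a dull boy~ (8, 768)
```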
When the inputs are paired sentences and you need the outputs of `NSP` and max-pooling over the last 4 layers:
```python
from keras_bert import extract_embeddings, POOL_NSP, POOL_MAX

model_path = 'xxx/yyy/uncased_L-12_H-768_A-12'
texts = [
    ('all work and no play', 'makes jack a dull boy'),
    ('makes jack a dull boy', 'all work and no play'),
]

embeddings = extract_embeddings(model_path, texts, output_layer_num=4, poolings=[POOL_NSP, POOL_MAX])
```
There are no token features in the results. The outputs of `NSP` and max-pooling will be concatenated, with the final shape `(768 x 4 x 2,)`.
The second argument of the helper function can also be a generator. To extract features from a file:
```python
import codecs
from keras_bert import extract_embeddings

model_path = 'xxx/yyy/uncased_L-12_H-768_A-12'

with codecs.open('xxx.txt', 'r', 'utf8') as reader:
    texts = map(lambda x: x.strip(), reader)
    embeddings = extract_embeddings(model_path, texts)
```