美文网首页
2019-08-20工作进展

2019-08-20工作进展

作者: Songger | 来源:发表于2019-08-20 17:29 被阅读0次

    昨天工作:
    在dssm网络的基础上增加了self attention模块。构造训练测试数据。网络训练效果较修改前有所改善,当前(训练完一个epoch)网络训练阶段效果:acc:0.85;auc:0.63;precision:0.71

    今天计划:

    1. 使用ugc数据对网络进行inference测试其效果

    2. 调研最近的文章,寻找其他优化网络的方式

    3. 程序总是莫名其妙地崩溃,每次都很难达到效果好的 step
      尝试中……

    4. 找论文

    Gated Attentive-Autoencoder for Content-Aware Recommendation

    1. 构造inference数据

    inference_query : hs_dssm_dic_query_inf_7 - | query_id | query |
    hs_dssm_dic_query_inf_11 : | id | query_emb |
    inference_title : hs_dssm_dic_title_inf_10 - | item_id | title |
    hs_dssm_dic_title_inf_14 : | id | title_emb |


    hs_tmp_157 : | title_id | query_id |

    -- Explode each inference query into (query_id, word, weight) rows:
    --   1. term-weight the query text into "%"-separated "{word}|{weight}" pairs
    --   2. split the pair list on "%" into one row per pair
    --   3. split each pair on "|" into separate word / weight columns
    create table graph_embedding.hs_dssm_dic_query_inf_8 as
    select graph_embedding:hs_split_1(query_id, pair, "|") as (query_id, word, weight)
    from (
        select bi_udf:bi_split_value(query_id, tag_result, "%") as (query_id, pair)
        from (
            select query_id,
                   search_kg:alinlp_termweight_ecom(query, "%", "{word}|{weight}", 1, 0) as tag_result
            from graph_embedding.hs_dssm_dic_query_inf_7
            where lengthb(query) > 0
        ) weighted
    ) exploded
    where lengthb(exploded.pair) > 0;

    -- Look up a 100-dim word embedding for every (query_id, word) row.
    create table hs_dssm_dic_query_inf_9 as
    select query_id as id,
           word,
           search_kg:alinlp_word_embedding(hs_return_clean(word), "100", "CONTENT_SEARCH") as emb
    from hs_dssm_dic_query_inf_8;

    -- Join the term weights back onto the per-word embeddings and derive the
    -- adjusted weight used when aggregating word embeddings into a query embedding.
    -- Fix: join conditions used nonstandard "==" — replaced with standard SQL "=".
    create table hs_dssm_dic_query_inf_10 as
    select b.id,
           a.word,
           b.emb,
           a.weight,
           graph_embedding:change_weight_query_key_1(a.word, a.weight) as new_weight
    from hs_dssm_dic_query_inf_8 a
    join hs_dssm_dic_query_inf_9 b
        on a.query_id = b.id
       and a.word = b.word;

    -- Collapse the per-word rows into one embedding per query id.
    -- return_concat_1 is presumably an aggregate UDF combining (weight, emb)
    -- pairs -- TODO confirm against its definition.
    create table hs_dssm_dic_query_inf_11 as
    select id,
           return_concat_1(new_weight, emb) as query_emb
    from hs_dssm_dic_query_inf_10
    group by id;


    -- Title-side mirror of the query pipeline: explode each title into
    -- (item_id, word, weight) rows via term weighting, then two splits
    -- (on "%" for pairs, on "|" for word/weight).
    create table graph_embedding.hs_dssm_dic_title_inf_11 as
    select graph_embedding:hs_split_1(item_id, pair, "|") as (item_id, word, weight)
    from (
        select bi_udf:bi_split_value(item_id, tag_result, "%") as (item_id, pair)
        from (
            select item_id,
                   search_kg:alinlp_termweight_ecom(title, "%", "{word}|{weight}", 1, 0) as tag_result
            from graph_embedding.hs_dssm_dic_title_inf_10
            where lengthb(title) > 0
        ) weighted
    ) exploded
    where lengthb(exploded.pair) > 0;

    -- Look up a 100-dim word embedding for every (item_id, word) row.
    create table hs_dssm_dic_title_inf_12 as
    select item_id as id,
           word,
           search_kg:alinlp_word_embedding(hs_return_clean(word), "100", "CONTENT_SEARCH") as emb
    from hs_dssm_dic_title_inf_11;

    -- Join term weights back onto per-word title embeddings and derive the
    -- adjusted weight (same transform as the query side).
    -- Fix: join conditions used nonstandard "==" — replaced with standard SQL "=".
    create table hs_dssm_dic_title_inf_13 as
    select b.id,
           a.word,
           b.emb,
           a.weight,
           graph_embedding:change_weight_query_key_1(a.word, a.weight) as new_weight
    from hs_dssm_dic_title_inf_11 a
    join hs_dssm_dic_title_inf_12 b
        on a.item_id = b.id
       and a.word = b.word;

    -- Collapse the per-word rows into one embedding per title id.
    -- return_concat_1 is presumably an aggregate UDF combining (weight, emb)
    -- pairs -- TODO confirm against its definition.
    create table hs_dssm_dic_title_inf_14 as
    select id,
           return_concat_1(new_weight, emb) as title_emb
    from hs_dssm_dic_title_inf_13
    group by id;


    inference_query : hs_dssm_dic_query_inf_7 - | query_id | query |
    hs_dssm_dic_query_inf_11 : | id | query_emb |
    inference_title : hs_dssm_dic_title_inf_10 - | item_id | title |
    hs_dssm_dic_title_inf_14 : | id | title_emb |


    hs_tmp_157 : | title_id | query_id |

    -- Build the inference input: each candidate (query, title) pair from
    -- hs_tmp_157 joined to its precomputed query and title embeddings.
    -- DISTINCT guards against duplicate pairs in hs_tmp_157.
    -- Fix: join conditions used nonstandard "==" — replaced with standard SQL "=".
    create table hs_tmp_265 as
    select distinct
           c.query_id,
           c.query_ws,
           c.title_id as video_id,
           d.title_emb as video_ws
    from (
        select a.*,
               b.query_emb as query_ws
        from hs_tmp_157 a
        join hs_dssm_dic_query_inf_11 b
            on a.query_id = b.id
    ) c
    join hs_dssm_dic_title_inf_14 d
        on c.title_id = d.id;

    1. 开始inference

    -- Take a 10k-row sample of the inference input for a quick smoke test.
    -- NOTE: LIMIT without ORDER BY returns an arbitrary subset; that is
    -- acceptable here since this is only a sampling step.
    create table hs_tmp_266 as
    select *
    from hs_tmp_265
    limit 10000;

    # Run DSSM inference (inference_v8.py) over hs_tmp_265 on 30 workers / 10 ps,
    # writing scores to hs_dssm_result_3.
    # Fixes vs. the original command:
    #   - removed a stray space inside the --ckpt value ("hs_ugc_video_4e_ .ckpt-1"),
    #     which would make the checkpoint name unresolvable.
    #     NOTE(review): confirm the intended checkpoint -- the retraining run below
    #     saves "hs_ugc_video_4e_6.ckpt".
    #   - dropped the duplicated -DuseSparseClusterSchema=True flag (was passed twice).
    pai -name tensorflow140 \
        -Dscript="file:///home/hengsong/origin_deep_cluster_odps_8.tar.gz" \
        -DentryFile="inference_v8.py" \
        -Dcluster='{"worker":{"count":30, "cpu":200, "memory":4000}, "ps":{"count":10, "cpu":200, "memory":5000}}' \
        -DuseSparseClusterSchema=True \
        -DenableDynamicCluster=True \
        -Dtables="odps://graph_embedding/tables/hs_train_data_dssm_v2_5,odps://graph_embedding/tables/hs_test_data_dssm_v2_5,odps://graph_embedding/tables/hs_tmp_265" \
        -Doutputs="odps://graph_embedding/tables/hs_dssm_result_3" \
        -DcheckpointDir="oss://bucket-automl/hengsong/?role_arn=acs:ram::1293303983251548:role/graph2018&host=cn-hangzhou.oss-internal.aliyun-inc.com" \
        -DuserDefinedParameters="--learning_rate=3e-4 --batch_size=1024 --is_save_model=True --attention_type=1 --num_epochs=1 --ckpt=hs_ugc_video_4e_.ckpt-1";

    20190820150818354ggwzfk89

    1. 重新训练

    # Retrain the DSSM (train_inference_v8.py) on 50 workers / 10 ps with lr=3e-2,
    # batch_size=1024, 1 epoch; results go to hs_dssm_result_4 and the model is
    # saved as checkpoint hs_ugc_video_4e_6.ckpt.
    pai -name tensorflow140 -Dscript="file:///home/hengsong/origin_deep_cluster_odps_8.tar.gz" -DentryFile="train_inference_v8.py" -Dcluster='{"worker":{"count":50, "cpu":200, "memory":4000}, "ps":{"count":10, "cpu":200, "memory":5000}}' -Dtables="odps://graph_embedding/tables/hs_train_data_dssm_v2_5,odps://graph_embedding/tables/hs_test_data_dssm_v2_5,odps://graph_embedding/tables/hs_tmp_265" -Doutputs="odps://graph_embedding/tables/hs_dssm_result_4" -DcheckpointDir="oss://bucket-automl/hengsong/?role_arn=acs:ram::1293303983251548:role/graph2018&host=cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="--learning_rate=3e-2 --batch_size=1024 --is_save_model=True --attention_type=1 --num_epochs=1 --ckpt=hs_ugc_video_4e_6.ckpt" -DuseSparseClusterSchema=True;

    20190820161451451guw09zp3

    相关文章

      网友评论

          本文标题:2019-08-20工作进展

          本文链接:https://www.haomeiwen.com/subject/gqyusctx.html