美文网首页
20190723工作进展

20190723工作进展

作者: Songger | 来源:发表于2019-07-23 12:43 被阅读0次
    # Repackage the training code into a fresh tarball (remove the old one first).
    1. rm -rf ../../origin_deep_cluster_odps_8.tar.gz
      tar -cvzf ../../origin_deep_cluster_odps_8.tar.gz *

    # Start the dev container: mount the work dir into /home/hengsong and use host networking.
    sudo docker run -ti --name hengsong2 -v /Users/songge/Desktop/beifen/hengsong.lhs:/home/hengsong --net=host reg.docker.alibaba-inc.com/zhiji/imgtoolkit_video:nightly-dev bash

    1. docker 的 java 环境

    source /etc/profile

    # Set up the in-container JDK 1.8 environment.
    # NOTE(review): the web page stripped the `$` from the variable
    # expansions (the surviving `$JAVA_HOME` in the last CLASSPATH entry
    # shows the intended form); restored below.
    JAVA_HOME=/home/hengsong/jdk1.8.0_221
    JRE_HOME=$JAVA_HOME/jre
    PATH=$PATH:$JAVA_HOME/bin
    CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
    export JAVA_HOME
    export JRE_HOME
    export PATH
    export CLASSPATH

    1. 给阔姐的表

    graph_embedding.jl_jingyan_query_related_video_pool
    where type_biz=2
    body 里需要提取的字段：item_id、video_url（来自 play_url/s）、video_id，以及 text（ugc_old_memo 中 "feedback" 对应的 text/s）

    -- Extract video fields for the UGC pool from the JSON `body` column.
    -- NOTE(review): every get_json_object path was missing its `$` root
    -- (stripped by the web page) — restored, otherwise the paths return NULL.
    create table hs_jingyan_query_related_video_pool_ugc as
    select
        id,
        -- play_url may live under k2 (relative, needs host prefix) or k3 (protocol-relative)
        coalesce(
            concat('http://cloud.video.taobao.com', get_json_object(body, '$.entities.k2.play_url/s')),
            concat('http:', get_json_object(body, '$.entities.k3.play_url/s'))
        ) as video_url,
        coalesce(
            get_json_object(body, '$.entities.k3.video_id/l'),
            get_json_object(body, '$.entities.k2.video_id/l')
        ) as video_id,
        coalesce(
            get_json_object(body, '$.entities.k1.item_id/l'),
            get_json_object(body, '$.entities.k0.item_id/l')
        ) as item_id,
        coalesce(
            get_json_object(body, '$.entities.k2.text/s'),
            get_json_object(body, '$.entities.k1.text/s')
        ) as text
    from graph_embedding.jl_jingyan_query_related_video_pool
    where ds = max_pt('graph_embedding.jl_jingyan_query_related_video_pool')
      and type_biz = 2;

    graph_embedding.hs_jingyan_query_related_video_pool_ugc

    1. 得到负采样

    hs_tmp_dssm_6

    hs_tmp_dssm_index_1
    hs_tmp_dssm_item_id_1
    -- Candidate pool for negative sampling: every item id from the positive set.
    create table hs_tmp_36 as select item_id from hs_tmp_dssm_6;
    -- Assign each item a random bucket id in [0, 135525).
    create table hs_tmp_dssm_item_id_1 as select int(rand() * 135525) as randint, item_id from hs_tmp_36;
    -- Run this insert 5 times to oversample candidates — the original
    -- "(5 * )" prefix was a note, not valid SQL, so it is a comment now.
    insert into table hs_tmp_dssm_item_id_1 select int(rand() * 135525) as randint, item_id from hs_tmp_36;

    hs_tmp_dssm_8
    -- Pair each query index with randomly-bucketed items to form raw
    -- negative candidates. Standard SQL equality is `=` (was `==`).
    -- 5421 presumably derives from the bucket count (135525 / 25) — TODO confirm.
    create table hs_tmp_dssm_8 as
    select a.index, b.item_id
    from (select index from hs_tmp_dssm_index_1) a
    join (select * from hs_tmp_dssm_item_id_1) b
    on a.index = b.randint % 5421;

    -- Sanity check: how evenly the negatives spread across indexes.
    select index, count(*) as freq from hs_tmp_dssm_8 group by index order by freq desc limit 10;

    去重

    hs_tmp_dssm_9->hs_tmp_dssm_10

    -- Anti-join dedup: mark sampled negatives that collide with a true
    -- positive in hs_tmp_dssm_6 (match -> indexb/item_idb non-NULL).
    create table hs_tmp_dssm_9 as select a.*, b.index as indexb, b.item_id as item_idb from (select * from hs_tmp_dssm_8)a left join (select * from hs_tmp_dssm_6)b on a.index=b.index and a.item_id=b.item_id;

    -- Keep only the left-join misses (both NULL) as negatives, label 0.
    create table hs_tmp_dssm_10 as select index, item_id, 0 as label from hs_tmp_dssm_9 where indexb is NULL and item_idb is NULL;

    -- Positive samples, label 1.
    create table hs_tmp_dssm_11 as select index, item_id, 1 as label from hs_tmp_dssm_6;

    得到样本集合
    positive samples : hs_tmp_dssm_11
    negative samples : hs_tmp_dssm_10

    乱序
    -- Merge negatives into the positive table, then shuffle by a random key.
    -- NOTE(review): original read `hs_tmp_10`, but the negative-sample
    -- table built above is `hs_tmp_dssm_10` — corrected; verify intent.
    insert into table hs_tmp_dssm_11 select * from hs_tmp_dssm_10;
    -- 11e9 exceeds the 32-bit int range (~2.1e9), so cast to bigint
    -- instead of int to avoid overflowed/NULL shuffle keys.
    create table hs_tmp_38 as select cast(rand() * 11000000000 as bigint) as id, * from hs_tmp_dssm_11;
    create table hs_tmp_dssm_12 as select index, item_id, label from hs_tmp_38 order by id;

    create table hs_tmp_dssm_13 as select * from hs_tmp_38 order by id;

    下面这个更好:

    -- Better shuffle: DISTRIBUTE BY random() spreads rows randomly across
    -- reducers. The standalone `yes` lines answer the client's interactive
    -- drop confirmation and are not SQL.
    drop table hs_tmp_dssm_12;
    yes
    create table hs_tmp_dssm_12 lifecycle 30 as select * from hs_tmp_dssm_13 DISTRIBUTE by random();

    drop table hs_tmp_dssm_13;
    yes
    create table hs_tmp_dssm_13 lifecycle 30 as select * from hs_tmp_dssm_12 DISTRIBUTE by random();

    +------------+------------+
    | label | freq |
    +------------+------------+
    | 0 | 5828333140 |
    | 1 | 1171862133 |
    +------------+------------+

    -- Same random-distribute shuffle applied to zj's sample table
    -- (30-day lifecycle; idempotent drop/create).
    drop table if exists graph_embedding.zj_xhs_dssm_pos_neg_sample_info_shuffle_;
    create table if not exists graph_embedding.zj_xhs_dssm_pos_neg_sample_info_shuffle_ LIFECYCLE 30
    as select * from graph_embedding.zj_xhs_dssm_pos_neg_sample_info_ DISTRIBUTE by random();

    取得query_ws和title_ws字段:
    -- Lookup tables: segmented query text per index, segmented title per item.
    create table hs_tmp_39 as select distinct index, se_keyword_ws from hs_tmp_dssm_3;
    create table hs_tmp_40 as select distinct item_id, title_ws from hs_tmp_dssm_3;

    -- Attach the segmented query to each labeled sample.
    -- Standard SQL equality is `=` (was `==`).
    create table hs_tmp_41 as
    select a.index, a.se_keyword_ws, b.item_id, b.label
    from (select * from hs_tmp_39) a
    join (select * from hs_tmp_dssm_12) b
    on a.index = b.index;

    -- Attach the segmented title to complete the training rows.
    create table hs_tmp_42 as
    select a.title_ws, b.*
    from (select * from hs_tmp_40) a
    join (select * from hs_tmp_41) b
    on a.item_id = b.item_id;

    训练数据:hs_train_data_dssm_1 测试数据:hs_test_data_dssm_1
    -- 80/20 train/test split via the PAI `split` component; the `yes`
    -- lines answer the interactive drop confirmations.
    drop table if exists hs_train_data_dssm_1;
    yes
    drop table if exists hs_test_data_dssm_1;
    yes
    PAI -name split -project algo_public
    -DinputTableName=graph_embedding.hs_tmp_42
    -Doutput1TableName=graph_embedding.hs_train_data_dssm_1
    -Doutput2TableName=graph_embedding.hs_test_data_dssm_1
    -Dfraction=0.8
    -DmemSizePerCore=4096
    -DcoreNum=300
    ;

    判断title补全长度
    create table hs_title_length as select REGEXP_COUNT(title_ws, ' ') as title_len, REGEXP_COUNT(se_keyword_ws, ' ') as query_len from hs_tmp_42;
    总数量:7000195273

    25: 129186737
    30: 43367246

    10: 0
    取20更好一点

    pai -name tensorflow140 -Dscript="file:///home/hengsong/origin_deep_cluster_odps_8.tar.gz" -DentryFile="train_v4.py" -Dcluster='{"worker":{"count":30, "cpu":200, "memory":4000}, "ps":{"count":30, "cpu":200, "memory":5000}}' -Dtables="odps://graph_embedding/tables/hs_train_data_dssm_1,odps://graph_embedding/tables/hs_test_data_dssm_1" -DcheckpointDir="oss://bucket-automl/hengsong/?role_arn=acs:ram::1293303983251548:role/graph2018&host=cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="--learning_rate=1e-2 --batch_size=2048 --is_save_model=True --attention_type=1 --num_epochs=100 --ckpt=hs_ugc_video.ckpt" -DuseSparseClusterSchema=True;

    当前进程:

    1. 给之己的hive语句

    相关文章

      网友评论

          本文标题:20190723工作进展

          本文链接:https://www.haomeiwen.com/subject/daanlctx.html