
20190711 Work Progress

Author: Songger | Published 2019-07-11 15:11
    1. Obtained the correspondence between the title tables and leaf categories
      hs_leaf_class_for_title
      Confirmed that some entries in the title tables have no match in the raw data table; roughly 1,000 entries per table go unmatched (a check is sketched below)
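
    A minimal sketch of that unmatched-count check, assuming the title table is hs_jingyan_query_related_video_pool_2_3 and hs_leaf_class_for_title keeps the matched titles (the join key on title is an assumption):

    -- count titles in the pool table with no leaf-category match
    select count(*) from
    (select title from hs_jingyan_query_related_video_pool_2_3)a
    left join
    (select title from hs_leaf_class_for_title)b
    on a.title == b.title
    where b.title is null;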

    2. Retrieve the item_id of each item
      select coalesce(get_json_object(body, '$.entities.k0.item_id/l'), get_json_object(body, '$.entities.k1.item_id/l')) as item_id, title, id from hs_jingyan_query_related_video_pool_2_3 limit 100;

    3. Retrieve leaf categories by item_id

    create table hs_leaf_class_for_title_3 as select item_id, title, cate_id, cate_name, cate_level, commodity_id, commodity_name from tbcdm.dim_tb_itm where ds=max_pt('tbcdm.dim_tb_itm') and item_id in(select coalesce(get_json_object(body, '$.entities.k0.item_id/l'), get_json_object(body, '$.entities.k1.item_id/l')) from hs_jingyan_query_related_video_pool_3_3);

    select count(*) from (select coalesce(get_json_object(body, '$.entities.k0.item_id/l'), get_json_object(body, '$.entities.k1.item_id/l')) as item_id from hs_jingyan_query_related_video_pool_3_3) a;

    create table hs_tmp_1 as select coalesce(get_json_object(body, '$.entities.k0.item_id/l'), get_json_object(body, '$.entities.k1.item_id/l')) as item_id, id from hs_jingyan_query_related_video_pool_3_3;

    1. Get the list of item ids for each query

    create table hs_tmp_0 as select se_keyword, item_list from graph_embedding.jl_jingyan_query_related_top_query_detailed;

    create table hs_tmp_1 as select b.id, b.query, a.item_list from (select se_keyword, item_list from hs_tmp_0)a left join (select id, query from hs_jingyan_query_related_top_query_1)b on a.se_keyword == b.query;
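
    The creation of hs_tmp_2 / hs_tmp_3 is not recorded in this log; a hedged sketch of how item_list could be flattened into the per-item rows used below for hs_tmp_3, assuming item_list is a comma-separated string of item ids:

    create table hs_tmp_3 as
    select id, item_id from hs_tmp_1
    lateral view explode(split(item_list, ',')) t as item_id;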

    create table hs_tmp_4 as select a.id, b.query, a.item_id from
    (select id, item_id from hs_tmp_3)a left join (select query, id from hs_jingyan_query_related_top_query_1)b on a.id == b.id;

    create table hs_leaf_class_for_query as select item_id, title, cate_id, cate_name, cate_level, commodity_id, commodity_name from tbcdm.dim_tb_itm where ds=max_pt('tbcdm.dim_tb_itm') and item_id in(select coalesce(get_json_object(body, '$.entities.k0.item_id/l'), get_json_object(body, '$.entities.k1.item_id/l')) from hs_jingyan_query_related_video_pool_3_3);

    select se_keyword, item_list from graph_embedding.jl_jingyan_query_related_top_query_detailed where se_keyword is NULL limit 100;

    1. Of the queries obtained, only 9,150 records could be matched back to the original queries; in other words, 850 queries have no corresponding leaf category (a check is sketched below)
      hs_leaf_class_for_query_0
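
    A minimal sketch to reproduce the 9,150 / 850 split, assuming hs_leaf_class_for_query_0 carries the same id column as hs_jingyan_query_related_top_query_1 (its exact schema is not shown in this log):

    select count(distinct a.id) as total_query,
           count(distinct b.id) as matched_query
    from (select id from hs_jingyan_query_related_top_query_1)a
    left join (select id from hs_leaf_class_for_query_0)b
    on a.id == b.id;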

    create table hs_tmp_7 as select b.id, b.query, b.item_id, a.title, a.cate_id, a.cate_name, a.cate_level, a.commodity_id, a.commodity_name from (select item_id, title, cate_id, cate_name, cate_level, commodity_id, commodity_name from hs_tmp_6 where item_id in(select item_id from hs_tmp_5))a left join (select id, query, item_id from hs_tmp_5)b on a.item_id == b.item_id;

    1. Filtering

    hs_result_title_query_1w_2, hs_leaf_class_for_query_0 -> hs_result_title_query_1w_filtered

    pai -name pytorch -project algo_public_dev -Dpython=3.6 -Dscript="file:///apsarapangu/disk1/hengsong.lhs/origin_deep_cluster_odps_5.tar.gz" -DentryFile="test_query_with_title.py" -Dtables="odps://graph_embedding/tables/hs_result_title_query_1w_2,odps://graph_embedding/tables/hs_leaf_class_for_query_0" -Doutputs="odps://graph_embedding/tables/hs_result_title_query_1w_filtered_tmp" -Dbucket="oss://bucket-automl/" -Darn="acs:ram::1293303983251548:role/graph2018" -Dhost="cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="" -DworkerCount=1;
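
    The actual filtering logic lives in test_query_with_title.py and is not shown here; as a hedged sketch, one plausible rule is to keep only rows whose cate_id also appears among the query's leaf categories (the output table name and the id/cate_id columns of hs_leaf_class_for_query_0 are assumptions):

    create table hs_result_title_query_1w_filtered_sketch as
    select a.* from
    (select * from hs_result_title_query_1w_2)a
    join
    (select distinct id, cate_id from hs_leaf_class_for_query_0)b
    on a.index == b.id and a.cate_id == b.cate_id;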

    create table hs_result_title_query_1w_filtered_1 as
    select a.* from
    (select * from hs_result_title_query_1w_2)a right join
    (select * from hs_result_title_query_1w_filtered)b on a.index == b.index and a.item_id == b.item_id;

    1. Remove results whose leaf category cannot be found

    create table hs_result_title_query_1w_2 as
    select a.index, a.origin_query, a.query, a.title_id, a.title, b.item_id, a.score, b.cate_id, b.cate_name, a.url from
    (select * from hs_result_title_query_1w_1 where title in (select title from hs_leaf_class_for_title_2))a join (select * from hs_leaf_class_for_title_2)b on a.title == b.title;

    1. Process the URL
      select index, origin_query, query, title_id, title, item_id, score, cate_id, cate_name, case when url != "\N" then CONCAT("http://cloud.video.taobao.com", url) end as url from hs_result_title_query_1w_filtered_2 limit 10;

    select index as qid, origin_query as query, title as video_title,
    case when url_k2 != "\N" then CONCAT("http://cloud.video.taobao.com", url_k2)
    else CONCAT("http:", url_k3)
    end

    select index as qid, origin_query as query, title as video_title,
    CONCAT("http://cloud.video.taobao.com", url) from hs_result_title_query_1w_filtered_2 limit 10;

    1. Use the top 1000 results to retrieve titles

    (0) Build the query_title correspondence table
    create table if not exists graph_embedding.hs_heter_graph_embedding_out_nearest_neighbor_007(
    node_id bigint,
    emb string
    ) LIFECYCLE 14;

    hs_heter_graph_embedding_out_nearest_neighbor_007

    PAI -name am_vsearch_nearest_neighbor_014 -project algo_market
    -Dcluster='{"worker":{"count":1,"gpu":100}}'
    -Ddim=100
    -Did_col="node_id"
    -Dvector_col="emb"
    -Dinput_slice=1
    -Dtopk=1000
    -Dnprob=1024
    -Dmetric="l2"
    -Dinput="odps://graph_embedding/tables/hs_heter_graph_embedding_video_recall_"
    -Dquery="odps://graph_embedding/tables/hs_heter_graph_embedding_ave_info_"
    -Doutputs="odps://graph_embedding/tables/hs_heter_graph_embedding_out_nearest_neighbor_007"
    -DenableDynamicCluster=true -DmaxTrainingTimeInHour=60;

    Top-1000 result table: hs_heter_graph_embedding_out_nearest_neighbor_007

    (1) Split the result
    create table hs_tmp_10 as select bi_udf:bi_split_value(node_id, emb, " ") as (query_id, title_id) from hs_heter_graph_embedding_out_nearest_neighbor_007;

    create table hs_tmp_11 as select graph_embedding:hs_split(query_id, title_id, ":") as (query_id, title_id, score) from hs_tmp_10;
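
    As a quick cross-check of what hs_split produces, the same split can be done with the built-in SPLIT_PART, assuming the title_id column of hs_tmp_10 holds "title_id:score" pairs as the statement above implies:

    select query_id,
           split_part(title_id, ':', 1) as title_id,
           split_part(title_id, ':', 2) as score
    from hs_tmp_10 limit 100;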

    Add the titles:

    create table hs_tmp_12 as
    select a.query_id, a.title_id, b.title, a.score from
    (select * from hs_tmp_11)a join
    (select title, id from hs_jingyan_query_related_video_pool_2_3)b
    on a.title_id == b.id;

    (2) Remove results whose leaf category cannot be found, and attach the leaf-category info
    create table hs_tmp_13 as
    select a.query_id as index, a.title_id, a.title, b.item_id, a.score, b.cate_id, b.cate_name from
    (select * from hs_tmp_12 where title in (select title from hs_leaf_class_for_title_2))a join (select * from hs_leaf_class_for_title_2)b on a.title == b.title;

    (3) Filtering

    pai -name pytorch -project algo_public_dev -Dpython=3.6 -Dscript="file:///apsarapangu/disk1/hengsong.lhs/origin_deep_cluster_odps_5.tar.gz" -DentryFile="test_query_with_title.py" -Dtables="odps://graph_embedding/tables/hs_tmp_13,odps://graph_embedding/tables/hs_leaf_class_for_query_0" -Doutputs="odps://graph_embedding/tables/hs_tmp_14" -Dbucket="oss://bucket-automl/" -Darn="acs:ram::1293303983251548:role/graph2018" -Dhost="cn-hangzhou.oss-internal.aliyun-inc.com" -DuserDefinedParameters="" -DworkerCount=1;

    1. Build a UDTF
      http://help.aliyun-inc.com/internaldoc/detail/27811.html?spm=a2c1f.8259796.3.8.733f96d5LV8C1z

    /apsarapangu/disk1/hengsong.lhs/deep_cluster_odps/IDEC-pytorch/hs_udf.py
    CREATE FUNCTION hs_split AS 'hs_udf.Processor' USING 'hs_udf.py';
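
    Before the CREATE FUNCTION above can reference hs_udf.py, the file normally has to be uploaded as a project resource first; a minimal sketch from the odpscmd console (-f overwrites an existing resource of the same name):

    add py /apsarapangu/disk1/hengsong.lhs/deep_cluster_odps/IDEC-pytorch/hs_udf.py -f;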

    select graph_embedding:hs_split(query, title_id, ":") as (query_id, title_id, score) from hs_heter_graph_embedding_out_nearest_neighbor_007 limit 100;
