xgboost learning to rank training

Author: 我永远喜欢高木同学 | Published 2020-05-06 16:16

    The raw data comes as two files: one holding each sample id with its feature data, and one holding each sample id with its label.
    First, turn the libsvm-style features into a format XGBoost can consume, and split the data 8:1:1 by sample id into training, validation, and test sets (the id ranges are listed in the comments below).

    #train id:0-42622
    #valid id:42623-47957
    #test id:47958-53285
    def transdata(feature_file_path, group_file_path, out_feature_path_train, out_feature_path_valid, out_feature_path_test):
        output_feature_train = open(out_feature_path_train, "w")
        output_feature_valid = open(out_feature_path_valid, "w")
        output_feature_test = open(out_feature_path_test, "w")
        # The two files are aligned line by line: the feature file holds
        # "<sample id>    <libsvm features>" and the label file starts with the label.
        with open(feature_file_path) as features, open(group_file_path) as groups:
            for x, y in zip(features, groups):
                splits_x = x.strip().split("    ")
                splits_y = y.strip().split("    ")
                sample_id = int(splits_x[0])
                # Write "<label> <libsvm features>" into the split this id falls into.
                if sample_id < 42623:
                    output_feature_train.write(splits_y[0] + " " + splits_x[1] + "\n")
                elif sample_id < 47958:
                    output_feature_valid.write(splits_y[0] + " " + splits_x[1] + "\n")
                else:
                    output_feature_test.write(splits_y[0] + " " + splits_x[1] + "\n")
        output_feature_train.close()
        output_feature_valid.close()
        output_feature_test.close()

    if __name__ == "__main__":
        transdata("topic_features.txt", "gt.txt", "libsvm_format.train.txt", "libsvm_format.valid.txt", "libsvm_format.test.txt")
    

    Next, split the labels into the same train / valid / test ranges.

    #train id:0-42622
    #valid id:42623-47957
    #test id:47958-53285
    def transdata(group_file_path, out_feature_path_train, out_feature_path_valid, out_feature_path_test):
        output_feature_train = open(out_feature_path_train, "w")
        output_feature_valid = open(out_feature_path_valid, "w")
        output_feature_test = open(out_feature_path_test, "w")
        groups = open(group_file_path)
        # Each label line is "<label> <sample id> <qid>"; route it by the sample id.
        for line in groups:
            if not line:
                break
            splits_x = line.strip().split(" ")
            sample_id = int(splits_x[1])
            if sample_id < 42623:
                output_feature_train.write(line)
            elif sample_id < 47958:
                output_feature_valid.write(line)
            else:
                output_feature_test.write(line)

        output_feature_train.close()
        output_feature_valid.close()
        output_feature_test.close()
        groups.close()

    if __name__ == "__main__":
        transdata("gt.txt", "gt.train.txt", "gt.valid.txt", "gt.test.txt")
    

    Then group the data by qid to produce the .group files (one group size per line).

    #train id:0-42622
    #valid id:42623-47957
    #test id:47958-53285
    def transgroup(group_file_path, save_path):
        group_output = open(save_path, "w")
        group_file = open(group_file_path)
        group = ""
        group_data = []
        # Rows that share a qid (the third column) are assumed to be contiguous;
        # each time the qid changes, write out the size of the finished group.
        for line in group_file:
            if not line:
                break
            splits = line.strip().split("   ")
            if group and splits[2] != group:   # skip the empty initial group
                group_output.write(str(len(group_data)) + "\n")
                group_data = []
            group = splits[2]
            group_data.append(splits[0])

        group_output.write(str(len(group_data)) + "\n")
        group_output.close()
        group_file.close()

    if __name__ == "__main__":
        transgroup("gt.test.txt", "group.test.txt")
    
    Explanation of the objective parameter
    `rank:pairwise`: Use LambdaMART to perform pairwise ranking where the pairwise loss is minimized
    `rank:ndcg`: Use LambdaMART to perform list-wise ranking where [Normalized Discounted Cumulative Gain (NDCG)](http://en.wikipedia.org/wiki/NDCG) is maximized
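
    For a list-wise objective, the params used in the training script below could be swapped for something like this (a sketch; the eval_metric and the cutoff of 10 are arbitrary choices, not part of the original run):

    # Hypothetical alternative: optimize NDCG directly instead of the pairwise loss.
    params_ndcg = {
        'objective': 'rank:ndcg',   # list-wise ranking objective
        'eval_metric': 'ndcg@10',   # report NDCG at the top 10 positions on the eval set
        'eta': 0.1,
        'max_depth': 6,
    }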
    

    Once the data is ready, feed it into XGBoost for training.

    #!/usr/bin/python
    import xgboost as xgb
    from xgboost import DMatrix
    from sklearn.datasets import load_svmlight_file
    
    # This script demonstrates how to do ranking with xgboost.train
    x_train, y_train = load_svmlight_file("libsvm_format.train.txt")
    x_valid, y_valid = load_svmlight_file("libsvm_format.valid.txt")
    x_test, y_test = load_svmlight_file("libsvm_format.test.txt")
    
    # Each group file holds one query-group size per line (see the .group files above).
    group_train = []
    with open("group.train.txt", "r") as f:
        data = f.readlines()
        for line in data:
            group_train.append(int(line.split("\n")[0]))

    group_valid = []
    with open("group.valid.txt", "r") as f:
        data = f.readlines()
        for line in data:
            group_valid.append(int(line.split("\n")[0]))

    group_test = []
    with open("group.test.txt", "r") as f:
        data = f.readlines()
        for line in data:
            group_test.append(int(line.split("\n")[0]))
    
    train_dmatrix = DMatrix(x_train, y_train)
    valid_dmatrix = DMatrix(x_valid, y_valid)
    test_dmatrix = DMatrix(x_test)

    # Tell XGBoost where each query group starts and ends.
    train_dmatrix.set_group(group_train)
    valid_dmatrix.set_group(group_valid)

    params = {'objective': 'rank:pairwise', 'eta': 0.1, 'gamma': 1.0,
              'min_child_weight': 0.1, 'max_depth': 6}
    xgb_model = xgb.train(params, train_dmatrix, num_boost_round=4,
                          evals=[(valid_dmatrix, 'validation')])
    pred = xgb_model.predict(test_dmatrix)
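
    The predictions come back as one flat array of scores, so they have to be sliced back into per-query groups before ranking or scoring. A minimal sketch of a per-group NDCG evaluation with scikit-learn (using ndcg_score here is an assumption, not part of the original post):

    import numpy as np
    from sklearn.metrics import ndcg_score

    # Slice the flat prediction array into query groups and average NDCG over them.
    scores = []
    offset = 0
    for size in group_test:
        true_slice = np.asarray(y_test[offset:offset + size])
        pred_slice = np.asarray(pred[offset:offset + size])
        if size > 1:  # ndcg_score needs at least two documents in a query
            scores.append(ndcg_score([true_slice], [pred_slice]))
        offset += size
    print("mean NDCG over test groups:", np.mean(scores))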
    

    Reference code 1: https://www.jianshu.com/p/9caef967ec0a

    Reference code 2: https://github.com/dmlc/xgboost/blob/master/demo/rank/rank.py
