
Solving a Latent Factor Model with Gradient Descent: a Python Implementation

Author: 多彩海洋 | Published 2019-09-19 22:11

    LFM with gradient descent: implementing model-based collaborative filtering

    0. Import dependencies

    import numpy as np   # numerical computing: matrix and vector operations
    import pandas as pd  # data analysis (imported in the original but not used below)
    

    1. Prepare the data

    # Define the rating matrix R (a 0 entry means "not yet rated")
    R = np.array([[4, 0, 2, 0, 1],
                  [0, 2, 3, 0, 0],
                  [1, 0, 2, 4, 0],
                  [5, 0, 0, 3, 1],
                  [0, 0, 1, 5, 1],
                  [0, 3, 2, 4, 1]])
    # R.shape     # (6, 5)
    # R.shape[0]  # 6
    # R.shape[1]  # 5
    # len(R)      # 6
    # len(R[0])   # 5
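
    As a quick sanity check (my own addition, not in the original post), it helps to confirm how sparse the matrix is, since only the nonzero entries drive the training below:

    # Count the observed (nonzero) ratings — zeros mean "unrated"
    mask = R > 0
    print(mask.sum(), "observed ratings out of", R.size)  # 18 out of 30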
    

    2. Implement the algorithm

    """@输入参数:
    R:M*N 的评分矩阵
    K:隐特征向量维度
    max_iter: 最大迭代次数
    alpha:步长lamda:正则化系数
    @输出:分解之后的 P,Q
    P:初始化用户特征矩
    阵 M*K
    Q:初始化物品特征矩阵 N*K,Q 的转置是 K*N
    """
    
    # Fix the hyperparameters
    K = 5
    max_iter = 5000
    alpha = 0.0002
    lamda = 0.004
    
    # Core algorithm
    def LMF_grad_desc(R, K=2, max_iter=1000, alpha=0.0001, lamda=0.002):
        # Basic dimensions
        M = len(R)
        N = len(R[0])
        # Initialize P and Q with random values
        P = np.random.rand(M, K)
        Q = np.random.rand(N, K)
        Q = Q.T
        # Start iterating
        for step in range(max_iter):
            # Loop over every user u and item i, and run gradient descent
            # on the corresponding feature vectors Pu, Qi
            for u in range(M):
                for i in range(N):
                    # For every rating > 0, compute the prediction error e_ui
                    if R[u][i] > 0:
                        e_ui = np.dot(P[u, :], Q[:, i]) - R[u][i]
                        # Plug into the formula: gradient-descent update of Pu, Qi
                        for k in range(K):
                            P[u][k] = P[u][k] - alpha * (2 * e_ui * Q[k][i] + 2 * lamda * P[u][k])
                            Q[k][i] = Q[k][i] - alpha * (2 * e_ui * P[u][k] + 2 * lamda * Q[k][i])
            # All (u, i) visited and all feature vectors updated, so we have
            # P and Q and can compute the predicted rating matrix
            predR = np.dot(P, Q)
            # Compute the current loss: sum of squared prediction errors
            cost = 0
            for u in range(M):
                for i in range(N):
                    # Again, only ratings > 0 contribute to the loss
                    if R[u][i] > 0:
                        cost += (np.dot(P[u, :], Q[:, i]) - R[u][i]) ** 2
                        # Add the regularization term
                        for k in range(K):
                            cost += lamda * (P[u][k] ** 2 + Q[k][i] ** 2)
            if cost < 0.0001:
                # Loss below the threshold: stop iterating
                break
        return P, Q.T, cost
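
    The triple loop above is easy to read but slow on anything larger than a toy matrix. As a sketch of an alternative (my own variant, not from the original post), the same factorization can be trained with full-batch, vectorized updates over a mask of observed entries. Note two differences: all ratings are updated at once rather than one at a time, and the L2 penalty is applied once per step rather than once per observed rating, so a different alpha or iteration count may be needed to match the results above:

    # Vectorized full-batch variant (my sketch, not the article's function)
    def lfm_grad_desc_vec(R, K=2, max_iter=1000, alpha=0.0001, lamda=0.002):
        M, N = R.shape
        P = np.random.rand(M, K)
        Q = np.random.rand(N, K)
        mask = (R > 0).astype(float)  # 1.0 where a rating is observed
        cost = 0.0
        for _ in range(max_iter):
            E = mask * (P @ Q.T - R)  # prediction errors, zeroed on unrated entries
            cost = (E ** 2).sum() + lamda * ((P ** 2).sum() + (Q ** 2).sum())
            if cost < 0.0001:
                break
            # Gradients of the masked squared error plus the L2 penalty
            grad_P = 2 * E @ Q + 2 * lamda * P
            grad_Q = 2 * E.T @ P + 2 * lamda * Q
            P -= alpha * grad_P
            Q -= alpha * grad_Q
        return P, Q, cost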
    
    

    3. Test

    P, Q, cost = LMF_grad_desc(R, K, max_iter, alpha, lamda)
    print(P)
    print(Q)
    print(cost)
    predR = P.dot(Q.T)
    print(R)
    predR
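
    To compare fits with a single number instead of eyeballing predR, one option (my own addition, not in the original) is the RMSE restricted to the observed entries:

    # Root-mean-square error at positions where R actually has a rating
    mask = R > 0
    rmse = np.sqrt(((predR - R)[mask] ** 2).mean())
    print("RMSE on observed ratings:", rmse)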
    
    

    With K = 2, the output is:

    [[1.44372596 1.29573962]
     [1.82185633 0.0158696 ]
     [1.5331521  0.16327061]
     [0.31364667 1.9008297 ]
     [1.03622742 2.03603634]
     [1.34107967 0.93406796]]
    [[ 0.4501051   2.55477489]
     [ 1.18869845  1.20910294]
     [ 1.54255106 -0.23514326]
     [ 2.33556583  1.21026575]
     [ 0.43753164  0.34555928]]
    1.0432768290554293
    [[4 0 2 0 1]
     [0 2 3 0 0]
     [1 0 2 4 0]
     [5 0 0 3 1]
     [0 0 1 5 1]
     [0 3 2 4 1]]
    array([[3.96015147, 3.2828374 , 1.92233657, 4.9401063 , 1.07943065],
           [0.86057008, 2.18482578, 2.80657478, 4.27427181, 0.80260368],
           [1.10719924, 2.0198665 , 2.32657341, 3.77837848, 0.72722223],
           [4.99736596, 2.6711301 , 0.03684871, 3.03305153, 0.79407969],
           [5.66802576, 3.69353946, 1.11967348, 4.8843224 , 1.15695354],
           [2.98996017, 2.72352365, 1.84904408, 4.2626503 , 0.90954065]])
    
    

    With K = 5, the output is:

    [[ 0.77991893  0.95803701  0.75945903  0.74581653  0.58070622]
     [ 1.51777367  0.66949331  0.89818609  0.23566984  0.56583223]
     [ 0.03567022  0.58391558  1.42477223  0.87262652 -0.52553017]
     [ 1.24101793  0.86257736  0.73772417  0.18181617  0.97014545]
     [ 0.58789616  0.53522492  0.48830352  1.80622908  0.81202167]
     [ 1.08640318  0.87660384  0.68935314  0.84506882  0.92284071]]
    [[ 1.64469428  1.10535565  0.56686066  0.38656745  1.56519511]
     [ 0.61680687  0.57188343  0.49729111  0.9623455   0.43969708]
     [ 0.99260822  0.6007452   1.14768173 -0.16998497 -0.14094479]
     [ 0.47070988  0.85347655  1.43546859  1.8185161   0.29759968]
     [ 0.07923314  0.49412497  0.53285806  0.23753882 -0.05146021]]
    0.7478305665280703
    [[4 0 2 0 1]
     [0 2 3 0 0]
     [1 0 2 4 0]
     [5 0 0 3 1]
     [0 0 1 5 1]
     [0 3 2 4 1]]
    array([[3.9694342 , 2.37968507, 2.01268221, 3.8040546 , 1.08714641],
           [4.72218838, 2.2412959 , 2.81976984, 3.17210672, 0.95653992],
           [1.02652007, 1.67315396, 1.94711343, 3.99085212, 1.28488146],
           [5.0014878 , 2.22716585, 2.42906339, 2.99867943, 0.91091753],
           [3.80452512, 3.00679363, 1.04401937, 4.96078887, 0.95850804],
           [4.91762916, 2.73324389, 2.1224277 , 4.06049468, 1.03980543]])
    
    

    Clearly the latter fits better: raising K from 2 to 5 drops the final cost from about 1.04 to 0.75, and the predictions at the observed positions track the true ratings more closely.
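
    To pick K less by eye, one simple extension (a sketch of mine, not in the original post) is to sweep a few values and compare the final costs — keeping in mind that on a matrix this small, a large K mostly memorizes the observed ratings:

    # Re-run the factorization for several latent dimensions
    for K in (2, 3, 4, 5):
        P, Q, cost = LMF_grad_desc(R, K, max_iter, alpha, lamda)
        print(f"K={K}: final cost {cost:.4f}")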
