Notes on a Simple hadoop+tornado Exercise (Part 2) -- hive

Author: kur0mi | Published 2018-08-18 20:40

    Hive database operations

    Hive supports most SQL syntax, so anyone familiar with SQL can get comfortable with Hive quickly.

    hero

    -- hero
    -- load the raw data
    create table hero(line string);
    load data inpath '/hero.txt' into table hero;
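 
    -- each line of /hero.txt presumably looks like this (an assumption,
    -- inferred from the regexp below; the raw file is not shown here):
    --   "HeroName,1" # 10234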
     
    -- extract the hero name, win flag, and player id
    create table info as select
        regexp_extract(line, '^"(\\w+),(\\d)"\\s#\\s(\\d+)$', 1) as name,
        regexp_extract(line, '^"(\\w+),(\\d)"\\s#\\s(\\d+)$', 2) as is_win,
        regexp_extract(line, '^"(\\w+),(\\d)"\\s#\\s(\\d+)$', 3) as user_id
    from hero;
     
    -- per user id: total games played and overall win rate
    select
        user_id,
        count(1) as total_count,
        sum(cast(is_win as int))/count(1) as win_rate
    from info group by user_id with rollup;
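    -- note: with rollup appends one extra summary row whose user_id is
    -- NULL, carrying the total count and overall win rate for all users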
     
    -- per hero: games played and win rate
    select
        name,
        count(1) as total_count,
        sum(cast(is_win as int))/count(1) as win_rate
    from info group by name with rollup;
     
    -- top 3 heroes by win rate
    select
        name,
        count(1) as total_count,
        sum(cast(is_win as int))/count(1) as win_rate
    from info group by name order by win_rate desc limit 3;
     
    -- per user and hero: times played and win rate
    select
        user_id,
        name,
        count(1) as total_count,
        sum(cast(is_win as int))/count(1) as rate
    from info group by user_id, name;
    

    gamelog

    -- gamelog
    -- extract each field from gamelog
    -- (this reuses the table name info from the hero section above, so
    -- drop the old table first)
    drop table if exists info;
    create table info as select
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 1) as id,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 2) as device,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 3) as device2,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 4) as online_date,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 5) as online_time,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 6) as offline_date,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 7) as offline_time,
        regexp_extract(line, '^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$', 8) as gametime
    from gamelog;
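 
    -- aside (a sketch, not part of the original post): instead of
    -- repeating the pattern eight times, Hive's built-in RegexSerDe can
    -- parse at table-definition time, mapping capture group i to column i:
    create table gamelog_parsed (
        id string, device string, device2 string,
        online_date string, online_time string,
        offline_date string, offline_time string, gametime string)
    row format serde 'org.apache.hadoop.hive.serde2.RegexSerDe'
    with serdeproperties ("input.regex" =
        "^\\s*([\\w-]+)\\s+((Android|iOS)\\s+[\\d.]+)\\s+([\\d-]+)T([\\d:]+)\\s+([\\d-]+)T([\\d:]+)\\s+(\\d+)\\s*$")
    stored as textfile;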
     
    -- number of users on Android vs. iOS (note: dedupe by id)
    select
        device2,
        count(distinct id) as count
    from info group by device2;
     
    /*   
    Android 9948
    iOS     23318
    */
     
    -- number of users per Android / iOS version (dedupe by id)
    select
        device,
        count(distinct id) as count
    from info group by device;
     
    /*
    Android 4.3     2037
    Android 4.4     1883
    Android 5.0     2011
    Android 6.0     1969
    Android 7.0     2048
    iOS     10.1.1  3294
    iOS     10.2    3249
    iOS     10.3.2  3352
    iOS     11.0    3298
    iOS     11.1    3399
    iOS     11.2    3290
    iOS     11.2.5  3436
    */
     
    -- per Android / iOS version: login count and average online time
    select
        device,
        count(1) as login_count,
        avg(cast(gametime as int)) as aver_time
    from info group by device;
     
    /*
    Android 4.3     20303   24598.16017337339
    Android 4.4     18840   24282.822399150744
    Android 5.0     20202   24659.246064746065
    Android 6.0     19402   24436.93505824142
    Android 7.0     20519   24462.314440274866
    iOS     10.1.1  32800   24391.159786585366
    iOS     10.2    32661   24450.062643519796
    iOS     10.3.2  33852   24518.667316554413
    iOS     11.0    33304   24433.191718712467
    iOS     11.1    33859   24444.414719867687
    iOS     11.2    33525   24549.974645786726
    iOS     11.2.5  34619   24442.049019324648
    */
     
    -- distinct users logging in per day, by time-of-day bucket
    select online_date, stage, count(distinct id) as count from (
        select id, online_date, (
            case
            when time between 0 and 6 then 'dawn'
            when time between 7 and 12 then 'morning'
            when time between 13 and 15 then 'mid'
            when time between 16 and 19 then 'afternoon'
            when time between 20 and 24 then 'night'
            else 'error'
            end
            ) as stage from (
                select id, online_date, cast(split(online_time, ":")[0] as int) as time from info
                )a
    )b group by online_date, stage;
     
     
    -- time-of-day buckets used above:
    -- 0-6   dawn
    -- 7-12  morning
    -- 13-15 mid
    -- 16-19 afternoon
    -- 20-24 night
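 
    -- equivalent inner query: assuming online_time is always 'HH:MM:SS',
    -- Hive's hour() could replace the split/cast, e.g.
    --   select id, online_date, hour(online_time) as time from info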
    

    Sample output

    hive> select online_date, stage, count(distinct id) as count from (
    > select id, online_date, (
    > case
    > when time between 0 and 6 then 'dawn'
    > when time between 7 and 12 then 'morning'
    > when time between 13 and 15 then 'mid'
    > when time between 16 and 19 then 'afternoon'
    > when time between 20 and 24 then 'night'
    > else 'error'
    > end
    > ) as stage from (
    > select id, online_date, cast(split(online_time, ":")[0] as int) as time from info
    > )a
    > )b group by online_date, stage;
    WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
    Query ID = root_20180728223233_d170698a-a243-410c-a24e-b3c12f6aaf84
    Total jobs = 1
    Launching Job 1 out of 1
    Number of reduce tasks not specified. Estimated from input data size: 1
    In order to change the average load for a reducer (in bytes):
    set hive.exec.reducers.bytes.per.reducer=<number>
    In order to limit the maximum number of reducers:
    set hive.exec.reducers.max=<number>
    In order to set a constant number of reducers:
    set mapreduce.job.reduces=<number>
    Starting Job = job_1532741886556_0012, Tracking URL = http://master:8088/proxy/application_1532741886556_0012/
    Kill Command = /opt/SoftWare/hadoop-2.7.3/bin/hadoop job -kill job_1532741886556_0012
    Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 1
    2018-07-28 22:34:58,189 Stage-1 map = 0%, reduce = 0%
    2018-07-28 22:35:59,527 Stage-1 map = 0%, reduce = 0%
    2018-07-28 22:36:38,337 Stage-1 map = 67%, reduce = 0%, Cumulative CPU 52.62 sec
    2018-07-28 22:36:45,013 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 58.61 sec
    2018-07-28 22:37:16,082 Stage-1 map = 100%, reduce = 45%, Cumulative CPU 62.77 sec
    2018-07-28 22:37:19,870 Stage-1 map = 100%, reduce = 67%, Cumulative CPU 64.69 sec
    2018-07-28 22:37:35,500 Stage-1 map = 100%, reduce = 68%, Cumulative CPU 75.08 sec
    2018-07-28 22:37:38,039 Stage-1 map = 100%, reduce = 72%, Cumulative CPU 77.26 sec
    2018-07-28 22:37:42,296 Stage-1 map = 100%, reduce = 74%, Cumulative CPU 79.44 sec
    2018-07-28 22:37:45,238 Stage-1 map = 100%, reduce = 75%, Cumulative CPU 81.63 sec
    2018-07-28 22:37:49,095 Stage-1 map = 100%, reduce = 77%, Cumulative CPU 83.79 sec
    2018-07-28 22:37:51,691 Stage-1 map = 100%, reduce = 80%, Cumulative CPU 85.79 sec
    2018-07-28 22:37:55,689 Stage-1 map = 100%, reduce = 83%, Cumulative CPU 88.48 sec
    2018-07-28 22:37:58,208 Stage-1 map = 100%, reduce = 90%, Cumulative CPU 91.65 sec
    2018-07-28 22:38:01,819 Stage-1 map = 100%, reduce = 99%, Cumulative CPU 94.74 sec
    2018-07-28 22:38:03,065 Stage-1 map = 100%, reduce = 100%, Cumulative CPU 96.58 sec
    MapReduce Total cumulative CPU time: 1 minutes 36 seconds 580 msec
    Ended Job = job_1532741886556_0012
    MapReduce Jobs Launched:
    Stage-Stage-1: Map: 1 Reduce: 1 Cumulative CPU: 96.58 sec HDFS Read: 32762295 HDFS Write: 1308 SUCCESS
    Total MapReduce CPU Time Spent: 1 minutes 36 seconds 580 msec
    OK
    2017-01-01 afternoon 3177
    2017-01-01 dawn 11076
    2017-01-01 mid 2567
    2017-01-01 morning 2676
    2017-01-01 night 5880
    2017-01-02 afternoon 4085
    2017-01-02 dawn 13930
    2017-01-02 mid 3173
    2017-01-02 morning 3285
    2017-01-02 night 7412
    2017-01-03 afternoon 4478
    2017-01-03 dawn 15393
    2017-01-03 mid 3423
    2017-01-03 morning 3690
    2017-01-03 night 8315
    2017-01-04 afternoon 4666
    2017-01-04 dawn 16371
    2017-01-04 mid 3699
    2017-01-04 morning 3795
    2017-01-04 night 8766
    2017-01-05 afternoon 5827
    2017-01-05 dawn 20144
    2017-01-05 mid 4427
    2017-01-05 morning 4762
    2017-01-05 night 10822
    2017-01-06 afternoon 7507
    2017-01-06 dawn 26158
    2017-01-06 mid 5838
    2017-01-06 morning 6249
    2017-01-06 night 13990
    2017-01-07 afternoon 7083
    2017-01-07 dawn 24763
    2017-01-07 mid 5405
    2017-01-07 morning 5803
    2017-01-07 night 13380
    Time taken: 332.701 seconds, Fetched: 35 row(s)
    hive>
