基础命令
基本DDL
// 查看数据库
show databases;
// 使用数据库
use srm;
// 显示所有的函数
show functions;
// 查看函数用法
describe function substr;
// 查看当前数据库下
show tables;
// 查看表结构
desc invoice_lines;
// 查看某个表的分区情况
show partitions invoice_lines;
// 创建表
CREATE TABLE IF NOT EXISTS srm.invoice_lines_temp2(
SOURCE_SYS_KEY string comment '' ,
LEGAL_COMPANY string comment '' ,
VENDOR_NAME string comment '' ,
INVOICE_UNIT_PRICE double comment '' ,
PREPAY_UNAPPLIED double comment '' ,
GR_NON_VALUATED string comment ''
-- Partitioned by the load job id so each job's rows can be overwritten or
-- dropped as a unit; data files are plain comma-delimited text.
)partitioned by(jobid string) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',';
// LOCATION 用于指定表的数据文件路径
# LOCATION 'hdfs://cdh5/tmp/invoice/';
// 根据某张表,创建一张结构一样的表
create table invoice_lines_temp2 like invoice_lines;
// 创建外部表
CREATE EXTERNAL TABLE tinvoice_lines(id STRING, name STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LOCATION '/data/test/test_table';
// 删除表,如果是外部表,只会删除元数据(表结构),不会删除外部文件中的数据
drop table invoice_lines;
// 删除表的某个分区
alter table srm.invoice_lines_temp2 drop partition(jobid='JOBID');
// 删除外部表数据文件以及目录
DFS -rm -r /data/test/test_table;
// 更新表
ALTER TABLE invoice_lines RENAME TO invoice_lines2;
ALTER TABLE invoice_lines ADD COLUMNS (new_col2 INT COMMENT '内容');
// 清空表,比delete快很多,在mysql中会连索引记录都清空。delete会逐行记录日志,truncate 不会逐行记录日志,所以truncate更快
truncate table invoice_lines;
// 删除记录
delete from invoice [where xxx = yyy]
内部表与外部表的区别
- Hive 创建内部表时,会将数据移动到数据仓库指向的路径;
- Hive 创建外部表,仅记录数据所在的路径, 不对数据的位置做任何改变;
- 在删除表的时候,内部表的元数据和数据会被一起删除, 而外部表只删除元数据,不删除数据。这样外部表相对来说更加安全些,数据组织也更加灵活,方便共享源数据;
CLI基本参数
和数据导入相关
Hive数据导入表情况:
- 在load data时,如果加载的文件在HDFS上,此文件会被移动到表路径中;
- 在load data时,如果加载的文件在本地,此文件会被复制到HDFS的表路径中;
- 在load data时,会为每一个待导入的文件,启动一个MR任务进行导入;
-----------------------------------------有关于数据导入------------------------------------------
// 导入本地文件数据到Hive表
load data local inpath '/apps/data/test1.txt' into table invoice_lines;
// 导入HDFS文件数据到Hive表
load data inpath '/hdfs/app/data/test.txt' into table invoice_lines;
// 从别的表中查询出相应的数据并导入到Hive表中,注意列数目一定要相同
insert into table invoice_lines select * from invoice_lines_temp2;
// 导入到指定分区表,注意列数目一定要相同
insert into table invoice_lines partition(jobid='106') select xx1,xx2,xx3 from invoice_lines_temp2 where jobid='106';
// 导入到指定分区表,采用动态分区的方式,注意列数目一定要相同
insert into table invoice_lines partition(jobid) select * from invoice_lines_temp2;
// Hive还支持多表插入,即把FROM 写到前面
FROM invoice insert into table invoice_temp1 select xx,xx2 insert into table invoice_temp2 select xx4,xx6;
// 项目上用到的一些写法
INSERT OVERWRITE TABLE srm.invoice_lines_temp2 PARTITION(jobid) SELECT sour_t.* FROM srm.invoice_lines_temp2 sour_t WHERE jobid = '106';
INSERT INTO TABLE srm.invoice_lines SELECT * FROM srm.invoice_lines_temp2 WHERE jobid = '106';
INSERT OVERWRITE TABLE srm.invoice_lines_temp2 PARTITION(jobid) SELECT * FROM srm.invoice_lines_temp2 WHERE jobid='106' AND 1 = 1;
-- Overwrite the jobid partition (dynamic partitioning), keeping only rows whose
-- (source_sys_key, legal_company) group contains exactly ONE distinct
-- (invoice_line_type, invoice_head_id) combination within job '106'.
INSERT OVERWRITE TABLE srm.invoice_lines_temp2 PARTITION(jobid)
SELECT temp.* FROM srm.invoice_lines_temp2 temp JOIN
(
SELECT
source_sys_key,
legal_company,
-- concat() coerces each column to string so concat_ws can build the dedup key
count( DISTINCT concat_ws( '', concat( invoice_line_type ), concat( invoice_head_id ) ) )
FROM
srm.invoice_lines_temp2
WHERE jobid = '106'
GROUP BY
source_sys_key,
legal_company
HAVING
count( DISTINCT concat_ws( '', concat( invoice_line_type ), concat( invoice_head_id ) ) ) = 1
) t0 ON (temp.source_sys_key = t0.source_sys_key AND temp.legal_company = t0.legal_company )
where temp.jobid = '106';
// 在创建表的时候通过从别的表中查询出相应的记录并插入到所创建的表中
create table invoice_temp1 AS select xx1,xx2,xx3 from invoice;
-----------------------------------------有关于数据导入------------------------------------------
// 删除表中数据,但要保持表的结构定义
dfs -rmr /user/hive/warehouse/srm/invoice_lines;
// 创建外部表
CREATE EXTERNAL TABLE tinvoice_lines(id STRING, name STRING) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' LOCATION '/data/test/test_table';
// 导入数据到表中(文件会被移动到仓库目录/data/test/test_table)
load data inpath '/test_tmp_data.txt' INTO TABLE tinvoice_lines;
hive -e "load data local inpath '${SOURCE_PATH}/${SourceFileNameNochar}' overwrite into table srm.invoice_lines_temp1 partition(jobid='${JOBID}');"
Sqoop导入导出
Sqoop的参数非常多,具体使用时可以查资料,这里只是举几个常见例子
// 测试数据库连接
sqoop eval --connect jdbc:mysql://192.168.180.11/angel --username root --password root
// MySQL导入到Hive
sqoop import --connect jdbc:mysql://localhost:3306/test --username root --password 123456 --table person -m 1 --hive-import
// 导出该某Hive表所有数据到MySQL
sqoop export --connect jdbc:mysql://192.168.11.172:16408/ztsrm --username srm --password handhand --table invoice_lines --export-dir /apps/hive/warehouse/srm.db/invoice_lines_temp2/jobid=106 --input-fields-terminated-by ',' --input-null-string "\\\\N" --input-null-non-string "\\\\N"
// 导出该某Hive表指定分区数据到MySQL
sqoop export --connect jdbc:mysql://192.168.11.172:16408/ztsrm --username srm --password handhand --table invoice_lines --export-dir /apps/hive/warehouse/srm.db/invoice_lines_temp2 --input-fields-terminated-by ',' --input-null-string "\\\\N" --input-null-non-string "\\\\N"
主键重复
org.apache.sqoop.mapreduce.TextExportMapper.map(TextExportMapper.java:90) ... 10 more Caused by:
com.mysql.jdbc.exceptions.jdbc4.MySQLIntegrityConstraintViolationException: Duplicate entry 'uechairs-100020162000000623003' for key 'PRIMARY' at
sun.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) at
sun.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:62) at
sun.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45)
at java.lang.reflect.Constructor.newInstance(Constructor.java:423) at
插入表问题
从一个表查数据插入到另一个表中,出现以下异常:
'STATUS' in insert schema specification is not found among regular columns of srm.invoice_lines_temp2 nor dynamic partition columns.. Error encountered near token 'material_group'
执行的SQL语句如下
INSERT INTO TABLE srm.invoice_lines_temp2 PARTITION (jobid = '106') (
source_sys_key,
STATUS,
material_group
)
SELECT
'${GROUP_NAME}' source_sys_key,
(
CASE
WHEN column28 IN ( '5', 'P', 'V' ) THEN 'VERIFIED'
ELSE column28
END
) STATUS,
IF(column30 IS NULL, '', regexp_replace ( column30, '"', '' ) ) material_group
FROM
srm.invoice_lines_temp1 WHERE jobid = '106';
在网上找到的一个建议:
If source & destination tables have identical columns in same order, it not required to mention source & destination columns, else better to mention destination columns in lowercase in insert statement
如果源列和目标列相同则不用管;如果不同最好将目标列改成小写。但是我仅仅将目标列改成小写也没用,需要将源列和目标列一起改成小写
INSERT INTO TABLE srm.invoice_lines_temp2 PARTITION (jobid = '106') (
source_sys_key,
status,
material_group
)
SELECT
'${GROUP_NAME}' source_sys_key,
(
CASE
WHEN column28 IN ( '5', 'P', 'V' ) THEN 'VERIFIED'
ELSE column28
END
) status,
IF(column30 IS NULL, '', regexp_replace ( column30, '"', '' ) ) material_group
FROM
srm.invoice_lines_temp1 WHERE jobid = '106';
这里再分析一下这个问题产生的原因,因为这段SQL是拼接出来的,然后存到数据库。正常情况下执行这段SQL的流程是这样的:通过kettle从数据库拿到这段SQL,然后再通过shell组件执行,并且这段流程执行过很多次,是没有问题的。那为什么我单独把SQL拿出来就报错了?因为我通过Navicat美化了SQL,然后那个status好像被当作一个关键字来处理了,所以自动将它转换成了大写,但是表里的字段其实是小写的,所以导致了这个问题。
动态分区表
有这么一个需求,将一张Hive分区表里面的数据做一些筛选,然后通过筛选出来的数据通过 INSERT OVERWRITE TABLE 这种模式将原先表的数据覆盖,以下是SQL
INSERT OVERWRITE TABLE srm.invoice_lines_temp2 PARTITION(jobid = '106')
SELECT
sour_t.*
FROM
srm.invoice_lines_temp2 sour_t WHERE sour_t.jobid = '106';
但是很不幸,报错了,异常信息如下:
Line 1:23 Cannot insert into target table because column number/types are different ''106'': Table insclause-0 has 54 columns, but query has 55 columns
大概意思就是:列的个数不一致,插入的列需要54列,但是查出来了55列,首先猜测可能是因为分区字段的原因。
解决方法也比较简单,只是比较麻烦一点,在SELECT的时候排除分区列,将那些列一个一个查出来就可以了。但在这里不太合适,因为这是动态拼出的SQL,按这种方式,改起来太麻烦了。所以这里没有用这种方式,而是通过动态分区表来实现
最终的SQL如下:
INSERT OVERWRITE TABLE srm.invoice_lines_temp2 PARTITION(jobid)
SELECT
sour_t.*
FROM
srm.invoice_lines_temp2 sour_t where jobid='106';
但是这里有个问题,动态分区默认是没有开启的,所以需要修改一下配置
// 是否启动动态分区,默认false
SET hive.exec.dynamic.partition=true;
// 打开动态分区后,动态分区的模式,有 strict和 nonstrict 两个值可选,strict 要求至少包含一个静态分区列,nonstrict则无此要求
SET hive.exec.dynamic.partition.mode=nonstrict;
要不然可能会报这样的异常:
Dynamic partition strict mode requires at least one static partition column. To turn this off set hive.exec.dynamic.partition.mode=nonstrict
INTO 和 OVERWRITE
insert into 与 insert overwrite 都可以向hive表中插入数据,但是insert into直接追加到表中数据的尾部,而insert overwrite会重写数据,即先进行删除,再写入。如果存在分区的情况,insert overwrite会只重写当前分区数据。
创建HIVE表脚本
根据MySQL表创建Hive表脚本
import pymysql
import codecs
def getSingleSQL(table,schema = 'srm',ispartition = False):
    """Generate a Hive CREATE TABLE statement from a MySQL table's schema.

    Args:
        table: MySQL table name; the Hive table gets the same name.
        schema: Target Hive database name.
        ispartition: When True, partition the Hive table by a string
            `jobid` column; the default False keeps it unpartitioned.
            (The original ignored this flag entirely.)

    Returns:
        The CREATE TABLE statement as a string. On a query error the
        statement is built from whatever columns were read (best-effort,
        matching the original behavior).
    """
    create_head = 'CREATE TABLE IF NOT EXISTS {0}.{1}('.format(schema,table) + '\n'
    # Honor ispartition: the flag previously had no effect.
    if ispartition:
        create_tail = 'partitioned by(jobid string) ROW FORMAT DELIMITED FIELDS TERMINATED BY \',\' ; \n\n'
    else:
        create_tail = 'ROW FORMAT DELIMITED FIELDS TERMINATED BY \',\' ; \n\n'
    connection=pymysql.connect(host='192.168.11.172', port=16408, user='srm', password='handhand', db='srm', charset='utf8')
    try:
        with connection.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
            # SHOW FULL FIELDS also returns each column's Comment, which is
            # copied into the Hive column comment below.
            sql='SHOW FULL FIELDS FROM {0}'.format(table)
            cursor.execute(sql)
            try:
                for row in cursor:
                    # Map MySQL column types onto the closest Hive type.
                    # 'bigint' must be tested before 'int' because these are
                    # substring checks ('bigint' contains 'int').
                    if 'bigint' in row['Type']:
                        row['Type'] = "bigint"
                    elif ('int' in row['Type'] or 'tinyint' in row['Type']
                            or 'smallint' in row['Type'] or 'mediumint' in row['Type']
                            or 'integer' in row['Type']):
                        row['Type'] = "int"
                    elif ('double' in row['Type'] or 'float' in row['Type']
                            or 'decimal' in row['Type']):
                        row['Type'] = "double"
                    else:
                        row['Type'] = "string"
                    create_head += row['Field'] + ' '+ row['Type'] +' comment \'' + row['Comment'] + '\' ,\n'
            except Exception as exc:
                # Best-effort like the original, but report WHY it failed
                # instead of swallowing the exception silently.
                print('程序异常!', exc)
    finally:
        connection.close()
    # Strip the trailing " ," from the last column line before closing the
    # column list with ')'.
    singleSQL = create_head[:-2] + '\n' + ')'+ create_tail
    return singleSQL
def getTotalSQL():
    """Append a Hive CREATE TABLE statement for every base table of the
    `srm` MySQL schema to create_hive_table.sql.
    """
    connection=pymysql.connect(host='192.168.11.172', port=16408, user='srm', password='handhand', db='srm', charset='utf8')
    try:
        with connection.cursor(cursor=pymysql.cursors.DictCursor) as cursor:
            # List base tables only (views are excluded by TABLE_TYPE).
            sql='SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA=\'SRM\' AND TABLE_TYPE=\'BASE TABLE\' '
            cursor.execute(sql)
            try:
                # Open the output file ONCE and close it deterministically;
                # the original re-opened it for every table and never closed
                # the handles.
                with open('create_hive_table.sql', 'a', encoding='utf-8') as f:
                    for row in cursor:
                        print(row)
                        tableName = row['TABLE_NAME']
                        f.write(getSingleSQL(tableName))
            except Exception as exc:
                # Best-effort like the original, but report the cause.
                print('程序异常了哦!', exc)
    finally:
        connection.close()


getTotalSQL()
筛选CSV中的非文件行
AND CAST( regexp_replace (sour_t.check_line_id, '"', '' ) AS DOUBLE ) >0
通过将某个列转换成DOUBLE值,如果转换失败会返回null。之前是转换成INT,但INT可能会太小了,这个看情况定
GROUP BY
SELECT c1,c2,c3, count(0) FROM table_a GROUP BY c1;
上面的SQL会报错,SELECT后面的非聚合列必须出现在group by中,所以得这么写
SELECT c1,c2,c3, count(0) FROM table_a GROUP BY c1,c2,c3;
那能不能不 GROUP BY所有的非聚合查询列?比如这里只想要GROUP BY c1 该怎么办?也是可以的。
这里可以使用collect_set函数,collect_set(col)函数只接受基本数据类型,它的主要作用是将某字段的值进行去重汇总,产生array类型字段。
SELECT c1,collect_set(c2)[0],collect_set(c3)[0], count(0) FROM table_a GROUP BY c1;
因为collect_set函数返回一个数组,这里的collect_set(c2)[0] 相当于是取数组里面的第 1 个元素。
具体取第几列呢?看情况吧。如果取第1列和第N列的效果是一样的,为啥不直接在 GROUP BY后面加上那个字段呢,这样还更方便一些吧。
collect_set 和 GROUP BY 一起使用的场景,应该是这样的:想查出A、B两个字段,但是只想对A分组,只需要随便取出A组里面的一个B,这种时候是可以用的。
和 collect_set 对应的还有一个 collect_list,作用类似,只是 collect_list 不会去重
这两个函数都可以达到行转列的效果
-- Deduplicate mst_orgs down to one row per (source_sys_key, org_id, org_type):
-- the outer GROUP BY collapses each group, and collect_set(...)[0] picks an
-- arbitrary value for every non-key column.
-- NOTE(review): subquery b computes MAX(dc_last_update_date) but the join only
-- matches on the key columns, so the MAX value is never used — this does NOT
-- select the latest row per group; confirm that is intended.
INSERT OVERWRITE TABLE srm.mst_orgs
SELECT
collect_set ( a.mst_org_id ) [ 0 ] AS mst_org_id,
a.source_sys_key,
a.org_type,
org_id,
collect_set ( a.org_name ) [ 0 ] AS org_name,
collect_set ( a.org_number ) [ 0 ] org_number,
collect_set ( a.imp_flag ) [ 0 ] imp_flag,
collect_set ( a.dc_creation_date ) [ 0 ] dc_creation_date,
collect_set ( a.dc_last_update_date ) [ 0 ] dc_last_update_date
FROM
srm.mst_orgs a
JOIN ( SELECT MAX( dc_last_update_date ), source_sys_key, org_id, org_type FROM srm.mst_orgs GROUP BY source_sys_key, org_id, org_type ) b ON ( a.source_sys_key = b.source_sys_key AND a.org_id = b.org_id AND a.org_type = b.org_type )
GROUP BY
a.source_sys_key,
a.org_id,
a.org_type;
Sqoop导出到MySQL字段类型问题
当MySQL中的字段类型是datetime类型的时候,报了以下的异常
Error: java.io.IOException:
Can't export data, please check failed map task logs at org.apache.sqoop.mapreduce.TextExportMapper.map(TextExportMapper.java:122)
at org.apache.sqoop.mapreduce.TextExportMapper.map(TextExportMapper.java:39)
at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:146)
at org.apache.sqoop.mapreduce.AutoProgressMapper.run(AutoProgressMapper.java:64)
at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:787)
at org.apache.hadoop.mapred.MapTask.run(MapTask.java:341)
at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:170)
at java.security.AccessController.doPrivileged(Native Method)
at javax.security.auth.Subject.doAs(Subject.java:422)
at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1866)
at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:164)
Caused by: java.lang.RuntimeException: Can't parse input data: '2015-5-26 00:00:00' at invoice_headers.__loadFromFields(invoice_headers.java:2325) at invoice_headers.parse(invoice_headers.java:2058) at org.apache.sqoop.mapreduce.TextExportMapper.map(TextExportMapper.java:89) ... 10 more Caused by: java.lang.IllegalArgumentException at java.sql.Date.valueOf(Date.java:143)
at invoice_headers.__loadFromFields(invoice_headers.java:2162) ... 12 more
我也有点纳闷,这里的时间格式好像是对上了,这时候HIVE中的字段类型是String,MySQL中的字段类型是datetime,
根据网上的一些资料,说是要指定类型
--map-column-java start_time=java.sql.Timestamp,end_time=java.sql.Timestamp,receive_time=java.sql.Timestamp --map-column-hive start_time=TIMESTAMP,end_time=TIMESTAMP,receive_time=TIMESTAMP
不过这里不太适合,因为这里相当于是一个通用的导出到MySQL,根本不知道什么时候有哪些字段
所以,是将MySQL中的一些datetime类型改成varchar类型?不太好吧?
CSV列长度受限
04-09-2018 15:21:20 CST LoadFileToTemp1 INFO - java.io.IOException:
Maximum column length of 100,000 exceeded in column 0 in record 1,329. Set the SafetySwitch property to false if you're expecting column lengths greater than 100,000 characters to avoid this error.
解决方法
CsvReader reader = new CsvReader(tempList[i].getPath(), ',', Charset.forName("UTF-8")); //一般用这编码读就可以了
// 设置最大长度不受限制
reader.setSafetySwitch(false);
data too long?
org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:164) Caused by:
java.io.IOException: com.mysql.jdbc.MysqlDataTruncation: Data truncation: Data too long for column 'SPECIAL_INV_FLAG' at row 43
at org.apache.sqoop.mapreduce.AsyncSqlRecordWriter.write(AsyncSqlRecordWriter.java:233)
at org.apache.sqoop.mapreduce.AsyncSqlRecordWriter.write(AsyncSqlRecordWriter.java:46)
at org.apache.hadoop.mapred.MapTask$NewDirectOutputCollector.write(MapTask.java:658)
at org.apache.hadoop.mapreduce.task.TaskInputOutputContextImpl.write(TaskInputOutputContextImpl.java:89)
at org.apache.hadoop.mapreduce.lib.map.WrappedMapper$Context.write(WrappedMapper.java:112)
at org.apache.sqoop.mapreduce.TextExportMapper.map(TextExportMapper.java:90) ... 10 more
检查了一下数据库中的该字段,SPECIAL_INV_FLAG字段的长度为1,按理来说没什么问题。
这时我检查了一下Hive表中的数据
// 因为在插入Hive表的时候已经将 null 转换成立 '',所以这里直接用 special_inv_flag != '' 进行判断
select special_inv_flag from transaction_lines where special_inv_flag != '';
结果如下,好吧,还真是文件数据有问题
WHERE中的子查询
在hive中的子查询会有各种问题,这里的解决方法是将子查询改成JOIN的方式
先看一段在MySQL中的SQL,不管这段SQL从哪里来的,我也不知道从哪里来的
SELECT
*
FROM
srm.payment_lines sour_t
WHERE
1 = 1
AND (
(
( sour_t.reference4 LIKE '001001%' OR sour_t.reference4 LIKE '001002%' OR sour_t.reference4 LIKE '001121%' OR sour_t.reference4 LIKE '002201%' )
AND (
SELECT
count( DISTINCT ifnull( pl0.REL_VENDOR_ID, pl0.VENDOR_ID ) )
FROM
srm.payment_lines pl0
WHERE
pl0.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY
AND pl0.CHECK_ID = sour_t.CHECK_ID
AND ifnull( pl0.REL_VENDOR_ID, pl0.VENDOR_ID ) IS NOT NULL
) = 1
)
OR (
ifnull( sour_t.VENDOR_ID, sour_t.REL_VENDOR_ID ) IS NOT NULL
AND (
SELECT
count( DISTINCT ifnull( pl1.REL_VENDOR_ID, pl1.VENDOR_ID ) )
FROM
srm.payment_lines pl1
WHERE
pl1.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY
AND pl1.CHECK_ID = sour_t.CHECK_ID
AND ifnull( pl1.REL_VENDOR_ID, pl1.VENDOR_ID ) IS NOT NULL
) > 1
AND EXISTS (
SELECT
1
FROM
srm.payment_lines pl2
WHERE
pl2.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY
AND pl2.CHECK_ID = sour_t.CHECK_ID
AND ( pl2.reference4 LIKE '001001%' OR pl2.reference4 LIKE '001002%' OR pl2.reference4 LIKE '001121%' OR pl2.reference4 LIKE '002201%' )
)
AND NOT EXISTS (
SELECT
1
FROM
DUAL
WHERE
( sour_t.reference4 LIKE '001001%' OR sour_t.reference4 LIKE '001002%' OR sour_t.reference4 LIKE '001121%' OR sour_t.reference4 LIKE '002201%' )
)
)
)
这段SQL在hive中执行肯定会报错的,所以需要将它改成JOIN方式
-- Branch 1: payment lines whose check has EXACTLY ONE distinct vendor
-- (count_num = 1) and whose reference4 starts with one of the four prefixes.
-- IF(x IS NULL, y, x) replaces MySQL's ifnull(x, y) for Hive.
SELECT * FROM srm.payment_lines sour_t
JOIN
(
-- Per (SOURCE_SYS_KEY, CHECK_ID): number of distinct non-null vendor ids.
SELECT
count( DISTINCT IF(pl0.REL_VENDOR_ID IS NULL, pl0.VENDOR_ID, pl0.REL_VENDOR_ID ) ) count_num,
pl0.SOURCE_SYS_KEY,
pl0.CHECK_ID
FROM
srm.payment_lines pl0
WHERE IF(pl0.REL_VENDOR_ID IS NULL, pl0.VENDOR_ID, pl0.REL_VENDOR_ID ) IS NOT NULL
GROUP BY pl0.SOURCE_SYS_KEY, pl0.CHECK_ID
)t ON (t.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY AND t.CHECK_ID = sour_t.CHECK_ID)
WHERE
1 =1
AND t.count_num = 1
AND (sour_t.reference4 LIKE '001001%' OR sour_t.reference4 LIKE '001002%' OR sour_t.reference4 LIKE '001121%' OR sour_t.reference4 LIKE '002201%')
-- Drop CSV header/garbage rows: the id must parse as a positive number.
AND CAST( regexp_replace (sour_t.check_line_id, '"', '' ) AS DOUBLE ) >0
UNION ALL
-- Branch 2: checks with MORE THAN ONE distinct vendor, a non-null vendor id on
-- this row, a reference4 that does NOT match the prefixes, but at least one
-- other line of the same check that DOES match (the EXISTS rewritten as JOIN t2).
-- NOTE(review): this branch reads srm.payment_lines_temp2 while branch 1 reads
-- srm.payment_lines — confirm that difference is intentional.
SELECT
sour_t.*
FROM
srm.payment_lines_temp2 sour_t JOIN
(
SELECT
count( DISTINCT IF(pl1.REL_VENDOR_ID IS NULL, pl1.VENDOR_ID, pl1.REL_VENDOR_ID) ) count_num,
pl1.SOURCE_SYS_KEY,
pl1.CHECK_ID
FROM
srm.payment_lines pl1
WHERE IF(pl1.REL_VENDOR_ID IS NULL, pl1.VENDOR_ID, pl1.REL_VENDOR_ID) IS NOT NULL
GROUP BY pl1.SOURCE_SYS_KEY, pl1.CHECK_ID
)t ON (t.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY AND t.CHECK_ID = sour_t.CHECK_ID) JOIN
(
-- Checks that have at least one line matching the reference4 prefixes
-- (GROUP BY makes the key set distinct, replacing EXISTS).
SELECT
pl2.SOURCE_SYS_KEY,
pl2.CHECK_ID
FROM
srm.payment_lines pl2
WHERE
1 = 1
AND ( pl2.reference4 LIKE '001001%' OR pl2.reference4 LIKE '001002%' OR pl2.reference4 LIKE '001121%' OR pl2.reference4 LIKE '002201%' )
GROUP BY pl2.SOURCE_SYS_KEY, pl2.CHECK_ID
)t2 ON (t.SOURCE_SYS_KEY = t2.SOURCE_SYS_KEY AND t.CHECK_ID = t2.CHECK_ID)
WHERE
1 = 1
AND t.count_num > 1
AND IF(sour_t.VENDOR_ID IS NULL, sour_t.REL_VENDOR_ID, sour_t.VENDOR_ID ) IS NOT NULL
AND (sour_t.reference4 NOT LIKE '001001%' AND sour_t.reference4 NOT LIKE '001002%' AND sour_t.reference4 NOT LIKE '001121%' AND sour_t.reference4 NOT LIKE '002201%')
AND CAST( regexp_replace (sour_t.check_line_id, '"', '' ) AS DOUBLE ) >0
那这种SQL在界面上怎么配置,之前只是通过一个 AND() 包装就可以,现在用这种方式肯定不行。所以需要将 AND() 中的SQL进行拆分,抽象成 JOIN、LEFT JOIN、UNION等方式。
/**
 * SQL clause keywords used when assembling dynamically generated filter
 * conditions (JOIN / UNION rewrites of subqueries Hive cannot run).
 */
public enum FilterEnum {
    AND("AND"),
    INNER_JOIN("JOIN"),
    LEFT_JOIN("LEFT JOIN"),
    RIGHT_JOIN("RIGHT JOIN"),
    UNION("UNION"),
    UNION_ALL("UNION ALL");

    /** SQL keyword text emitted into the generated statement. */
    private final String name;

    FilterEnum(String name) {
        this.name = name;
    }

    /**
     * Returns the SQL keyword for this clause type.
     * The field is assigned a non-null literal by every constant, so the
     * former {@code name == null ? toString() : name} fallback was dead code.
     */
    public String getName() {
        return name;
    }
}
- 在 JOIN/LEFT JOIN/RIGHT JOIN之后,添加 AND 中的条件
- 如果有 UNION/UNION ALL操作, 添加 AND 中的条件到其后面
举个例子来说,就比如上面SQL中的:AND CAST( regexp_replace (sour_t.check_line_id, '"', '' ) AS DOUBLE ) >0
所以,这段SQL在界面上的配置如下
CASE中的子查询
这个与上面是一样的,都是改成JOIN的方式。
在MySQL中的SQL如下:
CASE
WHEN
sour_t.REL_VENDOR_ID IS NOT NULL AND sour_t.VENDOR_ID IS NULL
THEN
sour_t.REL_VENDOR_ID
WHEN
(
SELECT
count( DISTINCT ifnull( pl0.REL_VENDOR_ID, pl0.VENDOR_ID ) )
FROM
srm.payment_lines pl0
WHERE
pl0.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY
AND pl0.CHECK_ID = sour_t.CHECK_ID
AND ifnull( pl0.REL_VENDOR_ID, pl0.VENDOR_ID ) IS NOT NULL
) = 1
THEN
(
SELECT DISTINCT
ifnull( pl1.REL_VENDOR_ID, pl1.VENDOR_ID )
FROM
srm.payment_lines pl1
WHERE
pl1.SOURCE_SYS_KEY = sour_t.SOURCE_SYS_KEY
AND pl1.CHECK_ID = sour_t.CHECK_ID
AND ifnull( pl1.REL_VENDOR_ID, pl1.VENDOR_ID ) IS NOT NULL
)
ELSE
sour_t.VENDOR_ID
END AS VENDOR_ID,
在Hive中的SQL如下:
JOIN (
SELECT
count(DISTINCT IF(REL_VENDOR_ID IS NULL, VENDOR_ID, REL_VENDOR_ID) ) count_num,
SOURCE_SYS_KEY,
CHECK_ID
FROM srm.payment_lines
WHERE
1 = 1
AND
IF(REL_VENDOR_ID IS NULL, VENDOR_ID, REL_VENDOR_ID ) IS NOT NULL
GROUP BY SOURCE_SYS_KEY, CHECK_ID
)t ON (column1 = t.SOURCE_SYS_KEY AND column10 = t.CHECK_ID)
JOIN (
SELECT DISTINCT
IF(REL_VENDOR_ID IS NULL, VENDOR_ID, REL_VENDOR_ID) VENDOR_ID,
SOURCE_SYS_KEY,
CHECK_ID
FROM srm.payment_lines
WHERE
1 = 1
AND
IF(REL_VENDOR_ID IS NULL, VENDOR_ID, REL_VENDOR_ID ) IS NOT NULL
)t2 ON (column1 = t2.SOURCE_SYS_KEY AND column10 = t2.CHECK_ID)
网友评论