study-report · Commit a3956a72
Authored Jan 02, 2020 by zhenxin.ma
Parent: 180bdf2f
Showing 2 changed files with 108 additions and 0 deletions (+108, -0)
src/main/scala/com/config/MySQLConfig.scala      +36  -0
src/main/scala/com/config/SyncDataConfig.scala   +72  -0
src/main/scala/com/config/MySQLConfig.scala (new file, mode 100644)
package com.config

/**
 * @Author zhenxin.ma
 * @Date 2019/11/15 9:46
 * @Version 1.0
 */
object MySQLConfig {

  // MySQL configuration on the hdfs127 cluster, used to record job execution status
  final val HDFS_DRIVER = "com.mysql.jdbc.Driver"
  final val HDFS_BASE = "pica_job"
  final val HDFS_URL = s"jdbc:mysql://hdfs127:3306/${HDFS_BASE}?useTimezone=true&serverTimezone=GMT%2B8&useUnicode=true&characterEncoding=utf8"
  final val HDFS_USERNAME = "pica_spider"
  final val HDFS_PSSWORD = "5$7FXgz#e5JWP08e"
  final val HDFS_TABLE = "schedule_job_record"
  final val HDFS_MSQL_CONFIG: Map[String, String] = Map(
    "driver"   -> "com.mysql.jdbc.Driver",
    "url"      -> "jdbc:mysql://hdfs127:3306/pica_job?useTimezone=true&serverTimezone=GMT%2B8&useUnicode=true&characterEncoding=utf8",
    "username" -> "pica_spider",
    "password" -> "5$7FXgz#e5JWP08e",
    "table"    -> "schedule_job_record"
  )

  // Account configuration for the production MySQL instance used for data sync
  final val URL: String = "jdbc:mysql://rr-uf6p67797265cm09f.mysql.rds.aliyuncs.com:3306/pica" +
    "?useTimezone=true&serverTimezone=GMT%2B8&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull"
  final val USER: String = "bi_readonly"
  final val PSSWORD: String = "1Qaz2wsx"
  final val MSQL_CONFIG: Map[String, String] = Map(
    "driver"   -> "com.mysql.jdbc.Driver",
    "url"      -> "jdbc:mysql://rr-uf6p67797265cm09f.mysql.rds.aliyuncs.com:3306/pica?useTimezone=true&serverTimezone=GMT%2B8&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull",
    "username" -> "bi_readonly",
    "password" -> "1Qaz2wsx"
  )

  // Account configuration for the MySQL UAT environment used for data sync
  // final val URL: String = "jdbc:mysql://192.168.110.181:3306/pica" +
  //   "?useTimezone=true&serverTimezone=GMT%2B8&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull"
  // final val USER: String = "pica_test"
  // final val PSSWORD: String = "pkv#sqvSGn@O1@tg"
}
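The constants above are plain JDBC settings; the commit itself does not show how they are consumed. The snippet below is a minimal usage sketch only (the object name and app name are hypothetical, and Spark SQL is assumed to be on the classpath): it reads the schedule_job_record table through Spark's JDBC data source. Note that Spark's JDBC options use the keys "user" and "dbtable", so the "username"/"table" keys of HDFS_MSQL_CONFIG cannot be passed to .options() unchanged.

import org.apache.spark.sql.{DataFrame, SparkSession}
import com.config.MySQLConfig

// Hypothetical illustration, not part of this commit.
object ReadJobRecordSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("read-schedule-job-record").getOrCreate()

    // Read the job-record table via Spark's JDBC source, remapping
    // "username" -> "user" and "table" -> "dbtable" as Spark expects.
    val jobRecords: DataFrame = spark.read
      .format("jdbc")
      .option("driver",   MySQLConfig.HDFS_DRIVER)
      .option("url",      MySQLConfig.HDFS_URL)
      .option("user",     MySQLConfig.HDFS_USERNAME)
      .option("password", MySQLConfig.HDFS_PSSWORD)
      .option("dbtable",  MySQLConfig.HDFS_TABLE)
      .load()

    jobRecords.show(10)
    spark.stop()
  }
}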
src/main/scala/com/config/SyncDataConfig.scala (new file, mode 100644)
package com.config

/**
 * @Author zhenxin.ma
 * @Date 2019/11/15 9:58
 * @Version 1.0
 */
object SyncDataConfig {

  // Hive database names for syncing MySQL data into Hive (production environment)
  final val DATABASE1: String = "pica_ds"
  final val DATABASE2: String = "pica_project_v2"
  final val DATABASE3: String = "pica_ods"
  final val DATABASE4: String = "pica_dw"
  // Production Parquet file path
  final val PARQUET_PATH: String = "hdfs://bi-name1:8020/tmp/output/"

  // UAT environment: Hive database names
  // final val DATABASE1: String = "pica_ds"
  // final val DATABASE3: String = "pica_ods"
  // final val DATABASE4: String = "pica_dw"
  // final val DATABASE2: String = "pica_project"
  // // UAT environment: Parquet file path
  // final val PARQUET_PATH: String = "hdfs://master61:8020/tmp/output/"

  // Data paths for the region back-derivation intermediate table
  final val REGION_DATA_PATH: String = "/home/big-data/ods_parent_hospital_level/parent_hospital_level.txt"
  final val REGION_BAD_PATH: String = "/home/big-data/ods_parent_hospital_level/bad.txt"

  // SQL used to build the region back-derivation intermediate table
  final val REGION_SQL1: String =
    s"""
       | SELECT cd.project_id,cd.doctor_id,ppa.province_id,ppa.city_id,ppa.county_id,
       | ppa.town_id FROM ${DATABASE1}.pica_portal_campaign_doctor cd
       | INNER JOIN ${DATABASE4}.dw_dim_portal_project pj ON cd.project_id = pj.project_id
       | INNER JOIN ${DATABASE3}.ods_basic_doctor_info d ON cd.doctor_id = d.doctor_id
       | INNER JOIN ${DATABASE1}.pica_portal_project_attachregion ppa ON cd.project_id = ppa.project_id AND cd.doctor_id = ppa.doctor_id
       | WHERE cd.delete_flag = 1 AND cd.doctor_role = 3 AND cd.doctor_type != 2
       | AND date_format(cd.modified_time,'yyyy-MM-dd') <= date_sub(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),1)
       | AND d.delete_flag = 1 AND d.hospital_id != 0
       | AND ppa.delete_flag = 1 AND ppa.content != ''
       | AND date_format(ppa.modified_time,'yyyy-MM-dd') <= date_sub(from_unixtime(unix_timestamp(),'yyyy-MM-dd'),0)
     """.stripMargin
  final val REGION_SQL2: String =
    s"SELECT project_id,province_id,city_id,COALESCE(county_id,0) county_id,COALESCE(town_id,0) town_id " +
      s"FROM ${DATABASE2}.lr_project_attachregion"

  // Hive table names
  final val Hive_TABLE: String = "pica_portal_campaign_mapping"
  final val Hive_TABLE1: String = "pica_portal_campaign_doctor"
  final val Hive_TABLE2: String = "pica_portal_campaign_organization"
  final val Hive_TABLE3: String = "pica_portal_campaign_department"
  final val Hive_TABLE4: String = "pica_portal_project_attachregion"
  final val Hive_TABLE5: String = "lr_project_attachregion"
  final val Hive_TABLE6: String = "attach_region_result"
  final val Hive_TABLE7: String = "lr_project_sub_leader_attachregion"

  // MySQL table names to sync
  final val MYSQL_TABLE1: String = "portal_campaign_doctor_"
  final val MYSQL_TABLE2: String = "portal_campaign_organization_"
  final val MYSQL_TABLE3: String = "portal_campaign_department"
  final val MYSQL_TABLE4: String = "portal_project_attachregion"

  // Partition properties used when Spark reads from MySQL
  final val PARTITIONCOLUMN: String = "id"
  final val LOWERBOUND: String = "100"
  final val UPPERBOUND: String = "20000000"
  final val NUMPARTITIONS: String = "12"

  // Hive insert statements
  final val Hive_TABLE1_SQL: String = s"insert into table ${DATABASE1}.${Hive_TABLE1} " +
    s"select id,project_id,doctor_id,doctor_role,doctor_role_flag,doctor_type," +
    s"white_flag,id_type,delete_flag,created_id,created_time,modified_id,modified_time from "
  final val Hive_TABLE2_SQL: String = s"insert into table ${DATABASE1}.${Hive_TABLE2} " +
    s"select id,project_id,organization_id,organization_type," +
    s"white_flag,scope_flag,id_type,delete_flag,created_id,created_time,modified_id,modified_time from "
}
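The partition settings and the Hive_TABLE1_SQL prefix above only make sense together with a partitioned Spark JDBC read and a registered temp view, neither of which is part of this commit. The sketch below is a hedged illustration of that wiring, assuming Spark with Hive support; the object name, the shard suffix appended to MYSQL_TABLE1, and the temp-view name are hypothetical.

import org.apache.spark.sql.SparkSession
import com.config.{MySQLConfig, SyncDataConfig}

// Hypothetical illustration, not part of this commit.
object SyncCampaignDoctorSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("sync-portal-campaign-doctor")
      .enableHiveSupport()
      .getOrCreate()

    // MYSQL_TABLE1 ends with "_", which suggests a suffixed/sharded table naming
    // scheme that this commit does not spell out; "1" is a placeholder suffix.
    val mysqlTable = SyncDataConfig.MYSQL_TABLE1 + "1"

    // Partitioned JDBC read: Spark splits the "id" range [100, 20000000] into 12 parallel queries.
    val doctorDf = spark.read
      .format("jdbc")
      .option("driver",          "com.mysql.jdbc.Driver")
      .option("url",             MySQLConfig.URL)
      .option("user",            MySQLConfig.USER)
      .option("password",        MySQLConfig.PSSWORD)
      .option("dbtable",         mysqlTable)
      .option("partitionColumn", SyncDataConfig.PARTITIONCOLUMN)
      .option("lowerBound",      SyncDataConfig.LOWERBOUND)
      .option("upperBound",      SyncDataConfig.UPPERBOUND)
      .option("numPartitions",   SyncDataConfig.NUMPARTITIONS)
      .load()

    // Hive_TABLE1_SQL ends with "from ", so appending a registered view name completes the insert.
    doctorDf.createOrReplaceTempView("tmp_portal_campaign_doctor")
    spark.sql(SyncDataConfig.Hive_TABLE1_SQL + "tmp_portal_campaign_doctor")

    spark.stop()
  }
}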