Skip to content
项目
群组
代码片段
帮助
正在加载...
帮助
提交反馈
为 GitLab 提交贡献
登录
切换导航
P
pica-cloud-account
项目
项目
详情
动态
版本
周期分析
仓库
仓库
文件
提交
分支
标签
贡献者
分支图
比较
统计图
议题
0
议题
0
列表
看板
标记
里程碑
合并请求
1
合并请求
1
CI / CD
CI / CD
流水线
作业
计划
统计图
Wiki
Wiki
代码片段
代码片段
成员
成员
收起侧边栏
Close sidebar
动态
分支图
统计图
创建新议题
作业
提交
议题看板
打开侧边栏
com.pica.cloud.account
pica-cloud-account
提交
f7935d22
提交
f7935d22
编写于
12月 27, 2019
作者:
hujun
浏览文件
操作
浏览文件
下载
差异文件
Merge branch 'dev-20191224-registerSource' into 'release'
Dev 20191224 register source review by yongbo See merge request
!8
上级
7ab0c6f3
e50f1fcd
流水线
#20511
已失败 于阶段
in 0 second
变更
2
流水线
2
隐藏空白字符变更
内嵌
并排
正在显示
2 个修改的文件
包含
166 行增加
和
166 行删除
+166
-166
RedisTestController.java
...ccount/account/server/controller/RedisTestController.java
+111
-111
DoctorRepeatDataJob.java
...cloud/account/account/server/job/DoctorRepeatDataJob.java
+55
-55
未找到文件。
server/src/main/java/com/pica/cloud/account/account/server/controller/RedisTestController.java
浏览文件 @
f7935d22
package
com
.
pica
.
cloud
.
account
.
account
.
server
.
controller
;
import
com.alibaba.fastjson.JSON
;
import
com.pica.cloud.foundation.entity.PicaResponse
;
import
com.pica.cloud.foundation.entity.PicaUser
;
import
com.pica.cloud.foundation.redis.ICacheClient
;
import
com.pica.cloud.foundation.service.starter.common.BaseController
;
import
com.pica.cloud.foundation.service.starter.interceptor.LoginPermission
;
import
org.springframework.beans.factory.annotation.Autowired
;
import
org.springframework.web.bind.annotation.RequestMapping
;
import
org.springframework.web.bind.annotation.RequestMethod
;
import
org.springframework.web.bind.annotation.RestController
;
import
redis.clients.jedis.Jedis
;
import
redis.clients.jedis.JedisSentinelPool
;
import
java.util.*
;
@RestController
public class RedisTestController extends BaseController {

    /** Cache client backed by the application's configured Redis. */
    @Autowired
    private ICacheClient redisClient;

    /** Sentinel master name shared by all three environments below. */
    private static final String SENTINEL_MASTER = "master01";

    // WARNING(review): this password is committed in source and printed routes
    // reach real dev/test/uat Redis instances. Move credentials to externalized
    // configuration before this controller leaves a test environment.
    private static final String SENTINEL_PASSWORD = "Uu49Kz1olY85HQBu";

    /**
     * Manual smoke-test endpoint: round-trips a token through the configured
     * {@link ICacheClient} (cluster) and reads it back directly from the dev,
     * test and uat sentinel deployments.
     *
     * @return the (re-saved) token wrapped in a {@link PicaResponse}
     */
    @RequestMapping(value = "/redis", method = RequestMethod.GET, produces = "application/json;charset=UTF-8")
    @LoginPermission
    public PicaResponse<Map<String, String>> redis() {
        // http://localhost:11501/hospital/hospitals/redis
        // cluster
        String token = getTokenFromHeader();
        Map<String, String> map = new HashMap<>();
        map.put("id", "1111111");
        map.put("token", token);
        map.put("sysCode", getSysCodeFromHeader());
        try {
            token = redisClient.saveToken(map, 3600);
        } catch (Exception ex) {
            // best-effort: fall back to the header token if the save fails
        }
        System.out.println(token);
        Map<String, String> map1 = redisClient.getToken(token);
        System.out.println(JSON.toJSON(map1));
        PicaUser user = fetchPicaUser();
        System.out.println(JSON.toJSON(user));

        // sentinel — one pool per environment. The original leaked all three
        // pools and every Jedis connection borrowed from them; connections are
        // now returned via try-with-resources and the pools closed in finally.
        JedisSentinelPool pool1 = newSentinelPool(
                "192.168.140.27:17000", "192.168.140.28:17000", "192.168.140.29:17000");
        JedisSentinelPool pool2 = newSentinelPool(
                "192.168.110.68:17000", "192.168.110.69:17001", "192.168.110.70:17002");
        JedisSentinelPool pool3 = newSentinelPool(
                "192.168.110.177:17000", "192.168.110.178:17000", "192.168.110.179:17000");
        try {
            System.out.println("sentinel dev - " + sentinelGet(pool1, "token-" + token));
            System.out.println("sentinel test - " + sentinelGet(pool2, "token-" + token));
            System.out.println("sentinel uat - " + sentinelGet(pool3, "token-" + token));

            redisClient.deleteToken(token);

            try {
                PicaUser user1 = fetchPicaUser();
            } catch (Exception ex) {
                System.out.println("Exception -> the user has been deleted");
            }

            Map<String, String> map2 = new HashMap<>();
            map2.put("id", "1111111");
            map2.put("token", token);
            map2.put("sysCode", getSysCodeFromHeader());
            try {
                token = redisClient.saveToken(map2, 3600);
            } catch (Exception ex) {
                // best-effort, same as the first save above
            }

            String clusterKey = "cluster-key-001";
            String clusterValue = "cluster-value-001";

            redisClient.set(clusterKey, clusterValue);
            System.out.println("cluster -> " + redisClient.get(clusterKey));
            System.out.println("dev get cluster value -> " + sentinelGet(pool1, clusterKey));
            System.out.println("test1 get cluster value -> " + sentinelGet(pool2, clusterKey));
            System.out.println("uat get cluster value -> " + sentinelGet(pool3, clusterKey));

            String sentKey = "sent-key-001";
            String sentValue = "sent-value-001";
            sentinelSet(pool1, sentKey, sentValue);
            System.out.println("dev sentinel -> " + sentinelGet(pool1, sentKey));

            sentinelSet(pool2, sentKey, sentValue);
            System.out.println("test1 sentinel -> " + sentinelGet(pool2, sentKey));

            sentinelSet(pool3, sentKey, sentValue);
            System.out.println("uat sentinel -> " + sentinelGet(pool3, sentKey));

            return PicaResponse.toResponse(token);
        } finally {
            // release pool resources even if any of the reads above throws
            pool1.close();
            pool2.close();
            pool3.close();
        }
    }

    /** Builds a sentinel pool over the given sentinel host:port addresses. */
    private static JedisSentinelPool newSentinelPool(String... sentinels) {
        Set<String> addresses = new HashSet<>(Arrays.asList(sentinels));
        return new JedisSentinelPool(SENTINEL_MASTER, addresses, SENTINEL_PASSWORD);
    }

    /** GETs a key, returning the borrowed connection to the pool afterwards. */
    private static String sentinelGet(JedisSentinelPool pool, String key) {
        try (Jedis jedis = pool.getResource()) {
            return jedis.get(key);
        }
    }

    /** SETs a key, returning the borrowed connection to the pool afterwards. */
    private static void sentinelSet(JedisSentinelPool pool, String key, String value) {
        try (Jedis jedis = pool.getResource()) {
            jedis.set(key, value);
        }
    }
}
//
package com.pica.cloud.account.account.server.controller;
//
//
import com.alibaba.fastjson.JSON;
//
import com.pica.cloud.foundation.entity.PicaResponse;
//
import com.pica.cloud.foundation.entity.PicaUser;
//
import com.pica.cloud.foundation.redis.ICacheClient;
//
//
import com.pica.cloud.foundation.service.starter.common.BaseController;
//
import com.pica.cloud.foundation.service.starter.interceptor.LoginPermission;
//
import org.springframework.beans.factory.annotation.Autowired;
//
import org.springframework.web.bind.annotation.RequestMapping;
//
import org.springframework.web.bind.annotation.RequestMethod;
//
import org.springframework.web.bind.annotation.RestController;
//
import redis.clients.jedis.Jedis;
//
import redis.clients.jedis.JedisSentinelPool;
//
//
import java.util.*;
//
//
@RestController
//
public class RedisTestController extends BaseController {
//
@Autowired
//
private ICacheClient redisClient;
//
//
@RequestMapping(value = "/redis", method = RequestMethod.GET, produces = "application/json;charset=UTF-8")
//
@LoginPermission
//
public PicaResponse<Map<String, String>> redis() {
//
// http://localhost:11501/hospital/hospitals/redis
//
// cluster
//
String token = getTokenFromHeader();
//
Map<String, String> map = new HashMap<>();
//
map.put("id", "1111111");
//
map.put("token", token);
//
map.put("sysCode", getSysCodeFromHeader());
//
//
try {
//
token = redisClient.saveToken(map, 3600);
//
} catch (Exception ex) {
//
// nothing to do
//
}
//
System.out.println(token);
//
Map<String, String> map1 = redisClient.getToken(token);
//
System.out.println(JSON.toJSON(map1));
//
PicaUser user = fetchPicaUser();
//
System.out.println(JSON.toJSON(user));
//
//
// sentinel
//
Set<String> set1 = new HashSet<>();
//
set1.add("192.168.140.27:17000");
//
set1.add("192.168.140.28:17000");
//
set1.add("192.168.140.29:17000");
//
JedisSentinelPool pool1 = new JedisSentinelPool("master01", set1, "Uu49Kz1olY85HQBu");
//
Jedis jedis1 = pool1.getResource();
//
System.out.println("sentinel dev - " + jedis1.get("token-" + token));
//
//
Set<String> set2 = new HashSet<>();
//
set2.add("192.168.110.68:17000");
//
set2.add("192.168.110.69:17001");
//
set2.add("192.168.110.70:17002");
//
JedisSentinelPool pool2 = new JedisSentinelPool("master01", set2, "Uu49Kz1olY85HQBu");
//
Jedis jedis2 = pool2.getResource();
//
System.out.println("sentinel test - " + jedis2.get("token-" + token));
//
//
Set<String> set3 = new HashSet<>();
//
set3.add("192.168.110.177:17000");
//
set3.add("192.168.110.178:17000");
//
set3.add("192.168.110.179:17000");
//
JedisSentinelPool pool3 = new JedisSentinelPool("master01", set3, "Uu49Kz1olY85HQBu");
//
Jedis jedis3 = pool3.getResource();
//
System.out.println("sentinel uat - " + jedis3.get("token-" + token));
//
//
redisClient.deleteToken(token);
//
//
try {
//
PicaUser user1 = fetchPicaUser();
//
} catch (Exception ex) {
//
System.out.println("Exception -> the user has been deleted");
//
}
//
//
Map<String, String> map2 = new HashMap<>();
//
map2.put("id", "1111111");
//
map2.put("token", token);
//
map2.put("sysCode", getSysCodeFromHeader());
//
try {
//
token = redisClient.saveToken(map2, 3600);
//
} catch (Exception ex) {
//
// nothing to do
//
}
//
//
String clusterKey = "cluster-key-001";
//
String clusterValue = "cluster-value-001";
//
//
redisClient.set(clusterKey, clusterValue);
//
System.out.println("cluster -> " + redisClient.get(clusterKey));
//
System.out.println("dev get cluster value -> " + pool1.getResource().get(clusterKey));
//
System.out.println("test1 get cluster value -> " + pool2.getResource().get(clusterKey));
//
System.out.println("uat get cluster value -> " + pool3.getResource().get(clusterKey));
//
//
String sentKey = "sent-key-001";
//
String sentValue = "sent-value-001";
//
pool1.getResource().set(sentKey, sentValue);
//
System.out.println("dev sentinel -> " + pool1.getResource().get(sentKey));
//
//
pool2.getResource().set(sentKey, sentValue);
//
System.out.println("test1 sentinel -> " + pool2.getResource().get(sentKey));
//
//
pool3.getResource().set(sentKey, sentValue);
//
System.out.println("uat sentinel -> " + pool3.getResource().get(sentKey));
//
//
return PicaResponse.toResponse(token);
//
}
//
}
server/src/main/java/com/pica/cloud/account/account/server/job/DoctorRepeatDataJob.java
浏览文件 @
f7935d22
package
com
.
pica
.
cloud
.
account
.
account
.
server
.
job
;
import
com.pica.cloud.account.account.server.mapper.DoctorMapper
;
import
com.pica.cloud.foundation.redis.ICacheClient
;
import
org.slf4j.Logger
;
import
org.slf4j.LoggerFactory
;
import
org.springframework.beans.factory.annotation.Autowired
;
import
org.springframework.beans.factory.annotation.Qualifier
;
import
org.springframework.scheduling.annotation.Scheduled
;
import
org.springframework.stereotype.Component
;
/**
 * Created on 2019/10/29 15:37
 * author:crs
 * Description: scheduled cleanup of duplicate rows in the doctor table.
 */
@Component
public class DoctorRepeatDataJob {

    /** Redis key used as a best-effort distributed lock for this job. */
    private static final String KEY = "cacheProcessDoctorRepeat";

    private static final Logger logger = LoggerFactory.getLogger(DoctorRepeatDataJob.class);

    @Autowired
    private ICacheClient cacheClient;

    @Autowired
    private DoctorMapper doctorMapper;

    /**
     * 1. Uses a distributed lock so only one service instance runs the job;
     * 2. Runs on the hour, every hour.
     *
     * NOTE(review): the get-then-set sequence below is not atomic, so two
     * instances racing on the same tick can both "acquire" the lock. If
     * ICacheClient exposes a SETNX-style setIfAbsent (with TTL), use it
     * here instead — TODO confirm the API.
     */
    @Scheduled(cron = "0 0 0/1 * * ?")
    public void processDoctorRepeatData() {
        // idempotency guard: skip if another instance already holds the key
        String exist = cacheClient.get(KEY);
        if (exist == null) {
            cacheClient.set(KEY, "1");
            // expire after 10 minutes in case this instance dies mid-run
            cacheClient.expire(KEY, 60 * 10);
            logger.info("DoctorRepeatDataJob:开始执行刷新doctor表中重复的记录");
            try {
                int row = doctorMapper.processDoctorRepeatData();
                logger.info("此次数据刷新影响的行数:{}", row);
                // hold the lock briefly so instances whose clocks lag by a few
                // seconds do not immediately re-run the job
                Thread.sleep(30 * 1000);
            } catch (InterruptedException ex) {
                // restore the interrupt flag rather than swallowing it
                Thread.currentThread().interrupt();
                logger.error(ex.getMessage(), ex);
            } finally {
                // release the lock even if the refresh throws (the original
                // left it stuck until expiry on any unexpected exception)
                cacheClient.del(KEY);
            }
        }
    }
}
//
package com.pica.cloud.account.account.server.job;
//
//
import com.pica.cloud.account.account.server.mapper.DoctorMapper;
//
import com.pica.cloud.foundation.redis.ICacheClient;
//
import org.slf4j.Logger;
//
import org.slf4j.LoggerFactory;
//
import org.springframework.beans.factory.annotation.Autowired;
//
import org.springframework.beans.factory.annotation.Qualifier;
//
//
import org.springframework.scheduling.annotation.Scheduled;
//
import org.springframework.stereotype.Component;
//
/
//
**
//
* Created on 2019/10/29 15:37
//
* author:crs
//
* Description:doctor表重复记录处理
//
*/
//
@Component
//
public class DoctorRepeatDataJob {
//
private final String KEY = "cacheProcessDoctorRepeat";
//
//
private Logger logger = LoggerFactory.getLogger(this.getClass());
//
//
@Autowired
//
//
private ICacheClient cacheClient;
//
//
@Autowired
//
private DoctorMapper doctorMapper;
//
//
/**
//
* 1、使用分布式锁保证一个微服务执行;
//
* 2、定时任务;
//
*/
//
@Scheduled(cron = "0 0 0/1 * * ?")
//
public void processDoctorRepeatData() {
//
//通过接口幂等性逻辑处理
//
String exist = cacheClient.get(KEY);
//
if (exist==null){
//
cacheClient.set(KEY,"1");
//
cacheClient.expire(KEY, 60 * 10);
//
logger.info("DoctorRepeatDataJob:开始执行刷新doctor表中重复的记录");
//
int row = doctorMapper.processDoctorRepeatData();
//
logger.info("此次数据刷新影响的行数:{}", row);
//
try {
//
//防止任务一秒跑完,其他机器的时间晚了几秒
//
Thread.sleep(30*1000);
//
} catch (InterruptedException ex) {
//
logger.error(ex.getMessage(), ex);
//
}
//
//释放锁
//
cacheClient.del(KEY);
//
}
//
}
//
}
写
预览
Markdown
格式
0%
请重试
or
附加一个文件
附加文件
取消
您添加了
0
人
到此讨论。请谨慎行事。
先完成此消息的编辑!
取消
想要评论请
注册
或
登录