提交 f7935d22 编写于 作者:hujun

Merge branch 'dev-20191224-registerSource' into 'release'

Dev 20191224 register source

review by yongbo

See merge request !8
流水线 #20511 已失败,于该阶段耗时 0 秒
package com.pica.cloud.account.account.server.controller;
import com.alibaba.fastjson.JSON;
import com.pica.cloud.foundation.entity.PicaResponse;
import com.pica.cloud.foundation.entity.PicaUser;
import com.pica.cloud.foundation.redis.ICacheClient;
import com.pica.cloud.foundation.service.starter.common.BaseController;
import com.pica.cloud.foundation.service.starter.interceptor.LoginPermission;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.RestController;
import redis.clients.jedis.Jedis;
import redis.clients.jedis.JedisSentinelPool;
import java.util.*;
@RestController
public class RedisTestController extends BaseController {

    // SECURITY NOTE(review): the sentinel addresses and password below are
    // hard-coded test credentials; move them to externalized configuration
    // before any non-development use.
    private static final String SENTINEL_MASTER = "master01";
    private static final String SENTINEL_PASSWORD = "Uu49Kz1olY85HQBu";

    @Autowired
    private ICacheClient redisClient;

    /**
     * Diagnostic endpoint that exercises the cluster-backed {@link ICacheClient}
     * (save/get/delete token, plain set/get) and three hard-coded sentinel
     * environments (dev / test / uat), printing each result to stdout.
     *
     * FIX: all sentinel pools and every {@code Jedis} borrowed from them are
     * now closed via try-with-resources; previously each request leaked three
     * pools plus every connection taken with {@code getResource()}.
     *
     * @return the token produced by the last successful saveToken call
     */
    @RequestMapping(value = "/redis", method = RequestMethod.GET, produces = "application/json;charset=UTF-8")
    @LoginPermission
    public PicaResponse<Map<String, String>> redis() {
        // http://localhost:11501/hospital/hospitals/redis
        // ---- cluster ----
        String token = getTokenFromHeader();
        Map<String, String> map = new HashMap<>();
        map.put("id", "1111111");
        map.put("token", token);
        map.put("sysCode", getSysCodeFromHeader());
        try {
            token = redisClient.saveToken(map, 3600);
        } catch (Exception ignored) {
            // best-effort: fall back to the header token if saving fails
        }
        System.out.println(token);
        Map<String, String> map1 = redisClient.getToken(token);
        System.out.println(JSON.toJSON(map1));
        PicaUser user = fetchPicaUser();
        System.out.println(JSON.toJSON(user));

        // ---- sentinel ----
        try (JedisSentinelPool pool1 = newPool("192.168.140.27:17000", "192.168.140.28:17000", "192.168.140.29:17000");
             JedisSentinelPool pool2 = newPool("192.168.110.68:17000", "192.168.110.69:17001", "192.168.110.70:17002");
             JedisSentinelPool pool3 = newPool("192.168.110.177:17000", "192.168.110.178:17000", "192.168.110.179:17000")) {

            try (Jedis jedis1 = pool1.getResource()) {
                System.out.println("sentinel dev - " + jedis1.get("token-" + token));
            }
            try (Jedis jedis2 = pool2.getResource()) {
                System.out.println("sentinel test - " + jedis2.get("token-" + token));
            }
            try (Jedis jedis3 = pool3.getResource()) {
                System.out.println("sentinel uat - " + jedis3.get("token-" + token));
            }

            redisClient.deleteToken(token);
            try {
                // token was just deleted, so this lookup is expected to throw
                fetchPicaUser();
            } catch (Exception ex) {
                System.out.println("Exception -> the user has been deleted");
            }

            Map<String, String> map2 = new HashMap<>();
            map2.put("id", "1111111");
            map2.put("token", token);
            map2.put("sysCode", getSysCodeFromHeader());
            try {
                token = redisClient.saveToken(map2, 3600);
            } catch (Exception ignored) {
                // best-effort: keep the previous token if saving fails
            }

            String clusterKey = "cluster-key-001";
            String clusterValue = "cluster-value-001";
            redisClient.set(clusterKey, clusterValue);
            System.out.println("cluster -> " + redisClient.get(clusterKey));
            System.out.println("dev get cluster value -> " + get(pool1, clusterKey));
            System.out.println("test1 get cluster value -> " + get(pool2, clusterKey));
            System.out.println("uat get cluster value -> " + get(pool3, clusterKey));

            String sentKey = "sent-key-001";
            String sentValue = "sent-value-001";
            set(pool1, sentKey, sentValue);
            System.out.println("dev sentinel -> " + get(pool1, sentKey));
            set(pool2, sentKey, sentValue);
            System.out.println("test1 sentinel -> " + get(pool2, sentKey));
            set(pool3, sentKey, sentValue);
            System.out.println("uat sentinel -> " + get(pool3, sentKey));
        }
        return PicaResponse.toResponse(token);
    }

    /** Builds a sentinel pool over the given {@code host:port} addresses. */
    private static JedisSentinelPool newPool(String... addresses) {
        return new JedisSentinelPool(SENTINEL_MASTER, new HashSet<>(Arrays.asList(addresses)), SENTINEL_PASSWORD);
    }

    /** Borrows a connection, reads {@code key}, and returns the connection to the pool. */
    private static String get(JedisSentinelPool pool, String key) {
        try (Jedis jedis = pool.getResource()) {
            return jedis.get(key);
        }
    }

    /** Borrows a connection, writes {@code key=value}, and returns the connection to the pool. */
    private static void set(JedisSentinelPool pool, String key, String value) {
        try (Jedis jedis = pool.getResource()) {
            jedis.set(key, value);
        }
    }
}
//package com.pica.cloud.account.account.server.controller;
//
//import com.alibaba.fastjson.JSON;
//import com.pica.cloud.foundation.entity.PicaResponse;
//import com.pica.cloud.foundation.entity.PicaUser;
//import com.pica.cloud.foundation.redis.ICacheClient;
//
//import com.pica.cloud.foundation.service.starter.common.BaseController;
//import com.pica.cloud.foundation.service.starter.interceptor.LoginPermission;
//import org.springframework.beans.factory.annotation.Autowired;
//import org.springframework.web.bind.annotation.RequestMapping;
//import org.springframework.web.bind.annotation.RequestMethod;
//import org.springframework.web.bind.annotation.RestController;
//import redis.clients.jedis.Jedis;
//import redis.clients.jedis.JedisSentinelPool;
//
//import java.util.*;
//
//@RestController
//public class RedisTestController extends BaseController {
// @Autowired
// private ICacheClient redisClient;
//
// @RequestMapping(value = "/redis", method = RequestMethod.GET, produces = "application/json;charset=UTF-8")
// @LoginPermission
// public PicaResponse<Map<String, String>> redis() {
// // http://localhost:11501/hospital/hospitals/redis
// // cluster
// String token = getTokenFromHeader();
// Map<String, String> map = new HashMap<>();
// map.put("id", "1111111");
// map.put("token", token);
// map.put("sysCode", getSysCodeFromHeader());
//
// try {
// token = redisClient.saveToken(map, 3600);
// } catch (Exception ex) {
// // nothing to do
// }
// System.out.println(token);
// Map<String, String> map1 = redisClient.getToken(token);
// System.out.println(JSON.toJSON(map1));
// PicaUser user = fetchPicaUser();
// System.out.println(JSON.toJSON(user));
//
// // sentinel
// Set<String> set1 = new HashSet<>();
// set1.add("192.168.140.27:17000");
// set1.add("192.168.140.28:17000");
// set1.add("192.168.140.29:17000");
// JedisSentinelPool pool1 = new JedisSentinelPool("master01", set1, "Uu49Kz1olY85HQBu");
// Jedis jedis1 = pool1.getResource();
// System.out.println("sentinel dev - " + jedis1.get("token-" + token));
//
// Set<String> set2 = new HashSet<>();
// set2.add("192.168.110.68:17000");
// set2.add("192.168.110.69:17001");
// set2.add("192.168.110.70:17002");
// JedisSentinelPool pool2 = new JedisSentinelPool("master01", set2, "Uu49Kz1olY85HQBu");
// Jedis jedis2 = pool2.getResource();
// System.out.println("sentinel test - " + jedis2.get("token-" + token));
//
// Set<String> set3 = new HashSet<>();
// set3.add("192.168.110.177:17000");
// set3.add("192.168.110.178:17000");
// set3.add("192.168.110.179:17000");
// JedisSentinelPool pool3 = new JedisSentinelPool("master01", set3, "Uu49Kz1olY85HQBu");
// Jedis jedis3 = pool3.getResource();
// System.out.println("sentinel uat - " + jedis3.get("token-" + token));
//
// redisClient.deleteToken(token);
//
// try {
// PicaUser user1 = fetchPicaUser();
// } catch (Exception ex) {
// System.out.println("Exception -> the user has been deleted");
// }
//
// Map<String, String> map2 = new HashMap<>();
// map2.put("id", "1111111");
// map2.put("token", token);
// map2.put("sysCode", getSysCodeFromHeader());
// try {
// token = redisClient.saveToken(map2, 3600);
// } catch (Exception ex) {
// // nothing to do
// }
//
// String clusterKey = "cluster-key-001";
// String clusterValue = "cluster-value-001";
//
// redisClient.set(clusterKey, clusterValue);
// System.out.println("cluster -> " + redisClient.get(clusterKey));
// System.out.println("dev get cluster value -> " + pool1.getResource().get(clusterKey));
// System.out.println("test1 get cluster value -> " + pool2.getResource().get(clusterKey));
// System.out.println("uat get cluster value -> " + pool3.getResource().get(clusterKey));
//
// String sentKey = "sent-key-001";
// String sentValue = "sent-value-001";
// pool1.getResource().set(sentKey, sentValue);
// System.out.println("dev sentinel -> " + pool1.getResource().get(sentKey));
//
// pool2.getResource().set(sentKey, sentValue);
// System.out.println("test1 sentinel -> " + pool2.getResource().get(sentKey));
//
// pool3.getResource().set(sentKey, sentValue);
// System.out.println("uat sentinel -> " + pool3.getResource().get(sentKey));
//
// return PicaResponse.toResponse(token);
// }
//}
package com.pica.cloud.account.account.server.job;
import com.pica.cloud.account.account.server.mapper.DoctorMapper;
import com.pica.cloud.foundation.redis.ICacheClient;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;
/**
 * Created on 2019/10/29 15:37
 * author:crs
 * Description: de-duplicates repeated records in the doctor table.
 */
@Component
public class DoctorRepeatDataJob {

    /** Redis key that acts as a best-effort cross-instance lock flag. */
    private static final String KEY = "cacheProcessDoctorRepeat";

    private static final Logger logger = LoggerFactory.getLogger(DoctorRepeatDataJob.class);

    @Autowired
    private ICacheClient cacheClient;

    @Autowired
    private DoctorMapper doctorMapper;

    /**
     * Hourly scheduled job; a Redis flag is used so that only one service
     * instance runs the refresh at a time.
     *
     * NOTE(review): the get-then-set sequence below is not atomic — two
     * instances can both observe the key as absent and both run. If
     * {@code ICacheClient} exposes a SETNX-style operation, use it instead.
     * Likewise, a crash between {@code set} and {@code expire} would leave
     * the flag without a TTL — confirm whether the client offers set-with-TTL.
     */
    @Scheduled(cron = "0 0 0/1 * * ?")
    public void processDoctorRepeatData() {
        // Idempotency guard: skip if another instance already holds the flag.
        String exist = cacheClient.get(KEY);
        if (exist == null) {
            cacheClient.set(KEY, "1");
            cacheClient.expire(KEY, 60 * 10); // safety TTL: 10 minutes
            logger.info("DoctorRepeatDataJob:开始执行刷新doctor表中重复的记录");
            int row = doctorMapper.processDoctorRepeatData();
            logger.info("此次数据刷新影响的行数:{}", row);
            try {
                // Hold the flag a while in case the task finishes within a
                // second and other machines' clocks lag by a few seconds.
                Thread.sleep(30 * 1000);
            } catch (InterruptedException ex) {
                logger.error(ex.getMessage(), ex);
                // FIX: restore the interrupt status instead of swallowing it.
                Thread.currentThread().interrupt();
            }
            // Release the lock.
            cacheClient.del(KEY);
        }
    }
}
//package com.pica.cloud.account.account.server.job;
//
//import com.pica.cloud.account.account.server.mapper.DoctorMapper;
//import com.pica.cloud.foundation.redis.ICacheClient;
//import org.slf4j.Logger;
//import org.slf4j.LoggerFactory;
//import org.springframework.beans.factory.annotation.Autowired;
//import org.springframework.beans.factory.annotation.Qualifier;
//
//import org.springframework.scheduling.annotation.Scheduled;
//import org.springframework.stereotype.Component;
//
///**
// * Created on 2019/10/29 15:37
// * author:crs
// * Description:doctor表重复记录处理
// */
//@Component
//public class DoctorRepeatDataJob {
// private final String KEY = "cacheProcessDoctorRepeat";
//
// private Logger logger = LoggerFactory.getLogger(this.getClass());
//
// @Autowired
//
// private ICacheClient cacheClient;
//
// @Autowired
// private DoctorMapper doctorMapper;
//
// /**
// * 1、使用分布式锁保证一个微服务执行;
// * 2、定时任务;
// */
// @Scheduled(cron = "0 0 0/1 * * ?")
// public void processDoctorRepeatData() {
// //通过接口幂等性逻辑处理
// String exist = cacheClient.get(KEY);
// if (exist==null){
// cacheClient.set(KEY,"1");
// cacheClient.expire(KEY, 60 * 10);
// logger.info("DoctorRepeatDataJob:开始执行刷新doctor表中重复的记录");
// int row = doctorMapper.processDoctorRepeatData();
// logger.info("此次数据刷新影响的行数:{}", row);
// try {
// //防止任务一秒跑完,其他机器的时间晚了几秒
// Thread.sleep(30*1000);
// } catch (InterruptedException ex) {
// logger.error(ex.getMessage(), ex);
// }
// //释放锁
// cacheClient.del(KEY);
// }
// }
//}
Markdown 格式
0% or
您添加了 0 到此讨论。请谨慎行事。
先完成此消息的编辑!
想要评论请 注册