站点更新:2019-01-22 15:38:12
This commit is contained in:
@@ -28,8 +28,29 @@ public class CharToHex {
|
||||
String oct = Integer.toOctalString(i);
|
||||
System.out.println(oct);
|
||||
|
||||
System.out.println("---------------");
|
||||
System.out.println(Integer.toBinaryString(-22));
|
||||
//测试 int 转 byte
|
||||
int int0 = 234;
|
||||
byte byte0 = intToByte(int0);
|
||||
System.out.println("byte0= " + byte0);//byte0=-22
|
||||
//测试 byte 转 int
|
||||
int int1 = byteToInt(byte0);
|
||||
System.out.println("int1= " + int1);//int1=234
|
||||
}
|
||||
|
||||
|
||||
//byte 与 int 的相互转换
|
||||
public static byte intToByte(int x) {
|
||||
return (byte) x;
|
||||
}
|
||||
|
||||
public static int byteToInt(byte b) {
|
||||
//Java 总是把 byte 当做有符处理;我们可以通过将其和 0xFF 进行二进制与得到它的无符值
|
||||
return b & 0xFF;
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* 字符对应编码的哈希值
|
||||
*
|
||||
|
29
src/main/java/me/ehlxr/DidSdkTest.java
Normal file
29
src/main/java/me/ehlxr/DidSdkTest.java
Normal file
@@ -0,0 +1,29 @@
|
||||
package me.ehlxr;
|
||||
|
||||
import cn.ceres.did.client.SdkClient;
|
||||
import cn.ceres.did.sdk.SdkProto;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
|
||||
/**
|
||||
* @author ehlxr
|
||||
*/
|
||||
public class DidSdkTest {
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
SdkClient client = new SdkClient("10.19.248.200", 30581);
|
||||
// SdkClient client = new SdkClient();
|
||||
client.init();
|
||||
client.start();
|
||||
|
||||
// client.invokeOneWay(new SdkProto(), 2000);
|
||||
// System.out.println(client.invokeSync(new SdkProto(), 2000).getDid());
|
||||
CountDownLatch countDownLatch = new CountDownLatch(1);
|
||||
client.invokeAsync(new SdkProto(), 2000, responseFuture -> {
|
||||
System.out.println(responseFuture.getSdkProto().getDid());
|
||||
countDownLatch.countDown();
|
||||
});
|
||||
countDownLatch.await();
|
||||
client.shutdown();
|
||||
}
|
||||
}
|
44
src/main/java/me/ehlxr/FindRequestMapping.java
Normal file
44
src/main/java/me/ehlxr/FindRequestMapping.java
Normal file
@@ -0,0 +1,44 @@
|
||||
package me.ehlxr;
|
||||
|
||||
import java.io.*;
|
||||
|
||||
|
||||
/**
|
||||
* @author ehlxr
|
||||
*/
|
||||
/**
 * Recursively scans a source tree and prints every line that contains a
 * Spring {@code @RequestMapping} annotation.
 *
 * @author ehlxr
 */
public class FindRequestMapping {

    public static void main(String[] args) throws IOException {
        File dir = new File("/Users/ehlxr/WorkSpaces/enncloud/ceres-epns/ceres-epns-web/src/main/java/com/ceres/epns/web");
        deal(dir);
    }

    /**
     * Walks {@code file}: directories are recursed into; any regular file is
     * scanned line by line and lines containing {@code @RequestMapping} are
     * printed to stdout.
     *
     * @param file directory or file to process
     * @throws IOException if a file cannot be opened or read
     */
    private static void deal(File file) throws IOException {
        if (file.isDirectory()) {
            // Keep sub-directories (to recurse into) plus Java sources that
            // live under a "src" path.
            File[] fs = file.listFiles((dir, name) -> {
                File f = new File(dir, name);
                return (f.getPath().contains("src") && name.endsWith(".java")) || f.isDirectory();
            });

            for (File f : fs != null ? fs : new File[0]) {
                deal(f);
            }
        } else {
            // try-with-resources guarantees the reader is closed even when
            // readLine() throws; explicit UTF-8 avoids depending on the
            // platform default charset.
            try (BufferedReader bufferedReader = new BufferedReader(
                    new InputStreamReader(new FileInputStream(file), "UTF-8"))) {
                String lineTxt;
                while ((lineTxt = bufferedReader.readLine()) != null) {
                    if (lineTxt.contains("@RequestMapping")) {
                        System.out.println(lineTxt);
                    }
                }
            }
        }
    }
}
|
15
src/main/java/me/ehlxr/Hello.kt
Normal file
15
src/main/java/me/ehlxr/Hello.kt
Normal file
@@ -0,0 +1,15 @@
|
||||
package me.ehlxr
|
||||
|
||||
/**
|
||||
* Created by lixiangrong on 2018/8/17.
|
||||
*/
|
||||
/**
 * Prints each sample string on its own line.
 *
 * Created by lixiangrong on 2018/8/17.
 */
fun main(args: Array<String>) {
    val samples = listOf("a", "ab", "abc", "abcd", "abcde", "abcdef", "abcdefg")
    // Kotlin's collection API (filter / flatMap / map / forEach) is handy here;
    // a plain for-loop does exactly what the forEach did.
    for (sample in samples) {
        println(sample)
    }
}
|
31
src/main/java/me/ehlxr/ReferenceCountingGC.java
Normal file
31
src/main/java/me/ehlxr/ReferenceCountingGC.java
Normal file
@@ -0,0 +1,31 @@
|
||||
package me.ehlxr;
|
||||
|
||||
/**
|
||||
* @author lixiangrong
|
||||
* @date 2018/8/21
|
||||
*/
|
||||
/**
 * Demo of the classic reference-counting limitation: two objects that point
 * at each other form a cycle, yet the JVM (reachability-based collection)
 * can still reclaim them once no live root references either one.
 *
 * @author lixiangrong
 * @date 2018/8/21
 */
public class ReferenceCountingGC {
    public Object instance = null;

    private static final int _1MB = 1024 * 1024;

    // Only purpose is to occupy some heap, so that a collection of these
    // objects is clearly visible in the GC log.
    private byte[] bigSize = new byte[2 * _1MB];

    public static void testGC() {
        ReferenceCountingGC first = new ReferenceCountingGC();
        ReferenceCountingGC second = new ReferenceCountingGC();

        // Build a reference cycle, then drop both roots.
        first.instance = second;
        second.instance = first;
        first = null;
        second = null;

        // Suppose GC runs here — can the two objects be reclaimed?
        // (On reachability-based collectors the cycle does not keep them alive.)
        System.gc();
    }

    public static void main(String[] args) {
        testGC();
    }
}
|
@@ -3,13 +3,19 @@ package me.ehlxr;
|
||||
import java.io.*;
|
||||
|
||||
/**
|
||||
* Created by lixiangrong on 2017/3/27.
|
||||
* @author lixiangrong
|
||||
* @date 2017/3/27
|
||||
*/
|
||||
public class Rename {
|
||||
public static void main(String[] args) {
|
||||
File dir = new File("/Users/ehlxr/Desktop/_posts/");
|
||||
File dir = new File("/Users/ehlxr/ehlxr/blog/posts");
|
||||
|
||||
File[] files = dir.listFiles();
|
||||
if (null == files || files.length <= 0) {
|
||||
System.out.println("sources is null!");
|
||||
return;
|
||||
}
|
||||
int count = 0;
|
||||
for (File file : files) {
|
||||
try {
|
||||
String oName = file.getName();
|
||||
@@ -19,31 +25,23 @@ public class Rename {
|
||||
|
||||
String nName = date + "-" + title + ".md";
|
||||
|
||||
|
||||
copyFileUsingFileStreams(file, new File("/Users/ehlxr/Desktop/posts/" + nName));
|
||||
copyFileUsingFileStreams(file, new File("/Users/ehlxr/Desktop/post/" + nName));
|
||||
count++;
|
||||
} catch (Exception e) {
|
||||
continue;
|
||||
System.out.println("exce file [ " + file.getName() + " ] error, reason: " + e.getMessage());
|
||||
}
|
||||
}
|
||||
System.out.println("complete: " + count);
|
||||
}
|
||||
|
||||
private static void copyFileUsingFileStreams(File source, File dest)
|
||||
throws IOException {
|
||||
InputStream input = null;
|
||||
OutputStream output = null;
|
||||
try {
|
||||
input = new FileInputStream(source);
|
||||
output = new FileOutputStream(dest);
|
||||
try (InputStream input = new FileInputStream(source); OutputStream output = new FileOutputStream(dest)) {
|
||||
byte[] buf = new byte[1024];
|
||||
int bytesRead;
|
||||
while ((bytesRead = input.read(buf)) > 0) {
|
||||
output.write(buf, 0, bytesRead);
|
||||
}
|
||||
} finally {
|
||||
input.close();
|
||||
output.close();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
40
src/main/java/me/ehlxr/ScheduledThreadPoolExecutorTest.java
Normal file
40
src/main/java/me/ehlxr/ScheduledThreadPoolExecutorTest.java
Normal file
@@ -0,0 +1,40 @@
|
||||
package me.ehlxr;
|
||||
|
||||
import cn.enncloud.ceres.utils.NamedThreadFactory;
|
||||
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.ScheduledFuture;
|
||||
import java.util.concurrent.ScheduledThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
/**
|
||||
* @author lixiangrong
|
||||
* @date 2018/8/27
|
||||
*/
|
||||
public class ScheduledThreadPoolExecutorTest {
|
||||
|
||||
private ScheduledThreadPoolExecutor scheduledThreadPoolExecutor = new ScheduledThreadPoolExecutor(1, new NamedThreadFactory("TestTimer", true));
|
||||
private ScheduledFuture<?> scheduledFuture;
|
||||
private static CountDownLatch countDownLatch = new CountDownLatch(1);
|
||||
|
||||
private void start() {
|
||||
scheduledFuture = scheduledThreadPoolExecutor.scheduleAtFixedRate(() -> {
|
||||
System.out.println(scheduledThreadPoolExecutor.getQueue() + " " + Thread.currentThread().getName());
|
||||
|
||||
scheduledFuture.cancel(false);
|
||||
// System.out.println(scheduledThreadPoolExecutor.remove(pingCommand));
|
||||
// System.out.println(scheduledThreadPoolExecutor.getRemoveOnCancelPolicy());
|
||||
// scheduledThreadPoolExecutor.setRemoveOnCancelPolicy(true);
|
||||
System.out.println(scheduledThreadPoolExecutor.getQueue() + " " + Thread.currentThread().getName());
|
||||
// System.out.println(scheduledThreadPoolExecutor.getActiveCount());
|
||||
|
||||
countDownLatch.countDown();
|
||||
}, 2, 5, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws InterruptedException {
|
||||
ScheduledThreadPoolExecutorTest test = new ScheduledThreadPoolExecutorTest();
|
||||
test.start();
|
||||
countDownLatch.await();
|
||||
}
|
||||
}
|
183
src/main/java/me/ehlxr/SnowflakeIdWorker.java
Normal file
183
src/main/java/me/ehlxr/SnowflakeIdWorker.java
Normal file
@@ -0,0 +1,183 @@
|
||||
package me.ehlxr;
|
||||
|
||||
|
||||
/**
|
||||
* Twitter_Snowflake<br>
|
||||
* SnowFlake的结构如下(每部分用-分开):<br>
|
||||
* 0 - 0000000000 0000000000 0000000000 0000000000 0 - 00000 - 00000 - 000000000000 <br>
|
||||
* SnowFlake的优点是,整体上按照时间自增排序,并且整个分布式系统内不会产生ID碰撞(由数据中心ID和机器ID作区分)
|
||||
*
|
||||
* @author ehlxr
|
||||
*/
|
||||
public class SnowflakeIdWorker {
|
||||
|
||||
/**
|
||||
* 开始时间截 (2015-01-01)
|
||||
*/
|
||||
private final long twepoch = 1420041600000L;
|
||||
|
||||
/**
|
||||
* 机器id所占的位数
|
||||
*/
|
||||
private final long workerIdBits = 5L;
|
||||
|
||||
/**
|
||||
* 数据标识id所占的位数
|
||||
*/
|
||||
private final long datacenterIdBits = 5L;
|
||||
|
||||
/**
|
||||
* 支持的最大机器id,结果是31 (这个移位算法可以很快的计算出几位二进制数所能表示的最大十进制数)
|
||||
*/
|
||||
private final long maxWorkerId = -1L ^ (-1L << workerIdBits);
|
||||
|
||||
/**
|
||||
* 支持的最大数据标识id,结果是31
|
||||
*/
|
||||
private final long maxDatacenterId = -1L ^ (-1L << datacenterIdBits);
|
||||
|
||||
/**
|
||||
* 序列在id中占的位数
|
||||
*/
|
||||
private final long sequenceBits = 12L;
|
||||
|
||||
/**
|
||||
* 机器ID向左移12位
|
||||
*/
|
||||
private final long workerIdShift = sequenceBits;
|
||||
|
||||
/**
|
||||
* 数据标识id向左移17位(12+5)
|
||||
*/
|
||||
private final long datacenterIdShift = sequenceBits + workerIdBits;
|
||||
|
||||
/**
|
||||
* 时间截向左移22位(5+5+12)
|
||||
*/
|
||||
private final long timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits;
|
||||
|
||||
/**
|
||||
* 生成序列的掩码,这里为4095 (0b111111111111=0xfff=4095)
|
||||
*/
|
||||
private final long sequenceMask = -1L ^ (-1L << sequenceBits);
|
||||
|
||||
/**
|
||||
* 工作机器ID(0~31)
|
||||
*/
|
||||
private long workerId;
|
||||
|
||||
/**
|
||||
* 数据中心ID(0~31)
|
||||
*/
|
||||
private long datacenterId;
|
||||
|
||||
/**
|
||||
* 毫秒内序列(0~4095)
|
||||
*/
|
||||
private long sequence = 0L;
|
||||
|
||||
/**
|
||||
* 上次生成ID的时间截
|
||||
*/
|
||||
private long lastTimestamp = -1L;
|
||||
|
||||
/**
|
||||
* 构造函数
|
||||
*
|
||||
* @param workerId 工作ID (0~31)
|
||||
* @param datacenterId 数据中心ID (0~31)
|
||||
*/
|
||||
public SnowflakeIdWorker(long workerId, long datacenterId) {
|
||||
if (workerId > maxWorkerId || workerId < 0) {
|
||||
throw new IllegalArgumentException(String.format("worker Id can't be greater than %d or less than 0", maxWorkerId));
|
||||
}
|
||||
if (datacenterId > maxDatacenterId || datacenterId < 0) {
|
||||
throw new IllegalArgumentException(String.format("datacenter Id can't be greater than %d or less than 0", maxDatacenterId));
|
||||
}
|
||||
this.workerId = workerId;
|
||||
this.datacenterId = datacenterId;
|
||||
}
|
||||
|
||||
/**
|
||||
* 获得下一个ID (该方法是线程安全的)
|
||||
*
|
||||
* @return SnowflakeId
|
||||
*/
|
||||
public synchronized long nextId() {
|
||||
long timestamp = timeGen();
|
||||
|
||||
//如果当前时间小于上一次ID生成的时间戳,说明系统时钟回退过这个时候应当抛出异常
|
||||
if (timestamp < lastTimestamp) {
|
||||
throw new RuntimeException(
|
||||
String.format("Clock moved backwards. Refusing to generate id for %d milliseconds", lastTimestamp - timestamp));
|
||||
}
|
||||
|
||||
//如果是同一时间生成的,则进行毫秒内序列
|
||||
if (lastTimestamp == timestamp) {
|
||||
sequence = (sequence + 1) & sequenceMask;
|
||||
//毫秒内序列溢出
|
||||
if (sequence == 0) {
|
||||
//阻塞到下一个毫秒,获得新的时间戳
|
||||
timestamp = tilNextMillis(lastTimestamp);
|
||||
}
|
||||
}
|
||||
//时间戳改变,毫秒内序列重置
|
||||
else {
|
||||
sequence = 0L;
|
||||
}
|
||||
|
||||
//上次生成ID的时间截
|
||||
lastTimestamp = timestamp;
|
||||
|
||||
//移位并通过或运算拼到一起组成64位的ID
|
||||
return ((timestamp - twepoch) << timestampLeftShift)
|
||||
| (datacenterId << datacenterIdShift)
|
||||
| (workerId << workerIdShift)
|
||||
| sequence;
|
||||
}
|
||||
|
||||
/**
|
||||
* 阻塞到下一个毫秒,直到获得新的时间戳
|
||||
*
|
||||
* @param lastTimestamp 上次生成ID的时间截
|
||||
* @return 当前时间戳
|
||||
*/
|
||||
protected long tilNextMillis(long lastTimestamp) {
|
||||
long timestamp = timeGen();
|
||||
while (timestamp <= lastTimestamp) {
|
||||
timestamp = timeGen();
|
||||
}
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
/**
|
||||
* 返回以毫秒为单位的当前时间
|
||||
*
|
||||
* @return 当前时间(毫秒)
|
||||
*/
|
||||
protected long timeGen() {
|
||||
return System.currentTimeMillis();
|
||||
}
|
||||
|
||||
public static void main(String[] args) {
|
||||
// SnowflakeIdWorker snowflakeIdWorker = new SnowflakeIdWorker(1, 2);
|
||||
// System.out.println(snowflakeIdWorker.nextId());
|
||||
|
||||
// System.out.println(1<<30);
|
||||
// System.out.println(1<<31);
|
||||
|
||||
|
||||
// System.out.println(0xff>>>7);
|
||||
// System.out.println(((short)0xff)>>>7);
|
||||
// System.out.println(((byte)0xff)>>>7);
|
||||
// System.out.println(((short)0xff));
|
||||
// System.out.println(((byte)0xff));
|
||||
// System.out.println((0xff));
|
||||
//
|
||||
//
|
||||
// System.out.println(0xff==255);
|
||||
// System.out.println(0xff==255);
|
||||
|
||||
System.out.println(~(-1L << 5));
|
||||
}
|
||||
}
|
@@ -1,16 +1,14 @@
|
||||
package me.ehlxr;
|
||||
|
||||
import com.google.common.collect.Maps;
|
||||
|
||||
/**
|
||||
* Created by lixiangrong on 2016/12/23.
|
||||
*/
|
||||
public class dfd {
|
||||
|
||||
public static void main(String[] args) {
|
||||
var map = Maps.newHashMap();
|
||||
map.put("d",1);
|
||||
System.out.println(map);
|
||||
// var map = Maps.newHashMap();
|
||||
// map.put("d",1);
|
||||
// System.out.println(map);
|
||||
}
|
||||
public void printCircle(int[][] matrix, int startX, int startY, int endX, int endY) {
|
||||
// only one column left
|
||||
|
@@ -19,9 +19,9 @@ log4j.appender.stdout=org.apache.log4j.ConsoleAppender
|
||||
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.stdout.layout.ConversionPattern=[%p] %-d{yyyy-MM-dd HH\:mm\:ss} [%t] [%c.%M\:%L] %m%n
|
||||
|
||||
log4j.appender.logFile=org.apache.log4j.RollingFileAppender
|
||||
log4j.appender.logFile.File=${webapp.root}/logs/amq.log
|
||||
log4j.appender.logFile.layout=org.apache.log4j.PatternLayout
|
||||
log4j.appender.logFile.layout.ConversionPattern=[%p] %-d{yyyy-MM-dd HH\:mm\:ss} [%t] [%c.%M\:%L] %m%n
|
||||
log4j.appender.logFile.MaxFileSize = 5MB
|
||||
log4j.appender.logFile.MaxBackupIndex =3
|
||||
#log4j.appender.logFile=org.apache.log4j.RollingFileAppender
|
||||
#log4j.appender.logFile.File=${webapp.root}/logs/amq.log
|
||||
#log4j.appender.logFile.layout=org.apache.log4j.PatternLayout
|
||||
#log4j.appender.logFile.layout.ConversionPattern=[%p] %-d{yyyy-MM-dd HH\:mm\:ss} [%t] [%c.%M\:%L] %m%n
|
||||
#log4j.appender.logFile.MaxFileSize = 5MB
|
||||
#log4j.appender.logFile.MaxBackupIndex =3
|
Reference in New Issue
Block a user