Preface
This article takes a look at the create process of TcpClient in reactor-netty.
maven
<dependency>
    <groupId>io.projectreactor.ipc</groupId>
    <artifactId>reactor-netty</artifactId>
    <version>0.7.3.RELEASE</version>
</dependency>
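For context, here is a minimal usage sketch that exercises the create path traced below. It assumes the 0.7.x API (TcpClient.create(host, port) plus newHandler); the address is a placeholder.

import reactor.core.publisher.Mono;
import reactor.ipc.netty.NettyContext;
import reactor.ipc.netty.tcp.TcpClient;

public class TcpClientDemo {
    public static void main(String[] args) {
        // Building the client runs the TcpClient(Builder) constructor shown below,
        // which wires in the shared TcpResources (event loops + channel pools).
        TcpClient client = TcpClient.create("example.com", 80); // placeholder address

        // newHandler connects and returns a Mono<NettyContext>.
        NettyContext ctx = client
                .newHandler((in, out) -> out.sendString(Mono.just("ping")))
                .block();
        ctx.dispose(); // release the connection
    }
}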
TcpClient
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/tcp/TcpClient.java
protected TcpClient(TcpClient.Builder builder) {
    ClientOptions.Builder<?> clientOptionsBuilder = ClientOptions.builder();
    if (Objects.nonNull(builder.options)) {
        builder.options.accept(clientOptionsBuilder);
    }
    if (!clientOptionsBuilder.isLoopAvailable()) {
        clientOptionsBuilder.loopResources(TcpResources.get());
    }
    if (!clientOptionsBuilder.isPoolAvailable() && !clientOptionsBuilder.isPoolDisabled()) {
        clientOptionsBuilder.poolResources(TcpResources.get());
    }
    this.options = clientOptionsBuilder.build();
}
Both the loopResources and the poolResources are actually created through TcpResources.
Once the loopResources call above has created the shared instance, the poolResources call below simply returns the instance that already exists.
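That caching behavior follows from getOrCreate (shown further below): once the atomic reference is populated and no new loops or pools are passed in, the existing instance is returned. A quick illustrative check:

TcpResources first = TcpResources.get();   // creates loops and pools on the first call
TcpResources second = TcpResources.get();  // ref is already set, returned as-is
assert first == second;                    // same shared global instance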
clientOptionsBuilder.isLoopAvailable()
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/options/NettyOptions.java
public final boolean isLoopAvailable() {
    return this.loopResources != null;
}
Initially loopResources is null, so TcpResources.get() is called to create it.
TcpResources.get()
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/tcp/TcpResources.java
/**
 * Return the global TCP resources for event loops and pooling
 *
 * @return the global TCP resources for event loops and pooling
 */
public static TcpResources get() {
    return getOrCreate(tcpResources, null, null, ON_TCP_NEW, "tcp");
}

/**
 * Safely check if existing resource exist and proceed to update/cleanup if new
 * resources references are passed.
 *
 * @param ref the resources atomic reference
 * @param loops the eventual new {@link LoopResources}
 * @param pools the eventual new {@link PoolResources}
 * @param onNew a {@link TcpResources} factory
 * @param name a name for resources
 * @param <T> the reified type of {@link TcpResources}
 *
 * @return an existing or new {@link TcpResources}
 */
protected static <T extends TcpResources> T getOrCreate(AtomicReference<T> ref,
        LoopResources loops,
        PoolResources pools,
        BiFunction<LoopResources, PoolResources, T> onNew,
        String name) {
    T update;
    for (; ; ) {
        T resources = ref.get();
        if (resources == null || loops != null || pools != null) {
            update = create(resources, loops, pools, name, onNew);
            if (ref.compareAndSet(resources, update)) {
                if (resources != null) {
                    if (loops != null) {
                        resources.defaultLoops.dispose();
                    }
                    if (pools != null) {
                        resources.defaultPools.dispose();
                    }
                }
                return update;
            }
            else {
                update._dispose();
            }
        }
        else {
            return resources;
        }
    }
}
Here getOrCreate calls into create, which builds both the loops and the pools.
static final AtomicReference<TcpResources> tcpResources;

static final BiFunction<LoopResources, PoolResources, TcpResources> ON_TCP_NEW;

static {
    ON_TCP_NEW = TcpResources::new;
    tcpResources = new AtomicReference<>();
}

final PoolResources defaultPools;

final LoopResources defaultLoops;

protected TcpResources(LoopResources defaultLoops, PoolResources defaultPools) {
    this.defaultLoops = defaultLoops;
    this.defaultPools = defaultPools;
}

static <T extends TcpResources> T create(T previous,
        LoopResources loops,
        PoolResources pools,
        String name,
        BiFunction<LoopResources, PoolResources, T> onNew) {
    if (previous == null) {
        loops = loops == null ? LoopResources.create("reactor-" + name) : loops;
        pools = pools == null ? PoolResources.elastic(name) : pools;
    }
    else {
        loops = loops == null ? previous.defaultLoops : loops;
        pools = pools == null ? previous.defaultPools : pools;
    }
    return onNew.apply(loops, pools);
}
The onNew here constructs the TcpResources, using the constructor TcpResources(LoopResources defaultLoops, PoolResources defaultPools).
LoopResources.create
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/resources/LoopResources.java
/**
 * Default worker thread count, fallback to available processors
 */
int DEFAULT_IO_WORKER_COUNT = Integer.parseInt(System.getProperty(
        "reactor.ipc.netty.workerCount",
        "" + Math.max(Runtime.getRuntime().availableProcessors(), 4)));

/**
 * Default selector thread count, fallback to -1 (no selector thread)
 */
int DEFAULT_IO_SELECT_COUNT = Integer.parseInt(System.getProperty(
        "reactor.ipc.netty.selectCount",
        "" + -1));

/**
 * Create a simple {@link LoopResources} to provide automatically for {@link
 * EventLoopGroup} and {@link Channel} factories
 *
 * @param prefix the event loop thread name prefix
 *
 * @return a new {@link LoopResources} to provide automatically for {@link
 * EventLoopGroup} and {@link Channel} factories
 */
static LoopResources create(String prefix) {
    return new DefaultLoopResources(prefix, DEFAULT_IO_SELECT_COUNT,
            DEFAULT_IO_WORKER_COUNT, true);
}
Two parameters are involved here, the worker thread count and the selector thread count (a tuning sketch follows the list below):
- DEFAULT_IO_WORKER_COUNT: if the system property reactor.ipc.netty.workerCount is set, its value is used; otherwise it falls back to Math.max(Runtime.getRuntime().availableProcessors(), 4)
- DEFAULT_IO_SELECT_COUNT: if the system property reactor.ipc.netty.selectCount is set, its value is used; otherwise it falls back to -1, meaning no dedicated selector thread
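Both values are read from system properties when the LoopResources interface is initialized, so they must be set before any reactor-netty class loads. A tuning sketch (the values 8 and 1 are arbitrary examples):

public class LoopTuning {
    public static void main(String[] args) {
        // Must run before LoopResources is loaded; alternatively pass these as -D JVM flags.
        System.setProperty("reactor.ipc.netty.workerCount", "8"); // 8 worker threads
        System.setProperty("reactor.ipc.netty.selectCount", "1"); // 1 dedicated selector thread
        // ... only now touch reactor-netty, e.g. TcpClient.create(...); the
        // DefaultLoopResources created via TcpResources.get() picks up these counts.
    }
}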
DefaultLoopResources
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/resources/DefaultLoopResources.java
DefaultLoopResources(String prefix, int selectCount, int workerCount, boolean daemon) {
    this.running = new AtomicBoolean(true);
    this.daemon = daemon;
    this.workerCount = workerCount;
    this.prefix = prefix;

    this.serverLoops = new NioEventLoopGroup(workerCount,
            threadFactory(this, "nio"));

    this.clientLoops = LoopResources.colocate(serverLoops);

    this.cacheNativeClientLoops = new AtomicReference<>();
    this.cacheNativeServerLoops = new AtomicReference<>();

    if (selectCount == -1) {
        this.selectCount = workerCount;
        this.serverSelectLoops = this.serverLoops;
        this.cacheNativeSelectLoops = this.cacheNativeServerLoops;
    }
    else {
        this.selectCount = selectCount;
        this.serverSelectLoops =
                new NioEventLoopGroup(selectCount, threadFactory(this, "select-nio"));
        this.cacheNativeSelectLoops = new AtomicReference<>();
    }
}
Here prefix is reactor-tcp, selectCount is -1, workerCount is 4, and daemon is true.
As shown, a NioEventLoopGroup is created with a workerCount of 4; since selectCount is -1, no separate serverSelectLoops group is created and the worker group doubles as the select group.
NioEventLoopGroup
netty-transport-4.1.20.Final-sources.jar!/io/netty/channel/nio/NioEventLoopGroup.java
public NioEventLoopGroup(int nThreads, ThreadFactory threadFactory,
        final SelectorProvider selectorProvider, final SelectStrategyFactory selectStrategyFactory) {
    super(nThreads, threadFactory, selectorProvider, selectStrategyFactory,
            RejectedExecutionHandlers.reject());
}
Note that the rejection handler here is RejectedExecutionHandlers.reject().
netty-common-4.1.20.Final-sources.jar!/io/netty/util/concurrent/MultithreadEventExecutorGroup.java
/**
 * Create a new instance.
 *
 * @param nThreads          the number of threads that will be used by this instance.
 * @param threadFactory     the ThreadFactory to use, or {@code null} if the default should be used.
 * @param args              arguments which will passed to each {@link #newChild(Executor, Object...)} call
 */
protected MultithreadEventExecutorGroup(int nThreads, ThreadFactory threadFactory, Object... args) {
    this(nThreads, threadFactory == null ? null : new ThreadPerTaskExecutor(threadFactory), args);
}
Constructing a NioEventLoopGroup ends up calling the MultithreadEventExecutorGroup constructor.
The threadFactory here is reactor.ipc.netty.resources.DefaultLoopResources$EventLoopSelectorFactory.
The executor here is a ThreadPerTaskExecutor.
netty-common-4.1.20.Final-sources.jar!/io/netty/util/concurrent/ThreadPerTaskExecutor.java
public final class ThreadPerTaskExecutor implements Executor {
    private final ThreadFactory threadFactory;

    public ThreadPerTaskExecutor(ThreadFactory threadFactory) {
        if (threadFactory == null) {
            throw new NullPointerException("threadFactory");
        }
        this.threadFactory = threadFactory;
    }

    @Override
    public void execute(Runnable command) {
        threadFactory.newThread(command).start();
    }
}
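In other words, ThreadPerTaskExecutor does no pooling at all: every execute() call starts a brand-new thread from the factory. A small demo using netty's DefaultThreadFactory for illustration:

import io.netty.util.concurrent.DefaultThreadFactory;
import io.netty.util.concurrent.ThreadPerTaskExecutor;

import java.util.concurrent.Executor;

public class ThreadPerTaskDemo {
    public static void main(String[] args) {
        Executor executor = new ThreadPerTaskExecutor(new DefaultThreadFactory("demo"));
        // Each execute() spawns and starts a fresh thread, e.g. demo-1-1, demo-1-2.
        executor.execute(() -> System.out.println(Thread.currentThread().getName()));
        executor.execute(() -> System.out.println(Thread.currentThread().getName()));
    }
}

In the event-loop setting this executor is invoked only once per NioEventLoop, to start its single dedicated thread, so no thread churn actually occurs.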
MultithreadEventExecutorGroup
/**
 * Create a new instance.
 *
 * @param nThreads          the number of threads that will be used by this instance.
 * @param executor          the Executor to use, or {@code null} if the default should be used.
 * @param chooserFactory    the {@link EventExecutorChooserFactory} to use.
 * @param args              arguments which will passed to each {@link #newChild(Executor, Object...)} call
 */
protected MultithreadEventExecutorGroup(int nThreads, Executor executor,
        EventExecutorChooserFactory chooserFactory, Object... args) {
    if (nThreads <= 0) {
        throw new IllegalArgumentException(String.format("nThreads: %d (expected: > 0)", nThreads));
    }

    if (executor == null) {
        executor = new ThreadPerTaskExecutor(newDefaultThreadFactory());
    }

    children = new EventExecutor[nThreads];

    for (int i = 0; i < nThreads; i ++) {
        boolean success = false;
        try {
            children[i] = newChild(executor, args);
            success = true;
        } catch (Exception e) {
            // TODO: Think about if this is a good exception type
            throw new IllegalStateException("failed to create a child event loop", e);
        } finally {
            if (!success) {
                for (int j = 0; j < i; j ++) {
                    children[j].shutdownGracefully();
                }

                for (int j = 0; j < i; j ++) {
                    EventExecutor e = children[j];
                    try {
                        while (!e.isTerminated()) {
                            e.awaitTermination(Integer.MAX_VALUE, TimeUnit.SECONDS);
                        }
                    } catch (InterruptedException interrupted) {
                        // Let the caller handle the interruption.
                        Thread.currentThread().interrupt();
                        break;
                    }
                }
            }
        }
    }

    chooser = chooserFactory.newChooser(children);

    final FutureListener<Object> terminationListener = new FutureListener<Object>() {
        @Override
        public void operationComplete(Future<Object> future) throws Exception {
            if (terminatedChildren.incrementAndGet() == children.length) {
                terminationFuture.setSuccess(null);
            }
        }
    };

    for (EventExecutor e: children) {
        e.terminationFuture().addListener(terminationListener);
    }

    Set<EventExecutor> childrenSet = new LinkedHashSet<EventExecutor>(children.length);
    Collections.addAll(childrenSet, children);
    readonlyChildren = Collections.unmodifiableSet(childrenSet);
}
Note that a for loop invokes newChild once per thread.
netty-transport-4.1.20.Final-sources.jar!/io/netty/channel/nio/NioEventLoopGroup.java
@Override
protected EventLoop newChild(Executor executor, Object... args) throws Exception {
    return new NioEventLoop(this, executor, (SelectorProvider) args[0],
            ((SelectStrategyFactory) args[1]).newSelectStrategy(), (RejectedExecutionHandler) args[2]);
}
Each child is a NioEventLoop (see the chooser demo below).
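The chooser created by chooserFactory.newChooser(children) is what group.next() consults to hand out these children round-robin. A quick demo:

import io.netty.channel.EventLoop;
import io.netty.channel.nio.NioEventLoopGroup;

public class ChooserDemo {
    public static void main(String[] args) {
        NioEventLoopGroup group = new NioEventLoopGroup(4); // 4 NioEventLoop children
        for (int i = 0; i < 8; i++) {
            EventLoop loop = group.next(); // cycles over the 4 children
            System.out.println(loop);
        }
        group.shutdownGracefully();
    }
}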
NioEventLoop
netty-transport-4.1.20.Final-sources.jar!/io/netty/channel/nio/NioEventLoop.java
protected static final int DEFAULT_MAX_PENDING_TASKS = Math.max(16,
        SystemPropertyUtil.getInt("io.netty.eventLoop.maxPendingTasks", Integer.MAX_VALUE));

NioEventLoop(NioEventLoopGroup parent, Executor executor, SelectorProvider selectorProvider,
             SelectStrategy strategy, RejectedExecutionHandler rejectedExecutionHandler) {
    super(parent, executor, false, DEFAULT_MAX_PENDING_TASKS, rejectedExecutionHandler);
    if (selectorProvider == null) {
        throw new NullPointerException("selectorProvider");
    }
    if (strategy == null) {
        throw new NullPointerException("selectStrategy");
    }
    provider = selectorProvider;
    final SelectorTuple selectorTuple = openSelector();
    selector = selectorTuple.selector;
    unwrappedSelector = selectorTuple.unwrappedSelector;
    selectStrategy = strategy;
}
Note the DEFAULT_MAX_PENDING_TASKS constant, which determines the capacity of the task queue.
If the system property io.netty.eventLoop.maxPendingTasks is set, the maximum of that value and 16 is used; if not, it is Integer.MAX_VALUE.
It is not set here, so the default of Integer.MAX_VALUE applies.
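The Math.max(16, ...) wrapping also means the property can never push the queue capacity below 16. A small check of the expression's behavior:

public class MaxPendingTasksDemo {
    public static void main(String[] args) {
        // Mirrors the DEFAULT_MAX_PENDING_TASKS expression for a few property values.
        System.out.println(Math.max(16, Integer.MAX_VALUE)); // property unset -> 2147483647
        System.out.println(Math.max(16, 8));                 // set to 8      -> 16 (floor applies)
        System.out.println(Math.max(16, 65536));             // set to 65536  -> 65536
    }
}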
NioEventLoop extends SingleThreadEventLoop
netty-transport-4.1.20.Final-sources.jar!/io/netty/channel/SingleThreadEventLoop.java
protected SingleThreadEventLoop(EventLoopGroup parent, Executor executor,
                                boolean addTaskWakesUp, int maxPendingTasks,
                                RejectedExecutionHandler rejectedExecutionHandler) {
    super(parent, executor, addTaskWakesUp, maxPendingTasks, rejectedExecutionHandler);
    tailTasks = newTaskQueue(maxPendingTasks);
}
The parent here is the NioEventLoopGroup.
The executor is the ThreadPerTaskExecutor.
The rejection handler is RejectedExecutionHandlers.reject().
SingleThreadEventLoop extends SingleThreadEventExecutor
/**
 * Create a new instance
 *
 * @param parent            the {@link EventExecutorGroup} which is the parent of this instance and belongs to it
 * @param executor          the {@link Executor} which will be used for executing
 * @param addTaskWakesUp    {@code true} if and only if invocation of {@link #addTask(Runnable)} will wake up the
 *                          executor thread
 * @param maxPendingTasks   the maximum number of pending tasks before new tasks will be rejected.
 * @param rejectedHandler   the {@link RejectedExecutionHandler} to use.
 */
protected SingleThreadEventExecutor(EventExecutorGroup parent, Executor executor,
                                    boolean addTaskWakesUp, int maxPendingTasks,
                                    RejectedExecutionHandler rejectedHandler) {
    super(parent);
    this.addTaskWakesUp = addTaskWakesUp;
    this.maxPendingTasks = Math.max(16, maxPendingTasks);
    this.executor = ObjectUtil.checkNotNull(executor, "executor");
    taskQueue = newTaskQueue(this.maxPendingTasks);
    rejectedExecutionHandler = ObjectUtil.checkNotNull(rejectedHandler, "rejectedHandler");
}

/**
 * Create a new {@link Queue} which will holds the tasks to execute. This default implementation will return a
 * {@link LinkedBlockingQueue} but if your sub-class of {@link SingleThreadEventExecutor} will not do any blocking
 * calls on the this {@link Queue} it may make sense to {@code @Override} this and return some more performant
 * implementation that does not support blocking operations at all.
 */
protected Queue<Runnable> newTaskQueue(int maxPendingTasks) {
    return new LinkedBlockingQueue<Runnable>(maxPendingTasks);
}
Here maxPendingTasks is Integer.MAX_VALUE, so the taskQueue is created with a capacity of Integer.MAX_VALUE.
addTaskWakesUp is false here.
PoolResources.elastic(name)
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/resources/PoolResources.java
/**
 * Create an uncapped {@link PoolResources} to provide automatically for {@link
 * ChannelPool}.
 * <p>An elastic {@link PoolResources} will never wait before opening a new
 * connection. The reuse window is limited but it cannot starve an undetermined volume
 * of clients using it.
 *
 * @param name the channel pool map name
 *
 * @return a new {@link PoolResources} to provide automatically for {@link
 * ChannelPool}
 */
static PoolResources elastic(String name) {
    return new DefaultPoolResources(name, SimpleChannelPool::new);
}
DefaultPoolResources
reactor-netty-0.7.3.RELEASE-sources.jar!/reactor/ipc/netty/resources/DefaultPoolResources.java
final ConcurrentMap<SocketAddress, Pool> channelPools;
final String name;
final PoolFactory provider;

DefaultPoolResources(String name, PoolFactory provider) {
    this.name = name;
    this.provider = provider;
    this.channelPools = PlatformDependent.newConcurrentHashMap();
}
This creates the channelPools map, whose keys are SocketAddress and whose values are Pool (see the sketch below).
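The practical consequence is one pool per remote SocketAddress, created lazily on first use. An illustrative sketch of that keying pattern (not the library's code; the String value stands in for a real Pool):

import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class PoolMapSketch {
    public static void main(String[] args) {
        ConcurrentMap<SocketAddress, String> pools = new ConcurrentHashMap<>();
        SocketAddress addr = new InetSocketAddress("example.com", 80);
        // The first connection to an address creates its pool; later ones reuse it.
        pools.computeIfAbsent(addr, a -> "pool for " + a);
        pools.computeIfAbsent(addr, a -> "never created again for " + a);
        System.out.println(pools); // one entry per remote address
    }
}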
Summary
TcpClient's create boils down to building TcpResources, and TcpResources in turn creates the loopResources and poolResources.
loopResources
The loopResources mainly creates the NioEventLoopGroup and the workerCount NioEventLoop instances under that group (two parameters are involved: the worker thread count and the selector thread count).
- DEFAULT_IO_WORKER_COUNT: if the system property reactor.ipc.netty.workerCount is set, its value is used; otherwise Math.max(Runtime.getRuntime().availableProcessors(), 4)
- DEFAULT_IO_SELECT_COUNT: if the system property reactor.ipc.netty.selectCount is set, its value is used; otherwise -1, meaning no dedicated selector thread
- DEFAULT_MAX_PENDING_TASKS: the capacity of each NioEventLoop's taskQueue, computed as Math.max(16, io.netty.eventLoop.maxPendingTasks), where the property defaults to Integer.MAX_VALUE
- NioEventLoop extends SingleThreadEventLoop, which extends SingleThreadEventExecutor; the executor it delegates to is a ThreadPerTaskExecutor, the rejection handler is RejectedExecutionHandlers.reject(), and the default taskQueue is a LinkedBlockingQueue with capacity Integer.MAX_VALUE
poolResources
This mainly creates the channelPools map, of type ConcurrentMap<SocketAddress, Pool>.
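If the uncapped elastic pool is undesirable, the 0.7.x API also offers a bounded variant that can be plugged in through the client options. A hedged sketch (host, port, and pool size are placeholders, and the options-consumer form of create is assumed):

import reactor.ipc.netty.resources.PoolResources;
import reactor.ipc.netty.tcp.TcpClient;

public class FixedPoolSketch {
    public static void main(String[] args) {
        // PoolResources.fixed caps the channels per remote address, unlike the
        // elastic pool that TcpResources installs by default.
        PoolResources fixedPool = PoolResources.fixed("demo", 50);
        TcpClient client = TcpClient.create(opts -> opts.host("example.com")
                                                        .port(80)
                                                        .poolResources(fixedPool));
    }
}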