This is an automated email from the ASF dual-hosted git repository.
markt pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/tomcat.git
The following commit(s) were added to refs/heads/main by this push:
new d381ef8fca Code clean-up formatting - no functional change
d381ef8fca is described below
commit d381ef8fcaf9ace51b37815071181dce221b678f
Author: Mark Thomas <[email protected]>
AuthorDate: Thu Aug 28 11:06:06 2025 +0100
Code clean-up formatting - no functional change
---
.../org/apache/tomcat/util/threads/LimitLatch.java | 43 +-
.../tomcat/util/threads/ResizableExecutor.java | 3 +-
.../apache/tomcat/util/threads/RetryableQueue.java | 5 +-
.../util/threads/ScheduledThreadPoolExecutor.java | 32 +-
.../util/threads/StopPooledThreadException.java | 4 +-
java/org/apache/tomcat/util/threads/TaskQueue.java | 28 +-
.../org/apache/tomcat/util/threads/TaskThread.java | 10 +-
.../tomcat/util/threads/TaskThreadFactory.java | 3 +-
.../tomcat/util/threads/ThreadPoolExecutor.java | 1568 ++++++++------------
9 files changed, 687 insertions(+), 1009 deletions(-)
diff --git a/java/org/apache/tomcat/util/threads/LimitLatch.java b/java/org/apache/tomcat/util/threads/LimitLatch.java
index e0f2ae94d8..5066f363d1 100644
--- a/java/org/apache/tomcat/util/threads/LimitLatch.java
+++ b/java/org/apache/tomcat/util/threads/LimitLatch.java
@@ -26,9 +26,8 @@ import org.apache.juli.logging.LogFactory;
import org.apache.tomcat.util.res.StringManager;
/**
- * Shared latch that allows the latch to be acquired a limited number of times
- * after which all subsequent requests to acquire the latch will be placed in a
- * FIFO queue until one of the shares is returned.
+ * Shared latch that allows the latch to be acquired a limited number of times after which all subsequent requests to
+ * acquire the latch will be placed in a FIFO queue until one of the shares is returned.
*/
public class LimitLatch {
@@ -71,6 +70,7 @@ public class LimitLatch {
/**
* Instantiates a LimitLatch object with an initial limit.
+ *
* @param limit - maximum number of concurrent acquisitions of this latch
*/
public LimitLatch(long limit) {
@@ -81,6 +81,7 @@ public class LimitLatch {
/**
* Returns the current count for the latch
+ *
* @return the current count for latch
*/
public long getCount() {
@@ -89,6 +90,7 @@ public class LimitLatch {
/**
* Obtain the current limit.
+ *
* @return the limit
*/
public long getLimit() {
@@ -97,13 +99,11 @@ public class LimitLatch {
/**
- * Sets a new limit. If the limit is decreased there may be a period where
- * more shares of the latch are acquired than the limit. In this case no
- * more shares of the latch will be issued until sufficient shares have been
- * returned to reduce the number of acquired shares of the latch to below
- * the new limit. If the limit is increased, threads currently in the queue
- * may not be issued one of the newly available shares until the next
- * request is made for a latch.
+ * Sets a new limit. If the limit is decreased there may be a period where more shares of the latch are acquired
+ * than the limit. In this case no more shares of the latch will be issued until sufficient shares have been
+ * returned to reduce the number of acquired shares of the latch to below the new limit. If the limit is increased,
+ * threads currently in the queue may not be issued one of the newly available shares until the next request is made
+ * for a latch.
*
* @param limit The new limit
*/
@@ -113,33 +113,34 @@ public class LimitLatch {
/**
- * Acquires a shared latch if one is available or waits for one if no shared
- * latch is current available.
+ * Acquires a shared latch if one is available or waits for one if no shared latch is current available.
+ *
* @throws InterruptedException If the current thread is interrupted
*/
public void countUpOrAwait() throws InterruptedException {
if (log.isTraceEnabled()) {
- log.trace("Counting up["+Thread.currentThread().getName()+"]
latch="+getCount());
+ log.trace("Counting up[" + Thread.currentThread().getName() + "]
latch=" + getCount());
}
sync.acquireSharedInterruptibly(1);
}
/**
* Releases a shared latch, making it available for another thread to use.
+ *
* @return the previous counter value
*/
public long countDown() {
sync.releaseShared(0);
long result = getCount();
if (log.isTraceEnabled()) {
- log.trace("Counting down["+Thread.currentThread().getName()+"]
latch="+result);
+ log.trace("Counting down[" + Thread.currentThread().getName() + "]
latch=" + result);
}
return result;
}
/**
- * Releases all waiting threads and causes the {@link #limit} to be ignored
- * until {@link #reset()} is called.
+ * Releases all waiting threads and causes the {@link #limit} to be ignored until {@link #reset()} is called.
+ *
* @return <code>true</code> if release was done
*/
public boolean releaseAll() {
@@ -149,6 +150,7 @@ public class LimitLatch {
/**
* Resets the latch and initializes the shared acquisition counter to zero.
+ *
* @see #releaseAll()
*/
public void reset() {
@@ -157,8 +159,9 @@ public class LimitLatch {
}
/**
- * Returns <code>true</code> if there is at least one thread waiting to
- * acquire the shared lock, otherwise returns <code>false</code>.
+ * Returns <code>true</code> if there is at least one thread waiting to acquire the shared lock, otherwise returns
+ * <code>false</code>.
+ *
* @return <code>true</code> if threads are waiting
*/
public boolean hasQueuedThreads() {
@@ -166,8 +169,8 @@ public class LimitLatch {
}
/**
- * Provide access to the list of threads waiting to acquire this limited
- * shared latch.
+ * Provide access to the list of threads waiting to acquire this limited shared latch.
+ *
* @return a collection of threads
*/
public Collection<Thread> getQueuedThreads() {
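
For readers following the LimitLatch API reshaped above, a minimal usage sketch (the pool size, sleep and thread names are illustrative assumptions, not part of this commit): a thread calls countUpOrAwait() before doing work and countDown() when it is done, so no more than the configured number of shares are held at once.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    import org.apache.tomcat.util.threads.LimitLatch;

    public class LimitLatchSketch {
        public static void main(String[] args) throws Exception {
            LimitLatch limit = new LimitLatch(2);                    // at most two shares held at once
            ExecutorService pool = Executors.newFixedThreadPool(4);
            for (int i = 0; i < 4; i++) {
                pool.execute(() -> {
                    try {
                        limit.countUpOrAwait();                      // blocks while both shares are taken
                        try {
                            Thread.sleep(100);                       // simulate handling a connection
                        } finally {
                            limit.countDown();                       // return the share, waking a queued thread
                        }
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                    }
                });
            }
            pool.shutdown();
        }
    }
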
diff --git a/java/org/apache/tomcat/util/threads/ResizableExecutor.java b/java/org/apache/tomcat/util/threads/ResizableExecutor.java
index f73957e4fd..50cae7b9a1 100644
--- a/java/org/apache/tomcat/util/threads/ResizableExecutor.java
+++ b/java/org/apache/tomcat/util/threads/ResizableExecutor.java
@@ -30,8 +30,7 @@ public interface ResizableExecutor extends Executor {
int getMaxThreads();
/**
- * Returns the approximate number of threads that are actively executing
- * tasks.
+ * Returns the approximate number of threads that are actively executing tasks.
*
* @return the number of threads
*/
diff --git a/java/org/apache/tomcat/util/threads/RetryableQueue.java b/java/org/apache/tomcat/util/threads/RetryableQueue.java
index fe60fe4d9b..357f0acd01 100644
--- a/java/org/apache/tomcat/util/threads/RetryableQueue.java
+++ b/java/org/apache/tomcat/util/threads/RetryableQueue.java
@@ -24,10 +24,9 @@ public interface RetryableQueue<T> extends BlockingQueue<T> {
/**
* Used to add a task to the queue if the task has been rejected by the Executor.
*
- * @param o The task to add to the queue
+ * @param o The task to add to the queue
*
- * @return {@code true} if the task was added to the queue,
- * otherwise {@code false}
+ * @return {@code true} if the task was added to the queue, otherwise {@code false}
*/
boolean force(T o);
}
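
A hedged sketch of the force() contract documented above: when execute() rejects a task, the caller can fall back to force(), which places the task straight onto the queue, throws if the executor is shutting down, and returns false only if the queue itself refuses the element. The executor/queue wiring shown here is an assumption for illustration, not part of this commit.

    import java.util.concurrent.RejectedExecutionException;

    import org.apache.tomcat.util.threads.TaskQueue;
    import org.apache.tomcat.util.threads.ThreadPoolExecutor;

    public class ForceSketch {
        static void submit(ThreadPoolExecutor executor, TaskQueue queue, Runnable task) {
            try {
                executor.execute(task);
            } catch (RejectedExecutionException rex) {
                // Pool and queue both refused the task; force() bypasses the
                // offer() heuristics and fails only if the executor is shut down
                // or the queue is at capacity.
                if (!queue.force(task)) {
                    throw new RejectedExecutionException("queue is full", rex);
                }
            }
        }
    }
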
diff --git a/java/org/apache/tomcat/util/threads/ScheduledThreadPoolExecutor.java b/java/org/apache/tomcat/util/threads/ScheduledThreadPoolExecutor.java
index 45d9c93c50..e25f9ba36b 100644
--- a/java/org/apache/tomcat/util/threads/ScheduledThreadPoolExecutor.java
+++ b/java/org/apache/tomcat/util/threads/ScheduledThreadPoolExecutor.java
@@ -28,8 +28,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
/**
- * Class which wraps a ScheduledExecutorService, while preventing
- * lifecycle and configuration operations.
+ * Class which wraps a ScheduledExecutorService, while preventing lifecycle and configuration operations.
*/
public class ScheduledThreadPoolExecutor implements ScheduledExecutorService {
@@ -37,6 +36,7 @@ public class ScheduledThreadPoolExecutor implements ScheduledExecutorService {
/**
* Builds a wrapper for the given executor.
+ *
* @param executor the wrapped executor
*/
public ScheduledThreadPoolExecutor(ScheduledExecutorService executor) {
@@ -65,8 +65,7 @@ public class ScheduledThreadPoolExecutor implements ScheduledExecutorService {
}
@Override
- public boolean awaitTermination(long timeout, TimeUnit unit)
- throws InterruptedException {
+ public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException {
return executor.awaitTermination(timeout, unit);
}
@@ -86,26 +85,23 @@ public class ScheduledThreadPoolExecutor implements ScheduledExecutorService {
}
@Override
- public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
- throws InterruptedException {
+ public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) throws InterruptedException {
return executor.invokeAll(tasks);
}
@Override
- public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout,
- TimeUnit unit) throws InterruptedException {
+ public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
+ throws InterruptedException {
return executor.invokeAll(tasks, timeout, unit);
}
@Override
- public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
- throws InterruptedException, ExecutionException {
+ public <T> T invokeAny(Collection<? extends Callable<T>> tasks) throws InterruptedException, ExecutionException {
return executor.invokeAny(tasks);
}
@Override
- public <T> T invokeAny(Collection<? extends Callable<T>> tasks,
- long timeout, TimeUnit unit)
+ public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout, TimeUnit unit)
throws InterruptedException, ExecutionException, TimeoutException {
return executor.invokeAny(tasks, timeout, unit);
}
@@ -116,26 +112,22 @@ public class ScheduledThreadPoolExecutor implements ScheduledExecutorService {
}
@Override
- public ScheduledFuture<?> schedule(Runnable command, long delay,
- TimeUnit unit) {
+ public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) {
return executor.schedule(command, delay, unit);
}
@Override
- public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay,
- TimeUnit unit) {
+ public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) {
return executor.schedule(callable, delay, unit);
}
@Override
- public ScheduledFuture<?> scheduleAtFixedRate(Runnable command,
- long initialDelay, long period, TimeUnit unit) {
+ public ScheduledFuture<?> scheduleAtFixedRate(Runnable command, long initialDelay, long period, TimeUnit unit) {
return executor.scheduleAtFixedRate(command, initialDelay, period, unit);
}
@Override
- public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command,
- long initialDelay, long delay, TimeUnit unit) {
+ public ScheduledFuture<?> scheduleWithFixedDelay(Runnable command, long initialDelay, long delay, TimeUnit unit) {
return executor.scheduleWithFixedDelay(command, initialDelay, delay, unit);
}
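
As a small, hedged illustration of the wrapper described above (the underlying scheduler and the task are assumptions, not part of this diff): code handed the wrapper can schedule work, while lifecycle and configuration operations stay with the owner of the underlying service.

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    import org.apache.tomcat.util.threads.ScheduledThreadPoolExecutor;

    public class SchedulerWrapperSketch {
        public static void main(String[] args) throws Exception {
            ScheduledExecutorService underlying = Executors.newSingleThreadScheduledExecutor();
            // Scheduling calls are delegated; lifecycle/configuration calls are not
            // passed through (see the class javadoc above).
            ScheduledExecutorService wrapped = new ScheduledThreadPoolExecutor(underlying);
            wrapped.schedule(() -> System.out.println("tick"), 100, TimeUnit.MILLISECONDS);
            Thread.sleep(200);
            underlying.shutdown(); // only the owner shuts the real scheduler down
        }
    }
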
diff --git a/java/org/apache/tomcat/util/threads/StopPooledThreadException.java b/java/org/apache/tomcat/util/threads/StopPooledThreadException.java
index 1b24a553fa..7045bd1694 100644
--- a/java/org/apache/tomcat/util/threads/StopPooledThreadException.java
+++ b/java/org/apache/tomcat/util/threads/StopPooledThreadException.java
@@ -20,8 +20,8 @@ package org.apache.tomcat.util.threads;
import java.io.Serial;
/**
- * A custom {@link RuntimeException} thrown by the {@link ThreadPoolExecutor}
- * to signal that the thread should be disposed of.
+ * A custom {@link RuntimeException} thrown by the {@link ThreadPoolExecutor} to signal that the thread should be
+ * disposed of.
*/
public class StopPooledThreadException extends RuntimeException {
diff --git a/java/org/apache/tomcat/util/threads/TaskQueue.java b/java/org/apache/tomcat/util/threads/TaskQueue.java
index 1b5f730c6a..cfc878c5ed 100644
--- a/java/org/apache/tomcat/util/threads/TaskQueue.java
+++ b/java/org/apache/tomcat/util/threads/TaskQueue.java
@@ -25,11 +25,9 @@ import java.util.concurrent.TimeUnit;
import org.apache.tomcat.util.res.StringManager;
/**
- * As task queue specifically designed to run with a thread pool executor. The
- * task queue is optimised to properly utilize threads within a thread pool
- * executor. If you use a normal queue, the executor will spawn threads when
- * there are idle threads and you won't be able to force items onto the queue
- * itself.
+ * As task queue specifically designed to run with a thread pool executor. The task queue is optimised to properly
+ * utilize threads within a thread pool executor. If you use a normal queue, the executor will spawn threads when there
+ * are idle threads and you won't be able to force items onto the queue itself.
*/
public class TaskQueue extends LinkedBlockingQueue<Runnable> implements RetryableQueue<Runnable> {
@@ -61,36 +59,35 @@ public class TaskQueue extends LinkedBlockingQueue<Runnable> implements Retryabl
if (parent == null || parent.isShutdown()) {
throw new RejectedExecutionException(sm.getString("taskQueue.notRunning"));
}
- return super.offer(o); //forces the item onto the queue, to be used if the task is rejected
+ return super.offer(o); // forces the item onto the queue, to be used if the task is rejected
}
@Override
public boolean offer(Runnable o) {
- //we can't do any checks
- if (parent==null) {
+ // we can't do any checks
+ if (parent == null) {
return super.offer(o);
}
- //we are maxed out on threads, simply queue the object
+ // we are maxed out on threads, simply queue the object
if (parent.getPoolSizeNoLock() == parent.getMaximumPoolSize()) {
return super.offer(o);
}
- //we have idle threads, just add it to the queue
+ // we have idle threads, just add it to the queue
if (parent.getSubmittedCount() <= parent.getPoolSizeNoLock()) {
return super.offer(o);
}
- //if we have less threads than maximum force creation of a new thread
+ // if we have less threads than maximum force creation of a new thread
if (parent.getPoolSizeNoLock() < parent.getMaximumPoolSize()) {
return false;
}
- //if we reached here, we need to add it to the queue
+ // if we reached here, we need to add it to the queue
return super.offer(o);
}
@Override
- public Runnable poll(long timeout, TimeUnit unit)
- throws InterruptedException {
+ public Runnable poll(long timeout, TimeUnit unit) throws InterruptedException {
Runnable runnable = super.poll(timeout, unit);
if (runnable == null && parent != null) {
// the poll timed out, it gives an opportunity to stop the current
@@ -103,8 +100,7 @@ public class TaskQueue extends LinkedBlockingQueue<Runnable> implements Retryabl
@Override
public Runnable take() throws InterruptedException {
if (parent != null && parent.currentThreadShouldBeStopped()) {
- return poll(parent.getKeepAliveTime(TimeUnit.MILLISECONDS),
- TimeUnit.MILLISECONDS);
+ return poll(parent.getKeepAliveTime(TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS);
// yes, this may return null (in case of timeout) which normally
// does not occur with take()
// but the ThreadPoolExecutor implementation allows this
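
A hedged sketch of how TaskQueue is paired with Tomcat's ThreadPoolExecutor so the offer() heuristics above can see the pool they serve. The constructor arguments and the setParent(...) call follow the way Tomcat's StandardThreadExecutor wires these classes and are assumptions here, not part of this diff.

    import java.util.concurrent.TimeUnit;

    import org.apache.tomcat.util.threads.TaskQueue;
    import org.apache.tomcat.util.threads.ThreadPoolExecutor;

    public class TaskQueueWiringSketch {
        public static void main(String[] args) {
            TaskQueue queue = new TaskQueue();
            ThreadPoolExecutor executor = new ThreadPoolExecutor(
                    10, 200,                         // core and maximum pool sizes
                    60_000, TimeUnit.MILLISECONDS,   // keep-alive for idle threads
                    queue,
                    r -> new Thread(r, "example-exec"));
            queue.setParent(executor);               // lets offer() compare pool size with maximumPoolSize
            executor.execute(() -> System.out.println("runs on a pooled thread"));
            executor.shutdown();
        }
    }
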
diff --git a/java/org/apache/tomcat/util/threads/TaskThread.java b/java/org/apache/tomcat/util/threads/TaskThread.java
index a3743989d9..fbcd108a98 100644
--- a/java/org/apache/tomcat/util/threads/TaskThread.java
+++ b/java/org/apache/tomcat/util/threads/TaskThread.java
@@ -22,7 +22,6 @@ import org.apache.tomcat.util.res.StringManager;
/**
* A Thread implementation that records the time at which it was created.
- *
*/
public class TaskThread extends Thread {
@@ -35,8 +34,7 @@ public class TaskThread extends Thread {
this.creationTime = System.currentTimeMillis();
}
- public TaskThread(ThreadGroup group, Runnable target, String name,
- long stackSize) {
+ public TaskThread(ThreadGroup group, Runnable target, String name, long stackSize) {
super(group, new WrappingRunnable(target), name, stackSize);
this.creationTime = System.currentTimeMillis();
}
@@ -49,15 +47,15 @@ public class TaskThread extends Thread {
}
/**
- * Wraps a {@link Runnable} to swallow any {@link StopPooledThreadException}
- * instead of letting it go and potentially trigger a break in a debugger.
+ * Wraps a {@link Runnable} to swallow any {@link StopPooledThreadException} instead of letting it go and
+ * potentially trigger a break in a debugger.
*/
private record WrappingRunnable(Runnable wrappedRunnable) implements Runnable {
@Override
public void run() {
try {
wrappedRunnable.run();
- } catch(StopPooledThreadException exc) {
+ } catch (StopPooledThreadException exc) {
// expected : we just swallow the exception to avoid disturbing debuggers like eclipse's
if (log.isDebugEnabled()) {
log.debug(sm.getString("taskThread.exiting"), exc);
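
A small sketch of the swallow behaviour described above: a TaskThread wraps its target, so a StopPooledThreadException thrown by the task ends the thread quietly instead of surfacing as an uncaught exception. The three-argument constructor and the message text are used for illustration and are not shown in this diff.

    import org.apache.tomcat.util.threads.StopPooledThreadException;
    import org.apache.tomcat.util.threads.TaskThread;

    public class StopThreadSketch {
        public static void main(String[] args) throws InterruptedException {
            TaskThread worker = new TaskThread(null, () -> {
                System.out.println("working, then asking to be disposed of");
                throw new StopPooledThreadException("renewing thread");
            }, "example-worker");
            worker.start();
            worker.join(); // returns normally; the WrappingRunnable swallowed the exception
        }
    }
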
diff --git a/java/org/apache/tomcat/util/threads/TaskThreadFactory.java b/java/org/apache/tomcat/util/threads/TaskThreadFactory.java
index 71ae99d9c0..335482bd4e 100644
--- a/java/org/apache/tomcat/util/threads/TaskThreadFactory.java
+++ b/java/org/apache/tomcat/util/threads/TaskThreadFactory.java
@@ -20,8 +20,7 @@ import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
/**
- * Simple task thread factory to use to create threads for an executor
- * implementation.
+ * Simple task thread factory to use to create threads for an executor implementation.
*/
public class TaskThreadFactory implements ThreadFactory {
diff --git a/java/org/apache/tomcat/util/threads/ThreadPoolExecutor.java b/java/org/apache/tomcat/util/threads/ThreadPoolExecutor.java
index 3248a4c389..0c0f93b5f6 100644
--- a/java/org/apache/tomcat/util/threads/ThreadPoolExecutor.java
+++ b/java/org/apache/tomcat/util/threads/ThreadPoolExecutor.java
@@ -45,244 +45,134 @@ import java.util.concurrent.locks.ReentrantLock;
import org.apache.tomcat.util.res.StringManager;
/**
- * An {@link java.util.concurrent.ExecutorService}
- * that executes each submitted task using
- * one of possibly several pooled threads, normally configured
- * using {@link Executors} factory methods.
- *
- * <p>Thread pools address two different problems: they usually
- * provide improved performance when executing large numbers of
- * asynchronous tasks, due to reduced per-task invocation overhead,
- * and they provide a means of bounding and managing the resources,
- * including threads, consumed when executing a collection of tasks.
- * Each {@code ThreadPoolExecutor} also maintains some basic
- * statistics, such as the number of completed tasks.
- *
- * <p>To be useful across a wide range of contexts, this class
- * provides many adjustable parameters and extensibility
- * hooks. However, programmers are urged to use the more convenient
- * {@link Executors} factory methods {@link
- * Executors#newCachedThreadPool} (unbounded thread pool, with
- * automatic thread reclamation), {@link Executors#newFixedThreadPool}
- * (fixed size thread pool) and {@link
- * Executors#newSingleThreadExecutor} (single background thread), that
- * preconfigure settings for the most common usage
- * scenarios. Otherwise, use the following guide when manually
- * configuring and tuning this class:
- *
+ * An {@link java.util.concurrent.ExecutorService} that executes each submitted task using one of possibly several
+ * pooled threads, normally configured using {@link Executors} factory methods.
+ * <p>
+ * Thread pools address two different problems: they usually provide improved performance when executing large numbers
+ * of asynchronous tasks, due to reduced per-task invocation overhead, and they provide a means of bounding and managing
+ * the resources, including threads, consumed when executing a collection of tasks. Each {@code ThreadPoolExecutor} also
+ * maintains some basic statistics, such as the number of completed tasks.
+ * <p>
+ * To be useful across a wide range of contexts, this class provides many adjustable parameters and extensibility hooks.
+ * However, programmers are urged to use the more convenient {@link Executors} factory methods
+ * {@link Executors#newCachedThreadPool} (unbounded thread pool, with automatic thread reclamation),
+ * {@link Executors#newFixedThreadPool} (fixed size thread pool) and {@link Executors#newSingleThreadExecutor} (single
+ * background thread), that preconfigure settings for the most common usage scenarios. Otherwise, use the following
+ * guide when manually configuring and tuning this class:
* <dl>
- *
* <dt>Core and maximum pool sizes</dt>
- *
- * <dd>A {@code ThreadPoolExecutor} will automatically adjust the
- * pool size (see {@link #getPoolSize})
- * according to the bounds set by
- * corePoolSize (see {@link #getCorePoolSize}) and
- * maximumPoolSize (see {@link #getMaximumPoolSize}).
- *
- * When a new task is submitted in method {@link #execute(Runnable)},
- * if fewer than corePoolSize threads are running, a new thread is
- * created to handle the request, even if other worker threads are
- * idle. Else if fewer than maximumPoolSize threads are running, a
- * new thread will be created to handle the request only if the queue
- * is full. By setting corePoolSize and maximumPoolSize the same, you
- * create a fixed-size thread pool. By setting maximumPoolSize to an
- * essentially unbounded value such as {@code Integer.MAX_VALUE}, you
- * allow the pool to accommodate an arbitrary number of concurrent
- * tasks. Most typically, core and maximum pool sizes are set only
- * upon construction, but they may also be changed dynamically using
- * {@link #setCorePoolSize} and {@link #setMaximumPoolSize}. </dd>
- *
+ * <dd>A {@code ThreadPoolExecutor} will automatically adjust the pool size (see {@link #getPoolSize}) according to the
+ * bounds set by corePoolSize (see {@link #getCorePoolSize}) and maximumPoolSize (see {@link #getMaximumPoolSize}). When
+ * a new task is submitted in method {@link #execute(Runnable)}, if fewer than corePoolSize threads are running, a new
+ * thread is created to handle the request, even if other worker threads are idle. Else if fewer than maximumPoolSize
+ * threads are running, a new thread will be created to handle the request only if the queue is full. By setting
+ * corePoolSize and maximumPoolSize the same, you create a fixed-size thread pool. By setting maximumPoolSize to an
+ * essentially unbounded value such as {@code Integer.MAX_VALUE}, you allow the pool to accommodate an arbitrary number
+ * of concurrent tasks. Most typically, core and maximum pool sizes are set only upon construction, but they may also be
+ * changed dynamically using {@link #setCorePoolSize} and {@link #setMaximumPoolSize}.</dd>
* <dt>On-demand construction</dt>
- *
- * <dd>By default, even core threads are initially created and
- * started only when new tasks arrive, but this can be overridden
- * dynamically using method {@link #prestartCoreThread} or {@link
- * #prestartAllCoreThreads}. You probably want to prestart threads if
- * you construct the pool with a non-empty queue. </dd>
- *
+ * <dd>By default, even core threads are initially created and started only when new tasks arrive, but this can be
+ * overridden dynamically using method {@link #prestartCoreThread} or {@link #prestartAllCoreThreads}. You probably want
+ * to prestart threads if you construct the pool with a non-empty queue.</dd>
* <dt>Creating new threads</dt>
- *
- * <dd>New threads are created using a {@link ThreadFactory}. If not
- * otherwise specified, a {@link Executors#defaultThreadFactory} is
- * used, that creates threads to all be in the same {@link
- * ThreadGroup} and with the same {@code NORM_PRIORITY} priority and
- * non-daemon status. By supplying a different ThreadFactory, you can
- * alter the thread's name, thread group, priority, daemon status,
- * etc. If a {@code ThreadFactory} fails to create a thread when asked
- * by returning null from {@code newThread}, the executor will
- * continue, but might not be able to execute any tasks. Threads
- * should possess the "modifyThread" {@code RuntimePermission}. If
- * worker threads or other threads using the pool do not possess this
- * permission, service may be degraded: configuration changes may not
- * take effect in a timely manner, and a shutdown pool may remain in a
- * state in which termination is possible but not completed.</dd>
- *
+ * <dd>New threads are created using a {@link ThreadFactory}. If not otherwise specified, a
+ * {@link Executors#defaultThreadFactory} is used, that creates threads to all be in the same {@link ThreadGroup} and
+ * with the same {@code NORM_PRIORITY} priority and non-daemon status. By supplying a different ThreadFactory, you can
+ * alter the thread's name, thread group, priority, daemon status, etc. If a {@code ThreadFactory} fails to create a
+ * thread when asked by returning null from {@code newThread}, the executor will continue, but might not be able to
+ * execute any tasks. Threads should possess the "modifyThread" {@code RuntimePermission}. If worker threads or other
+ * threads using the pool do not possess this permission, service may be degraded: configuration changes may not take
+ * effect in a timely manner, and a shutdown pool may remain in a state in which termination is possible but not
+ * completed.</dd>
* <dt>Keep-alive times</dt>
- *
- * <dd>If the pool currently has more than corePoolSize threads,
- * excess threads will be terminated if they have been idle for more
- * than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}).
- * This provides a means of reducing resource consumption when the
- * pool is not being actively used. If the pool becomes more active
- * later, new threads will be constructed. This parameter can also be
- * changed dynamically using method {@link #setKeepAliveTime(long,
- * TimeUnit)}. Using a value of {@code Long.MAX_VALUE} {@link
- * TimeUnit#NANOSECONDS} effectively disables idle threads from ever
- * terminating prior to shut down. By default, the keep-alive policy
- * applies only when there are more than corePoolSize threads, but
- * method {@link #allowCoreThreadTimeOut(boolean)} can be used to
- * apply this time-out policy to core threads as well, so long as the
- * keepAliveTime value is non-zero. </dd>
- *
+ * <dd>If the pool currently has more than corePoolSize threads, excess threads will be terminated if they have been
+ * idle for more than the keepAliveTime (see {@link #getKeepAliveTime(TimeUnit)}). This provides a means of reducing
+ * resource consumption when the pool is not being actively used. If the pool becomes more active later, new threads
+ * will be constructed. This parameter can also be changed dynamically using method
+ * {@link #setKeepAliveTime(long, TimeUnit)}. Using a value of {@code Long.MAX_VALUE} {@link TimeUnit#NANOSECONDS}
+ * effectively disables idle threads from ever terminating prior to shut down. By default, the keep-alive policy applies
+ * only when there are more than corePoolSize threads, but method {@link #allowCoreThreadTimeOut(boolean)} can be used
+ * to apply this time-out policy to core threads as well, so long as the keepAliveTime value is non-zero.</dd>
* <dt>Queuing</dt>
- *
- * <dd>Any {@link BlockingQueue} may be used to transfer and hold
- * submitted tasks. The use of this queue interacts with pool sizing:
- *
+ * <dd>Any {@link BlockingQueue} may be used to transfer and hold submitted tasks. The use of this queue interacts with
+ * pool sizing:
* <ul>
- *
- * <li>If fewer than corePoolSize threads are running, the Executor
- * always prefers adding a new thread
- * rather than queuing.
- *
- * <li>If corePoolSize or more threads are running, the Executor
- * always prefers queuing a request rather than adding a new
- * thread.
- *
- * <li>If a request cannot be queued, a new thread is created unless
- * this would exceed maximumPoolSize, in which case, the task will be
- * rejected.
- *
+ * <li>If fewer than corePoolSize threads are running, the Executor always prefers adding a new thread rather than
+ * queuing.
+ * <li>If corePoolSize or more threads are running, the Executor always prefers queuing a request rather than adding a
+ * new thread.
+ * <li>If a request cannot be queued, a new thread is created unless this would exceed maximumPoolSize, in which case,
+ * the task will be rejected.
* </ul>
- *
* There are three general strategies for queuing:
* <ol>
- *
- * <li><em> Direct handoffs.</em> A good default choice for a work
- * queue is a {@link java.util.concurrent.SynchronousQueue}
- * that hands off tasks to threads
- * without otherwise holding them. Here, an attempt to queue a task
- * will fail if no threads are immediately available to run it, so a
- * new thread will be constructed. This policy avoids lockups when
- * handling sets of requests that might have internal dependencies.
- * Direct handoffs generally require unbounded maximumPoolSizes to
- * avoid rejection of new submitted tasks. This in turn admits the
- * possibility of unbounded thread growth when commands continue to
- * arrive faster on average than they can be processed.
- *
- * <li><em> Unbounded queues.</em> Using an unbounded queue (for
- * example a {@link java.util.concurrent.LinkedBlockingQueue}
- * without a predefined
- * capacity) will cause new tasks to wait in the queue when all
- * corePoolSize threads are busy. Thus, no more than corePoolSize
- * threads will ever be created. (And the value of the maximumPoolSize
- * therefore doesn't have any effect.) This may be appropriate when
- * each task is completely independent of others, so tasks cannot
- * affect each others execution; for example, in a web page server.
- * While this style of queuing can be useful in smoothing out
- * transient bursts of requests, it admits the possibility of
- * unbounded work queue growth when commands continue to arrive faster
- * on average than they can be processed.
- *
- * <li><em>Bounded queues.</em> A bounded queue (for example, an
- * {@link java.util.concurrent.ArrayBlockingQueue})
- * helps prevent resource exhaustion when
- * used with finite maximumPoolSizes, but can be more difficult to
- * tune and control. Queue sizes and maximum pool sizes may be traded
- * off for each other: Using large queues and small pools minimizes
- * CPU usage, OS resources, and context-switching overhead, but can
- * lead to artificially low throughput. If tasks frequently block (for
- * example if they are I/O bound), a system may be able to schedule
- * time for more threads than you otherwise allow. Use of small queues
- * generally requires larger pool sizes, which keeps CPUs busier but
- * may encounter unacceptable scheduling overhead, which also
- * decreases throughput.
- *
+ * <li><em> Direct handoffs.</em> A good default choice for a work queue is a
+ * {@link java.util.concurrent.SynchronousQueue} that hands off tasks to threads without otherwise holding them. Here,
+ * an attempt to queue a task will fail if no threads are immediately available to run it, so a new thread will be
+ * constructed. This policy avoids lockups when handling sets of requests that might have internal dependencies. Direct
+ * handoffs generally require unbounded maximumPoolSizes to avoid rejection of new submitted tasks. This in turn admits
+ * the possibility of unbounded thread growth when commands continue to arrive faster on average than they can be
+ * processed.
+ * <li><em> Unbounded queues.</em> Using an unbounded queue (for example a
+ * {@link java.util.concurrent.LinkedBlockingQueue} without a predefined capacity) will cause new tasks to wait in the
+ * queue when all corePoolSize threads are busy. Thus, no more than corePoolSize threads will ever be created. (And the
+ * value of the maximumPoolSize therefore doesn't have any effect.) This may be appropriate when each task is completely
+ * independent of others, so tasks cannot affect each others execution; for example, in a web page server. While this
+ * style of queuing can be useful in smoothing out transient bursts of requests, it admits the possibility of unbounded
+ * work queue growth when commands continue to arrive faster on average than they can be processed.
+ * <li><em>Bounded queues.</em> A bounded queue (for example, an {@link java.util.concurrent.ArrayBlockingQueue}) helps
+ * prevent resource exhaustion when used with finite maximumPoolSizes, but can be more difficult to tune and control.
+ * Queue sizes and maximum pool sizes may be traded off for each other: Using large queues and small pools minimizes CPU
+ * usage, OS resources, and context-switching overhead, but can lead to artificially low throughput. If tasks frequently
+ * block (for example if they are I/O bound), a system may be able to schedule time for more threads than you otherwise
+ * allow. Use of small queues generally requires larger pool sizes, which keeps CPUs busier but may encounter
+ * unacceptable scheduling overhead, which also decreases throughput.
* </ol>
- *
* </dd>
- *
* <dt>Rejected tasks</dt>
- *
- * <dd>New tasks submitted in method {@link #execute(Runnable)} will be
- * <em>rejected</em> when the Executor has been shut down, and also when
- * the Executor uses finite bounds for both maximum threads and work queue
- * capacity, and is saturated. In either case, the {@code execute} method
- * invokes the {@link
- * RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)}
- * method of its {@link RejectedExecutionHandler}. Four predefined handler
- * policies are provided:
- *
+ * <dd>New tasks submitted in method {@link #execute(Runnable)} will be <em>rejected</em> when the Executor has been
+ * shut down, and also when the Executor uses finite bounds for both maximum threads and work queue capacity, and is
+ * saturated. In either case, the {@code execute} method invokes the
+ * {@link RejectedExecutionHandler#rejectedExecution(Runnable, ThreadPoolExecutor)} method of its
+ * {@link RejectedExecutionHandler}. Four predefined handler policies are provided:
* <ol>
- *
- * <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler
- * throws a runtime {@link RejectedExecutionException} upon rejection.
- *
- * <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread
- * that invokes {@code execute} itself runs the task. This provides a
- * simple feedback control mechanism that will slow down the rate that
- * new tasks are submitted.
- *
- * <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that cannot
- * be executed is simply dropped. This policy is designed only for
- * those rare cases in which task completion is never relied upon.
- *
- * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the
- * executor is not shut down, the task at the head of the work queue
- * is dropped, and then execution is retried (which can fail again,
- * causing this to be repeated.) This policy is rarely acceptable. In
- * nearly all cases, you should also cancel the task to cause an
- * exception in any component waiting for its completion, and/or log
- * the failure, as illustrated in {@link
- * ThreadPoolExecutor.DiscardOldestPolicy} documentation.
- *
+ * <li>In the default {@link ThreadPoolExecutor.AbortPolicy}, the handler throws a runtime
+ * {@link RejectedExecutionException} upon rejection.
+ * <li>In {@link ThreadPoolExecutor.CallerRunsPolicy}, the thread that invokes {@code execute} itself runs the task.
+ * This provides a simple feedback control mechanism that will slow down the rate that new tasks are submitted.
+ * <li>In {@link ThreadPoolExecutor.DiscardPolicy}, a task that cannot be executed is simply dropped. This policy is
+ * designed only for those rare cases in which task completion is never relied upon.
+ * <li>In {@link ThreadPoolExecutor.DiscardOldestPolicy}, if the executor is not shut down, the task at the head of the
+ * work queue is dropped, and then execution is retried (which can fail again, causing this to be repeated.) This policy
+ * is rarely acceptable. In nearly all cases, you should also cancel the task to cause an exception in any component
+ * waiting for its completion, and/or log the failure, as illustrated in {@link ThreadPoolExecutor.DiscardOldestPolicy}
+ * documentation.
* </ol>
- *
- * It is possible to define and use other kinds of {@link
- * RejectedExecutionHandler} classes. Doing so requires some care
- * especially when policies are designed to work only under particular
- * capacity or queuing policies. </dd>
- *
+ * It is possible to define and use other kinds of {@link RejectedExecutionHandler} classes. Doing so requires some care
+ * especially when policies are designed to work only under particular capacity or queuing policies.</dd>
* <dt>Hook methods</dt>
- *
- * <dd>This class provides {@code protected} overridable
- * {@link #beforeExecute(Thread, Runnable)} and
- * {@link #afterExecute(Runnable, Throwable)} methods that are called
- * before and after execution of each task. These can be used to
- * manipulate the execution environment; for example, reinitializing
- * ThreadLocals, gathering statistics, or adding log entries.
- * Additionally, method {@link #terminated} can be overridden to perform
- * any special processing that needs to be done once the Executor has
- * fully terminated.
- *
- * <p>If hook, callback, or BlockingQueue methods throw exceptions,
- * internal worker threads may in turn fail, abruptly terminate, and
- * possibly be replaced.</dd>
- *
+ * <dd>This class provides {@code protected} overridable {@link #beforeExecute(Thread, Runnable)} and
+ * {@link #afterExecute(Runnable, Throwable)} methods that are called before and after execution of each task. These can
+ * be used to manipulate the execution environment; for example, reinitializing ThreadLocals, gathering statistics, or
+ * adding log entries. Additionally, method {@link #terminated} can be overridden to perform any special processing that
+ * needs to be done once the Executor has fully terminated.
+ * <p>
+ * If hook, callback, or BlockingQueue methods throw exceptions, internal worker threads may in turn fail, abruptly
+ * terminate, and possibly be replaced.</dd>
* <dt>Queue maintenance</dt>
- *
- * <dd>Method {@link #getQueue()} allows access to the work queue
- * for purposes of monitoring and debugging. Use of this method for
- * any other purpose is strongly discouraged. Two supplied methods,
- * {@link #remove(Runnable)} and {@link #purge} are available to
- * assist in storage reclamation when large numbers of queued tasks
- * become cancelled.</dd>
- *
+ * <dd>Method {@link #getQueue()} allows access to the work queue for purposes of monitoring and debugging. Use of this
+ * method for any other purpose is strongly discouraged. Two supplied methods, {@link #remove(Runnable)} and
+ * {@link #purge} are available to assist in storage reclamation when large numbers of queued tasks become
+ * cancelled.</dd>
* <dt>Reclamation</dt>
- *
- * <dd>A pool that is no longer referenced in a program <em>AND</em>
- * has no remaining threads may be reclaimed (garbage collected)
- * without being explicitly shutdown. You can configure a pool to
- * allow all unused threads to eventually die by setting appropriate
- * keep-alive times, using a lower bound of zero core threads and/or
- * setting {@link #allowCoreThreadTimeOut(boolean)}. </dd>
- *
+ * <dd>A pool that is no longer referenced in a program <em>AND</em> has no remaining threads may be reclaimed (garbage
+ * collected) without being explicitly shutdown. You can configure a pool to allow all unused threads to eventually die
+ * by setting appropriate keep-alive times, using a lower bound of zero core threads and/or setting
+ * {@link #allowCoreThreadTimeOut(boolean)}.</dd>
* </dl>
- *
- * <p><b>Extension example.</b> Most extensions of this class
- * override one or more of the protected hook methods. For example,
- * here is a subclass that adds a simple pause/resume feature:
+ * <p>
+ * <b>Extension example.</b> Most extensions of this class override one or more of the protected hook methods. For
+ * example, here is a subclass that adds a simple pause/resume feature:
*
* <pre> {@code
* class PausableThreadPoolExecutor extends ThreadPoolExecutor {
@@ -325,6 +215,7 @@ import org.apache.tomcat.util.res.StringManager;
* }}</pre>
*
* @since 1.5
+ *
* @author Doug Lea
*/
public class ThreadPoolExecutor extends AbstractExecutorService {
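
Before the internals, a brief, hedged sketch tying the sizing and queuing rules above together: core == max gives a fixed-size pool, a bounded queue caps waiting work, and once both are saturated the default rejection handler surfaces a RejectedExecutionException. The constructor form and the values are illustrative assumptions, not taken from this commit.

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.TimeUnit;

    import org.apache.tomcat.util.threads.ThreadPoolExecutor;

    public class SizingSketch {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    4, 4,                            // corePoolSize == maximumPoolSize -> fixed-size pool
                    60, TimeUnit.SECONDS,
                    new ArrayBlockingQueue<>(100));  // bounded queue caps waiting work
            try {
                pool.execute(() -> System.out.println("accepted"));
            } catch (RejectedExecutionException e) {
                // thrown once both the pool and the queue are saturated
            } finally {
                pool.shutdown();
            }
        }
    }
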
@@ -332,81 +223,68 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
protected static final StringManager sm = StringManager.getManager(ThreadPoolExecutor.class);
/**
- * The main pool control state, ctl, is an atomic integer packing
- * two conceptual fields:
+ * The main pool control state, ctl, is an atomic integer packing two conceptual fields:
* <ul>
- * <li>workerCount, indicating the effective number of threads</li>
- * <li>runState, indicating whether running, shutting down etc</li>
+ * <li>workerCount, indicating the effective number of threads</li>
+ * <li>runState, indicating whether running, shutting down etc</li>
* </ul>
- * In order to pack them into one int, we limit workerCount to
- * (2^29)-1 (about 500 million) threads rather than (2^31)-1 (2
- * billion) otherwise representable. If this is ever an issue in
- * the future, the variable can be changed to be an AtomicLong,
- * and the shift/mask constants below adjusted. But until the need
- * arises, this code is a bit faster and simpler using an int.
+ * In order to pack them into one int, we limit workerCount to (2^29)-1 (about 500 million) threads rather than
+ * (2^31)-1 (2 billion) otherwise representable. If this is ever an issue in the future, the variable can be changed
+ * to be an AtomicLong, and the shift/mask constants below adjusted. But until the need arises, this code is a bit
+ * faster and simpler using an int.
* <p>
- * The workerCount is the number of workers that have been
- * permitted to start and not permitted to stop. The value may be
- * transiently different from the actual number of live threads,
- * for example when a ThreadFactory fails to create a thread when
- * asked, and when exiting threads are still performing
- * bookkeeping before terminating. The user-visible pool size is
- * reported as the current size of the workers set.
+ * The workerCount is the number of workers that have been permitted to start and not permitted to stop. The value
+ * may be transiently different from the actual number of live threads, for example when a ThreadFactory fails to
+ * create a thread when asked, and when exiting threads are still performing bookkeeping before terminating. The
+ * user-visible pool size is reported as the current size of the workers set.
* <p>
* The runState provides the main lifecycle control, taking on values:
* <ul>
- * <li>RUNNING: Accept new tasks and process queued tasks</li>
- * <li>SHUTDOWN: Don't accept new tasks, but process queued tasks</li>
- * <li>STOP: Don't accept new tasks, don't process queued tasks,
- * and interrupt in-progress tasks</li>
- * <li>TIDYING: All tasks have terminated, workerCount is zero,
- * the thread transitioning to state TIDYING
- * will run the terminated() hook method</li>
- * <li>TERMINATED: terminated() has completed</li>
+ * <li>RUNNING: Accept new tasks and process queued tasks</li>
+ * <li>SHUTDOWN: Don't accept new tasks, but process queued tasks</li>
+ * <li>STOP: Don't accept new tasks, don't process queued tasks, and interrupt in-progress tasks</li>
+ * <li>TIDYING: All tasks have terminated, workerCount is zero, the thread transitioning to state TIDYING will run
+ * the terminated() hook method</li>
+ * <li>TERMINATED: terminated() has completed</li>
* </ul>
- * The numerical order among these values matters, to allow
- * ordered comparisons. The runState monotonically increases over
- * time, but need not hit each state. The transitions are:
+ * The numerical order among these values matters, to allow ordered comparisons. The runState monotonically
+ * increases over time, but need not hit each state. The transitions are:
* <ul>
- * <li>RUNNING -> SHUTDOWN
- * On invocation of shutdown()</li>
- * <li>(RUNNING or SHUTDOWN) -> STOP
- * On invocation of shutdownNow()</li>
- * <li>SHUTDOWN -> TIDYING
- * When both queue and pool are empty</li>
- * <li>STOP -> TIDYING
- * When pool is empty</li>
- * <li>TIDYING -> TERMINATED
- * When the terminated() hook method has completed</li>
+ * <li>RUNNING -> SHUTDOWN On invocation of shutdown()</li>
+ * <li>(RUNNING or SHUTDOWN) -> STOP On invocation of shutdownNow()</li>
+ * <li>SHUTDOWN -> TIDYING When both queue and pool are empty</li>
+ * <li>STOP -> TIDYING When pool is empty</li>
+ * <li>TIDYING -> TERMINATED When the terminated() hook method has completed</li>
* </ul>
- * Threads waiting in awaitTermination() will return when the
- * state reaches TERMINATED.
+ * Threads waiting in awaitTermination() will return when the state reaches TERMINATED.
* <p>
- * Detecting the transition from SHUTDOWN to TIDYING is less
- * straightforward than you'd like because the queue may become
- * empty after non-empty and vice versa during SHUTDOWN state, but
- * we can only terminate if, after seeing that it is empty, we see
- * that workerCount is 0 (which sometimes entails a recheck -- see
- * below).
+ * Detecting the transition from SHUTDOWN to TIDYING is less straightforward than you'd like because the queue may
+ * become empty after non-empty and vice versa during SHUTDOWN state, but we can only terminate if, after seeing
+ * that it is empty, we see that workerCount is 0 (which sometimes entails a recheck -- see below).
*/
private final AtomicInteger ctl = new AtomicInteger(ctlOf(RUNNING, 0));
private static final int COUNT_BITS = Integer.SIZE - 3;
private static final int COUNT_MASK = (1 << COUNT_BITS) - 1;
// runState is stored in the high-order bits
- private static final int RUNNING = -1 << COUNT_BITS;
- private static final int SHUTDOWN = 0;
- private static final int STOP = 1 << COUNT_BITS;
- private static final int TIDYING = 2 << COUNT_BITS;
- private static final int TERMINATED = 3 << COUNT_BITS;
+ private static final int RUNNING = -1 << COUNT_BITS;
+ private static final int SHUTDOWN = 0;
+ private static final int STOP = 1 << COUNT_BITS;
+ private static final int TIDYING = 2 << COUNT_BITS;
+ private static final int TERMINATED = 3 << COUNT_BITS;
// Packing and unpacking ctl
- private static int workerCountOf(int c) { return c & COUNT_MASK; }
- private static int ctlOf(int rs, int wc) { return rs | wc; }
+ private static int workerCountOf(int c) {
+ return c & COUNT_MASK;
+ }
+
+ private static int ctlOf(int rs, int wc) {
+ return rs | wc;
+ }
/*
- * Bit field accessors that don't require unpacking ctl.
- * These depend on the bit layout and on workerCount being never negative.
+ * Bit field accessors that don't require unpacking ctl. These depend on the bit layout and on workerCount being
+ * never negative.
*/
private static boolean runStateLessThan(int c, int s) {
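
A standalone worked example of the ctl packing described in the comment above: the high three bits carry runState, the low 29 bits carry workerCount. The constants and helpers are re-implemented here purely for illustration (runStateOf is reconstructed and not shown in this hunk).

    public class CtlPackingDemo {
        static final int COUNT_BITS = Integer.SIZE - 3;        // 29
        static final int COUNT_MASK = (1 << COUNT_BITS) - 1;   // low 29 bits
        static final int RUNNING    = -1 << COUNT_BITS;        // high 3 bits set
        static final int SHUTDOWN   = 0;

        static int ctlOf(int rs, int wc)  { return rs | wc; }
        static int workerCountOf(int c)   { return c & COUNT_MASK; }
        static int runStateOf(int c)      { return c & ~COUNT_MASK; }

        public static void main(String[] args) {
            int c = ctlOf(RUNNING, 42);                                     // RUNNING pool with 42 workers
            System.out.println(workerCountOf(c));                           // 42
            System.out.println(runStateOf(c) == RUNNING);                   // true
            System.out.println(runStateOf(ctlOf(SHUTDOWN, 3)) == SHUTDOWN); // true
        }
    }
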
@@ -436,45 +314,35 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
}
/**
- * Decrements the workerCount field of ctl. This is called only on
- * abrupt termination of a thread (see processWorkerExit). Other
- * decrements are performed within getTask.
+ * Decrements the workerCount field of ctl. This is called only on abrupt termination of a thread (see
+ * processWorkerExit). Other decrements are performed within getTask.
*/
private void decrementWorkerCount() {
ctl.addAndGet(-1);
}
/**
- * The queue used for holding tasks and handing off to worker
- * threads. We do not require that workQueue.poll() returning
- * null necessarily means that workQueue.isEmpty(), so rely
- * solely on isEmpty to see if the queue is empty (which we must
- * do for example when deciding whether to transition from
- * SHUTDOWN to TIDYING). This accommodates special-purpose
- * queues such as DelayQueues for which poll() is allowed to
- * return null even if it may later return non-null when delays
- * expire.
+ * The queue used for holding tasks and handing off to worker threads. We do not require that workQueue.poll()
+ * returning null necessarily means that workQueue.isEmpty(), so rely solely on isEmpty to see if the queue is empty
+ * (which we must do for example when deciding whether to transition from SHUTDOWN to TIDYING). This accommodates
+ * special-purpose queues such as DelayQueues for which poll() is allowed to return null even if it may later return
+ * non-null when delays expire.
*/
private final BlockingQueue<Runnable> workQueue;
/**
- * Lock held on access to workers set and related bookkeeping.
- * While we could use a concurrent set of some sort, it turns out
- * to be generally preferable to use a lock. Among the reasons is
- * that this serializes interruptIdleWorkers, which avoids
- * unnecessary interrupt storms, especially during shutdown.
- * Otherwise, exiting threads would concurrently interrupt those
- * that have not yet interrupted. It also simplifies some of the
- * associated statistics bookkeeping of largestPoolSize etc. We
- * also hold mainLock on shutdown and shutdownNow, for the sake of
- * ensuring workers set is stable while separately checking
- * permission to interrupt and actually interrupting.
+ * Lock held on access to workers set and related bookkeeping. While we could use a concurrent set of some sort, it
+ * turns out to be generally preferable to use a lock. Among the reasons is that this serializes
+ * interruptIdleWorkers, which avoids unnecessary interrupt storms, especially during shutdown. Otherwise, exiting
+ * threads would concurrently interrupt those that have not yet interrupted. It also simplifies some of the
+ * associated statistics bookkeeping of largestPoolSize etc. We also hold mainLock on shutdown and shutdownNow, for
+ * the sake of ensuring workers set is stable while separately checking permission to interrupt and actually
+ * interrupting.
*/
private final ReentrantLock mainLock = new ReentrantLock();
/**
- * Set containing all worker threads in pool. Accessed only when
- * holding mainLock.
+ * Set containing all worker threads in pool. Accessed only when holding mainLock.
*/
private final HashSet<Worker> workers = new HashSet<>();
@@ -484,38 +352,33 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
private final Condition termination = mainLock.newCondition();
/**
- * Tracks largest attained pool size. Accessed only under
- * mainLock.
+ * Tracks largest attained pool size. Accessed only under mainLock.
*/
private int largestPoolSize;
/**
- * Counter for completed tasks. Updated only on termination of
- * worker threads. Accessed only under mainLock.
+ * Counter for completed tasks. Updated only on termination of worker threads. Accessed only under mainLock.
*/
private long completedTaskCount;
/**
- * The number of tasks submitted but not yet finished. This includes tasks
- * in the queue and tasks that have been handed to a worker thread but the
- * latter did not start executing the task yet.
- * This number is always greater or equal to {@link #getActiveCount()}.
+ * The number of tasks submitted but not yet finished. This includes tasks in the queue and tasks that have been
+ * handed to a worker thread but the latter did not start executing the task yet. This number is always greater or
+ * equal to {@link #getActiveCount()}.
*/
private final AtomicInteger submittedCount = new AtomicInteger(0);
private final AtomicLong lastContextStoppedTime = new AtomicLong(0L);
/**
- * Most recent time in ms when a thread decided to kill itself to avoid
- * potential memory leaks. Useful to throttle the rate of renewals of
- * threads.
+ * Most recent time in ms when a thread decided to kill itself to avoid potential memory leaks. Useful to throttle
+ * the rate of renewals of threads.
*/
private final AtomicLong lastTimeThreadKilledItself = new AtomicLong(0L);
/*
- * All user control parameters are declared as volatiles so that
- * ongoing actions are based on freshest values, but without need
- * for locking, since no internal invariants depend on them
- * changing synchronously with respect to other actions.
+ * All user control parameters are declared as volatiles so that ongoing actions are based on freshest values, but
+ * without need for locking, since no internal invariants depend on them changing synchronously with respect to
+ * other actions.
*/
/**
@@ -524,21 +387,15 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
private volatile long threadRenewalDelay = Constants.DEFAULT_THREAD_RENEWAL_DELAY;
/**
- * Factory for new threads. All threads are created using this
- * factory (via method addWorker). All callers must be prepared
- * for addWorker to fail, which may reflect a system or user's
- * policy limiting the number of threads. Even though it is not
- * treated as an error, failure to create threads may result in
- * new tasks being rejected or existing ones remaining stuck in
- * the queue.
+ * Factory for new threads. All threads are created using this factory
(via method addWorker). All callers must be
+ * prepared for addWorker to fail, which may reflect a system or user's
policy limiting the number of threads. Even
+ * though it is not treated as an error, failure to create threads may
result in new tasks being rejected or
+ * existing ones remaining stuck in the queue.
* <p>
- * We go further and preserve pool invariants even in the face of
- * errors such as OutOfMemoryError, that might be thrown while
- * trying to create threads. Such errors are rather common due to
- * the need to allocate a native stack in Thread.start, and users
- * will want to perform clean pool shutdown to clean up. There
- * will likely be enough memory available for the cleanup code to
- * complete without encountering yet another OutOfMemoryError.
+ * We go further and preserve pool invariants even in the face of errors such as OutOfMemoryError, that might be
+ * thrown while trying to create threads. Such errors are rather common due to the need to allocate a native stack
+ * in Thread.start, and users will want to perform clean pool shutdown to clean up. There will likely be enough
+ * memory available for the cleanup code to complete without encountering yet another OutOfMemoryError.
*/
private volatile ThreadFactory threadFactory;
@@ -548,35 +405,31 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
private volatile RejectedExecutionHandler handler;
/**
- * Timeout in nanoseconds for idle threads waiting for work.
- * Threads use this timeout when there are more than corePoolSize
- * present or if allowCoreThreadTimeOut. Otherwise, they wait
- * forever for new work.
+ * Timeout in nanoseconds for idle threads waiting for work. Threads use this timeout when there are more than
+ * corePoolSize present or if allowCoreThreadTimeOut. Otherwise, they wait forever for new work.
*/
private volatile long keepAliveTime;
/**
- * If false (default), core threads stay alive even when idle.
- * If true, core threads use keepAliveTime to time out waiting
- * for work.
+ * If false (default), core threads stay alive even when idle. If true, core threads use keepAliveTime to time out
+ * waiting for work.
*/
private volatile boolean allowCoreThreadTimeOut;
/**
- * Core pool size is the minimum number of workers to keep alive
- * (and not allow to time out etc) unless allowCoreThreadTimeOut
- * is set, in which case the minimum is zero.
+ * Core pool size is the minimum number of workers to keep alive (and not allow to time out etc) unless
+ * allowCoreThreadTimeOut is set, in which case the minimum is zero.
* <p>
- * Since the worker count is actually stored in COUNT_BITS bits,
- * the effective limit is {@code corePoolSize & COUNT_MASK}.
+ * Since the worker count is actually stored in COUNT_BITS bits, the effective limit is
+ * {@code corePoolSize & COUNT_MASK}.
*/
private volatile int corePoolSize;
/**
* Maximum pool size.
* <p>
- * Since the worker count is actually stored in COUNT_BITS bits,
- * the effective limit is {@code maximumPoolSize & COUNT_MASK}.
+ * Since the worker count is actually stored in COUNT_BITS bits, the effective limit is
+ * {@code maximumPoolSize & COUNT_MASK}.
*/
private volatile int maximumPoolSize;
@@ -586,35 +439,25 @@ public class ThreadPoolExecutor extends AbstractExecutorService {
private static final RejectedExecutionHandler defaultHandler = new RejectPolicy();
/**
- * Class Worker mainly maintains interrupt control state for
- * threads running tasks, along with other minor bookkeeping.
- * This class opportunistically extends AbstractQueuedSynchronizer
- * to simplify acquiring and releasing a lock surrounding each
- * task execution. This protects against interrupts that are
- * intended to wake up a worker thread waiting for a task from
- * instead interrupting a task being run. We implement a simple
- * non-reentrant mutual exclusion lock rather than use
- * ReentrantLock because we do not want worker tasks to be able to
- * reacquire the lock when they invoke pool control methods like
- * setCorePoolSize. Additionally, to suppress interrupts until
- * the thread actually starts running tasks, we initialize lock
- * state to a negative value, and clear it upon start (in
+ * Class Worker mainly maintains interrupt control state for threads
running tasks, along with other minor
+ * bookkeeping. This class opportunistically extends
AbstractQueuedSynchronizer to simplify acquiring and releasing
+ * a lock surrounding each task execution. This protects against
interrupts that are intended to wake up a worker
+ * thread waiting for a task from instead interrupting a task being run.
We implement a simple non-reentrant mutual
+ * exclusion lock rather than use ReentrantLock because we do not want
worker tasks to be able to reacquire the lock
+ * when they invoke pool control methods like setCorePoolSize.
Additionally, to suppress interrupts until the thread
+ * actually starts running tasks, we initialize lock state to a negative
value, and clear it upon start (in
* runWorker).
*/
- private final class Worker
- extends AbstractQueuedSynchronizer
- implements Runnable
- {
+ private final class Worker extends AbstractQueuedSynchronizer implements
Runnable {
/**
- * This class will never be serialized, but we provide a
- * serialVersionUID to suppress a javac warning.
+ * This class will never be serialized, but we provide a
serialVersionUID to suppress a javac warning.
*/
@Serial
private static final long serialVersionUID = 6138294804551838833L;
- /** Thread this worker is running in. Null if factory fails. */
+ /** Thread this worker is running in. Null if factory fails. */
final Thread thread;
- /** Initial task to run. Possibly null. */
+ /** Initial task to run. Possibly null. */
Runnable firstTask;
/** Per-thread task counter */
volatile long completedTasks;
@@ -624,6 +467,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/**
* Creates with given first task and thread from ThreadFactory.
+ *
* @param firstTask the first task (null if none)
*/
Worker(Runnable firstTask) {
@@ -664,10 +508,21 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
return true;
}
- public void lock() { acquire(1); }
- public boolean tryLock() { return tryAcquire(1); }
- public void unlock() { release(1); }
- public boolean isLocked() { return isHeldExclusively(); }
+ public void lock() {
+ acquire(1);
+ }
+
+ public boolean tryLock() {
+ return tryAcquire(1);
+ }
+
+ public void unlock() {
+ release(1);
+ }
+
+ public boolean isLocked() {
+ return isHeldExclusively();
+ }
void interruptIfStarted() {
Thread t;
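
For readers less familiar with the AbstractQueuedSynchronizer trick described above, here is a stand-alone sketch of
the same non-reentrant lock shape (the real Worker additionally initialises the state to -1 so interrupts are
suppressed until runWorker starts; that detail is omitted here):

    import java.util.concurrent.locks.AbstractQueuedSynchronizer;

    final class NonReentrantLock extends AbstractQueuedSynchronizer {
        private static final long serialVersionUID = 1L; // never serialized; silences javac, as in Worker

        @Override
        protected boolean tryAcquire(int unused) {
            if (compareAndSetState(0, 1)) {
                setExclusiveOwnerThread(Thread.currentThread());
                return true;
            }
            return false;            // a second acquire, even by the owner, fails: not reentrant
        }

        @Override
        protected boolean tryRelease(int unused) {
            setExclusiveOwnerThread(null);
            setState(0);
            return true;
        }

        @Override
        protected boolean isHeldExclusively() {
            return getState() != 0;
        }

        void lock()        { acquire(1); }
        boolean tryLock()  { return tryAcquire(1); }
        void unlock()      { release(1); }
        boolean isLocked() { return isHeldExclusively(); }

        public static void main(String[] args) {
            NonReentrantLock l = new NonReentrantLock();
            l.lock();
            System.out.println(l.tryLock()); // false: the owner cannot re-acquire
            l.unlock();
        }
    }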
@@ -685,39 +540,32 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
*/
/**
- * Transitions runState to given target, or leaves it alone if
- * already at least the given target.
+ * Transitions runState to given target, or leaves it alone if already at
least the given target.
*
- * @param targetState the desired state, either SHUTDOWN or STOP
- * (but not TIDYING or TERMINATED -- use tryTerminate for that)
+ * @param targetState the desired state, either SHUTDOWN or STOP (but not
TIDYING or TERMINATED -- use tryTerminate
+ * for that)
*/
private void advanceRunState(int targetState) {
// assert targetState == SHUTDOWN || targetState == STOP;
for (;;) {
int c = ctl.get();
- if (runStateAtLeast(c, targetState) ||
- ctl.compareAndSet(c, ctlOf(targetState, workerCountOf(c)))) {
+ if (runStateAtLeast(c, targetState) || ctl.compareAndSet(c,
ctlOf(targetState, workerCountOf(c)))) {
break;
}
}
}
/**
- * Transitions to TERMINATED state if either (SHUTDOWN and pool
- * and queue empty) or (STOP and pool empty). If otherwise
- * eligible to terminate but workerCount is nonzero, interrupts an
- * idle worker to ensure that shutdown signals propagate. This
- * method must be called following any action that might make
- * termination possible -- reducing worker count or removing tasks
- * from the queue during shutdown. The method is non-private to
- * allow access from ScheduledThreadPoolExecutor.
+ * Transitions to TERMINATED state if either (SHUTDOWN and pool and queue
empty) or (STOP and pool empty). If
+ * otherwise eligible to terminate but workerCount is nonzero, interrupts
an idle worker to ensure that shutdown
+ * signals propagate. This method must be called following any action that
might make termination possible --
+ * reducing worker count or removing tasks from the queue during shutdown.
The method is non-private to allow access
+ * from ScheduledThreadPoolExecutor.
*/
final void tryTerminate() {
for (;;) {
int c = ctl.get();
- if (isRunning(c) ||
- runStateAtLeast(c, TIDYING) ||
- (runStateLessThan(c, STOP) && ! workQueue.isEmpty())) {
+ if (isRunning(c) || runStateAtLeast(c, TIDYING) ||
(runStateLessThan(c, STOP) && !workQueue.isEmpty())) {
return;
}
if (workerCountOf(c) != 0) { // Eligible to terminate
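
The run-state transition above is easier to follow with the ctl encoding spelled out. A self-contained sketch, with
constants and helper names mirroring the JDK sources this class is derived from (shown purely for illustration):

    import java.util.concurrent.atomic.AtomicInteger;

    final class CtlDemo {
        static final int COUNT_BITS = Integer.SIZE - 3;
        static final int COUNT_MASK = (1 << COUNT_BITS) - 1;
        static final int RUNNING    = -1 << COUNT_BITS;   // negative: sorts below all other states
        static final int SHUTDOWN   =  0 << COUNT_BITS;
        static final int STOP       =  1 << COUNT_BITS;

        static final AtomicInteger ctl = new AtomicInteger(ctlOf(RUNNING, 0));

        static int ctlOf(int rs, int wc)             { return rs | wc; }
        static int runStateOf(int c)                 { return c & ~COUNT_MASK; }
        static int workerCountOf(int c)              { return c & COUNT_MASK; }
        static boolean runStateAtLeast(int c, int s) { return c >= s; }

        // Same shape as advanceRunState: a CAS loop that only ever moves the state forward
        // and leaves the worker count untouched.
        static void advanceRunState(int targetState) {
            for (;;) {
                int c = ctl.get();
                if (runStateAtLeast(c, targetState)
                        || ctl.compareAndSet(c, ctlOf(targetState, workerCountOf(c)))) {
                    break;
                }
            }
        }

        public static void main(String[] args) {
            advanceRunState(SHUTDOWN);
            System.out.println(runStateOf(ctl.get()) == SHUTDOWN); // true
            System.out.println(workerCountOf(ctl.get()));          // 0: count preserved
        }
    }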
@@ -749,8 +597,8 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
*/
/**
- * Interrupts all threads, even if active. Ignores SecurityExceptions
- * (in which case some threads may remain uninterrupted).
+ * Interrupts all threads, even if active. Ignores SecurityExceptions (in
which case some threads may remain
+ * uninterrupted).
*/
private void interruptWorkers() {
// assert mainLock.isHeldByCurrentThread();
@@ -760,23 +608,17 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Interrupts threads that might be waiting for tasks (as
- * indicated by not being locked) so they can check for
- * termination or configuration changes. Ignores
- * SecurityExceptions (in which case some threads may remain
+ * Interrupts threads that might be waiting for tasks (as indicated by not
being locked) so they can check for
+ * termination or configuration changes. Ignores SecurityExceptions (in
which case some threads may remain
* uninterrupted).
*
- * @param onlyOne If true, interrupt at most one worker. This is
- * called only from tryTerminate when termination is otherwise
- * enabled but there are still other workers. In this case, at
- * most one waiting worker is interrupted to propagate shutdown
- * signals in case all threads are currently waiting.
- * Interrupting any arbitrary thread ensures that newly arriving
- * workers since shutdown began will also eventually exit.
- * To guarantee eventual termination, it suffices to always
- * interrupt only one idle worker, but shutdown() interrupts all
- * idle workers so that redundant workers exit promptly, not
- * waiting for a straggler task to finish.
+ * @param onlyOne If true, interrupt at most one worker. This is called
only from tryTerminate when termination is
+ * otherwise enabled but there are still other workers.
In this case, at most one waiting worker
+ * is interrupted to propagate shutdown signals in case
all threads are currently waiting.
+ * Interrupting any arbitrary thread ensures that newly
arriving workers since shutdown began
+ * will also eventually exit. To guarantee eventual
termination, it suffices to always interrupt
+ * only one idle worker, but shutdown() interrupts all
idle workers so that redundant workers
+ * exit promptly, not waiting for a straggler task to
finish.
*/
private void interruptIdleWorkers(boolean onlyOne) {
final ReentrantLock mainLock = this.mainLock;
@@ -802,8 +644,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Common form of interruptIdleWorkers, to avoid having to
- * remember what the boolean argument means.
+ * Common form of interruptIdleWorkers, to avoid having to remember what
the boolean argument means.
*/
private void interruptIdleWorkers() {
interruptIdleWorkers(false);
@@ -812,31 +653,27 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
private static final boolean ONLY_ONE = true;
/*
- * Misc utilities, most of which are also exported to
- * ScheduledThreadPoolExecutor
+ * Misc utilities, most of which are also exported to
ScheduledThreadPoolExecutor
*/
/**
- * Invokes the rejected execution handler for the given command.
- * Package-protected for use by ScheduledThreadPoolExecutor.
+ * Invokes the rejected execution handler for the given command.
Package-protected for use by
+ * ScheduledThreadPoolExecutor.
*/
final void reject(Runnable command) {
handler.rejectedExecution(command, this);
}
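
On the caller's side, reject() ends up in whatever RejectedExecutionHandler was configured. A minimal sketch with the
standard JDK executor and a logging handler (handler, sizes and task are made up for the example):

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.RejectedExecutionHandler;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class RejectDemo {
        public static void main(String[] args) {
            // Same shape as the handler reject() delegates to: it receives the refused
            // task and the executor; this one just logs and drops the task.
            RejectedExecutionHandler logAndDrop =
                    (task, executor) -> System.err.println("rejected: " + task);

            ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0L, TimeUnit.MILLISECONDS,
                    new ArrayBlockingQueue<>(1),   // one worker, one queue slot
                    logAndDrop);

            Runnable blocker = () -> { try { Thread.sleep(500); } catch (InterruptedException ignored) {} };
            pool.execute(blocker);   // runs on the single worker
            pool.execute(blocker);   // sits in the queue
            pool.execute(blocker);   // pool and queue full -> handler invoked instead of throwing
            pool.shutdown();
        }
    }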
/**
- * Performs any further cleanup following run state transition on
- * invocation of shutdown. A no-op here, but used by
+ * Performs any further cleanup following run state transition on
invocation of shutdown. A no-op here, but used by
* ScheduledThreadPoolExecutor to cancel delayed tasks.
*/
void onShutdown() {
}
/**
- * Drains the task queue into a new list, normally using
- * drainTo. But if the queue is a DelayQueue or any other kind of
- * queue for which poll or drainTo may fail to remove some
- * elements, it deletes them one by one.
+ * Drains the task queue into a new list, normally using drainTo. But if
the queue is a DelayQueue or any other kind
+ * of queue for which poll or drainTo may fail to remove some elements, it
deletes them one by one.
*/
private List<Runnable> drainQueue() {
BlockingQueue<Runnable> q = workQueue;
@@ -857,55 +694,43 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
*/
/**
- * Checks if a new worker can be added with respect to current
- * pool state and the given bound (either core or maximum). If so,
- * the worker count is adjusted accordingly, and, if possible, a
- * new worker is created and started, running firstTask as its
- * first task. This method returns false if the pool is stopped or
- * eligible to shut down. It also returns false if the thread
- * factory fails to create a thread when asked. If the thread
- * creation fails, either due to the thread factory returning
- * null, or due to an exception (typically OutOfMemoryError in
+ * Checks if a new worker can be added with respect to current pool state
and the given bound (either core or
+ * maximum). If so, the worker count is adjusted accordingly, and, if
possible, a new worker is created and started,
+ * running firstTask as its first task. This method returns false if the
pool is stopped or eligible to shut down.
+ * It also returns false if the thread factory fails to create a thread
when asked. If the thread creation fails,
+ * either due to the thread factory returning null, or due to an exception
(typically OutOfMemoryError in
* Thread.start()), we roll back cleanly.
*
- * @param firstTask the task the new thread should run first (or
- * null if none). Workers are created with an initial first task
- * (in method execute()) to bypass queuing when there are fewer
- * than corePoolSize threads (in which case we always start one),
- * or when the queue is full (in which case we must bypass queue).
- * Initially idle threads are usually created via
- * prestartCoreThread or to replace other dying workers.
- *
- * @param core if true use corePoolSize as bound, else
- * maximumPoolSize. (A boolean indicator is used here rather than a
- * value to ensure reads of fresh values after checking other pool
- * state).
+ * @param firstTask the task the new thread should run first (or null if
none). Workers are created with an initial
+ * first task (in method execute()) to bypass queuing
when there are fewer than corePoolSize
+ * threads (in which case we always start one), or
when the queue is full (in which case we
+ * must bypass queue). Initially idle threads are
usually created via prestartCoreThread or to
+ * replace other dying workers.
+ * @param core if true use corePoolSize as bound, else
maximumPoolSize. (A boolean indicator is used here
+ * rather than a value to ensure reads of fresh
values after checking other pool state).
+ *
* @return true if successful
*/
private boolean addWorker(Runnable firstTask, boolean core) {
retry:
for (int c = ctl.get();;) {
// Check if queue empty only if necessary.
- if (runStateAtLeast(c, SHUTDOWN)
- && (runStateAtLeast(c, STOP)
- || firstTask != null
- || workQueue.isEmpty())) {
+ if (runStateAtLeast(c, SHUTDOWN) &&
+ (runStateAtLeast(c, STOP) || firstTask != null ||
workQueue.isEmpty())) {
return false;
}
for (;;) {
- if (workerCountOf(c)
- >= ((core ? corePoolSize : maximumPoolSize) & COUNT_MASK))
{
+ if (workerCountOf(c) >= ((core ? corePoolSize :
maximumPoolSize) & COUNT_MASK)) {
return false;
}
if (compareAndIncrementWorkerCount(c)) {
break retry;
}
- c = ctl.get(); // Re-read ctl
- if (runStateAtLeast(c, SHUTDOWN))
- {
+ c = ctl.get(); // Re-read ctl
+ if (runStateAtLeast(c, SHUTDOWN)) {
continue retry;
- // else CAS failed due to workerCount change; retry inner loop
+ // else CAS failed due to workerCount change; retry inner
loop
}
}
}
@@ -925,8 +750,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
// shut down before lock acquired.
int c = ctl.get();
- if (isRunning(c) ||
- (runStateLessThan(c, STOP) && firstTask == null)) {
+ if (isRunning(c) || (runStateLessThan(c, STOP) &&
firstTask == null)) {
if (t.getState() != Thread.State.NEW) {
throw new IllegalThreadStateException();
}
@@ -946,7 +770,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
}
} finally {
- if (! workerStarted) {
+ if (!workerStarted) {
addWorkerFailed(w);
}
}
@@ -958,8 +782,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* <ul>
* <li>removes worker from workers, if present</li>
* <li>decrements worker count</li>
- * <li>rechecks for termination, in case the existence of this
- * worker was holding up termination</li>
+ * <li>rechecks for termination, in case the existence of this worker was
holding up termination</li>
* </ul>
*/
private void addWorkerFailed(Worker w) {
@@ -977,16 +800,12 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Performs cleanup and bookkeeping for a dying worker. Called
- * only from worker threads. Unless completedAbruptly is set,
- * assumes that workerCount has already been adjusted to account
- * for exit. This method removes thread from worker set, and
- * possibly terminates the pool or replaces the worker if either
- * it exited due to user task exception or if fewer than
- * corePoolSize workers are running or queue is non-empty but
- * there are no workers.
+ * Performs cleanup and bookkeeping for a dying worker. Called only from
worker threads. Unless completedAbruptly is
+ * set, assumes that workerCount has already been adjusted to account for
exit. This method removes thread from
+ * worker set, and possibly terminates the pool or replaces the worker if
either it exited due to user task
+ * exception or if fewer than corePoolSize workers are running or queue is
non-empty but there are no workers.
*
- * @param w the worker
+ * @param w the worker
* @param completedAbruptly if the worker died due to user exception
*/
private void processWorkerExit(Worker w, boolean completedAbruptly) {
@@ -1009,7 +828,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
if (runStateLessThan(c, STOP)) {
if (!completedAbruptly) {
int min = allowCoreThreadTimeOut ? 0 : corePoolSize;
- if (min == 0 && ! workQueue.isEmpty()) {
+ if (min == 0 && !workQueue.isEmpty()) {
min = 1;
}
// https://bz.apache.org/bugzilla/show_bug.cgi?id=65454
@@ -1028,22 +847,18 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Performs blocking or timed wait for a task, depending on
- * current configuration settings, or returns null if this worker
- * must exit because of any of:
+ * Performs blocking or timed wait for a task, depending on current
configuration settings, or returns null if this
+ * worker must exit because of any of:
* <ol>
- * <li>There are more than maximumPoolSize workers (due to
- * a call to setMaximumPoolSize).</li>
+ * <li>There are more than maximumPoolSize workers (due to a call to
setMaximumPoolSize).</li>
* <li>The pool is stopped.</li>
* <li>The pool is shutdown and the queue is empty.</li>
- * <li>This worker timed out waiting for a task, and timed-out
- * workers are subject to termination (that is,
- * {@code allowCoreThreadTimeOut || workerCount > corePoolSize})
- * both before and after the timed wait, and if the queue is
- * non-empty, this worker is not the last thread in the pool.</li>
+ * <li>This worker timed out waiting for a task, and timed-out workers are
subject to termination (that is,
+ * {@code allowCoreThreadTimeOut || workerCount > corePoolSize}) both
before and after the timed wait, and if the
+ * queue is non-empty, this worker is not the last thread in the pool.</li>
* </ol>
- * @return task, or null if the worker must exit, in which case
- * workerCount is decremented
+ *
+ * @return task, or null if the worker must exit, in which case
workerCount is decremented
*/
private Runnable getTask() {
boolean timedOut = false; // Did the last poll() time out?
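
The timed-versus-untimed wait performed below boils down to the following sketch; the method and parameter names are
illustrative, not the actual fields:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.TimeUnit;

    public class IdleWaitSketch {
        // timed corresponds to (allowCoreThreadTimeOut || workerCount > corePoolSize) above.
        static Runnable waitForWork(BlockingQueue<Runnable> queue, boolean timed, long keepAliveNanos)
                throws InterruptedException {
            return timed
                    ? queue.poll(keepAliveNanos, TimeUnit.NANOSECONDS) // may return null: the worker's cue to exit
                    : queue.take();                                    // blocks until a task arrives
        }

        public static void main(String[] args) throws InterruptedException {
            BlockingQueue<Runnable> q = new LinkedBlockingQueue<>();
            // Empty queue + timed wait: null after ~50 ms, i.e. the timeout path a culled worker takes.
            System.out.println(waitForWork(q, true, TimeUnit.MILLISECONDS.toNanos(50)));
        }
    }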
@@ -1052,8 +867,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
int c = ctl.get();
// Check if queue empty only if necessary.
- if (runStateAtLeast(c, SHUTDOWN)
- && (runStateAtLeast(c, STOP) || workQueue.isEmpty())) {
+ if (runStateAtLeast(c, SHUTDOWN) && (runStateAtLeast(c, STOP) ||
workQueue.isEmpty())) {
decrementWorkerCount();
return null;
}
@@ -1063,8 +877,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
// Are workers subject to culling?
boolean timed = allowCoreThreadTimeOut || wc > corePoolSize;
- if ((wc > maximumPoolSize || (timed && timedOut))
- && (wc > 1 || workQueue.isEmpty())) {
+ if ((wc > maximumPoolSize || (timed && timedOut)) && (wc > 1 ||
workQueue.isEmpty())) {
if (compareAndDecrementWorkerCount(c)) {
return null;
}
@@ -1072,9 +885,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
try {
- Runnable r = timed ?
- workQueue.poll(keepAliveTime, TimeUnit.NANOSECONDS) :
- workQueue.take();
+ Runnable r = timed ? workQueue.poll(keepAliveTime,
TimeUnit.NANOSECONDS) : workQueue.take();
if (r != null) {
return r;
}
@@ -1086,45 +897,31 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Main worker run loop. Repeatedly gets tasks from queue and
- * executes them, while coping with a number of issues:
+ * Main worker run loop. Repeatedly gets tasks from queue and executes
them, while coping with a number of issues:
* <p>
- * 1. We may start out with an initial task, in which case we
- * don't need to get the first one. Otherwise, as long as pool is
- * running, we get tasks from getTask. If it returns null then the
- * worker exits due to changed pool state or configuration
- * parameters. Other exits result from exception throws in
- * external code, in which case completedAbruptly holds, which
- * usually leads processWorkerExit to replace this thread.
+ * 1. We may start out with an initial task, in which case we don't need
to get the first one. Otherwise, as long as
+ * pool is running, we get tasks from getTask. If it returns null then the
worker exits due to changed pool state or
+ * configuration parameters. Other exits result from exception throws in
external code, in which case
+ * completedAbruptly holds, which usually leads processWorkerExit to
replace this thread.
* <p>
- * 2. Before running any task, the lock is acquired to prevent
- * other pool interrupts while the task is executing, and then we
- * ensure that unless pool is stopping, this thread does not have
- * its interrupt set.
+ * 2. Before running any task, the lock is acquired to prevent other pool
interrupts while the task is executing,
+ * and then we ensure that unless pool is stopping, this thread does not
have its interrupt set.
* <p>
- * 3. Each task run is preceded by a call to beforeExecute, which
- * might throw an exception, in which case we cause thread to die
- * (breaking loop with completedAbruptly true) without processing
- * the task.
+ * 3. Each task run is preceded by a call to beforeExecute, which might
throw an exception, in which case we cause
+ * thread to die (breaking loop with completedAbruptly true) without
processing the task.
* <p>
- * 4. Assuming beforeExecute completes normally, we run the task,
- * gathering any of its thrown exceptions to send to afterExecute.
- * We separately handle RuntimeException, Error (both of which the
- * specs guarantee that we trap) and arbitrary Throwables.
- * Because we cannot rethrow Throwables within Runnable.run, we
- * wrap them within Errors on the way out (to the thread's
- * UncaughtExceptionHandler). Any thrown exception also
- * conservatively causes thread to die.
+ * 4. Assuming beforeExecute completes normally, we run the task,
gathering any of its thrown exceptions to send to
+ * afterExecute. We separately handle RuntimeException, Error (both of
which the specs guarantee that we trap) and
+ * arbitrary Throwables. Because we cannot rethrow Throwables within
Runnable.run, we wrap them within Errors on the
+ * way out (to the thread's UncaughtExceptionHandler). Any thrown
exception also conservatively causes thread to
+ * die.
* <p>
- * 5. After task.run completes, we call afterExecute, which may
- * also throw an exception, which will also cause thread to
- * die. According to JLS Sec 14.20, this exception is the one that
- * will be in effect even if task.run throws.
+ * 5. After task.run completes, we call afterExecute, which may also throw
an exception, which will also cause
+ * thread to die. According to JLS Sec 14.20, this exception is the one
that will be in effect even if task.run
+ * throws.
* <p>
- * The net effect of the exception mechanics is that afterExecute
- * and the thread's UncaughtExceptionHandler have as accurate
- * information as we can provide about any problems encountered by
- * user code.
+ * The net effect of the exception mechanics is that afterExecute and the
thread's UncaughtExceptionHandler have as
+ * accurate information as we can provide about any problems encountered
by user code.
*
* @param w the worker
*/
@@ -1138,13 +935,11 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
while (task != null || (task = getTask()) != null) {
w.lock();
// If pool is stopping, ensure thread is interrupted;
- // if not, ensure thread is not interrupted. This
+ // if not, ensure thread is not interrupted. This
// requires a recheck in second case to deal with
// shutdownNow race while clearing interrupt
- if ((runStateAtLeast(ctl.get(), STOP) ||
- (Thread.interrupted() &&
- runStateAtLeast(ctl.get(), STOP))) &&
- !wt.isInterrupted()) {
+ if ((runStateAtLeast(ctl.get(), STOP) || (Thread.interrupted()
&& runStateAtLeast(ctl.get(), STOP))) &&
+ !wt.isInterrupted()) {
wt.interrupt();
}
try {
@@ -1171,152 +966,113 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
// Public constructors and methods
/**
- * Creates a new {@code ThreadPoolExecutor} with the given initial
- * parameters, the
- * {@linkplain Executors#defaultThreadFactory default thread factory}
- * and the {@linkplain ThreadPoolExecutor.RejectPolicy
- * default rejected execution handler}.
- *
- * <p>It may be more convenient to use one of the {@link Executors}
- * factory methods instead of this general purpose constructor.
- *
- * @param corePoolSize the number of threads to keep in the pool, even
- * if they are idle, unless {@code allowCoreThreadTimeOut} is set
- * @param maximumPoolSize the maximum number of threads to allow in the
- * pool
- * @param keepAliveTime when the number of threads is greater than
- * the core, this is the maximum time that excess idle threads
- * will wait for new tasks before terminating.
- * @param unit the time unit for the {@code keepAliveTime} argument
- * @param workQueue the queue to use for holding tasks before they are
- * executed. This queue will hold only the {@code Runnable}
- * tasks submitted by the {@code execute} method.
+ * Creates a new {@code ThreadPoolExecutor} with the given initial
parameters, the
+ * {@linkplain Executors#defaultThreadFactory default thread factory} and
the
+ * {@linkplain ThreadPoolExecutor.RejectPolicy default rejected execution handler}.
+ * <p>
+ * It may be more convenient to use one of the {@link Executors} factory
methods instead of this general purpose
+ * constructor.
+ *
+ * @param corePoolSize the number of threads to keep in the pool, even
if they are idle, unless
+ * {@code allowCoreThreadTimeOut} is set
+ * @param maximumPoolSize the maximum number of threads to allow in the
pool
+ * @param keepAliveTime when the number of threads is greater than the
core, this is the maximum time that excess
+ * idle threads will wait for new tasks before
terminating.
+ * @param unit the time unit for the {@code keepAliveTime}
argument
+ * @param workQueue the queue to use for holding tasks before they
are executed. This queue will hold only the
+ * {@code Runnable} tasks submitted by the
{@code execute} method.
+ *
* @throws IllegalArgumentException if one of the following holds:<br>
- * {@code corePoolSize < 0}<br>
- * {@code keepAliveTime < 0}<br>
- * {@code maximumPoolSize <= 0}<br>
- * {@code maximumPoolSize < corePoolSize}
- * @throws NullPointerException if {@code workQueue} is null
- */
- public ThreadPoolExecutor(int corePoolSize,
- int maximumPoolSize,
- long keepAliveTime,
- TimeUnit unit,
- BlockingQueue<Runnable> workQueue) {
- this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
- Executors.defaultThreadFactory(), defaultHandler);
- }
-
- /**
- * Creates a new {@code ThreadPoolExecutor} with the given initial
- * parameters and the {@linkplain ThreadPoolExecutor.RejectPolicy
- * default rejected execution handler}.
- *
- * @param corePoolSize the number of threads to keep in the pool, even
- * if they are idle, unless {@code allowCoreThreadTimeOut} is set
- * @param maximumPoolSize the maximum number of threads to allow in the
- * pool
- * @param keepAliveTime when the number of threads is greater than
- * the core, this is the maximum time that excess idle threads
- * will wait for new tasks before terminating.
- * @param unit the time unit for the {@code keepAliveTime} argument
- * @param workQueue the queue to use for holding tasks before they are
- * executed. This queue will hold only the {@code Runnable}
- * tasks submitted by the {@code execute} method.
- * @param threadFactory the factory to use when the executor
- * creates a new thread
+ * {@code corePoolSize < 0}<br>
+ * {@code keepAliveTime < 0}<br>
+ * {@code maximumPoolSize <= 0}<br>
+ * {@code maximumPoolSize < corePoolSize}
+ * @throws NullPointerException if {@code workQueue} is null
+ */
+ public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long
keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
Executors.defaultThreadFactory(),
+ defaultHandler);
+ }
+
+ /**
+ * Creates a new {@code ThreadPoolExecutor} with the given initial
parameters and the
+ * {@linkplain ThreadPoolExecutor.RejectPolicy default rejected execution
handler}.
+ *
+ * @param corePoolSize the number of threads to keep in the pool, even
if they are idle, unless
+ * {@code allowCoreThreadTimeOut} is set
+ * @param maximumPoolSize the maximum number of threads to allow in the
pool
+ * @param keepAliveTime when the number of threads is greater than the
core, this is the maximum time that excess
+ * idle threads will wait for new tasks before
terminating.
+ * @param unit the time unit for the {@code keepAliveTime}
argument
+ * @param workQueue the queue to use for holding tasks before they
are executed. This queue will hold only the
+ * {@code Runnable} tasks submitted by the
{@code execute} method.
+ * @param threadFactory the factory to use when the executor creates a
new thread
+ *
* @throws IllegalArgumentException if one of the following holds:<br>
- * {@code corePoolSize < 0}<br>
- * {@code keepAliveTime < 0}<br>
- * {@code maximumPoolSize <= 0}<br>
- * {@code maximumPoolSize < corePoolSize}
- * @throws NullPointerException if {@code workQueue}
- * or {@code threadFactory} is null
- */
- public ThreadPoolExecutor(int corePoolSize,
- int maximumPoolSize,
- long keepAliveTime,
- TimeUnit unit,
- BlockingQueue<Runnable> workQueue,
- ThreadFactory threadFactory) {
- this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
- threadFactory, defaultHandler);
- }
-
- /**
- * Creates a new {@code ThreadPoolExecutor} with the given initial
- * parameters and the
+ * {@code corePoolSize < 0}<br>
+ * {@code keepAliveTime < 0}<br>
+ * {@code maximumPoolSize <= 0}<br>
+ * {@code maximumPoolSize < corePoolSize}
+ * @throws NullPointerException if {@code workQueue} or {@code threadFactory} is null
+ */
+ public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long
keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
threadFactory, defaultHandler);
+ }
+
+ /**
+ * Creates a new {@code ThreadPoolExecutor} with the given initial
parameters and the
* {@linkplain Executors#defaultThreadFactory default thread factory}.
*
- * @param corePoolSize the number of threads to keep in the pool, even
- * if they are idle, unless {@code allowCoreThreadTimeOut} is set
- * @param maximumPoolSize the maximum number of threads to allow in the
- * pool
- * @param keepAliveTime when the number of threads is greater than
- * the core, this is the maximum time that excess idle threads
- * will wait for new tasks before terminating.
- * @param unit the time unit for the {@code keepAliveTime} argument
- * @param workQueue the queue to use for holding tasks before they are
- * executed. This queue will hold only the {@code Runnable}
- * tasks submitted by the {@code execute} method.
- * @param handler the handler to use when execution is blocked
- * because the thread bounds and queue capacities are reached
+ * @param corePoolSize the number of threads to keep in the pool, even
if they are idle, unless
+ * {@code allowCoreThreadTimeOut} is set
+ * @param maximumPoolSize the maximum number of threads to allow in the
pool
+ * @param keepAliveTime when the number of threads is greater than the
core, this is the maximum time that excess
+ * idle threads will wait for new tasks before
terminating.
+ * @param unit the time unit for the {@code keepAliveTime}
argument
+ * @param workQueue the queue to use for holding tasks before they
are executed. This queue will hold only the
+ * {@code Runnable} tasks submitted by the
{@code execute} method.
+ * @param handler the handler to use when execution is blocked
because the thread bounds and queue
+ * capacities are reached
+ *
* @throws IllegalArgumentException if one of the following holds:<br>
- * {@code corePoolSize < 0}<br>
- * {@code keepAliveTime < 0}<br>
- * {@code maximumPoolSize <= 0}<br>
- * {@code maximumPoolSize < corePoolSize}
- * @throws NullPointerException if {@code workQueue}
- * or {@code handler} is null
- */
- public ThreadPoolExecutor(int corePoolSize,
- int maximumPoolSize,
- long keepAliveTime,
- TimeUnit unit,
- BlockingQueue<Runnable> workQueue,
- RejectedExecutionHandler handler) {
- this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
- Executors.defaultThreadFactory(), handler);
- }
-
- /**
- * Creates a new {@code ThreadPoolExecutor} with the given initial
- * parameters.
- *
- * @param corePoolSize the number of threads to keep in the pool, even
- * if they are idle, unless {@code allowCoreThreadTimeOut} is set
- * @param maximumPoolSize the maximum number of threads to allow in the
- * pool
- * @param keepAliveTime when the number of threads is greater than
- * the core, this is the maximum time that excess idle threads
- * will wait for new tasks before terminating.
- * @param unit the time unit for the {@code keepAliveTime} argument
- * @param workQueue the queue to use for holding tasks before they are
- * executed. This queue will hold only the {@code Runnable}
- * tasks submitted by the {@code execute} method.
- * @param threadFactory the factory to use when the executor
- * creates a new thread
- * @param handler the handler to use when execution is blocked
- * because the thread bounds and queue capacities are reached
+ * {@code corePoolSize < 0}<br>
+ * {@code keepAliveTime < 0}<br>
+ * {@code maximumPoolSize <= 0}<br>
+ * {@code maximumPoolSize < corePoolSize}
+ * @throws NullPointerException if {@code workQueue} or {@code handler} is null
+ */
+ public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long
keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue, RejectedExecutionHandler
handler) {
+ this(corePoolSize, maximumPoolSize, keepAliveTime, unit, workQueue,
Executors.defaultThreadFactory(), handler);
+ }
+
+ /**
+ * Creates a new {@code ThreadPoolExecutor} with the given initial
parameters.
+ *
+ * @param corePoolSize the number of threads to keep in the pool, even
if they are idle, unless
+ * {@code allowCoreThreadTimeOut} is set
+ * @param maximumPoolSize the maximum number of threads to allow in the
pool
+ * @param keepAliveTime when the number of threads is greater than the
core, this is the maximum time that excess
+ * idle threads will wait for new tasks before
terminating.
+ * @param unit the time unit for the {@code keepAliveTime}
argument
+ * @param workQueue the queue to use for holding tasks before they
are executed. This queue will hold only the
+ * {@code Runnable} tasks submitted by the
{@code execute} method.
+ * @param threadFactory the factory to use when the executor creates a
new thread
+ * @param handler the handler to use when execution is blocked
because the thread bounds and queue
+ * capacities are reached
+ *
* @throws IllegalArgumentException if one of the following holds:<br>
- * {@code corePoolSize < 0}<br>
- * {@code keepAliveTime < 0}<br>
- * {@code maximumPoolSize <= 0}<br>
- * {@code maximumPoolSize < corePoolSize}
- * @throws NullPointerException if {@code workQueue}
- * or {@code threadFactory} or {@code handler} is null
- */
- public ThreadPoolExecutor(int corePoolSize,
- int maximumPoolSize,
- long keepAliveTime,
- TimeUnit unit,
- BlockingQueue<Runnable> workQueue,
- ThreadFactory threadFactory,
- RejectedExecutionHandler handler) {
- if (corePoolSize < 0 ||
- maximumPoolSize <= 0 ||
- maximumPoolSize < corePoolSize ||
- keepAliveTime < 0) {
+ * {@code corePoolSize < 0}<br>
+ * {@code keepAliveTime < 0}<br>
+ * {@code maximumPoolSize <= 0}<br>
+ * {@code maximumPoolSize < corePoolSize}
+ * @throws NullPointerException if {@code workQueue} or {@code threadFactory} or {@code handler} is null
+ */
+ public ThreadPoolExecutor(int corePoolSize, int maximumPoolSize, long
keepAliveTime, TimeUnit unit,
+ BlockingQueue<Runnable> workQueue, ThreadFactory threadFactory,
RejectedExecutionHandler handler) {
+ if (corePoolSize < 0 || maximumPoolSize <= 0 || maximumPoolSize <
corePoolSize || keepAliveTime < 0) {
throw new IllegalArgumentException();
}
if (workQueue == null || threadFactory == null || handler == null) {
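
A usage sketch of the full-argument constructor documented above, pairing an illustrative naming ThreadFactory with
CallerRunsPolicy as the saturation handler (all values are examples, shown against the standard JDK class):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadFactory;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.atomic.AtomicInteger;

    public class FullCtorDemo {
        public static void main(String[] args) {
            AtomicInteger seq = new AtomicInteger();
            ThreadFactory named = r -> {
                Thread t = new Thread(r, "worker-" + seq.incrementAndGet());
                t.setDaemon(true);
                return t;
            };
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    2,                               // corePoolSize
                    8,                               // maximumPoolSize
                    30, TimeUnit.SECONDS,            // keepAliveTime for excess idle threads
                    new LinkedBlockingQueue<>(100),  // bounded work queue
                    named,
                    new ThreadPoolExecutor.CallerRunsPolicy());
            pool.execute(() -> System.out.println(Thread.currentThread().getName()));
            pool.shutdown();
        }
    }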
@@ -1357,18 +1113,15 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/**
- * Executes the given task sometime in the future. The task
- * may execute in a new thread or in an existing pooled thread.
- *
- * If the task cannot be submitted for execution, either because this
- * executor has been shutdown or because its capacity has been reached,
- * the task is handled by the current {@link RejectedExecutionHandler}.
+ * Executes the given task sometime in the future. The task may execute in
a new thread or in an existing pooled
+ * thread. If the task cannot be submitted for execution, either because
this executor has been shutdown or because
+ * its capacity has been reached, the task is handled by the current
{@link RejectedExecutionHandler}.
*
* @param command the task to execute
- * @throws RejectedExecutionException at discretion of
- * {@code RejectedExecutionHandler}, if the task
- * cannot be accepted for execution
- * @throws NullPointerException if {@code command} is null
+ *
+ * @throws RejectedExecutionException at discretion of {@code RejectedExecutionHandler}, if the task cannot be
+ * accepted for execution
+ * @throws NullPointerException if {@code command} is null
*/
private void executeInternal(Runnable command) {
if (command == null) {
@@ -1377,22 +1130,17 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/*
* Proceed in 3 steps:
*
- * 1. If fewer than corePoolSize threads are running, try to
- * start a new thread with the given command as its first
- * task. The call to addWorker atomically checks runState and
- * workerCount, and so prevents false alarms that would add
- * threads when it shouldn't, by returning false.
+ * 1. If fewer than corePoolSize threads are running, try to start a
new thread with the given command as its
+ * first task. The call to addWorker atomically checks runState and
workerCount, and so prevents false alarms
+ * that would add threads when it shouldn't, by returning false.
*
- * 2. If a task can be successfully queued, then we still need
- * to double-check whether we should have added a thread
- * (because existing ones died since last checking) or that
- * the pool shut down since entry into this method. So we
- * recheck state and if necessary roll back the enqueuing if
- * stopped, or start a new thread if there are none.
+ * 2. If a task can be successfully queued, then we still need to
double-check whether we should have added a
+ * thread (because existing ones died since last checking) or that the
pool shut down since entry into this
+ * method. So we recheck state and if necessary roll back the
enqueuing if stopped, or start a new thread if
+ * there are none.
*
- * 3. If we cannot queue task, then we try to add a new
- * thread. If it fails, we know we are shut down or saturated
- * and so reject the task.
+ * 3. If we cannot queue task, then we try to add a new thread. If it
fails, we know we are shut down or
+ * saturated and so reject the task.
*/
int c = ctl.get();
if (workerCountOf(c) < corePoolSize) {
@@ -1403,25 +1151,22 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
if (isRunning(c) && workQueue.offer(command)) {
int recheck = ctl.get();
- if (! isRunning(recheck) && remove(command)) {
+ if (!isRunning(recheck) && remove(command)) {
reject(command);
} else if (workerCountOf(recheck) == 0) {
addWorker(null, false);
}
- }
- else if (!addWorker(command, false)) {
+ } else if (!addWorker(command, false)) {
reject(command);
}
}
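
The three submission paths described above can be observed directly. This sketch uses the plain
java.util.concurrent.ThreadPoolExecutor with a bounded queue; Tomcat's TaskQueue changes when queueing happens, so it
illustrates only the base algorithm:

    import java.util.concurrent.ArrayBlockingQueue;
    import java.util.concurrent.RejectedExecutionException;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutePathsDemo {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                    1, 2, 30, TimeUnit.SECONDS,
                    new ArrayBlockingQueue<>(1),               // capacity 1 makes saturation easy to hit
                    new ThreadPoolExecutor.AbortPolicy());      // reject by throwing

            Runnable sleepy = () -> { try { Thread.sleep(5000); } catch (InterruptedException ignored) {} };

            pool.execute(sleepy);  // step 1: fewer than corePoolSize threads -> new core worker
            pool.execute(sleepy);  // step 2: core busy -> queued
            pool.execute(sleepy);  // step 3: queue full -> second (non-core) worker started
            try {
                pool.execute(sleepy);  // pool and queue both full -> rejected
            } catch (RejectedExecutionException expected) {
                System.out.println("rejected as expected");
            }
            pool.shutdownNow();
        }
    }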
/**
- * Initiates an orderly shutdown in which previously submitted
- * tasks are executed, but no new tasks will be accepted.
- * Invocation has no additional effect if already shut down.
- *
- * <p>This method does not wait for previously submitted tasks to
- * complete execution. Use {@link #awaitTermination awaitTermination}
- * to do that.
+ * Initiates an orderly shutdown in which previously submitted tasks are
executed, but no new tasks will be
+ * accepted. Invocation has no additional effect if already shut down.
+ * <p>
+ * This method does not wait for previously submitted tasks to complete
execution. Use {@link #awaitTermination
+ * awaitTermination} to do that.
*
* @throws SecurityException {@inheritDoc}
*/
@@ -1440,19 +1185,16 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Attempts to stop all actively executing tasks, halts the
- * processing of waiting tasks, and returns a list of the tasks
- * that were awaiting execution. These tasks are drained (removed)
- * from the task queue upon return from this method.
- *
- * <p>This method does not wait for actively executing tasks to
- * terminate. Use {@link #awaitTermination awaitTermination} to
- * do that.
- *
- * <p>There are no guarantees beyond best-effort attempts to stop
- * processing actively executing tasks. This implementation
- * interrupts tasks via {@link Thread#interrupt}; any task that
- * fails to respond to interrupts may never terminate.
+ * Attempts to stop all actively executing tasks, halts the processing of
waiting tasks, and returns a list of the
+ * tasks that were awaiting execution. These tasks are drained (removed)
from the task queue upon return from this
+ * method.
+ * <p>
+ * This method does not wait for actively executing tasks to terminate.
Use {@link #awaitTermination
+ * awaitTermination} to do that.
+ * <p>
+ * There are no guarantees beyond best-effort attempts to stop processing
actively executing tasks. This
+ * implementation interrupts tasks via {@link Thread#interrupt}; any task
that fails to respond to interrupts may
+ * never terminate.
*
* @throws SecurityException {@inheritDoc}
*/
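
A sketch contrasting the two shutdown modes just described (JDK executor, illustrative timings):

    import java.util.List;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class ShutdownDemo {
        public static void main(String[] args) throws InterruptedException {
            ExecutorService pool = Executors.newFixedThreadPool(1);
            for (int i = 0; i < 3; i++) {
                pool.execute(() -> { try { Thread.sleep(500); } catch (InterruptedException ignored) {} });
            }

            pool.shutdown();                         // stop accepting work, let queued tasks finish
            if (!pool.awaitTermination(300, TimeUnit.MILLISECONDS)) {
                List<Runnable> dropped = pool.shutdownNow();   // interrupt workers, drain the queue
                System.out.println(dropped.size() + " queued tasks never ran");  // expected 2
            }
        }
    }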
@@ -1483,13 +1225,10 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Returns true if this executor is in the process of terminating
- * after {@link #shutdown} or {@link #shutdownNow} but has not
- * completely terminated. This method may be useful for
- * debugging. A return of {@code true} reported a sufficient
- * period after shutdown may indicate that submitted tasks have
- * ignored or suppressed interruption, causing this executor not
- * to properly terminate.
+ * Returns true if this executor is in the process of terminating after
{@link #shutdown} or {@link #shutdownNow}
+ * but has not completely terminated. This method may be useful for
debugging. A return of {@code true} reported a
+ * sufficient period after shutdown may indicate that submitted tasks have
ignored or suppressed interruption,
+ * causing this executor not to properly terminate.
*
* @return {@code true} if terminating but not yet terminated
*/
@@ -1504,8 +1243,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
@Override
- public boolean awaitTermination(long timeout, TimeUnit unit)
- throws InterruptedException {
+ public boolean awaitTermination(long timeout, TimeUnit unit) throws
InterruptedException {
long nanos = unit.toNanos(timeout);
final ReentrantLock mainLock = this.mainLock;
mainLock.lock();
@@ -1526,7 +1264,9 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* Sets the thread factory used to create new threads.
*
* @param threadFactory the new thread factory
+ *
* @throws NullPointerException if threadFactory is null
+ *
* @see #getThreadFactory
*/
public void setThreadFactory(ThreadFactory threadFactory) {
@@ -1540,6 +1280,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* Returns the thread factory used to create new threads.
*
* @return the current thread factory
+ *
* @see #setThreadFactory(ThreadFactory)
*/
public ThreadFactory getThreadFactory() {
@@ -1550,7 +1291,9 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* Sets a new handler for unexecutable tasks.
*
* @param handler the new handler
+ *
* @throws NullPointerException if handler is null
+ *
* @see #getRejectedExecutionHandler
*/
public void setRejectedExecutionHandler(RejectedExecutionHandler handler) {
@@ -1564,6 +1307,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* Returns the current handler for unexecutable tasks.
*
* @return the current handler
+ *
* @see #setRejectedExecutionHandler(RejectedExecutionHandler)
*/
public RejectedExecutionHandler getRejectedExecutionHandler() {
@@ -1571,16 +1315,15 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Sets the core number of threads. This overrides any value set
- * in the constructor. If the new value is smaller than the
- * current value, excess existing threads will be terminated when
- * they next become idle. If larger, new threads will, if needed,
- * be started to execute any queued tasks.
+ * Sets the core number of threads. This overrides any value set in the
constructor. If the new value is smaller
+ * than the current value, excess existing threads will be terminated when
they next become idle. If larger, new
+ * threads will, if needed, be started to execute any queued tasks.
*
* @param corePoolSize the new core size
- * @throws IllegalArgumentException if {@code corePoolSize < 0}
- * or {@code corePoolSize} is greater than the {@linkplain
- * #getMaximumPoolSize() maximum pool size}
+ *
+ * @throws IllegalArgumentException if {@code corePoolSize < 0} or {@code corePoolSize} is greater than the
+ * {@linkplain #getMaximumPoolSize() maximum pool size}
+ *
* @see #getCorePoolSize
*/
public void setCorePoolSize(int corePoolSize) {
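
A runtime-resizing sketch matching the behaviour described above (JDK executor, illustrative sizes):

    import java.util.concurrent.Executors;
    import java.util.concurrent.ThreadPoolExecutor;

    public class ResizeDemo {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = (ThreadPoolExecutor) Executors.newFixedThreadPool(8);
            pool.prestartAllCoreThreads();

            pool.setMaximumPoolSize(16); // raise the ceiling first so the next call stays legal
            pool.setCorePoolSize(2);     // excess workers terminate as they next become idle
            pool.setCorePoolSize(12);    // grows again; queued tasks (if any) get fresh threads
            System.out.println("core=" + pool.getCorePoolSize() + " max=" + pool.getMaximumPoolSize());
            pool.shutdown();
        }
    }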
@@ -1609,6 +1352,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* Returns the core number of threads.
*
* @return the core number of threads
+ *
* @see #setCorePoolSize
*/
public int getCorePoolSize() {
@@ -1616,21 +1360,18 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Starts a core thread, causing it to idly wait for work. This
- * overrides the default policy of starting core threads only when
- * new tasks are executed. This method will return {@code false}
- * if all core threads have already been started.
+ * Starts a core thread, causing it to idly wait for work. This overrides
the default policy of starting core
+ * threads only when new tasks are executed. This method will return
{@code false} if all core threads have already
+ * been started.
*
* @return {@code true} if a thread was started
*/
public boolean prestartCoreThread() {
- return workerCountOf(ctl.get()) < corePoolSize &&
- addWorker(null, true);
+ return workerCountOf(ctl.get()) < corePoolSize && addWorker(null,
true);
}
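
A prestart sketch, warming the pool up front instead of starting threads lazily on the first execute() calls (JDK
executor, illustrative sizes; prestartAllCoreThreads is the bulk variant documented a little further down):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PrestartDemo {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(4, 8, 60, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>());
            System.out.println(pool.getPoolSize());            // 0: threads normally start lazily
            System.out.println(pool.prestartCoreThread());     // true: one idle core worker started
            System.out.println(pool.prestartAllCoreThreads()); // 3: the remaining core workers
            System.out.println(pool.getPoolSize());            // 4
            pool.shutdown();
        }
    }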
/**
- * Same as prestartCoreThread except arranges that at least one
- * thread is started even if corePoolSize is 0.
+ * Same as prestartCoreThread except arranges that at least one thread is
started even if corePoolSize is 0.
*/
void ensurePrestart() {
int wc = workerCountOf(ctl.get());
@@ -1642,9 +1383,8 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Starts all core threads, causing them to idly wait for work. This
- * overrides the default policy of starting core threads only when
- * new tasks are executed.
+ * Starts all core threads, causing them to idly wait for work. This
overrides the default policy of starting core
+ * threads only when new tasks are executed.
*
* @return the number of threads started
*/
@@ -1657,15 +1397,12 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Returns true if this pool allows core threads to time out and
- * terminate if no tasks arrive within the keepAlive time, being
- * replaced if needed when new tasks arrive. When true, the same
- * keep-alive policy applying to non-core threads applies also to
- * core threads. When false (the default), core threads are never
- * terminated due to lack of incoming tasks.
+ * Returns true if this pool allows core threads to time out and terminate
if no tasks arrive within the keepAlive
+ * time, being replaced if needed when new tasks arrive. When true, the
same keep-alive policy applying to non-core
+ * threads applies also to core threads. When false (the default), core
threads are never terminated due to lack of
+ * incoming tasks.
*
- * @return {@code true} if core threads are allowed to time out,
- * else {@code false}
+ * @return {@code true} if core threads are allowed to time out, else
{@code false}
*
* @since 1.6
*/
@@ -1674,19 +1411,16 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Sets the policy governing whether core threads may time out and
- * terminate if no tasks arrive within the keep-alive time, being
- * replaced if needed when new tasks arrive. When false, core
- * threads are never terminated due to lack of incoming
- * tasks. When true, the same keep-alive policy applying to
- * non-core threads applies also to core threads. To avoid
- * continual thread replacement, the keep-alive time must be
- * greater than zero when setting {@code true}. This method
- * should in general be called before the pool is actively used.
+ * Sets the policy governing whether core threads may time out and
terminate if no tasks arrive within the
+ * keep-alive time, being replaced if needed when new tasks arrive. When
false, core threads are never terminated
+ * due to lack of incoming tasks. When true, the same keep-alive policy
applying to non-core threads applies also to
+ * core threads. To avoid continual thread replacement, the keep-alive
time must be greater than zero when setting
+ * {@code true}. This method should in general be called before the pool
is actively used.
*
* @param value {@code true} if should time out, else {@code false}
- * @throws IllegalArgumentException if value is {@code true}
- * and the current keep-alive time is not greater than zero
+ *
+ * @throws IllegalArgumentException if value is {@code true} and the
current keep-alive time is not greater than
+ * zero
*
* @since 1.6
*/
@@ -1703,15 +1437,14 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Sets the maximum allowed number of threads. This overrides any
- * value set in the constructor. If the new value is smaller than
- * the current value, excess existing threads will be
- * terminated when they next become idle.
+ * Sets the maximum allowed number of threads. This overrides any value
set in the constructor. If the new value is
+ * smaller than the current value, excess existing threads will be
terminated when they next become idle.
*
* @param maximumPoolSize the new maximum
- * @throws IllegalArgumentException if the new maximum is
- * less than or equal to zero, or
- * less than the {@linkplain #getCorePoolSize core pool size}
+ *
+ * @throws IllegalArgumentException if the new maximum is less than or
equal to zero, or less than the
+ * {@linkplain #getCorePoolSize core pool size}
+ *
* @see #getMaximumPoolSize
*/
public void setMaximumPoolSize(int maximumPoolSize) {
@@ -1728,6 +1461,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
* Returns the maximum allowed number of threads.
*
* @return the maximum allowed number of threads
+ *
* @see #setMaximumPoolSize
*/
public int getMaximumPoolSize() {
@@ -1735,19 +1469,18 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Sets the thread keep-alive time, which is the amount of time
- * that threads may remain idle before being terminated.
- * Threads that wait this amount of time without processing a
- * task will be terminated if there are more than the core
- * number of threads currently in the pool, or if this pool
- * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
- * This overrides any value set in the constructor.
+ * Sets the thread keep-alive time, which is the amount of time that
threads may remain idle before being
+ * terminated. Threads that wait this amount of time without processing a
task will be terminated if there are more
+ * than the core number of threads currently in the pool, or if this pool
{@linkplain #allowsCoreThreadTimeOut()
+ * allows core thread timeout}. This overrides any value set in the
constructor.
*
- * @param time the time to wait. A time value of zero will cause
- * excess threads to terminate immediately after executing tasks.
+ * @param time the time to wait. A time value of zero will cause excess
threads to terminate immediately after
+ * executing tasks.
* @param unit the time unit of the {@code time} argument
- * @throws IllegalArgumentException if {@code time} less than zero or
- * if {@code time} is zero and {@code allowsCoreThreadTimeOut}
+ *
+ * @throws IllegalArgumentException if {@code time} less than zero or if
{@code time} is zero and
+ * {@code allowsCoreThreadTimeOut}
+ *
* @see #getKeepAliveTime(TimeUnit)
*/
public void setKeepAliveTime(long time, TimeUnit unit) {
@@ -1766,15 +1499,15 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Returns the thread keep-alive time, which is the amount of time
- * that threads may remain idle before being terminated.
- * Threads that wait this amount of time without processing a
- * task will be terminated if there are more than the core
- * number of threads currently in the pool, or if this pool
- * {@linkplain #allowsCoreThreadTimeOut() allows core thread timeout}.
+ * Returns the thread keep-alive time, which is the amount of time that
threads may remain idle before being
+ * terminated. Threads that wait this amount of time without processing a
task will be terminated if there are more
+ * than the core number of threads currently in the pool, or if this pool
{@linkplain #allowsCoreThreadTimeOut()
+ * allows core thread timeout}.
*
* @param unit the desired time unit of the result
+ *
* @return the time limit
+ *
* @see #setKeepAliveTime(long, TimeUnit)
*/
public long getKeepAliveTime(TimeUnit unit) {
@@ -1795,10 +1528,9 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/* User-level queue utilities */
/**
- * Returns the task queue used by this executor. Access to the
- * task queue is intended primarily for debugging and monitoring.
- * This queue may be in active use. Retrieving the task queue
- * does not prevent queued tasks from executing.
+ * Returns the task queue used by this executor. Access to the task queue
is intended primarily for debugging and
+ * monitoring. This queue may be in active use. Retrieving the task queue
does not prevent queued tasks from
+ * executing.
*
* @return the task queue
*/
@@ -1807,19 +1539,16 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Removes this task from the executor's internal queue if it is
- * present, thus causing it not to be run if it has not already
- * started.
- *
- * <p>This method may be useful as one part of a cancellation
- * scheme. It may fail to remove tasks that have been converted
- * into other forms before being placed on the internal queue.
- * For example, a task entered using {@code submit} might be
- * converted into a form that maintains {@code Future} status.
- * However, in such cases, method {@link #purge} may be used to
- * remove those Futures that have been cancelled.
+ * Removes this task from the executor's internal queue if it is present,
thus causing it not to be run if it has
+ * not already started.
+ * <p>
+ * This method may be useful as one part of a cancellation scheme. It may
fail to remove tasks that have been
+ * converted into other forms before being placed on the internal queue.
For example, a task entered using
+ * {@code submit} might be converted into a form that maintains {@code Future} status. However, in such cases,
+ * method {@link #purge} may be used to remove those Futures that have
been cancelled.
*
* @param task the task to remove
+ *
* @return {@code true} if the task was removed
*/
public boolean remove(Runnable task) {
@@ -1829,14 +1558,11 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Tries to remove from the work queue all {@link Future}
- * tasks that have been cancelled. This method can be useful as a
- * storage reclamation operation, that has no other impact on
- * functionality. Cancelled tasks are never executed, but may
- * accumulate in work queues until worker threads can actively
- * remove them. Invoking this method instead tries to remove them now.
- * However, this method may fail to remove tasks in
- * the presence of interference by other threads.
+ * Tries to remove from the work queue all {@link Future} tasks that have
been cancelled. This method can be useful
+ * as a storage reclamation operation, that has no other impact on
functionality. Cancelled tasks are never
+ * executed, but may accumulate in work queues until worker threads can
actively remove them. Invoking this method
+ * instead tries to remove them now. However, this method may fail to
remove tasks in the presence of interference
+ * by other threads.
*/
public void purge() {
final BlockingQueue<Runnable> q = workQueue;
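
The reclamation pattern just described, sketched with the JDK executor (sizes and sleeps are illustrative):

    import java.util.concurrent.Future;
    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class PurgeDemo {
        public static void main(String[] args) {
            ThreadPoolExecutor pool = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS,
                    new LinkedBlockingQueue<>());
            // Occupy the single worker so the next submission stays queued.
            pool.execute(() -> { try { Thread.sleep(1000); } catch (InterruptedException ignored) {} });

            Future<?> f = pool.submit(() -> { });   // queued behind the sleeping task
            f.cancel(false);                        // cancelled, but still occupies a queue slot

            System.out.println(pool.getQueue().size()); // 1
            pool.purge();                               // drops the cancelled FutureTask now
            System.out.println(pool.getQueue().size()); // 0
            pool.shutdownNow();
        }
    }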
@@ -1847,7 +1573,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
// Make copy for traversal and call remove for cancelled entries.
// The slow path is more likely to be O(N*N).
for (Object r : q.toArray()) {
- if (r instanceof Future<?> && ((Future<?>)r).isCancelled()) {
+ if (r instanceof Future<?> && ((Future<?>) r).isCancelled()) {
q.remove(r);
}
}
@@ -1887,28 +1613,25 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
try {
// Remove rare and surprising possibility of
// isTerminated() && getPoolSize() > 0
- return runStateAtLeast(ctl.get(), TIDYING) ? 0
- : workers.size();
+ return runStateAtLeast(ctl.get(), TIDYING) ? 0 : workers.size();
} finally {
mainLock.unlock();
}
}
/**
- * Returns the current number of threads in the pool.
- * <br><b>NOTE</b>: this method only used in {@link
TaskQueue#offer(Runnable)},
- * where operations are frequent, can greatly reduce unnecessary
- * performance overhead by a lock-free way.
+ * Returns the current number of threads in the pool. <br>
+ * <b>NOTE</b>: this method is only used in {@link TaskQueue#offer(Runnable)}, where calls are frequent;
+ * reading the size without taking the main lock greatly reduces unnecessary performance overhead.
+ *
* @return the number of threads
*/
protected int getPoolSizeNoLock() {
- return runStateAtLeast(ctl.get(), TIDYING) ? 0
- : workers.size();
+ return runStateAtLeast(ctl.get(), TIDYING) ? 0 : workers.size();
}
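To make the lock-free motivation concrete, here is a simplified sketch (not the real TaskQueue logic, and the class name and threshold are illustrative only) of how a queue's offer() hot path might consult the pool size without taking the main lock. Because getPoolSizeNoLock() is protected, such a class has to live in the org.apache.tomcat.util.threads package, as TaskQueue does.

package org.apache.tomcat.util.threads;

import java.util.concurrent.LinkedBlockingQueue;

// Simplified sketch only - not the real TaskQueue implementation.
class SizeAwareQueue extends LinkedBlockingQueue<Runnable> {
    private static final long serialVersionUID = 1L;

    private transient volatile ThreadPoolExecutor parent;

    void setParent(ThreadPoolExecutor parent) {
        this.parent = parent;
    }

    @Override
    public boolean offer(Runnable task) {
        // Called on every execute(); a lock-free size read keeps this cheap.
        if (parent != null && parent.getPoolSizeNoLock() < parent.getMaximumPoolSize()) {
            // Refuse to queue so the executor creates another worker instead.
            return false;
        }
        return super.offer(task);
    }
}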
/**
- * Returns the approximate number of threads that are actively
- * executing tasks.
+ * Returns the approximate number of threads that are actively executing
tasks.
*
* @return the number of threads
*/
@@ -1929,8 +1652,7 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Returns the largest number of threads that have ever
- * simultaneously been in the pool.
+ * Returns the largest number of threads that have ever simultaneously
been in the pool.
*
* @return the number of threads
*/
@@ -1945,10 +1667,8 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Returns the approximate total number of tasks that have ever been
- * scheduled for execution. Because the states of tasks and
- * threads may change dynamically during computation, the returned
- * value is only an approximation.
+ * Returns the approximate total number of tasks that have ever been
scheduled for execution. Because the states of
+ * tasks and threads may change dynamically during computation, the
returned value is only an approximation.
*
* @return the number of tasks
*/
@@ -1970,11 +1690,9 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * Returns the approximate total number of tasks that have
- * completed execution. Because the states of tasks and threads
- * may change dynamically during computation, the returned value
- * is only an approximation, but one that does not ever decrease
- * across successive calls.
+ * Returns the approximate total number of tasks that have completed
execution. Because the states of tasks and
+ * threads may change dynamically during computation, the returned value
is only an approximation, but one that does
+ * not ever decrease across successive calls.
*
* @return the number of tasks
*/
@@ -1999,9 +1717,8 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/**
- * Returns a string identifying this pool, as well as its state,
- * including indications of run state and estimated worker and
- * task counts.
+ * Returns a string identifying this pool, as well as its state, including
indications of run state and estimated
+ * worker and task counts.
*
* @return a string identifying this pool, as well as its state
*/
@@ -2025,87 +1742,66 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
mainLock.unlock();
}
int c = ctl.get();
- String runState =
- isRunning(c) ? "Running" :
- runStateAtLeast(c, TERMINATED) ? "Terminated" :
- "Shutting down";
- return super.toString() +
- "[" + runState +
- ", pool size = " + nworkers +
- ", active threads = " + nactive +
- ", queued tasks = " + workQueue.size() +
- ", completed tasks = " + ncompleted +
- "]";
+ String runState = isRunning(c) ? "Running" : runStateAtLeast(c,
TERMINATED) ? "Terminated" : "Shutting down";
+ return super.toString() + "[" + runState + ", pool size = " + nworkers
+ ", active threads = " + nactive +
+ ", queued tasks = " + workQueue.size() + ", completed tasks =
" + ncompleted + "]";
}
/* Extension hooks */
/**
- * Method invoked prior to executing the given Runnable in the
- * given thread. This method is invoked by thread {@code t} that
- * will execute task {@code r}, and may be used to re-initialize
- * ThreadLocals, or to perform logging.
- *
- * <p>This implementation does nothing, but may be customized in
- * subclasses. Note: To properly nest multiple overridings, subclasses
- * should generally invoke {@code super.beforeExecute} at the end of
- * this method.
+ * Method invoked prior to executing the given Runnable in the given
thread. This method is invoked by thread
+ * {@code t} that will execute task {@code r}, and may be used to
re-initialize ThreadLocals, or to perform logging.
+ * <p>
+ * This implementation does nothing, but may be customized in subclasses.
Note: To properly nest multiple
+ * overridings, subclasses should generally invoke {@code
super.beforeExecute} at the end of this method.
*
* @param t the thread that will run task {@code r}
* @param r the task that will be executed
*/
- protected void beforeExecute(Thread t, Runnable r) { }
+ protected void beforeExecute(Thread t, Runnable r) {
+ }
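A minimal sketch of overriding this hook (illustrative only, not part of this commit): the subclass name is made up, the constructor is assumed to mirror java.util.concurrent.ThreadPoolExecutor, and super.beforeExecute is invoked at the end as the note above recommends.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;

import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;
import org.apache.tomcat.util.threads.ThreadPoolExecutor;

// Illustrative subclass: log each dispatch, then chain to super at the end.
class LoggingExecutor extends ThreadPoolExecutor {
    private static final Log log = LogFactory.getLog(LoggingExecutor.class);

    LoggingExecutor(int core, int max, long keepAlive, TimeUnit unit, BlockingQueue<Runnable> queue) {
        super(core, max, keepAlive, unit, queue);
    }

    @Override
    protected void beforeExecute(Thread t, Runnable r) {
        log.debug("Thread " + t.getName() + " about to run " + r);
        super.beforeExecute(t, r);
    }
}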
/**
- * Method invoked upon completion of execution of the given Runnable.
- * This method is invoked by the thread that executed the task. If
- * non-null, the Throwable is the uncaught {@code RuntimeException}
- * or {@code Error} that caused execution to terminate abruptly.
- *
- * <p>This implementation does nothing, but may be customized in
- * subclasses. Note: To properly nest multiple overridings, subclasses
- * should generally invoke {@code super.afterExecute} at the
- * beginning of this method.
- *
- * <p><b>Note:</b> When actions are enclosed in tasks (such as
- * {@link java.util.concurrent.FutureTask})
- * either explicitly or via methods such as
- * {@code submit}, these task objects catch and maintain
- * computational exceptions, and so they do not cause abrupt
- * termination, and the internal exceptions are <em>not</em>
- * passed to this method. If you would like to trap both kinds of
- * failures in this method, you can further probe for such cases,
- * as in this sample subclass that prints either the direct cause
- * or the underlying exception if a task has been aborted:
+ * Method invoked upon completion of execution of the given Runnable. This
method is invoked by the thread that
+ * executed the task. If non-null, the Throwable is the uncaught {@code
RuntimeException} or {@code Error} that
+ * caused execution to terminate abruptly.
+ * <p>
+ * This implementation does nothing, but may be customized in subclasses.
Note: To properly nest multiple
+ * overridings, subclasses should generally invoke {@code
super.afterExecute} at the beginning of this method.
+ * <p>
+ * <b>Note:</b> When actions are enclosed in tasks (such as {@link
java.util.concurrent.FutureTask}) either
+ * explicitly or via methods such as {@code submit}, these task objects
catch and maintain computational exceptions,
+ * and so they do not cause abrupt termination, and the internal
exceptions are <em>not</em> passed to this method.
+ * If you would like to trap both kinds of failures in this method, you
can further probe for such cases, as in this
+ * sample subclass that prints either the direct cause or the underlying
exception if a task has been aborted:
*
* <pre> {@code
* class ExtendedExecutor extends ThreadPoolExecutor {
- * // ...
- * protected void afterExecute(Runnable r, Throwable t) {
- * super.afterExecute(r, t);
- * if (t == null
- * && r instanceof Future<?>
- * && ((Future<?>)r).isDone()) {
- * try {
- * Object result = ((Future<?>) r).get();
- * } catch (CancellationException ce) {
- * t = ce;
- * } catch (ExecutionException ee) {
- * t = ee.getCause();
- * } catch (InterruptedException ie) {
- * // ignore/reset
- * Thread.currentThread().interrupt();
- * }
+ * // ...
+ * protected void afterExecute(Runnable r, Throwable t) {
+ * super.afterExecute(r, t);
+ * if (t == null && r instanceof Future<?> && ((Future<?>)
r).isDone()) {
+ * try {
+ * Object result = ((Future<?>) r).get();
+ * } catch (CancellationException ce) {
+ * t = ce;
+ * } catch (ExecutionException ee) {
+ * t = ee.getCause();
+ * } catch (InterruptedException ie) {
+ * // ignore/reset
+ * Thread.currentThread().interrupt();
+ * }
+ * }
+ * if (t != null)
+ * System.out.println(t);
* }
- * if (t != null)
- * System.out.println(t);
- * }
- * }}</pre>
+ * }
+ * }</pre>
*
* @param r the runnable that has completed
- * @param t the exception that caused termination, or null if
- * execution completed normally
+ * @param t the exception that caused termination, or null if execution
completed normally
*/
protected void afterExecute(Runnable r, Throwable t) {
// Throwing StopPooledThreadException is likely to cause this method to
@@ -2123,20 +1819,18 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/**
- * If the current thread was started before the last time when a context
was
- * stopped, an exception is thrown so that the current thread is stopped.
+ * If the current thread was started before the last time a context was stopped, an exception is thrown so that
+ * the current thread is stopped.
*/
protected void stopCurrentThreadIfNeeded() {
if (currentThreadShouldBeStopped()) {
long lastTime = lastTimeThreadKilledItself.longValue();
if (lastTime + threadRenewalDelay < System.currentTimeMillis()) {
- if (lastTimeThreadKilledItself.compareAndSet(lastTime,
- System.currentTimeMillis() + 1)) {
+ if (lastTimeThreadKilledItself.compareAndSet(lastTime,
System.currentTimeMillis() + 1)) {
// OK, it's really time to dispose of this thread
- final String msg = sm.getString(
-
"threadPoolExecutor.threadStoppedToAvoidPotentialLeak",
- Thread.currentThread().getName());
+ final String msg =
sm.getString("threadPoolExecutor.threadStoppedToAvoidPotentialLeak",
+ Thread.currentThread().getName());
throw new StopPooledThreadException(msg);
}
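The compareAndSet above is essentially a renewal throttle: across all worker threads, at most one caller per threadRenewalDelay window wins the CAS and stops itself. A minimal stand-alone sketch of that pattern follows; the class and method names are hypothetical and not part of Tomcat.

import java.util.concurrent.atomic.AtomicLong;

// Hypothetical helper illustrating the throttling pattern used above.
class RenewalThrottle {
    private final AtomicLong lastAction = new AtomicLong(0);
    private final long delayMs;

    RenewalThrottle(long delayMs) {
        this.delayMs = delayMs;
    }

    /** Returns true for at most one caller per delay window, even under contention. */
    boolean tryAcquire() {
        long previous = lastAction.get();
        long now = System.currentTimeMillis();
        return previous + delayMs < now && lastAction.compareAndSet(previous, now);
    }
}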
@@ -2155,30 +1849,28 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
/**
- * Method invoked when the Executor has terminated. Default
- * implementation does nothing. Note: To properly nest multiple
- * overridings, subclasses should generally invoke
- * {@code super.terminated} within this method.
+ * Method invoked when the Executor has terminated. Default implementation
does nothing. Note: To properly nest
+ * multiple overridings, subclasses should generally invoke {@code
super.terminated} within this method.
*/
- protected void terminated() { }
+ protected void terminated() {
+ }
/* Predefined RejectedExecutionHandlers */
/**
- * A handler for rejected tasks that runs the rejected task
- * directly in the calling thread of the {@code execute} method,
- * unless the executor has been shut down, in which case the task
- * is discarded.
+ * A handler for rejected tasks that runs the rejected task directly in
the calling thread of the {@code execute}
+ * method, unless the executor has been shut down, in which case the task
is discarded.
*/
public static class CallerRunsPolicy implements RejectedExecutionHandler {
/**
* Creates a {@code CallerRunsPolicy}.
*/
- public CallerRunsPolicy() { }
+ public CallerRunsPolicy() {
+ }
/**
- * Executes task r in the caller's thread, unless the executor
- * has been shut down, in which case the task is discarded.
+ * Executes task r in the caller's thread, unless the executor has
been shut down, in which case the task is
+ * discarded.
*
* @param r the runnable task requested to be executed
* @param e the executor attempting to execute this task
@@ -2192,20 +1884,21 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
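As an illustrative usage (not from this commit): a pool that applies backpressure by running rejected work on the submitting thread. The class name is made up and the constructor overload is assumed to mirror java.util.concurrent.ThreadPoolExecutor.

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.TimeUnit;

import org.apache.tomcat.util.threads.ThreadPoolExecutor;

public class CallerRunsSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor pool = new ThreadPoolExecutor(2, 4, 30, TimeUnit.SECONDS,
                new SynchronousQueue<>(),                    // no queueing: saturation is visible immediately
                new ThreadPoolExecutor.CallerRunsPolicy());  // rejected work runs on the submitting thread

        for (int i = 0; i < 10; i++) {
            // Once all four workers are busy, execute() runs the task inline,
            // slowing the caller instead of losing the task or throwing.
            pool.execute(() -> System.out.println("ran on " + Thread.currentThread().getName()));
        }
        pool.shutdown();
    }
}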
/**
- * A handler for rejected tasks that throws a
- * {@link RejectedExecutionException}.
+ * A handler for rejected tasks that throws a {@link
RejectedExecutionException}.
*/
public static class AbortPolicy implements RejectedExecutionHandler {
/**
* Creates an {@code AbortPolicy}.
*/
- public AbortPolicy() { }
+ public AbortPolicy() {
+ }
/**
* Always throws RejectedExecutionException.
*
* @param r the runnable task requested to be executed
* @param e the executor attempting to execute this task
+ *
* @throws RejectedExecutionException always
*/
@Override
@@ -2216,14 +1909,14 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * A handler for rejected tasks that silently discards the
- * rejected task.
+ * A handler for rejected tasks that silently discards the rejected task.
*/
public static class DiscardPolicy implements RejectedExecutionHandler {
/**
* Creates a {@code DiscardPolicy}.
*/
- public DiscardPolicy() { }
+ public DiscardPolicy() {
+ }
/**
* Does nothing, which has the effect of discarding task r.
@@ -2237,34 +1930,35 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
}
/**
- * A handler for rejected tasks that discards the oldest unhandled
- * request and then retries {@code execute}, unless the executor
- * is shut down, in which case the task is discarded. This policy is
- * rarely useful in cases where other threads may be waiting for
- * tasks to terminate, or failures must be recorded. Instead, consider
- * using a handler of the form:
+ * A handler for rejected tasks that discards the oldest unhandled request
and then retries {@code execute}, unless
+ * the executor is shut down, in which case the task is discarded. This
policy is rarely useful in cases where other
+ * threads may be waiting for tasks to terminate, or failures must be
recorded. Instead, consider using a handler of
+ * the form:
+ *
* <pre> {@code
* new RejectedExecutionHandler() {
- * public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
- * Runnable dropped = e.getQueue().poll();
- * if (dropped instanceof Future<?>) {
- * ((Future<?>)dropped).cancel(false);
- * // also consider logging the failure
+ * public void rejectedExecution(Runnable r, ThreadPoolExecutor e) {
+ * Runnable dropped = e.getQueue().poll();
+ * if (dropped instanceof Future<?>) {
+ * ((Future<?>) dropped).cancel(false);
+ * // also consider logging the failure
+ * }
+ * e.execute(r); // retry
* }
- * e.execute(r); // retry
- * }}}</pre>
+ * }
+ * }</pre>
*/
public static class DiscardOldestPolicy implements
RejectedExecutionHandler {
/**
* Creates a {@code DiscardOldestPolicy} for the given executor.
*/
- public DiscardOldestPolicy() { }
+ public DiscardOldestPolicy() {
+ }
/**
- * Obtains and ignores the next task that the executor
- * would otherwise execute, if one is immediately available,
- * and then retries execution of task r, unless the executor
- * is shut down, in which case task r is instead discarded.
+ * Obtains and ignores the next task that the executor would otherwise
execute, if one is immediately available,
+ * and then retries execution of task r, unless the executor is shut
down, in which case task r is instead
+ * discarded.
*
* @param r the runnable task requested to be executed
* @param e the executor attempting to execute this task
@@ -2289,18 +1983,16 @@ public class ThreadPoolExecutor extends
AbstractExecutorService {
public interface RejectedExecutionHandler {
/**
- * Method that may be invoked by a {@link ThreadPoolExecutor} when
- * {@link ThreadPoolExecutor#execute execute} cannot accept a
- * task. This may occur when no more threads or queue slots are
- * available because their bounds would be exceeded, or upon
- * shutdown of the Executor.
- *
- * <p>In the absence of other alternatives, the method may throw
- * an unchecked {@link RejectedExecutionException}, which will be
- * propagated to the caller of {@code execute}.
+ * Method that may be invoked by a {@link ThreadPoolExecutor} when
{@link ThreadPoolExecutor#execute execute}
+ * cannot accept a task. This may occur when no more threads or queue
slots are available because their bounds
+ * would be exceeded, or upon shutdown of the Executor.
+ * <p>
+ * In the absence of other alternatives, the method may throw an
unchecked {@link RejectedExecutionException},
+ * which will be propagated to the caller of {@code execute}.
*
- * @param r the runnable task requested to be executed
+ * @param r the runnable task requested to be executed
* @param executor the executor attempting to execute this task
+ *
* @throws RejectedExecutionException if there is no remedy
*/
void rejectedExecution(Runnable r, ThreadPoolExecutor executor);
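For completeness, a hedged sketch of a custom handler against this interface: the class name is made up, the nested-type import assumes the interface is declared inside ThreadPoolExecutor as it appears in this file, and the behaviour shown (log some pool state, then propagate the rejection like AbortPolicy) is only one possible policy.

import java.util.concurrent.RejectedExecutionException;

import org.apache.juli.logging.Log;
import org.apache.juli.logging.LogFactory;
import org.apache.tomcat.util.threads.ThreadPoolExecutor;
import org.apache.tomcat.util.threads.ThreadPoolExecutor.RejectedExecutionHandler;

// Illustrative handler: record the rejection, then surface it to the caller.
class LoggingAbortPolicy implements RejectedExecutionHandler {
    private static final Log log = LogFactory.getLog(LoggingAbortPolicy.class);

    @Override
    public void rejectedExecution(Runnable r, ThreadPoolExecutor executor) {
        log.warn("Rejected " + r + " (active=" + executor.getActiveCount() +
                ", queued=" + executor.getQueue().size() + ")");
        throw new RejectedExecutionException("Task " + r + " rejected from " + executor);
    }
}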
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]