Commit 2eae50c8 authored by Tobias Pietzsch

fix javadoc errors

parent d8f94e60
@@ -195,10 +195,12 @@ public final class LoadingVolatileCache< K, V extends VolatileCacheValue > imple
 /**
  * Prepare the cache for providing data for the "next frame":
  * <ul>
- * <li>the contents of fetch queues is moved to the prefetch.
- * <li>some cleaning up of garbage collected entries ({@link VolatileCache#finalizeRemovedCacheEntries()}).
- * <li>the internal frame counter is incremented, which will enable
- * previously enqueued requests to be enqueued again for the new frame.
+ * <li>Move pending cell request to the prefetch queue (
+ * {@link BlockingFetchQueues#clearToPrefetch()}).
+ * <li>Perform pending cache maintenance operations (
+ * {@link WeakSoftCache#cleanUp()}).
+ * <li>Increment the internal frame counter, which will enable previously
+ * enqueued requests to be enqueued again for the new frame.
  * </ul>
  */
 @Override
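The three bullets of the new javadoc map onto three operations. As a rough sketch only (not the actual LoadingVolatileCache code; it assumes the overridden method is CacheControl#prepareNextFrame(), and the fields queue, cache, and currentQueueFrame are made up for illustration):

// Sketch of a possible implementation body, using the operations named in the javadoc.
@Override
public void prepareNextFrame()
{
	queue.clearToPrefetch();   // move pending requests to the prefetch queue
	cache.cleanUp();           // perform pending cache maintenance
	++currentQueueFrame;       // let requests enqueued in the previous frame be enqueued again
}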
@@ -13,19 +13,17 @@ public interface VolatileCacheValueLoader< V extends VolatileCacheValue >
 {
 /**
  * Create an empty, that is, invalid (see
- * {@link VolatileCacheValue#isValid()}) value for the given key.
+ * {@link VolatileCacheValue#isValid()}) value.
  *
- * @param key
- * @return
+ * @return an empty placeholder value
  */
 public V createEmptyValue();

 /**
- * Load the value for the given key. The returned value is
- * {@link VolatileCacheValue#isValid() valid}.
+ * Load the value. The returned value is {@link VolatileCacheValue#isValid()
+ * valid}.
  *
- * @param key
- * @return
+ * @return the loaded value
  * @throws InterruptedException
  */
 public V load() throws InterruptedException;
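To make the corrected contract concrete (createEmptyValue() returns an invalid placeholder, load() returns a valid value), here is a hedged sketch of an implementer; the ConstantValue type and its fields are invented for the example and are not part of bdv.

import bdv.cache.VolatileCacheValue;
import bdv.cache.VolatileCacheValueLoader;

// Invented example value type -- not part of the bdv API.
class ConstantValue implements VolatileCacheValue
{
	final int data;

	final boolean valid;

	ConstantValue( final int data, final boolean valid )
	{
		this.data = data;
		this.valid = valid;
	}

	@Override
	public boolean isValid()
	{
		return valid;
	}
}

// Invented loader illustrating the documented contract.
class ConstantValueLoader implements VolatileCacheValueLoader< ConstantValue >
{
	@Override
	public ConstantValue createEmptyValue()
	{
		// empty placeholder: isValid() == false
		return new ConstantValue( 0, false );
	}

	@Override
	public ConstantValue load() throws InterruptedException
	{
		// loaded value: isValid() == true
		return new ConstantValue( 42, true );
	}
}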
@@ -10,7 +10,7 @@ import bdv.cache.VolatileCacheValue;
  * A set of threads that load data. Each thread does the following in a loop:
  * <ol>
  * <li>Take the next {@code key} from a queue.</li>
- * <li>Try {@link Loader#load() loading} the key's data (retry until that
+ * <li>Try {@link Loader#load(Object) loading} the key's data (retry until that
  * succeeds).</li>
  * </ol>
  * {@link FetcherThreads} are employed by {@link LoadingVolatileCache} to
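The loop described in this javadoc can be paraphrased by the following sketch; it is a simplified stand-in, not the actual FetcherThreads code, and the nested Loader interface is a local stand-in for the one linked above.

import java.util.concurrent.BlockingQueue;

// Simplified stand-in for one fetcher thread's loop.
class FetchLoopSketch< K >
{
	// Stand-in for the Loader#load(Object) referenced in the javadoc.
	interface Loader< T >
	{
		void load( T key ) throws Exception;
	}

	void run( final BlockingQueue< K > queue, final Loader< K > loader ) throws InterruptedException
	{
		while ( true )
		{
			// 1. take the next key from the queue
			final K key = queue.take();

			// 2. try loading the key's data, retrying until it succeeds
			boolean loaded = false;
			while ( !loaded )
			{
				try
				{
					loader.load( key );
					loaded = true;
				}
				catch ( final InterruptedException e )
				{
					throw e;
				}
				catch ( final Exception e )
				{
					// loading failed, retry
				}
			}
		}
	}
}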
@@ -33,6 +33,8 @@ import bdv.cache.CacheHints;
 import bdv.cache.CacheIoTiming;
 import bdv.cache.LoadingVolatileCache;
 import bdv.cache.VolatileCacheValueLoader;
+import bdv.cache.WeakSoftCache;
+import bdv.cache.util.BlockingFetchQueues;
 import bdv.img.cache.VolatileImgCells.CellCache;
 import net.imglib2.img.basictypeaccess.volatiles.VolatileAccess;
@@ -119,7 +121,7 @@ public class VolatileGlobalCellCache implements CacheControl
 }

 /**
- * pause all {@link Fetcher} threads for the specified number of milliseconds.
+ * pause all fetcher threads for the specified number of milliseconds.
 */
 public void pauseFetcherThreadsFor( final long ms )
 {
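As an aside, a typical use of this method (an assumption about caller behavior, not something stated in this commit) is for an interactive viewer to briefly pause background loading when the view changes, so that requests for the newly visible data are queued first:

// Hypothetical caller; the 25 ms value is an arbitrary example.
void viewerTransformChanged( final VolatileGlobalCellCache cache )
{
	cache.pauseFetcherThreadsFor( 25 );
}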
@@ -129,10 +131,12 @@ public class VolatileGlobalCellCache implements CacheControl
 /**
  * Prepare the cache for providing data for the "next frame":
  * <ul>
- * <li>the contents of fetch queues is moved to the prefetch.
- * <li>some cleaning up of garbage collected entries ({@link VolatileCache#finalizeRemovedCacheEntries()}).
- * <li>the internal frame counter is incremented, which will enable
- * previously enqueued requests to be enqueued again for the new frame.
+ * <li>Move pending cell request to the prefetch queue (
+ * {@link BlockingFetchQueues#clearToPrefetch()}).
+ * <li>Perform pending cache maintenance operations (
+ * {@link WeakSoftCache#cleanUp()}).
+ * <li>Increment the internal frame counter, which will enable previously
+ * enqueued requests to be enqueued again for the new frame.
  * </ul>
  */
 @Override
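Since VolatileGlobalCellCache implements CacheControl, the usual call site for the method documented above is once per rendered frame. A minimal sketch, assuming the overridden method is CacheControl#prepareNextFrame() and with renderFrame() as a made-up placeholder for whatever triggers cell requests (imports omitted):

// Sketch of a per-frame driver around the documented method.
class RenderLoopSketch
{
	void paint( final CacheControl cacheControl )
	{
		cacheControl.prepareNextFrame(); // re-enable requests from the previous frame
		renderFrame();                   // placeholder for rendering that requests cells
	}

	void renderFrame()
	{
		// placeholder for actual rendering
	}
}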
@@ -93,7 +93,7 @@ public class VolatileImgCells< A extends VolatileAccess > extends AbstractCells<
  * If a cell is enqueued, it is enqueued in the queue with the specified
  * {@link CacheHints#getQueuePriority() queue priority}. Priorities are
  * consecutive integers <em>0 ... n-1</em>, where 0 is the highest
- * priority. Requests with priority <em>i &lt j</em> will be handled
+ * priority. Requests with priority <em>i &lt; j</em> will be handled
  * before requests with priority <em>j</em>.
  * <p>
  * Finally, the {@link CacheHints#isEnqueuToFront() queue order}
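To illustrate the corrected statement about priorities, a hedged example of constructing two hint objects; this assumes CacheHints has a (loading strategy, queue priority, enqueue-to-front) constructor and that LoadingStrategy.VOLATILE exists, which should be checked against the classes at this revision.

// Cells requested with highPriority (0) are fetched before cells
// requested with lowPriority (1); both use the same loading strategy.
final CacheHints highPriority = new CacheHints( LoadingStrategy.VOLATILE, 0, false );
final CacheHints lowPriority = new CacheHints( LoadingStrategy.VOLATILE, 1, false );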