package de.am_soft.docbeam.raw.server.worker.new_job.limiter;

import java.io.IOException;
import java.io.InputStream;
import java.util.Objects;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.concurrent.TimedSemaphore;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * An {@link InputStream} decorator limiting the rate at which the wrapped stream is read.
 *
 * <p>Each call to {@link #read()} acquires one permit from a {@link TimedSemaphore} granting
 * {@code permitsPerSec1k * 1024} permits per second, so the configured limit is expressed in
 * KiB/s. A non-positive limit disables throttling entirely, because always using a limiter is
 * slower than not using any if not needed: ~130-140 MBit vs. 100-110 MBit at hbogs-nb.</p>
 */
class LimitingIs extends InputStream
{
	private static final Logger logger = LoggerFactory.getLogger(LimitingIs.class);

	/** Wrapped stream all operations delegate to; never {@code null}. */
	private final InputStream inputStream;

	/** Rate limiter granting one permit per byte read, or {@code null} if reads are unlimited. */
	private final TimedSemaphore limiterIf;

	/**
	 * Creates a new limiter for the given rate, or none at all if no limit was requested.
	 *
	 * <p>Guava's RateLimiter fails for large permit counts:
	 * https://stackoverflow.com/questions/52611148/why-doesnt-guava-ratelimiter-limit-for-too-large-permits-per-second-anymore</p>
	 *
	 * @param permitsPerSec1k Limit in KiB/s; values {@code <= 0} mean "unlimited".
	 * @return A semaphore granting {@code permitsPerSec1k * 1024} permits per second, or
	 *         {@code null} if no limit applies at all.
	 */
	private TimedSemaphore newLimiterIf(int permitsPerSec1k)
	{
		if (permitsPerSec1k <= 0)
		{
			return null;
		}

		// NOTE(review): "permitsPerSec1k * 1024" overflows for limits beyond ~2 GiB/s * 1024;
		// considered irrelevant in practice, as such a limit wouldn't be configured at all.
		return new TimedSemaphore(	1, TimeUnit.SECONDS,
									permitsPerSec1k * 1024);
	}

	/**
	 * Shuts the limiter down if one is in use at all, releasing its internal timer thread.
	 */
	private void shutdownLimiterIf()
	{
		if (this.limiterIf == null)
		{
			return;
		}

		this.limiterIf.shutdown();
	}

	/**
	 * Acquires one permit from the limiter if one is in use at all, blocking until available.
	 *
	 * <p>If the blocked thread gets interrupted, the interrupt flag is restored so callers can
	 * react to it themselves and reading simply continues, possibly exceeding the configured
	 * limit somewhat in that case.</p>
	 */
	private void acquireLimitIf()
	{
		if (this.limiterIf == null)
		{
			return;
		}

		try
		{
			this.limiterIf.acquire();
		}
		catch (InterruptedException e)
		{
			// Never swallow an interrupt: restore the flag so upper layers notice it as well.
			Thread.currentThread().interrupt();
			logger.warn("Limiter interrupted too early, continuing.", e);
		}
	}

	/**
	 * Creates a new instance wrapping the given stream with an optional read limit.
	 *
	 * @param inputStream Stream to read the actual data from; must not be {@code null}.
	 * @param permitsPerSec1k Read limit in KiB/s; values {@code <= 0} disable limiting.
	 */
	LimitingIs(	InputStream	inputStream,
				int			permitsPerSec1k)
	{
		this.inputStream	= Objects.requireNonNull(inputStream, "No input stream given.");
		this.limiterIf		= this.newLimiterIf(permitsPerSec1k);
	}

	@Override
	public int available() throws IOException
	{
		return this.inputStream.available();
	}

	@Override
	public void close() throws IOException
	{
		// Shut the limiter down even if closing the wrapped stream fails, so the semaphore's
		// timer thread doesn't leak in that case.
		try
		{
			this.inputStream.close();
		}
		finally
		{
			this.shutdownLimiterIf();
		}
	}

	@Override
	public synchronized void mark(int readlimit)
	{
		this.inputStream.mark(readlimit);
	}

	@Override
	public boolean markSupported()
	{
		return this.inputStream.markSupported();
	}

	@Override
	public int read() throws IOException
	{
		// Not sure if it's better to handle this here or in the method reading blocks instead. I
		// don't have control over the size of those blocks, but each one of those would be the
		// least amount of data permittable, which might result in bursts in case of large blocks
		// instead of normalized distribution of individual bytes over time. Guava RateLimiter e.g.
		// initially grants a block of 100 MB and a subsequent block needs to wait until the limit
		// is available again, resulting in 100 MB permitted at once again. I would like to have a
		// stable rate of some KiB/s always instead.
		//
		// OTOH, we don't limit sending the output directly, only reading the data to send, which
		// indirectly limits sending it. But whoever actually sends the data, reads blocks from us
		// most likely. So we limit at a very low level with possibly lots of overhead and permit
		// blocks of some unknown size anyway?
		this.acquireLimitIf();

		return this.inputStream.read();
	}

	@Override
	public synchronized void reset() throws IOException
	{
		this.inputStream.reset();
	}
}
