alexw wrote:
 I _think_ the issue was some kind of stuttering playback; it didn't
always happen, but often enough that one day I decided to see if writing
the stream in much larger chunks would help. It did; I've never seen it since.


I am using the following VDR setup: 2 VDR machines in client(FF)/server(budget) mode (thank to streamdev) over wifi. The video folder is shared over the network in CIFS (aka Samba). Actually replaying VDR

I have a similar setup, just with 100M ethernet instead of wifi and NFS instead
of samba. wifi could be a problem, but if you're able to watch the same channel
live you should also be able to view a recording.

recording is not smooth all the time and the timeshifting on the client side is not working at all.

Timeshifting on the client requires 3x the bandwidth and a lot of data going in
both directions, so it may not work all that well over wireless.

Recording on the server and playing on the client at the same time results in alternating smooth and frozen playback, with lip-sync issues. Watching live programs is perfect.

But this sounds very much like the problem i was seeing. In my case it was
dependent on the bitrate and number of simultaneous recordings IIRC.

If you'd like to try a patch, attached is what I've been using since May 2007.
It could help, but might also not change anything at all; the main reason
I'm interested in whether it helps is that I have O_DIRECT support done on top
of this, and am not sure whether I should keep the write coalescing for the case
where O_DIRECT isn't available.
It contains:
- various code cleanups (which I made after Klaus merged the fadvise-related
 modifications; they make the patch a bit harder to read, but should be
 otherwise harmless)
- tweaks to improve playback (smarter readahead)
- tweaks to improve cutting performance (didn't help much, IIRC)
- write coalescing - the stream is written in multi-megabyte chunks instead
 of a dozen or so K at a time. This is the interesting part; it adds
 another data copy, but seems to have made the stuttering playback disappear.
 (this change will only make a difference when applied to the vdr that does
 the recording, ie the server; the fadvise tweaks mentioned above could also
 improve things when applied to the client)

Patch made against vdr-1.4.7, but applies to 1.6.0 too.
artur
diff --git a/cutter.c b/cutter.c
index 5170ae4..7e2e506 100644
--- a/cutter.c
+++ b/cutter.c
@@ -66,7 +66,8 @@ void cCuttingThread::Action(void)
      toFile = toFileName->Open();
      if (!fromFile || !toFile)
         return;
-     fromFile->SetReadAhead(MEGABYTE(20));
+     fromFile->CuttingSrc();
+     toFile->CuttingDst();
      int Index = Mark->position;
      Mark = fromMarks.Next(Mark);
      int FileSize = 0;
@@ -91,7 +92,7 @@ void cCuttingThread::Action(void)
            if (fromIndex->Get(Index++, &FileNumber, &FileOffset, &PictureType, 
&Length)) {
               if (FileNumber != CurrentFileNumber) {
                  fromFile = fromFileName->SetOffset(FileNumber, FileOffset);
-                 fromFile->SetReadAhead(MEGABYTE(20));
+                 fromFile->CuttingSrc();
                  CurrentFileNumber = FileNumber;
                  }
               if (fromFile) {
@@ -124,6 +125,7 @@ void cCuttingThread::Action(void)
                     error = "toFile 1";
                     break;
                     }
+                 toFile->CuttingDst();
                  FileSize = 0;
                  }
               LastIFrame = 0;
@@ -164,6 +166,7 @@ void cCuttingThread::Action(void)
                        error = "toFile 2";
                        break;
                        }
+                    toFile->CuttingDst();
                     FileSize = 0;
                     }
                  }
diff --git a/tools.c b/tools.c
index 255c806..a14f799 100644
--- a/tools.c
+++ b/tools.c
@@ -1055,7 +1055,8 @@ bool cSafeFile::Close(void)
 
 #define USE_FADVISE
 
-#define WRITE_BUFFER KILOBYTE(800)
+//#define dfsyslog dsyslog                     // uncomment to turn on fadvise 
related logging
+#define dfsyslog(a...) do {} while (0)
 
 cUnbufferedFile::cUnbufferedFile(void)
 {
@@ -1073,12 +1074,19 @@ int cUnbufferedFile::Open(const char *FileName, int 
Flags, mode_t Mode)
   fd = open(FileName, Flags, Mode);
   curpos = 0;
 #ifdef USE_FADVISE
-  begin = lastpos = ahead = 0;
-  cachedstart = 0;
-  cachedend = 0;
-  readahead = KILOBYTE(128);
+  lastpos = 0;
+  lastjump = 0;
+  cachedstart = cachedend = 0;
+  // 4M readahead seems to work for playback while cutting,
+  // but isn't quite enough for FF while cutting...
+  readahead = MEGABYTE(12);
+  writebuffer = KILOBYTE(2000);
   written = 0;
   totwritten = 0;
+  cutting = 0;
+  
+  wbuf = NULL;
+  
   if (fd >= 0)
      posix_fadvise(fd, 0, 0, POSIX_FADV_RANDOM); // we could use 
POSIX_FADV_SEQUENTIAL, but we do our own readahead, disabling the kernel one.
 #endif
@@ -1087,6 +1095,11 @@ int cUnbufferedFile::Open(const char *FileName, int 
Flags, mode_t Mode)
 
 int cUnbufferedFile::Close(void)
 {
+  if (fd >= 0 && wbuf) {
+     if (wbuf_len)
+        WriteBuf(wbuf, wbuf_len);
+     free(wbuf);
+     }
 #ifdef USE_FADVISE
   if (fd >= 0) {
      if (totwritten)    // if we wrote anything make sure the data has hit the 
disk before
@@ -1107,15 +1120,35 @@ int cUnbufferedFile::Close(void)
 #define FADVGRAN   KILOBYTE(4) // AKA fadvise-chunk-size; PAGE_SIZE or 
getpagesize(2) would also work.
 #define READCHUNK  MEGABYTE(8)
 
-void cUnbufferedFile::SetReadAhead(size_t ra)
+void cUnbufferedFile::CuttingSrc(void)
+{
+  readahead = MEGABYTE(20);
+  cutting = 1;
+}
+
+void cUnbufferedFile::CuttingDst(void)
 {
-  readahead = ra;
+  writebuffer = MEGABYTE(16);  // 4M is too little.
+  cutting = 2;
 }
 
 int cUnbufferedFile::FadviseDrop(off_t Offset, off_t Len)
 {
-  // rounding up the window to make sure that not PAGE_SIZE-aligned data gets 
freed.
-  return posix_fadvise(fd, Offset - (FADVGRAN - 1), Len + (FADVGRAN - 1) * 2, 
POSIX_FADV_DONTNEED);
+  // Round up the window to make sure that not PAGE_SIZE-aligned data gets 
freed.
+  // Note that that also means calling this with Len==0 isn't special (unlike 
fadvise).
+  dfsyslog("DROP: fd:%3d %9zd .. %9zd SIZE: %6zd", fd, Offset, Offset+Len, 
Len);
+  off_t prewin = min(Offset ,(off_t)FADVGRAN - 1); // must not wrap below 0.
+  return posix_fadvise(fd, Offset - prewin, Len + prewin + (FADVGRAN - 1), 
POSIX_FADV_DONTNEED);
+}
+
+// Trigger background readahead on the specified range and add
+// it to the "cached" area so that we can drop the data later.
+int cUnbufferedFile::FadviseRead(off_t Offset, off_t Len)
+{
+  dfsyslog("WANT: fd:%3d %9zd .. %9zd SIZE: %6zd", fd, Offset, Offset+Len, 
Len);
+  cachedstart = min(cachedstart, Offset);
+  cachedend = max(cachedend, Offset+Len);
+  return posix_fadvise(fd, Offset, Len, POSIX_FADV_WILLNEED);
 }
 
 off_t cUnbufferedFile::Seek(off_t Offset, int Whence)
@@ -1131,82 +1164,107 @@ ssize_t cUnbufferedFile::Read(void *Data, size_t Size)
   if (fd >= 0) {
 #ifdef USE_FADVISE
      off_t jumped = curpos-lastpos; // nonzero means we're not at the last 
offset
-     if ((cachedstart < cachedend) && (curpos < cachedstart || curpos > 
cachedend)) {
+     
+     dfsyslog("READ: fd:%3d %9zd .. %9zd SIZE: %6zd jump: %9zd ra: %7zd", fd, 
curpos, curpos+Size, Size, jumped, readahead);
+     
+     if (curpos < cachedstart || curpos > cachedend) {
         // current position is outside the cached window -- invalidate it.
-        FadviseDrop(cachedstart, cachedend-cachedstart);
+        if (cachedstart != cachedend)
+           FadviseDrop(cachedstart, cachedend-cachedstart);
         cachedstart = curpos;
         cachedend = curpos;
         }
-     cachedstart = min(cachedstart, curpos);
 #endif
      ssize_t bytesRead = safe_read(fd, Data, Size);
 #ifdef USE_FADVISE
      if (bytesRead > 0) {
+        cachedstart = min(cachedstart, curpos);
         curpos += bytesRead;
-        cachedend = max(cachedend, curpos);
-
         // Read ahead:
-        // no jump? (allow small forward jump still inside readahead window).
-        if (jumped >= 0 && jumped <= (off_t)readahead) {
-           // Trigger the readahead IO, but only if we've used at least
-           // 1/2 of the previously requested area. This avoids calling
-           // fadvise() after every read() call.
-           if (ahead - curpos < (off_t)(readahead / 2)) {
-              posix_fadvise(fd, curpos, readahead, POSIX_FADV_WILLNEED);
-              ahead = curpos + readahead;
-              cachedend = max(cachedend, ahead);
-              }
-           if (readahead < Size * 32) { // automagically tune readahead size.
-              readahead = Size * 32;
+        // no jump or small forward jump still inside readahead window.
+        if (jumped >= 0 && curpos <= cachedend) {
+           // Trigger the readahead IO, but only if we've used at least some 
of the previously
+           // requested area. This avoids calling fadvise() after every read() 
call.
+           size_t cachedsize = cachedend - curpos;
+           size_t ra = cachedsize + Size*2 + (size_t)jumped*1;
+           if (cutting)
+              ra += KILOBYTE(64);
+           ra = min(readahead, ra);
+           // Start I/O if we A) used some of the data or B) can read 
sufficiently large new chunk.
+           // (A) is important when starting w/ a small readahead.
+           if (cachedsize < (ra-ra/4) || cachedsize+KILOBYTE(256) <= ra)
+              FadviseRead(curpos, ra);
+           }
+        else if (jumped >= 0) {    // either large forward jump, or FF (jumps 
by ~4xSize)
+              FadviseRead(curpos, ((size_t)jumped < 
Size*8)?(jumped+Size)*2:Size*2);
+        }
+        else /*if (jumped < 0)*/ { // backward jump:
+           // We don't want any readahead, otherwise e.g. fast-rewind gets in 
trouble.
+#if 1
+           // But we'll do some read behind for sequential short backward 
jumps.
+           size_t rbsize = -jumped * 4;
+           if (lastjump <= 0 && (size_t)-jumped < Size * 16 && 
(size_t)((curpos-Size) - cachedstart ) < rbsize) {
+              // current position has moved back enough, grow tail window.
+              off_t start = max((off_t)0, (off_t)((curpos - Size) - rbsize));
+              FadviseRead(start, rbsize);
               }
+#endif
+           // We could reduce readahead window here. But this would lead to 
already
+           // prefetched data being thrown out by the code below; not exactly 
ideal
+           // if this jump was only caused by a play mode transition etc.
+           //readahead = Size * 8;
            }
-        else
-           ahead = curpos; // jumped -> we really don't want any readahead, 
otherwise e.g. fast-rewind gets in trouble.
+        cachedend = max(cachedend, curpos);
         }
 
      if (cachedstart < cachedend) {
-        if (curpos - cachedstart > READCHUNK * 2) {
+        off_t maxtail = cutting ? KILOBYTE(64) : READCHUNK;
+        off_t maxhead = max(readahead, (size_t)READCHUNK);
+        if (jumped >= 0 && curpos - cachedstart >= maxtail * 2) {
            // current position has moved forward enough, shrink tail window.
-           FadviseDrop(cachedstart, curpos - READCHUNK - cachedstart);
-           cachedstart = curpos - READCHUNK;
+           FadviseDrop(cachedstart, (curpos - maxtail) - cachedstart);
+           cachedstart = curpos - maxtail;
            }
-        else if (cachedend > ahead && cachedend - curpos > READCHUNK * 2) {
+        else if (jumped < 0 && lastjump < 0 && cachedend - curpos >= maxhead * 
2) {
            // current position has moved back enough, shrink head window.
-           FadviseDrop(curpos + READCHUNK, cachedend - (curpos + READCHUNK));
-           cachedend = curpos + READCHUNK;
+           // (a large readahead value may prevent this)
+           FadviseDrop(curpos + maxhead, cachedend - (curpos + maxhead));
+           cachedend = curpos + maxhead;
            }
         }
      lastpos = curpos;
+     lastjump = jumped;
 #endif
      return bytesRead;
      }
   return -1;
 }
 
-ssize_t cUnbufferedFile::Write(const void *Data, size_t Size)
+ssize_t cUnbufferedFile::WriteBuf(const void *Data, size_t Size)
 {
   if (fd >=0) {
      ssize_t bytesWritten = safe_write(fd, Data, Size);
+     //dsyslog("WRIT: fd:%3d %9zd .. %9zd SIZE: %6zd", fd, curpos, 
curpos+Size, Size);
 #ifdef USE_FADVISE
      if (bytesWritten > 0) {
-        begin = min(begin, curpos);
+        cachedstart = min(cachedstart, curpos);
         curpos += bytesWritten;
         written += bytesWritten;
-        lastpos = max(lastpos, curpos);
-        if (written > WRITE_BUFFER) {
-           if (lastpos > begin) {
+        cachedend = max(cachedend, curpos);
+        if (written > writebuffer) {
+           if (cachedend > cachedstart) {
               // Now do three things:
-              // 1) Start writeback of begin..lastpos range
+              // 1) Start writeback of cachedstart..cachedend range
               // 2) Drop the already written range (by the previous fadvise 
call)
               // 3) Handle nonpagealigned data.
-              //    This is why we double the WRITE_BUFFER; the first time 
around the
+              //    This is why we double the writebuffer; the first time 
around the
               //    last (partial) page might be skipped, writeback will start 
only after
               //    second call; the third call will still include this page 
and finally
               //    drop it from cache.
-              off_t headdrop = min(begin, WRITE_BUFFER * 2L);
-              posix_fadvise(fd, begin - headdrop, lastpos - begin + headdrop, 
POSIX_FADV_DONTNEED);
+              off_t headdrop = min(cachedstart, (off_t)writebuffer * 2);
+              posix_fadvise(fd, cachedstart - headdrop, cachedend - 
cachedstart + headdrop, POSIX_FADV_DONTNEED);
               }
-           begin = lastpos = curpos;
+           cachedstart = cachedend = curpos;
            totwritten += written;
            written = 0;
            // The above fadvise() works when writing slowly (recording), but 
could
@@ -1216,7 +1274,7 @@ ssize_t cUnbufferedFile::Write(const void *Data, size_t 
Size)
            // So we do another round of flushing, just like above, but at 
larger
            // intervals -- this should catch any pages that couldn't be 
released
            // earlier.
-           if (totwritten > MEGABYTE(32)) {
+           if (totwritten > MEGABYTE(32) + writebuffer ) {
               // It seems in some setups, fadvise() does not trigger any I/O 
and
               // a fdatasync() call would be required do all the work 
(reiserfs with some
               // kind of write gathering enabled), but the syncs cause (io) 
load..
@@ -1234,6 +1292,26 @@ ssize_t cUnbufferedFile::Write(const void *Data, size_t 
Size)
   return -1;
 }
 
+ssize_t cUnbufferedFile::Write(const void *Data, size_t Size)
+{
+  if (!wbuf) {
+     wbuf_chunk = cutting?MEGABYTE(8):MEGABYTE(4);
+     wbuf = MALLOC(uchar,wbuf_chunk);
+     if (!wbuf)
+        return WriteBuf(Data, Size);
+     wbuf_len = 0;
+  }
+  if (Size <= wbuf_chunk-wbuf_len) {
+     memcpy(wbuf+wbuf_len, Data, Size);
+     wbuf_len += Size;
+  } else {
+     WriteBuf(wbuf, wbuf_len);
+     memcpy(wbuf, Data, Size);
+     wbuf_len = Size;
+  }
+  return Size;
+}
+
 cUnbufferedFile *cUnbufferedFile::Create(const char *FileName, int Flags, 
mode_t Mode)
 {
   cUnbufferedFile *File = new cUnbufferedFile;
diff --git a/tools.h b/tools.h
index 1f71ec4..ce7283c 100644
--- a/tools.h
+++ b/tools.h
@@ -246,19 +246,26 @@ private:
   off_t curpos;
   off_t cachedstart;
   off_t cachedend;
-  off_t begin;
   off_t lastpos;
-  off_t ahead;
+  off_t lastjump;
   size_t readahead;
+  uchar *wbuf;
+  int wbuf_len;
+  int wbuf_chunk;
   size_t written;
   size_t totwritten;
+  int cutting;
+  size_t writebuffer;
   int FadviseDrop(off_t Offset, off_t Len);
+  int FadviseRead(off_t Offset, off_t Len);
+  ssize_t WriteBuf(const void *Data, size_t Size);
 public:
   cUnbufferedFile(void);
   ~cUnbufferedFile();
   int Open(const char *FileName, int Flags, mode_t Mode = DEFFILEMODE);
   int Close(void);
-  void SetReadAhead(size_t ra);
+  void CuttingSrc(void);
+  void CuttingDst(void);
   off_t Seek(off_t Offset, int Whence);
   ssize_t Read(void *Data, size_t Size);
   ssize_t Write(const void *Data, size_t Size);
_______________________________________________
vdr mailing list
vdr@linuxtv.org
http://www.linuxtv.org/cgi-bin/mailman/listinfo/vdr

Reply via email to