I fixed the segmentation fault and the image artifacts.

This new patch assumes the profiling patch from my previous post has
already been applied. It contains all the shader work from my first
post, but this time it doesn't crash.

Profiling results for 500 frames of 1280x720 MPEG-2 video on a MacBook
Pro Core 2 Duo 2.16 GHz with a Radeon X1600:

before:  ~14 seconds (35 fps)
patched: ~10 seconds (50 fps)

Next I'll try to eliminate some of the memcpy operations in the
decoding pipeline to improve the figures some more.
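The rough idea for that (untested sketch; whether the plane buffers stay
valid until the texture upload, and whether ImageData keeps the ctypes
array without copying it internally, are assumptions I still have to
verify) is to hand the AVFrame plane pointers to ImageData directly
instead of memmove()ing each plane into a fresh buffer. Using the names
that exist in avbin.py after this patch (ctypes, image, frame, width,
height); wrap_plane is just a hypothetical helper:

    # Sketch only: reinterpret a decoded plane as a ctypes array that
    # shares memory with libavcodec's frame buffer, instead of copying it.
    # Only safe if the upload happens before the next
    # avcodec_decode_video() call reuses the frame.
    def wrap_plane(ptr, rows, pitch):
        buf_type = ctypes.c_ubyte * (rows * pitch)
        return ctypes.cast(ptr, ctypes.POINTER(buf_type)).contents

    y_plane = image.ImageData(width, height, 'L',
                              wrap_plane(frame.data[0], height, frame.linesize[0]),
                              frame.linesize[0])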
Jan
--- examples/media_player.py 2009-11-27 21:45:41.000000000 +0100
+++ pyglet/examples/media_player.py 2009-11-27 22:23:25.000000000 +0100
@@ -44,6 +44,7 @@
from pyglet.gl import *
import pyglet
from pyglet.window import key
+from shader import FragmentShader, ShaderError, ShaderProgram
def draw_rect(x, y, width, height):
glBegin(GL_LINE_LOOP)
@@ -299,16 +300,84 @@
# Video
if self.player.source and self.player.source.video_format:
if _profile: self.player.update_texture()
- self.player.get_texture().blit(self.video_x,
- self.video_y,
- width=self.video_width,
- height=self.video_height)
+ glPushAttrib(GL_ENABLE_BIT)
+ index = 0
+ texs = self.player._textures
+ if texs[0] is not None and texs[1] is not None and texs[2] is not None:
+ glUseProgram(shader.id)
+
+ arrays = [None]*3
+ for i in range(3):
+ arrays[i] = texs[i].get_coord_array( \
+ self.video_x, \
+ self.video_y, \
+ width=self.video_width, \
+ height=self.video_height \
+ )
+
+ glActiveTexture((GL_TEXTURE0,GL_TEXTURE1,GL_TEXTURE2)[i])
+ glClientActiveTexture((GL_TEXTURE0,GL_TEXTURE1,GL_TEXTURE2)[i])
+ l = glGetUniformLocationARB(shader.id, ("Ytex", "Utex", "Vtex")[i])
+ glUniform1iARB(l,i)
+
+ glEnable(texs[i].target)
+ glBindTexture(texs[i].target, texs[i].id)
+
+ glBegin(GL_QUADS)
+
+ for p in range(4):
+ x, y = arrays[0][p*8+4:p*8+6]
+ glColor4f(1.0, 1.0, 1.0, 1.0)
+ for i in range(3):
+ u, v = arrays[i][p*8+0:p*8+2]
+ glMultiTexCoord2fARB(
+ (GL_TEXTURE0_ARB,
+ GL_TEXTURE1_ARB,
+ GL_TEXTURE2_ARB)[i],
+ u, v)
+ glVertex2f (x, y)
+ glEnd()
+ glUseProgram(0)
+ glPopAttrib()
# GUI
self.slider.value = self.player.time
for control in self.controls:
control.draw()
+shader = None
+def install_shaders():
+ global shader
+
+ # the fragment shader was borrowed from:
+ # http://www.fourcc.org/source/YUV420P-OpenGL-GLSLang.c
+ fsrc = """
+ uniform sampler2DRect Ytex;
+ uniform sampler2DRect Utex,Vtex;
+ void main(void) {
+ float r,g,b,y,u,v;
+
+ y=texture2DRect(Ytex,gl_TexCoord[0].xy).r;
+ u=texture2DRect(Utex,gl_TexCoord[1].xy).r;
+ v=texture2DRect(Vtex,gl_TexCoord[2].xy).r;
+
+ y=1.1643*(y-0.0625);
+ u=u-0.5;
+ v=v-0.5;
+
+ r=y+1.5958*v;
+ g=y-0.39173*u-0.81290*v;
+ b=y+2.017*u;
+
+ gl_FragColor=vec4(r,g,b,1.0);
+ }
+ """
+ fshader = FragmentShader([fsrc])
+
+ shader = ShaderProgram(fshader)
+ shader.use()
+ glUseProgram(0)
+
_profile = pyglet.options['profile_media']
if __name__ == '__main__':
@@ -322,6 +391,12 @@
player = pyglet.media.Player()
window = PlayerWindow(player)
+ try:
+ install_shaders()
+ except ShaderError, e:
+ print str(e)
+ sys.exit(2)
+
source = pyglet.media.load(filename)
player.queue(source)
@@ -331,6 +406,10 @@
window.set_default_video_size()
window.set_visible(True)
+ if not pyglet.gl.gl_info.have_extension('GL_ARB_multitexture'):
+ print 'no GL_ARB_multitexture'
+ sys.exit(-1)
+
player.play()
window.gui_update_state()
--- pyglet/image/__init__.py 2009-11-26 22:17:05.000000000 +0100
+++ ../pyglet/pyglet/image/__init__.py 2009-11-26 15:10:41.000000000 +0100
@@ -1607,7 +1607,7 @@
# no implementation of blit_to_texture yet (could use aux buffer)
- def blit(self, x, y, z=0, width=None, height=None):
+ def get_coord_array(self, x, y, z=0, width=None, height=None):
t = self.tex_coords
x1 = x - self.anchor_x
y1 = y - self.anchor_y
@@ -1622,7 +1622,10 @@
x2, y2, z, 1.,
t[9], t[10], t[11], 1.,
x1, y2, z, 1.)
-
+ return array
+
+ def blit(self, x, y, z=0, width=None, height=None):
+ array = self.get_coord_array(x, y, z, width, height)
glPushAttrib(GL_ENABLE_BIT)
glEnable(self.target)
glBindTexture(self.target, self.id)
--- pyglet/media/__init__.py 2009-11-27 21:58:41.000000000 +0100
+++ ../pyglet/pyglet/media/__init__.py 2009-11-27 22:07:08.000000000 +0100
@@ -910,6 +910,7 @@
_last_video_timestamp = None
_texture = None
+ _textures = [None] * 4
# Spacialisation attributes, preserved between audio players
_volume = 1.0
@@ -1102,12 +1103,19 @@
time = property(_get_time)
- def _create_texture(self):
+ def _create_texture(self, index=0, width=None, height=None):
video_format = self.source.video_format
- self._texture = pyglet.image.Texture.create(
- video_format.width, video_format.height, rectangle=True)
- self._texture = self._texture.get_transform(flip_y=True)
- self._texture.anchor_y = 0
+ if width is None: width = video_format.width
+ if height is None: height = video_format.height
+ tex = pyglet.image.Texture.create(
+ width, height, rectangle=True)
+ tex = tex.get_transform(flip_y=True)
+ tex.anchor_y = 0
+
+ self._textures[index] = tex
+
+ if index == 0:
+ self._texture = tex
def get_texture(self):
return self._texture
@@ -1142,11 +1150,12 @@
self._last_video_timestamp = None
return
- image = self._groups[0].get_next_video_frame()
- if image is not None:
- if self._texture is None:
- self._create_texture()
- self._texture.blit_into(image, 0, 0, 0)
+ images = self._groups[0].get_next_video_frame()
+ if images is not None:
+ for i in range(len(images)):
+ if self._textures[i] is None:
+ self._create_texture(i, images[i].width, images[i].height)
+ self._textures[i].blit_into(images[i], 0, 0, 0)
self._last_video_timestamp = ts
def _set_eos_action(self, eos_action):
--- pyglet/media/avbin.py 2009-11-27 20:34:03.000000000 +0100
+++ ../pyglet/pyglet/media/avbin.py 2009-11-27 21:46:59.000000000 +0100
@@ -82,10 +82,43 @@
AVbinLogLevel = ctypes.c_int
AVbinFileP = ctypes.c_void_p
-AVbinStreamP = ctypes.c_void_p
Timestamp = ctypes.c_int64
+# libavcodec internals as of revision 13661
+# These may change in future versions
+
+class AVFrame(ctypes.Structure):
+ _fields_ = [
+ ('data', ctypes.POINTER(ctypes.c_ubyte) * 4),
+ #('data', ctypes.c_char_p * 4),
+ ('linesize', ctypes.c_int * 4),
+ # additional fields skipped
+ ]
+
+# int avcodec_decode_video(AVCodecContext *avctx, AVFrame *picture,
+# int *got_picture_ptr,
+# const uint8_t *buf, int buf_size);
+
+av.avcodec_decode_video.restype = ctypes.c_int
+av.avcodec_decode_video.argtypes = [ctypes.c_void_p,
+ ctypes.POINTER(AVFrame), ctypes.POINTER(ctypes.c_int), ctypes.c_void_p, ctypes.c_size_t]
+
+# AVbin internal stream representation (non-public, probably unstable)
+# as of version 7
+
+class AVbinStream(ctypes.Structure):
+ _fields_ = [
+ ('type', ctypes.c_int),
+ ('format_context', ctypes.c_void_p),
+ ('codec_context', ctypes.c_void_p),
+ ('frame', ctypes.POINTER(AVFrame)),
+ ]
+
+AVbinStreamP = ctypes.POINTER(AVbinStream)
+
+### end of unstable APIs
+
class AVbinFileInfo(ctypes.Structure):
_fields_ = [
('structure_size', ctypes.c_size_t),
@@ -164,7 +197,7 @@
av.avbin_stream_info.argtypes = [AVbinFileP, ctypes.c_int,
ctypes.POINTER(AVbinStreamInfo8)]
-av.avbin_open_stream.restype = ctypes.c_void_p
+av.avbin_open_stream.restype = AVbinStreamP
av.avbin_open_stream.argtypes = [AVbinFileP, ctypes.c_int]
av.avbin_close_stream.argtypes = [AVbinStreamP]
@@ -220,6 +253,7 @@
# Decoded image. 0 == not decoded yet; None == Error or discarded
self.image = 0
+ self.images = None # YUV image planes
self.id = self._next_id
self.__class__._next_id += 1
@@ -494,20 +528,50 @@
if _profile and packet.id == 500:
print "%dx%d" % (width, height)
os.kill(os.getpid(),9)
-
- pitch = width * 3
- buffer = (ctypes.c_uint8 * (pitch * height))()
- result = av.avbin_decode_video(self._video_stream,
- packet.data, packet.size,
- buffer)
+
+ _avbin_lock.acquire()
+ result = self._decode_video(packet.data, packet.size)
+
if result < 0:
- image_data = None
+ packet.image = None
+ packet.images = None
+ _avbin_lock.release()
else:
- image_data = image.ImageData(width, height, 'RGB', buffer, pitch)
-
- packet.image = image_data
+ frame = self._video_stream[0].frame[0]
- if not _profile:
+ y_size = height * frame.linesize[0]
+ y_buff = (ctypes.c_ubyte * y_size)()
+ ctypes.memmove(y_buff, frame.data[0], y_size)
+
+ u_size = height//2 * frame.linesize[1]
+ u_buff = (ctypes.c_ubyte * u_size)()
+ ctypes.memmove(u_buff, frame.data[1], u_size)
+
+ v_size = (height//2 * frame.linesize[2])
+ v_buff = (ctypes.c_ubyte * v_size)()
+ ctypes.memmove(v_buff, frame.data[2], v_size)
+
+ _avbin_lock.release()
+
+ images = [
+ image.ImageData(
+ width, height, 'L',
+ y_buff, frame.linesize[0]
+ ),
+ image.ImageData(
+ width//2, height//2, 'L',
+ u_buff, frame.linesize[1]
+ ),
+ image.ImageData(
+ width//2, height//2, 'L',
+ v_buff, frame.linesize[2]
+ ),
+ ]
+
+ packet.image = images[0]
+ packet.images = images
+
+ if not _profile:
# Notify get_next_video_frame() that another one is ready.
self._condition.acquire()
self._condition.notify()
@@ -566,9 +630,27 @@
if _debug:
print 'Returning', packet
- return packet.image
+ return packet.images
+
+ def _decode_video(self, data_in, size_in):
+ '''similar to avbin_decode_video() but not calling
+ img_convert(). The frame is decoded into the (private) field
+ "frame" of self._video_stream'''
+ width = self.video_format.width
+ height = self.video_format.height
+ stream = self._video_stream
+
+ got_picture = ctypes.c_int()
+ used = av.avcodec_decode_video(stream[0].codec_context,
+ stream[0].frame, ctypes.byref(got_picture),
+ data_in, size_in)
+ if not got_picture.value:
+ return AVBIN_RESULT_ERROR
+ return used
av.avbin_init()
+assert av.avbin_get_ffmpeg_revision() == 13661
+
if pyglet.options['debug_media']:
_debug = True
av.avbin_set_log_level(AVBIN_LOG_DEBUG)