Thank you for responding. I am a noob to pyqtgraph.

The capture rate will match the frame rate and processing, so I expect 
there to be 25-30 (frame rate) values per second. It's a very simple Python 
app. I do not think I even need a date/time variable on the second (x) axis 
- maybe a nice-to-have. I just want to show the eye aspect ratio (fluctuations) 
on the y axis over time (e.g. a 60-second demo). I can do this in matplotlib, 
but the graphs don't look as cool as in pyqtgraph. 

I have attached a similar style script which captures yawns (e.g. 
lip_distance). So, in this case, I want to store lip_distance into an array 
and while program writes/appends/extends to this lip_distance [] array, I 
want the plot to show the new values as they are captured. Has anyone built 
something similar? I saw a few posts in this group around scrolling plots.

So, are scrolling plots the recommended route? Or is there another Python 
visualization tool that can solve this?

On Thursday, May 17, 2018 at 12:52:11 PM UTC-4, Luke Campagnola wrote:
>
> My suggestion would have been to look at the scrolling plot examples, but 
> you have already done that. Have you tried using any of those? If so, why 
> didn't they work for you?
>
> On Wed, May 16, 2018, 23:39 Kaisar Khatak <[email protected] 
> <javascript:>> wrote:
>
>> I have a real time application that uses a camera to measure eye aspect 
>> ratio. The ratio will be an integer value and will fluctuate between 1-10 
>> (example).
>>
>> What is the easiest way to capture that value and display a scrolling 
>> plot (best approach?) real time? Do I need threading? Just looking for 
>> simplest solution to start off with...
>>
>> I have read the scrollingPlots.py examples...
>>
>> I have installed pyqtgraph and am running Python 2/3 on Ubuntu 16.
>>
>>
>> Thanks.
>>
>>
>> ----------------------------------------------------------------------------
>> # -*- coding: utf-8 -*-
>> """
>> Various methods of drawing scrolling plots.
>> """
>> import initExample ## Add path to library (just for examples; you do not 
>> need this)
>>
>> import pyqtgraph as pg
>> from pyqtgraph.Qt import QtCore, QtGui
>> import numpy as np
>>
>> win = pg.GraphicsLayoutWidget(show=True)
>> win.setWindowTitle('pyqtgraph example: Scrolling Plots')
>>
>>
>> # 1) Simplest approach -- update data in the array such that plot appears 
>> to scroll
>> #    In these examples, the array size is fixed.
>> p1 = win.addPlot()
>> p2 = win.addPlot()
>> data1 = np.random.normal(size=300)
>> curve1 = p1.plot(data1)
>> curve2 = p2.plot(data1)
>> ptr1 = 0
>> def update1():
>>     global data1, ptr1
>>     data1[:-1] = data1[1:]  # shift data in the array one sample left
>>                             # (see also: np.roll)
>>     data1[-1] = np.random.normal()
>>     curve1.setData(data1)
>>     
>>     ptr1 += 1
>>     curve2.setData(data1)
>>     curve2.setPos(ptr1, 0)
>>     
>>
>> # 2) Allow data to accumulate. In these examples, the array doubles in 
>> length
>> #    whenever it is full. 
>> win.nextRow()
>> p3 = win.addPlot()
>> p4 = win.addPlot()
>> # Use automatic downsampling and clipping to reduce the drawing load
>> p3.setDownsampling(mode='peak')
>> p4.setDownsampling(mode='peak')
>> p3.setClipToView(True)
>> p4.setClipToView(True)
>> p3.setRange(xRange=[-100, 0])
>> p3.setLimits(xMax=0)
>> curve3 = p3.plot()
>> curve4 = p4.plot()
>>
>> data3 = np.empty(100)
>> ptr3 = 0
>>
>> def update2():
>>     global data3, ptr3
>>     data3[ptr3] = np.random.normal()
>>     ptr3 += 1
>>     if ptr3 >= data3.shape[0]:
>>         tmp = data3
>>         data3 = np.empty(data3.shape[0] * 2)
>>         data3[:tmp.shape[0]] = tmp
>>     curve3.setData(data3[:ptr3])
>>     curve3.setPos(-ptr3, 0)
>>     curve4.setData(data3[:ptr3])
>>
>>
>> # 3) Plot in chunks, adding one new plot curve for every 100 samples
>> chunkSize = 100
>> # Remove chunks after we have 10
>> maxChunks = 10
>> startTime = pg.ptime.time()
>> win.nextRow()
>> p5 = win.addPlot(colspan=2)
>> p5.setLabel('bottom', 'Time', 's')
>> p5.setXRange(-10, 0)
>> curves = []
>> data5 = np.empty((chunkSize+1,2))
>> ptr5 = 0
>>
>> def update3():
>>     global p5, data5, ptr5, curves
>>     now = pg.ptime.time()
>>     for c in curves:
>>         c.setPos(-(now-startTime), 0)
>>     
>>     i = ptr5 % chunkSize
>>     if i == 0:
>>         curve = p5.plot()
>>         curves.append(curve)
>>         last = data5[-1]
>>         data5 = np.empty((chunkSize+1,2))        
>>         data5[0] = last
>>         while len(curves) > maxChunks:
>>             c = curves.pop(0)
>>             p5.removeItem(c)
>>     else:
>>         curve = curves[-1]
>>     data5[i+1,0] = now - startTime
>>     data5[i+1,1] = np.random.normal()
>>     curve.setData(x=data5[:i+2, 0], y=data5[:i+2, 1])
>>     ptr5 += 1
>>
>>
>> # update all plots
>> def update():
>>     update1()
>>     update2()
>>     update3()
>> timer = pg.QtCore.QTimer()
>> timer.timeout.connect(update)
>> timer.start(50)
>>
>>
>>
>> ## Start Qt event loop unless running in interactive mode or using pyside.
>> if __name__ == '__main__':
>>     import sys
>>     if (sys.flags.interactive != 1) or not hasattr(QtCore, 
>> 'PYQT_VERSION'):
>>         QtGui.QApplication.instance().exec_()
>>
>> -- 
>> You received this message because you are subscribed to the Google Groups 
>> "pyqtgraph" group.
>> To unsubscribe from this group and stop receiving emails from it, send an 
>> email to [email protected] <javascript:>.
>> To view this discussion on the web visit 
>> https://groups.google.com/d/msgid/pyqtgraph/2ca21aaa-92a5-48eb-a3c9-7c5490032a24%40googlegroups.com
>>  
>> <https://groups.google.com/d/msgid/pyqtgraph/2ca21aaa-92a5-48eb-a3c9-7c5490032a24%40googlegroups.com?utm_medium=email&utm_source=footer>
>> .
>> For more options, visit https://groups.google.com/d/optout.
>>
>

-- 
You received this message because you are subscribed to the Google Groups 
"pyqtgraph" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
To view this discussion on the web visit 
https://groups.google.com/d/msgid/pyqtgraph/af7e0b7a-45cd-40ff-a3eb-2cd3a2602f52%40googlegroups.com.
For more options, visit https://groups.google.com/d/optout.
import cv2
import dlib
import numpy as np
import imutils
from imutils import face_utils

# Pretrained dlib 68-point facial-landmark model; the .dat file must be
# available at this relative path (loading presumably fails here at import
# time if it is missing -- confirm against dlib docs).
PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(PREDICTOR_PATH)
detector = dlib.get_frontal_face_detector()

def get_landmarks(im):
    """Detect exactly one face in *im* and return its 68 landmarks.

    Returns an Nx2 ``np.matrix`` of (x, y) points, or the sentinel
    string ``"error"`` when zero or more than one face is detected.
    """
    print("get landmarks")
    # Upsample 0 times for speed (1 is more accurate but slower).
    faces = detector(im, 0)
    # Reject both "no face" and "multiple faces" with one guard.
    if len(faces) != 1:
        return "error"
    shape = predictor(im, faces[0])
    return np.matrix([[pt.x, pt.y] for pt in shape.parts()])


def annotate_landmarks(im, landmarks):
    """Return a copy of *im* with a small green circle on each landmark."""
    print("annotate landmarks")
    canvas = im.copy()
    for row in landmarks:
        # Each row of the landmark matrix is a 1x2 (x, y) point.
        center = (row[0, 0], row[0, 1])
        cv2.circle(canvas, center, 2, color=(0, 255, 0))
    return canvas

def top_lip(landmarks):
    """Return the mean y coordinate (int) of the top-lip landmarks.

    Uses dlib's 68-point indexing: outer top lip 50-52 and inner top
    lip 61-63.  *landmarks* is the Nx2 matrix from get_landmarks().
    """
    print("top lip")
    # range() is half-open, so this picks points 50,51,52 and 61,62,63.
    indices = list(range(50, 53)) + list(range(61, 64))
    pts = [landmarks[i] for i in indices]
    mean = np.mean(pts, axis=0)  # shape (1, 2): mean x, mean y
    # Index the single element explicitly: int() on a size-1 slice is
    # deprecated in recent NumPy (and the old unused top_lip_all_pts
    # temporary is removed).
    return int(mean[0, 1])

def bottom_lip(landmarks):
    """Return the mean y coordinate (int) of the bottom-lip landmarks.

    Uses dlib's 68-point indexing: inner bottom lip 65-67 and outer
    bottom lip 56-58.  *landmarks* is the Nx2 matrix from get_landmarks().
    """
    print("bottom lip")
    # range() is half-open, so this picks points 65,66,67 and 56,57,58.
    indices = list(range(65, 68)) + list(range(56, 59))
    pts = [landmarks[i] for i in indices]
    mean = np.mean(pts, axis=0)  # shape (1, 2): mean x, mean y
    # Index the single element explicitly: int() on a size-1 slice is
    # deprecated in recent NumPy (and the old unused bottom_lip_all_pts
    # temporary is removed).
    return int(mean[0, 1])

def mouth_open(image):
    """Annotate *image* with landmarks and measure the lip gap.

    Returns (annotated_image, lip_distance).  When no single face is
    found, the original image and a distance of 0 are returned.
    """
    print("mouth open")
    landmarks = get_landmarks(image)

    # get_landmarks() returns the string "error" on failure.  Comparing an
    # np.matrix to a string with == relies on NumPy's elementwise-comparison
    # fallback, so test the type instead.  (The original body also mixed a
    # tab-indented line with space-indented ones here, which is a syntax
    # error under Python 3.)
    if isinstance(landmarks, str):
        print("landmarks = error in mouth open")
        return image, 0

    image_with_landmarks = annotate_landmarks(image, landmarks)
    top_lip_center = top_lip(landmarks)
    bottom_lip_center = bottom_lip(landmarks)
    lip_distance = abs(top_lip_center - bottom_lip_center)
    return image_with_landmarks, lip_distance

# Open the input movie file.
camera = cv2.VideoCapture("yawn5.mp4")
frame_width = int(camera.get(3))
frame_height = int(camera.get(4))
print("vid frame_width {}".format(frame_width))
print("vid frame_height {}".format(frame_height))

vid_length = int(camera.get(cv2.CAP_PROP_FRAME_COUNT))
#vid_length = int(camera.get(cv2.CAP_PROP_FPS))  # alternative: frame rate
vid_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))    # e.g. 640
vid_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))  # e.g. 480
print("vid length {}".format(vid_length))
print("vid width {}".format(vid_width))
print("vid height {}".format(vid_height))

# Create the output movie files (resolution/frame rate should match input).
fourcc = cv2.VideoWriter_fourcc(*'XVID')
output_movie = cv2.VideoWriter('output-yawn5.avi', fourcc, 25, (vid_width, vid_height))
output_movie_lm = cv2.VideoWriter('output-yawn5_lm.avi', fourcc, 25, (vid_width, vid_height))

# Lip gap (pixels) above which a frame counts as yawning.  This really
# depends on the subject's distance from the camera (in-car dash cam is one
# application -- what if the user pushes the seat backwards?).  The original
# declared 30 here but hard-coded 27 at the comparison; the constant is now
# the single source of truth, kept at the value actually used.
yawn_lip_distance = 27
yawns = 0
yawn_status = False
frame_number = 0
prev_max_lip_distance_yawn = 0

while True:
    ret, frame = camera.read()
    frame_number += 1

    # If we did not grab a frame, we have reached the end of the video.
    if not ret:
        break

    image_landmarks, lip_distance = mouth_open(frame)

    prev_yawn_status = yawn_status

    # Lip-distance readout on both the raw and the annotated frame.  (These
    # two lines were space-indented inside a tab-indented loop in the
    # original, a syntax error under Python 3.)
    cv2.putText(frame, "Lip Distance: " + str(lip_distance), (50,150), cv2.FONT_HERSHEY_COMPLEX, 1,(0,0,255),2)
    cv2.putText(image_landmarks, "Lip Distance: " + str(lip_distance), (50,150), cv2.FONT_HERSHEY_COMPLEX, 1,(0,0,255),2)

    if lip_distance > yawn_lip_distance:
        yawn_status = True
        output_text = "Yawn Count: " + str(yawns + 1)
        cv2.putText(frame, output_text, (50,50), cv2.FONT_HERSHEY_COMPLEX, 1,(0,255,127),2)
        cv2.putText(frame, "Subject is Yawning", (50,100), cv2.FONT_HERSHEY_COMPLEX, 1,(0,0,255),2)
        cv2.putText(image_landmarks, output_text, (50,50), cv2.FONT_HERSHEY_COMPLEX, 1,(0,255,127),2)
        cv2.putText(image_landmarks, "Subject is Yawning", (50,100), cv2.FONT_HERSHEY_COMPLEX, 1,(0,0,255),2)

        # Track the widest gap seen during the current yawn.
        if lip_distance > prev_max_lip_distance_yawn:
            prev_max_lip_distance_yawn = lip_distance

        # NOTE(review): this draws at the same (50,100) position as the
        # "Subject is Yawning" text above, so the two strings overlap.
        cv2.putText(image_landmarks, "max_lip_distance_yawn:" + str(prev_max_lip_distance_yawn), (50,100), cv2.FONT_HERSHEY_COMPLEX, 1,(0,0,255),2)
    else:
        yawn_status = False
        prev_max_lip_distance_yawn = 0

    # Count a yawn when it ends (True -> False transition).
    if prev_yawn_status and not yawn_status:
        yawns += 1

    # Write the resulting images to the output video files.
    print("Writing frame {} / {}".format(frame_number, vid_length))
    output_movie.write(frame)
    output_movie_lm.write(image_landmarks)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()

Reply via email to