If you want to save the result to a file when reading from a video file (rather than the webcam), you can process every frame of the video.
Here is a simple example.

    import cv2
    import mxnet as mx
    import gluoncv
    from gluoncv.model_zoo import get_model
    from gluoncv.data.transforms.pose import detector_to_alpha_pose, heatmap_to_coord
    from gluoncv.utils.viz import cv_plot_keypoints
    from gluoncv import utils

    # Download a short sample clip to run the pipeline on.
    url = 'https://github.com/bryanyzhu/tiny-ucf101/raw/master/v_Basketball_g01_c01.avi'
    video_fname = utils.download(url)

    cap = cv2.VideoCapture(video_fname)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Define the codec and create VideoWriter object using the source
    # clip's frame rate and size.
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter('output.avi', fourcc, fps, (width, height))

    ctx = mx.cpu()

    # Person detector whose boxes feed the AlphaPose estimator.
    detector = get_model('ssd_512_mobilenet1.0_coco', pretrained=True, ctx=ctx)
    detector.reset_class(classes=['person'], reuse_weights={'person': 'person'})
    detector.hybridize()

    estimator = get_model('alpha_pose_resnet101_v1b_coco', pretrained=True, ctx=ctx)
    estimator.hybridize()

    while True:
        ret, frame = cap.read()
        if not ret:
            break  # end of the video file

        # OpenCV decodes frames as BGR; the gluoncv models expect RGB.
        frame = mx.nd.array(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).astype('uint8')
        # NOTE: transform_test resizes the frame (short side = 240), so from
        # here on `frame` no longer matches the VideoWriter's (width, height).
        x, frame = gluoncv.data.transforms.presets.ssd.transform_test(frame, short=240)
        x = x.as_in_context(ctx)
        class_IDs, scores, bounding_boxs = detector(x)

        pose_input, upscale_bbox = detector_to_alpha_pose(frame, class_IDs,
                                                          scores, bounding_boxs)
        if upscale_bbox is not None:
            predicted_heatmap = estimator(pose_input.as_in_context(ctx))
            pred_coords, confidence = heatmap_to_coord(predicted_heatmap,
                                                       upscale_bbox)
            img = cv_plot_keypoints(frame, pred_coords, confidence,
                                    class_IDs, bounding_boxs, scores,
                                    box_thresh=0.5, keypoint_thresh=0.2)
        else:
            # No person detected in this frame: write it through unchanged.
            img = frame

        # Fix 1: the pipeline works in RGB (converted above), but VideoWriter
        # expects BGR — convert back, or the output colors are swapped.
        # Fix 2: resize back to the capture size; VideoWriter silently drops
        # frames whose dimensions differ from the (width, height) it was
        # opened with, which would leave output.avi empty.
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, (width, height))
        out.write(img)

        if cv2.waitKey(1) == 27:  # press ESC to stop early
            break

    cap.release()
    out.release()
    cv2.destroyAllWindows()

Hope this helps.





---
[Visit 
Topic](https://discuss.mxnet.apache.org/t/how-to-run-pose-estimation-alphapose-with-video-instead-of-cam/6645/4)
 or reply to this email to respond.

You are receiving this because you enabled mailing list mode.

To unsubscribe from these emails, [click 
here](https://discuss.mxnet.apache.org/email/unsubscribe/4da9023f1e1be33e26ac2244858cfa1dcf12696b0ad063c3ba5fbc14e82545b8).

Reply via email to