NikolajDanger
2022-09-19 13:51:56 +02:00
parent 280de12b3d
commit cea460354e
8 changed files with 368 additions and 0 deletions


@@ -0,0 +1,49 @@
# This script shows how to open a camera in OpenCV, grab frames, and display them.
# Kim S. Pedersen, 2022
import cv2 # Import the OpenCV library
def gstreamer_pipeline(capture_width=1024, capture_height=720, framerate=30):
    """Utility function for setting parameters for the gstreamer camera pipeline"""
    return (
        "libcamerasrc ! "
        "video/x-raw, width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! "
        "videoconvert ! "
        "appsink"
        % (
            capture_width,
            capture_height,
            framerate,
        )
    )
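# For reference, with the default arguments the pipeline string above expands to:
#   libcamerasrc ! video/x-raw, width=(int)1024, height=(int)720, framerate=(fraction)30/1 ! videoconvert ! appsink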
print("OpenCV version = " + cv2.__version__)
# Open a camera device for capturing
cam = cv2.VideoCapture(gstreamer_pipeline(), apiPreference=cv2.CAP_GSTREAMER)
if not cam.isOpened():  # Error
    print("Could not open camera")
    exit(-1)
# Open a window
WIN_RF = "Example 1"
cv2.namedWindow(WIN_RF)
cv2.moveWindow(WIN_RF, 100, 100)
while cv2.waitKey(4) == -1:  # Wait for a key pressed event
    retval, frameReference = cam.read()  # Read frame
    if not retval:  # Error
        print(" < < < Game over! > > > ")
        exit(-1)

    # Show frames
    cv2.imshow(WIN_RF, frameReference)
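# Optional cleanup (added as a suggestion, not part of the original script):
# release the camera and close the window before exiting.
cam.release()
cv2.destroyAllWindows()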
# Finished successfully


@@ -0,0 +1,91 @@
# This script shows how to do simple processing on frames coming from a camera in OpenCV.
# Kim S. Pedersen, 2015
import cv2 # Import the OpenCV library
import numpy as np # We also need numpy
from pkg_resources import parse_version
OPCV3 = parse_version(cv2.__version__) >= parse_version('3')
def capPropId(prop):
    """Returns the OpenCV VideoCapture property id for the given property name, e.g., "FPS".
    This is needed because of differences in the Python interface between OpenCV 2.4 and 3.0.
    """
    return getattr(cv2 if OPCV3 else cv2.cv, ("" if OPCV3 else "CV_") + "CAP_PROP_" + prop)
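# For example, on OpenCV 3+ capPropId("FRAME_WIDTH") resolves to cv2.CAP_PROP_FRAME_WIDTH,
# while on OpenCV 2.4 it resolves to cv2.cv.CV_CAP_PROP_FRAME_WIDTH.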
def gstreamer_pipeline(capture_width=1024, capture_height=720, framerate=30):
    """Utility function for setting parameters for the gstreamer camera pipeline"""
    return (
        "libcamerasrc ! "
        "video/x-raw, width=(int)%d, height=(int)%d, framerate=(fraction)%d/1 ! "
        "videoconvert ! "
        "appsink"
        % (
            capture_width,
            capture_height,
            framerate,
        )
    )
print("OpenCV version = " + cv2.__version__)
# Define some constants
lowThreshold = 35
ratio = 3
kernel_size = 3
# Open a camera device for capturing
cam = cv2.VideoCapture(gstreamer_pipeline(), apiPreference=cv2.CAP_GSTREAMER)
if not cam.isOpened():  # Error
    print("Could not open camera")
    exit(-1)
# Get camera properties
width = int(cam.get(capPropId("FRAME_WIDTH")))
height = int(cam.get(capPropId("FRAME_HEIGHT")))
print("width = " + str(width) + ", Height = " + str(height))
# Open a window
WIN_RF = "Example 2"
cv2.namedWindow(WIN_RF)
cv2.moveWindow(WIN_RF, 100, 100)
# Preallocate memory
#gray_frame = np.zeros((height, width), dtype=np.uint8)
while cv2.waitKey(4) == -1:  # Wait for a key pressed event
    retval, frameReference = cam.read()  # Read frame
    if not retval:  # Error
        print(" < < < Game over! > > > ")
        exit(-1)

    # Convert the image to grayscale
    gray_frame = cv2.cvtColor(frameReference, cv2.COLOR_BGR2GRAY)

    # Reduce noise with a 3x3 box filter
    edge_frame = cv2.blur(gray_frame, (3, 3))

    # Canny edge detector (kernel_size is the Sobel aperture size)
    edge_frame = cv2.Canny(edge_frame, lowThreshold, lowThreshold * ratio, apertureSize=kernel_size)

    # Show frames
    cv2.imshow(WIN_RF, edge_frame)
# Close all windows
cv2.destroyAllWindows()
# Finished successfully


@@ -0,0 +1,59 @@
# Example showing how to grab frames using the PiCamera module instead of OpenCV
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from fractions import Fraction
import time
import cv2
print("OpenCV version = " + cv2.__version__)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
time.sleep(1) # Wait for camera
camera.resolution = (640, 480)
camera.framerate = 30
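# The settings below lock the exposure and white balance so frames keep a consistent appearance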
camera.shutter_speed = camera.exposure_speed
camera.exposure_mode = 'off'
gain = camera.awb_gains
camera.awb_mode = 'off'
#gain = (Fraction(2,1), Fraction(1,1))
#gain = (1.5, 1.5)
camera.awb_gains = gain
print("shutter_speed = ", camera.shutter_speed)
print("awb_gains = ", gain)
rawCapture = PiRGBArray(camera, size=camera.resolution)
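# PiRGBArray is a NumPy-array-backed capture target, so frames can be handed directly to OpenCV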
# Open a window
WIN_RF = "Frame";
cv2.namedWindow(WIN_RF);
cv2.moveWindow(WIN_RF, 100 , 100);
# allow the camera to warmup
time.sleep(0.1)
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
    # grab the raw NumPy array representing the image
    image = frame.array

    # show the frame
    cv2.imshow(WIN_RF, image)
    key = cv2.waitKey(4) & 0xFF

    # clear the stream in preparation for the next frame
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
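# Optional cleanup (added as a suggestion, not part of the original script):
# close the window and the camera once the loop ends.
cv2.destroyAllWindows()
camera.close()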


@@ -0,0 +1,57 @@
# Example showing how to grab frames using the PiCamera module instead of OpenCV
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
from fractions import Fraction
import time
import cv2
print("OpenCV version = " + cv2.__version__)
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
time.sleep(1) # Wait for camera
camera.resolution = (640, 480)
camera.framerate = 30
camera.shutter_speed = camera.exposure_speed
camera.exposure_mode = 'off'
gain = camera.awb_gains
camera.awb_mode = 'off'
#gain = (Fraction(2,1), Fraction(1,1))
#gain = (1.5, 1.5)
camera.awb_gains = gain
print("shutter_speed = ", camera.shutter_speed)
print("awb_gains = ", gain)
rawCapture = PiRGBArray(camera, size=camera.resolution)
# Open a window
WIN_RF = "Frame";
cv2.namedWindow(WIN_RF);
cv2.moveWindow(WIN_RF, 100, 100);
# allow the camera to warmup
time.sleep(0.1)
while True:
    # capture a frame from the camera
    camera.capture(rawCapture, format="bgr", use_video_port=True)

    # grab the raw NumPy array representing the image
    image = rawCapture.array

    # show the frame
    cv2.imshow(WIN_RF, image)
    key = cv2.waitKey(4) & 0xFF

    # clear the stream so the reused rawCapture buffer is empty before the next capture
    rawCapture.truncate(0)

    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break


@@ -0,0 +1,34 @@
# This script shows how to open a camera in OpenCV, grab frames, and display them.
# Kim S. Pedersen, 2022
import cv2 # Import the OpenCV library
print("OpenCV version = " + cv2.__version__)
# Open a camera device for capturing
cam = cv2.VideoCapture(0)
if not cam.isOpened():  # Error
    print("Could not open camera")
    exit(-1)
# Open a window
WIN_RF = "Example 1"
cv2.namedWindow(WIN_RF)
cv2.moveWindow(WIN_RF, 100, 100)
while cv2.waitKey(4) == -1:  # Wait for a key pressed event
    retval, frameReference = cam.read()  # Read frame
    if not retval:  # Error
        print(" < < < Game over! > > > ")
        exit(-1)

    # Show frames
    cv2.imshow(WIN_RF, frameReference)
# Finished successfully


@@ -0,0 +1,72 @@
# This script shows how to do simple processing on frames coming from a camera in OpenCV.
# Kim S. Pedersen, 2015
import cv2 # Import the OpenCV library
import numpy as np # We also need numpy
from pkg_resources import parse_version
OPCV3 = parse_version(cv2.__version__) >= parse_version('3')
def capPropId(prop):
    """Returns the OpenCV VideoCapture property id for the given property name, e.g., "FPS".
    This is needed because of differences in the Python interface between OpenCV 2.4 and 3.0.
    """
    return getattr(cv2 if OPCV3 else cv2.cv, ("" if OPCV3 else "CV_") + "CAP_PROP_" + prop)
print("OpenCV version = " + cv2.__version__)
# Define some constants
lowThreshold = 35
ratio = 3
kernel_size = 3
# Open a camera device for capturing
cam = cv2.VideoCapture(0)
if not cam.isOpened():  # Error
    print("Could not open camera")
    exit(-1)
# Get camera properties
width = int(cam.get(capPropId("FRAME_WIDTH")))
height = int(cam.get(capPropId("FRAME_HEIGHT")))
print("width = " + str(width) + ", Height = " + str(height))
# Open a window
WIN_RF = "Example 2"
cv2.namedWindow(WIN_RF)
cv2.moveWindow(WIN_RF, 100, 100)
# Preallocate memory
#gray_frame = np.zeros((height, width), dtype=np.uint8)
while cv2.waitKey(4) == -1:  # Wait for a key pressed event
    retval, frameReference = cam.read()  # Read frame
    if not retval:  # Error
        print(" < < < Game over! > > > ")
        exit(-1)

    # Convert the image to grayscale
    gray_frame = cv2.cvtColor(frameReference, cv2.COLOR_BGR2GRAY)

    # Reduce noise with a 3x3 box filter
    edge_frame = cv2.blur(gray_frame, (3, 3))

    # Canny edge detector (kernel_size is the Sobel aperture size)
    edge_frame = cv2.Canny(edge_frame, lowThreshold, lowThreshold * ratio, apertureSize=kernel_size)

    # Show frames
    cv2.imshow(WIN_RF, edge_frame)
# Close all windows
cv2.destroyAllWindows()
# Finished successfully