私的AI研究会 > OpenCV5
「Python画像処理100 (Interface 2021年1月号)」CQ出版社刊 のサンプルコードを中心に検証をしてみる。
OpenVINO™ ツールキットをインストールした環境で検証する。
pi@raspberrypi:~ $ python3 Python 3.7.3 (default, Jul 25 2020, 13:03:44) [GCC 8.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import cv2 >>> cv2.__version__ '4.5.1-openvino' >>> quit()
pi@raspberrypi:~ $ ls Bookshelf Desktop Documents Downloads Music Pictures Public Templates Videos build cq workspace
オリジナル | 変換サイズ | 低解像度 | 変換サイズ | 縮小率 | |
X | 1920 | 1280 | 720 | 640 | 0.5 |
Y | 1080 | 720 | 480 | 426 | 0.59 |
オリジナル | 変換後 |
MVI_1186_Trim.mp4 | speed-test.mp4 |
1920x1080(30frame/sec) | 720x480(30frame/sec) |
~/cq $ vi cameraincar.py # -*- coding: utf-8 -*- # This is a sample Python script. # Press Shift+F10 to execute it or replace it with your code. # Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings. import cv2 import numpy as np frameCount = 0 thresRate = 150 speedList = np.array([]) aveSpeed = 0 def setSpeedMovie(img): global frameCount global aveSpeed height = img.shape[0] width = img.shape[1] y = int(height * 0.8) x = int(width * 0.5) # h = y + int(height * 0.05) h = y + 5 w = x + int(width * 0.3) monitor = img[y: h, x: w] cv2.imshow("Monitor", monitor) monitor = cv2.cvtColor(monitor, cv2.COLOR_BGR2GRAY) _, monitor = cv2.threshold(monitor, thresRate, 255, cv2.THRESH_BINARY) # img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 12); avePixelNum = np.average(monitor) # 全画素の平均値 # print(avePixelNum) if avePixelNum > 10: # 白線が見つかった場合 frameCount += 1 else: print(frameCount) if frameCount > 0: # print("CarSpeed = {} /Km/h".format(getSpeed(frameCount))) aveSpeed = getSpeed(frameCount) frameCount = 0 # 判定画像を実映像にオーバーレイする # monitor = cv2.cvtColor(monitor, cv2.COLOR_GRAY2BGR) # img[y: h, x: w] = monitor cv2.imshow("Monitor", monitor) img = getResize(img) color = (255, 255, 255) cv2.putText(img, "Speed {0}Km/h".format(aveSpeed), (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, color, 2, cv2.LINE_AA) return img def getSpeed(num): global speedList whiteLine = 8 # 白線の長さは8m oneFrameSec = 1 / 30 # 映像のフレームレートが30 whiteLinePassingSec = num * oneFrameSec # 白線を何秒で通過したか carSpeed = whiteLine / 1000 / whiteLinePassingSec * 60 * 60 if carSpeed < 150: speedList = np.append(speedList, carSpeed) if speedList.size > 10: speedList = np.delete(speedList, 0) print('Speed = {0} Count = {1}'.format(np.average(speedList), speedList.size)) return int(np.average(speedList)) def getResize(img): basePixSize = 640 # 縦横で大きい辺の変更したいサイズ height = img.shape[0] width = img.shape[1] largeSize = max(height, width) # 大きい方の辺のサイズ resizeRate 
= basePixSize / largeSize # 変更比率を計算 img = cv2.resize(img, (int(width * resizeRate), int(height * resizeRate))) return img if __name__ == '__main__': cap = cv2.VideoCapture("video/speed-test.mp4") if not cap.isOpened(): # ビデオキャプチャー可能か判断 print("Not Opened Video Camera") exit() while True: ret, img = cap.read() if not ret: # キャプチャー画像取得に失敗したら終了 print("Video Capture Err") break img = setSpeedMovie(img) cv2.imshow("Speed", img) if cv2.waitKeyEx(10) > -1: break cv2.destroyAllWindows()
オリジナル | 変換後 |
PXL_20201011_005931739.mp4 | count-test.mp4 |
1920x1080(30frame/sec) | 720x480(30frame/sec) |
~/cq $ vi carcount.py # -*- coding: utf-8 -*- import cv2 import numpy as np def __main(): cap = cv2.VideoCapture('video/count-test.mp4') while True: ret, img = cap.read() if not ret: # キャプチャー画像取得に失敗したら終了 print("Video Capture Err") break # ここで処理を実行する img = getResize(img) # --ここに解析処理を記述 戻り値を imgにする img = getArea(img) cv2.imshow("Final result", img) # 画面表示 if cv2.waitKey(10) > -1: break cap.release() def getArea(img): # 判定サイズ w = 155 h = 3 # 内側 x1 = 285 y1 = 315 x2 = x1 + w y2 = y1 + h # 外側 x3 = 465 y3 = 315 x4 = x3 + w y4 = y3 + h area1 = img[y1:y2, x1:x2] # 内側車線 area2 = img[y3:y4, x3:x4] # 外側車線 area1 = getBackgroundSubMog(img, area1, 1) # 1はIN area2 = getBackgroundSubMog(img, area2, 2) # 2はOUT img[y1:y2, x1:x2] = cv2.cvtColor(area1, cv2.COLOR_GRAY2BGR) img[y3:y4, x3:x4] = cv2.cvtColor(area2, cv2.COLOR_GRAY2BGR) return img def getBackgroundSubMog(img, area, loadLine): global inside global outside global outsideZeroCount global insideZeroCount monitor = fgbg.apply(area) avePixelNum = np.average(monitor) # 全画素の平均値 print("Averege = {0}".format(avePixelNum)) # outside(外側判定) if loadLine == 2: if avePixelNum < 5: #平均値が5/255以下の場合はノイズとみなす outsideZeroCount += 1 # 未通過フレームのカウント else: if outsideZeroCount > 5: # 未通過フレームが5枚以上続いて変化があれば通過したとみなす outside += 1 outsideZeroCount = 0 #inside(内側判定) if loadLine == 1: if avePixelNum < 5: #平均値が5/255以下の場合はノイズとみなす insideZeroCount += 1 # 未通過フレームのカウント else: if insideZeroCount > 5: # 未通過フレームが5枚以上続いて変化があれば通過したとみなす inside += 1 insideZeroCount = 0 cv2.putText(img=img, text="{0}".format(outside), org=(550, 340), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.5, color=(255, 255, 255), lineType=cv2.LINE_AA) cv2.putText(img=img, text="{0}".format(inside), org=(350, 340), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.5, color=(255, 255, 255), lineType=cv2.LINE_AA) return monitor def getResize(src): basePixSize = 640 # 縦横で大きい辺の変更したいサイズ height = src.shape[0] width = src.shape[1] largeSize = max(height, width) # 大きい方の辺のサイズ resizeRate = basePixSize / 
largeSize # 変更比率を計算 dst = cv2.resize(src, (int(width * resizeRate), int(height * resizeRate)), interpolation=None) return dst if __name__ == '__main__': outside = 0 # 外側の通過カウント inside = 0 # 内側の通過カウント outsideZeroCount = 0 insideZeroCount = 0 # 背景差分のフィルター作成 fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=120) __main()
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=120)エラーが発生する。
~/cq $ python3 CarCount.py Traceback (most recent call last): File "CarCount.py", line 107, in <module> fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=120) AttributeError: module 'cv2' has no attribute 'bgsegm'
fgbg =cv2.createBackgroundSubtractorMOG2(history=120)
~cq $ vi FacePhoto,py # -*- coding: utf-8 -*- import cv2 import numpy as np def __main(): maxPhotoNum = 100 cap = cv2.VideoCapture(0) cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280) cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720) if not cap.isOpened(): # ビデオキャプチャー可能か判断 print("Not Opened Video Camera") exit() while True: ret, img = cap.read() if not ret: # キャプチャー画像取得に失敗したら終了 print("Video Capture Err") break # ここで処理を実行する dst = setResultArea(img) getface(dst) x1, y1, x2, y2 = init(img) img[y1:y2, x1:x2] = dst cv2.rectangle(img,(50, 50), (330, 120),(0, 0, 0), -1) cv2.putText(img=img, text="{0}/{1}".format(str(faceCount).zfill(3), maxPhotoNum), org=(80, 100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1.5, color=(255, 255, 255), lineType=cv2.LINE_AA) cv2.imshow("Final result", img) # 画面表示 if cv2.waitKey(10) > -1: break if faceCount >= maxPhotoNum: break cap.release() cv2.destroyAllWindows() return 0 def init(src): h = src.shape[0] w = src.shape[1] x = w / 2 y = h / 2 rectLength = w * 0.3 x1 = int(x - (rectLength / 2)) y1 = int(y - (rectLength / 2)) x2 = int(x + (rectLength / 2)) y2 = int(y + (rectLength / 2)) return x1, y1, x2, y2 def setResultArea(src): x1, y1, x2, y2 = init(src) dst = src[y1:y2, x1:x2] # cv2.imshow("dst", dst) cv2.rectangle(src, (x1, y1), (x2, y2), (255, 255, 255), 2) return dst def getface(img): global cascade gray = cv2.cvtColor(src=img, code=cv2.COLOR_BGR2GRAY) bodyRect = cascade.detectMultiScale(image=gray, scaleFactor=1.05, minNeighbors=10, flags=None, minSize=(30, 30)) for x, y, w, h in bodyRect: face = img[y:y+h, x:x+w] saveImg(face) cv2.rectangle(img=img, pt1=(x, y), pt2=(x + w, y + h), color=(0, 255, 255), thickness=3) return img def saveImg(src): global faceCount dst = setImageSize(src, faceCount) fileName = "./facesImage/face{0}.jpg".format(faceCount) # cv2.imwrite(fileName, dst) faceCount += 1 def setImageSize(src, count): global height global width if count > 0: h = src.shape[0] w = src.shape[1] rate = height / h print(rate) src =cv2.resize(src, (int(w 
* rate), int(h * rate))) else: height = src.shape[0] width = src.shape[1] return src if __name__ == '__main__': faceCount = 0 height = 0 width = 0 cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml') __main()※ テストのため記録部分をコメントアウト。
~/cq $ vi movieshow.py # -*- coding: utf-8 -*- import cv2 import sys file_path = './video/movie-test.mp4' delay = 1 window_name = 'frame' cap = cv2.VideoCapture(file_path) if not cap.isOpened(): sys.exit() # 幅と高さを取得 width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) size = (width, height) #総フレーム数を取得 frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) #フレームレート(1フレームの時間単位はミリ秒)の取得 frame_rate = int(cap.get(cv2.CAP_PROP_FPS)) print(frame_rate, size, frame_count) while True: ret, frame = cap.read() if ret: cv2.imshow(window_name, frame) if cv2.waitKey(delay) & 0xFF == ord('q'): break else: cap.set(cv2.CAP_PROP_POS_FRAMES, 0) cv2.destroyWindow(window_name)
~/cq $ vi movietest.py # -*- coding: utf-8 -*- import cv2 print("---start---") #動画ファイルを読み込む video = cv2.VideoCapture('./video/movie-test.mp4') # 幅と高さを取得 width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)) size = (width, height) #総フレーム数を取得 frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT)) #フレームレート(1フレームの時間単位はミリ秒)の取得 frame_rate = int(video.get(cv2.CAP_PROP_FPS)) print(frame_rate, size, frame_count) ### # 保存用 fmt = cv2.VideoWriter_fourcc('m', 'p', '4', 'v') writer = cv2.VideoWriter('./result/outtest.mp4', fmt, frame_rate, size) while True: ret, frame = video.read() if ret: ### ここに加工処理などを記述する ### writer.write(frame) # 1フレーム画像表示 cv2.imshow('test window', frame) if cv2.waitKey(1) & 0xFF == ord('q'): break else: break writer.release() video.release() cv2.destroyAllWindows() print("---end---")※ 総フレーム数が取得できないようなので、ファイルエンドまでループする処理に変更。
~/cq $ python3 movietest.py ---start--- [ WARN:0] global ../opencv/modules/videoio/src/cap_gstreamer.cpp (919) open OpenCV | GStreamer warning: unable to query duration of stream [ WARN:0] global ../opencv/modules/videoio/src/cap_gstreamer.cpp (956) open OpenCV | GStreamer warning: Cannot query video position: status=1, value=0, duration=-1 29 (720, 480) -1 (python3:1885): GStreamer-CRITICAL **: 11:37:23.764: gst_element_make_from_uri: assertion 'gst_uri_is_valid (uri)' failed [ WARN:0] global ../opencv/modules/videoio/src/cap_gstreamer.cpp (1601) open OpenCV | GStreamer warning: cannot link elements ---end---
オリジナル | 変換後 |
MVI_1592.MP4 | background-test.mp4 |
1920x1080(30frame/sec) | 720x480(30frame/sec) |
~/cq $ vi background.py # -*- coding: utf-8 -*- import cv2 def __main(): cap = cv2.VideoCapture("./video/background-test.mp4") if not cap.isOpened(): # ビデオキャプチャー可能か判断 print("Not Opened Video Camera") exit() while True: ret, img = cap.read() img = getResize(img) org = img if not ret: # キャプチャー画像取得に失敗したら終了 print("Video Capture Err") break subImg = getBackgroundSubMog(org, img) #cv2.imshow('bugs', subImg) # cv2.imshow('Final result', org) if cv2.waitKeyEx(10) > -1: break cv2.destroyAllWindows() def getResize(img): basePixSize = 640 # 縦横で大きい辺の変更したいサイズ height = img.shape[0] width = img.shape[1] largeSize = max(height, width) # 大きい方の辺のサイズ resizeRate = basePixSize / largeSize # 変更比率を計算 img = cv2.resize(img, (int(width * resizeRate), int(height * resizeRate))) return img def getBackgroundSubMog(org, img): global fgbg # 背景差分フィルター実行 # 戻り値には2値化されて差分が白で表現された画像が戻る subImg = fgbg.apply(img) cv2.imshow('Fvbg', subImg) contours, _ = cv2.findContours(image=subImg, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_SIMPLE) contours = list(filter(lambda x: cv2.contourArea(x) > 100, contours)) # 小さい輪郭は誤検出として削除する resultImg = cv2.drawContours(image=org, contours=contours, contourIdx=-1, color=(0, 0, 255), thickness=2) # 輪郭の描画 return resultImg if __name__ == '__main__': print(cv2.__version__) # 背景差分のフィルター作成 # fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=120) fgbg =cv2.createBackgroundSubtractorMOG2(history=120) __main()
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG(history=120)
fgbg =cv2.createBackgroundSubtractorMOG2(history=120)
$ python3 Python 3.7.3 (default, Jul 25 2020, 13:03:44) [GCC 8.3.0] on linux Type "help", "copyright", "credits" or "license" for more information. >>> import cv2 >>> print(cv2.getBuildInformation())
General configuration for OpenCV 4.5.1-openvino ===================================== Version control: 705e7b207c4c95231fb18c7ddfad353795aa6531 Platform: Timestamp: 2020-12-01T12:29:06Z Host: Linux 4.15.0-29-generic x86_64 Target: Linux 1 armv7l CMake: 3.7.2 CMake generator: Ninja CMake build tool: /usr/bin/ninja Configuration: Release CPU/HW features: Baseline: NEON required: NEON disabled: VFPV3 C/C++: Built as dynamic libs?: YES C++ standard: 11 C++ Compiler: /usr/bin/arm-linux-gnueabihf-g++ (ver 6.3.0) C++ flags (Release): -mthumb -fdata-sections -Wa,--noexecstack -fsigned-char -Wno-psabi -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wundef -Winit-self -Wpointer-arith -Wshadow -Wsign-promo -Wuninitialized -Winit-self -Wno-psabi -Wsuggest-override -Wno-delete-non-virtual-dtor -Wno-comment -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -mfpu=neon -fvisibility=hidden -fvisibility-inlines-hidden -fstack-protector-strong -fPIC -O2 -DNDEBUG -DNDEBUG -D_FORTIFY_SOURCE=2 C++ flags (Debug): -mthumb -fdata-sections -Wa,--noexecstack -fsigned-char -Wno-psabi -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wundef -Winit-self -Wpointer-arith -Wshadow -Wsign-promo -Wuninitialized -Winit-self -Wno-psabi -Wsuggest-override -Wno-delete-non-virtual-dtor -Wno-comment -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -mfpu=neon -fvisibility=hidden -fvisibility-inlines-hidden -fstack-protector-strong -fPIC -g -O0 -DDEBUG -D_DEBUG C Compiler: /usr/bin/arm-linux-gnueabihf-gcc C flags (Release): -mthumb -fdata-sections -Wa,--noexecstack -fsigned-char -Wno-psabi -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point 
-Wformat -Werror=format-security -Wmissing-declarations -Wmissing-prototypes -Wstrict-prototypes -Wundef -Winit-self -Wpointer-arith -Wshadow -Wuninitialized -Winit-self -Wno-psabi -Wno-comment -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -mfpu=neon -fvisibility=hidden -fstack-protector-strong -fPIC -O2 -DNDEBUG -DNDEBUG -D_FORTIFY_SOURCE=2 C flags (Debug): -mthumb -fdata-sections -Wa,--noexecstack -fsigned-char -Wno-psabi -fsigned-char -W -Wall -Werror=return-type -Werror=non-virtual-dtor -Werror=address -Werror=sequence-point -Wformat -Werror=format-security -Wmissing-declarations -Wmissing-prototypes -Wstrict-prototypes -Wundef -Winit-self -Wpointer-arith -Wshadow -Wuninitialized -Winit-self -Wno-psabi -Wno-comment -fdiagnostics-show-option -pthread -fomit-frame-pointer -ffunction-sections -fdata-sections -mfpu=neon -fvisibility=hidden -fstack-protector-strong -fPIC -g -O0 -DDEBUG -D_DEBUG Linker flags (Release): -Wl,--fix-cortex-a8 -Wl,--no-undefined -Wl,--gc-sections -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--gc-sections -Wl,--as-needed -z noexecstack -z relro -z now Linker flags (Debug): -Wl,--fix-cortex-a8 -Wl,--no-undefined -Wl,--gc-sections -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -Wl,--gc-sections -Wl,--as-needed -z noexecstack -z relro -z now ccache: YES Precompiled headers: NO Extra dependencies: dl m pthread rt 3rdparty dependencies: OpenCV modules: To be built: calib3d core dnn features2d flann gapi highgui imgcodecs imgproc ml objdetect photo python3 stitching ts video videoio Disabled: world Disabled by dependency: - Unavailable: java js python2 Applications: tests perf_tests apps Documentation: NO Non-free algorithms: NO GUI: GTK+: YES (ver 3.22.11) GThread : YES (ver 2.50.3) GtkGlExt: NO Media I/O: ZLib: build (ver 1.2.11) JPEG: build-libjpeg-turbo (ver 2.0.6-62) PNG: build (ver 1.6.37) HDR: YES SUNRASTER: YES PXM: YES PFM: YES Video I/O: FFMPEG: YES avcodec: YES (57.64.101) avformat: YES 
(57.56.101) avutil: YES (55.34.101) swscale: YES (4.2.100) avresample: NO GStreamer: YES (1.10.4) v4l/v4l2: YES (linux/videodev2.h) Parallel framework: pthreads Trace: YES (with Intel ITT) Other third-party libraries: Inference Engine: YES (2021020000 / 2.1.0) * libs: /home/jenkins/workspace/OpenCV/OpenVINO/2021.2/build/debian9arm/ie/inference_engine/lib/armv7l/libinference_engine.so * includes: /home/jenkins/workspace/OpenCV/OpenVINO/2021.2/build/debian9arm/ie/inference_engine/include nGraph: YES (0.0.0+d2e3e1f) * libs: /home/jenkins/workspace/OpenCV/OpenVINO/2021.2/build/debian9arm/ie/ngraph/lib/libngraph.so * includes: /home/jenkins/workspace/OpenCV/OpenVINO/2021.2/build/debian9arm/ie/ngraph/include Custom HAL: YES (carotene (ver 0.0.1)) Protobuf: build (3.5.1) Python 3: Interpreter: /usr/bin/python3 (ver 3.5.3) Libraries: numpy: /usr/lib/python3.5/dist-packages/numpy/core/include (ver undefined - cannot be probed because of the cross-compilation) install path: /home/jenkins/workspace/OpenCV/OpenVINO/2021.2/build/debian9arm/build_release/install/python/python3 Python (for build): /usr/bin/python2.7 Install to: /home/jenkins/workspace/OpenCV/OpenVINO/2021.2/build/debian9arm/build_release/install -----------------------------------------------------------------