https://github.com/pyresearch/pyresearch
Computer Vision Helping Library
- Host: GitHub
- URL: https://github.com/pyresearch/pyresearch
- Owner: pyresearch
- License: MIT
- Created: 2023-02-21T16:55:08.000Z (over 2 years ago)
- Default Branch: main
- Last Pushed: 2024-11-05T07:51:55.000Z (11 months ago)
- Last Synced: 2025-07-27T06:28:42.594Z (2 months ago)
- Topics: artificialintelligence, datascience, google, machine, machine-learning, machinelearning, mediapipe, opencv, pypi, pyresearch, python, pythonresearch, researchtools, visualization
- Language: Python
- Homepage: https://pyresearch.org/
- Size: 1.89 MB
- Stars: 41
- Watchers: 0
- Forks: 8
- Open Issues: 16
Metadata Files:
- Readme: README.md
- License: LICENSE
README
## Installation
You can simply use pip to install the latest version of pyresearch.
`pip install pyresearch`
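After installing, you can sanity-check the setup with a quick import (a minimal sketch; it only assumes the package installed cleanly):

```python
# Verify the installation by importing the package and printing where it lives.
import pyresearch
print(pyresearch.__file__)
```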
### 60 FPS Face Detection
```python
from pyresearch.FaceDetectionModule import FaceDetector
import cv2

cap = cv2.VideoCapture(0)
detector = FaceDetector()

while True:
    success, img = cap.read()
    img, bboxs = detector.findFaces(img)

    if bboxs:
        # bboxInfo - "id", "bbox", "score", "center"
        center = bboxs[0]["center"]
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)

    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
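Per the `bboxInfo` comment above, each entry in `bboxs` also carries a `"bbox"` rectangle, so every detected face can be annotated, not just the first. A minimal sketch under that assumption, placed inside the main loop:

```python
# Draw a rectangle around every detected face.
# Assumes each entry in bboxs holds "bbox" as (x, y, w, h), per the comment above.
for bbox in bboxs:
    x, y, w, h = bbox["bbox"]
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 255), 2)
```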
### Face Mesh Detection
```python
from pyresearch.FaceMeshModule import FaceMeshDetector
import cv2

cap = cv2.VideoCapture(0)
detector = FaceMeshDetector(maxFaces=2)

while True:
    success, img = cap.read()
    img, faces = detector.findFaceMesh(img)

    if faces:
        print(faces[0])

    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
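Printing `faces[0]` shows the raw landmark list for one face. Assuming each landmark is an `(x, y)` pixel coordinate (MediaPipe's face mesh yields 468 such points), individual points can be used directly inside the loop:

```python
# Count the mesh points and mark the first landmark of the first face.
# Assumes each landmark in faces[0] is an (x, y) pixel coordinate.
if faces:
    landmarks = faces[0]
    print(f"{len(landmarks)} mesh points detected")
    x, y = landmarks[0]
    cv2.circle(img, (x, y), 3, (0, 255, 0), cv2.FILLED)
```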
### FPS
```python
import pyresearch
import cv2

fpsReader = pyresearch.FPS()

cap = cv2.VideoCapture(0)
cap.set(3, 1280)  # capture width
cap.set(4, 720)   # capture height

while True:
    success, img = cap.read()
    fps, img = fpsReader.update(img, pos=(50, 80), color=(0, 255, 0), scale=5, thickness=5)
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
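Judging by the call above, `update` both measures the frame rate and draws it onto the frame, returning the numeric `fps` alongside the annotated image; `pos`, `color`, `scale`, and `thickness` control where and how the overlay text is rendered.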
### Stack Images
```python
import pyresearch
import cv2

cap = cv2.VideoCapture(0)
cap.set(3, 1280)
cap.set(4, 720)

while True:
    success, img = cap.read()
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgList = [img, img, imgGray, img, imgGray, img, imgGray, img, img]
    stackedImg = pyresearch.stackImages(imgList, 3, 0.4)

    cv2.imshow("stackedImg", stackedImg)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
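Here the nine frames are laid out on a 3-column grid and each tile is scaled to 0.4 of its original size; `stackImages(imgList, cols, scale)` presumably converts the single-channel grayscale frames to BGR internally so color and gray images can share one canvas.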
### Hand Tracking
#### Basic Code Example
```python
from pyresearch.HandTrackingModule import HandDetector
import cv2

cap = cv2.VideoCapture(0)
detector = HandDetector(detectionCon=0.8, maxHands=2)

while True:
    # Get image frame
    success, img = cap.read()

    # Find the hand and its landmarks
    hands, img = detector.findHands(img)  # with draw
    # hands = detector.findHands(img, draw=False)  # without draw

    if hands:
        # Hand 1
        hand1 = hands[0]
        lmList1 = hand1["lmList"]  # List of 21 landmark points
        bbox1 = hand1["bbox"]  # Bounding box info x, y, w, h
        centerPoint1 = hand1["center"]  # Center of the hand cx, cy
        handType1 = hand1["type"]  # Hand type: "Left" or "Right"

        fingers1 = detector.fingersUp(hand1)

        if len(hands) == 2:
            # Hand 2
            hand2 = hands[1]
            lmList2 = hand2["lmList"]  # List of 21 landmark points
            bbox2 = hand2["bbox"]  # Bounding box info x, y, w, h
            centerPoint2 = hand2["center"]  # Center of the hand cx, cy
            handType2 = hand2["type"]  # Hand type: "Left" or "Right"

            fingers2 = detector.fingersUp(hand2)

            # Find the distance between two landmarks (same hand or different hands)
            length, info, img = detector.findDistance(lmList1[8], lmList2[8], img)  # with draw
            # length, info = detector.findDistance(lmList1[8], lmList2[8])  # without draw

    # Display
    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
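As a usage example, the same `findDistance` call can drive a simple one-hand pinch gesture: landmarks 4 and 8 are the thumb and index fingertips in the standard MediaPipe hand layout. The 30-pixel threshold below is an assumed value to tune for your camera:

```python
# Pinch detection sketch: thumb tip (4) to index tip (8) on the first hand.
# The 30-pixel threshold is an assumption; tune it for your resolution.
if hands:
    lmList1 = hands[0]["lmList"]
    length, info, img = detector.findDistance(lmList1[4], lmList1[8], img)
    if length < 30:
        cv2.putText(img, "Pinch!", (50, 50),
                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 2)
```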
### Pose Estimation
```python
from pyresearch.PoseModule import PoseDetector
import cv2

cap = cv2.VideoCapture(0)
detector = PoseDetector()

while True:
    success, img = cap.read()
    img = detector.findPose(img)
    lmList, bboxInfo = detector.findPosition(img, bboxWithHands=False)

    if bboxInfo:
        center = bboxInfo["center"]
        cv2.circle(img, center, 5, (255, 0, 255), cv2.FILLED)

    cv2.imshow("Image", img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
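`findPosition` returns both the landmark list and the bounding-box info; the example only uses the box `center`, but `lmList` carries the individual body landmarks if you need per-joint coordinates.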
### Real-Time-Background-Remover-through-Human-Segmentation
```python
import cv2
from pyresearch.SelfiSegmentationModule import SelfiSegmentation

# Connect the internal camera (the default camera index is 0)
cap = cv2.VideoCapture(0)

# Extract the camera capture size
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Load and resize the background image
background_image = cv2.resize(cv2.imread("bg_image.jpeg"), (width, height))

# Create a segmentation instance for extracting the foreground (the person)
segmentor = SelfiSegmentation()

while True:
    # Read the captured image from the camera
    ret, frame = cap.read()

    # Segment the image, compositing the person over the background
    segmented_img = segmentor.removeBG(frame, background_image, threshold=0.9)

    # Concatenate the original and segmented images horizontally
    concatenated_img = cv2.hconcat([frame, segmented_img])
    cv2.imshow("Camera Live", concatenated_img)

    # Exit condition
    if cv2.waitKey(1) == ord('q'):
        break

# Release the resources
cap.release()
cv2.destroyAllWindows()
```
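If no background image is at hand, a solid-color canvas works the same way. A minimal sketch, assuming `removeBG` accepts any BGR image matching the frame size (numpy builds the canvas):

```python
import numpy as np

# Build a plain green background the same size as the camera frames.
# Assumes removeBG accepts any BGR image with the frame's dimensions.
green_bg = np.zeros((height, width, 3), dtype=np.uint8)
green_bg[:] = (0, 255, 0)  # BGR green

segmented_img = segmentor.removeBG(frame, green_bg, threshold=0.9)
```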
### Heatmap & Tracking Objects using YOLOv8 ByteTrack & Supervision
```python
import argparse
from pyresearch.heatmap_and_track import process_video

def main():
    # Define the arguments as a dictionary
    args = {
        "source_weights_path": "yolov8s.pt",
        "source_video_path": "people-walking.mp4",
        "target_video_path": "output.mp4",
        "confidence_threshold": 0.35,
        "iou_threshold": 0.5,
        "heatmap_alpha": 0.5,
        "radius": 25,
        "track_threshold": 0.35,
        "track_seconds": 5,
        "match_threshold": 0.99,
        "display": True,
    }

    # Convert the dictionary to an argparse Namespace object
    args_namespace = argparse.Namespace(**args)

    # Call the process_video function with the Namespace object
    process_video(args_namespace)

if __name__ == "__main__":
    main()
```
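Because `process_video` just takes an `argparse.Namespace`, the same example can be driven from the command line. A minimal sketch whose flag names and defaults mirror the dictionary above (the flags are assumptions for illustration, not an official CLI):

```python
import argparse
from pyresearch.heatmap_and_track import process_video

# Build a CLI whose flags mirror the dictionary keys used above.
parser = argparse.ArgumentParser(description="Heatmap & tracking with YOLOv8")
parser.add_argument("--source_weights_path", default="yolov8s.pt")
parser.add_argument("--source_video_path", default="people-walking.mp4")
parser.add_argument("--target_video_path", default="output.mp4")
parser.add_argument("--confidence_threshold", type=float, default=0.35)
parser.add_argument("--iou_threshold", type=float, default=0.5)
parser.add_argument("--heatmap_alpha", type=float, default=0.5)
parser.add_argument("--radius", type=int, default=25)
parser.add_argument("--track_threshold", type=float, default=0.35)
parser.add_argument("--track_seconds", type=int, default=5)
parser.add_argument("--match_threshold", type=float, default=0.99)
parser.add_argument("--display", action="store_true", default=True)

process_video(parser.parse_args())
```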
[Watch the demo on YouTube](https://youtu.be/KYSPbSPBgXM)
## Buy Us a Beer!
This Pyresearch platform is funded by donations only. Please support us to maintain and further improve our computer vision solutions!
Much more information about the self-sufficiency challenge: [Pyresearch](http://tamataocean.com)
[contact@pyresearch.org](mailto:contact@pyresearch.org)