import cv2
import mediapipe as mp


class GestureDetector:
    """Detect hands with MediaPipe Hands and extract pixel landmark positions."""

    def __init__(self, mode=False, maxHands=2, detectionCon=0.5, trackCon=0.5):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon

        # Configure the MediaPipe Hands solution with the requested hand limit
        # and confidence thresholds.
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(static_image_mode=self.mode,
                                        max_num_hands=self.maxHands,
                                        min_detection_confidence=self.detectionCon,
                                        min_tracking_confidence=self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils

    def findHands(self, img, draw=True):
        # MediaPipe expects RGB input, while OpenCV frames are BGR.
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.h, self.w, self.c = img.shape  # img.shape is (height, width, channels)
        self.results = self.hands.process(imgRGB)
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    # Draw on the original BGR frame so colors display
                    # correctly with cv2.imshow; drawing on imgRGB and
                    # returning it would swap the color channels.
                    self.mpDraw.draw_landmarks(img, handLms,
                                               self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, handNo=0):
        # Requires findHands() to have been called first so self.results exists.
        lmList = []
        if self.results.multi_hand_landmarks:
            myHand = self.results.multi_hand_landmarks[handNo]
            h, w, c = img.shape  # look up the frame size once, not per landmark
            for idx, lm in enumerate(myHand.landmark):
                # Landmarks are normalized to [0, 1]; scale to pixel coordinates.
                cx, cy = int(lm.x * w), int(lm.y * h)
                lmList.append([idx, cx, cy])
        return lmList
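

# Minimal usage sketch (an illustration, not part of the original module): it
# assumes a webcam at index 0, draws detected hands on each frame, and prints
# the pixel position of the index fingertip (MediaPipe landmark 8) while a
# hand is visible. Press 'q' to quit.
if __name__ == "__main__":
    detector = GestureDetector()
    cap = cv2.VideoCapture(0)  # assumed camera index
    while True:
        success, frame = cap.read()
        if not success:
            break
        frame = detector.findHands(frame)
        lmList = detector.findPosition(frame)
        if lmList:
            print("Index fingertip at:", lmList[8][1:])  # [cx, cy] of landmark 8
        cv2.imshow("GestureDetector", frame)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
    cap.release()
    cv2.destroyAllWindows()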