Responder a: Transformar “máscara” em vídeo

Home Fóruns Fórum Rastreamento de Objetos com Python e OpenCV Transformar “máscara” em vídeo Responder a: Transformar “máscara” em vídeo

#33636
Dalton Vargas
Moderador
    import numpy as np
    import cv2
    import sys
    from random import randint
    
    # Random color tuples for drawing text and borders
    TEXT_COLOR = (randint(0, 255), randint(0, 255), randint(0, 255))
    BORDER_COLOR = (randint(0, 255), randint(0, 255), randint(0, 255))
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    VIDEO_SOURCE = "videos/walking.avi"
    
    # Available background-subtractor (BGS) algorithms
    BGS_TYPES = ["GMG", "MOG", "MOG2", "KNN", "CNT"]
    
    # Select which BGS to use via its index in the BGS_TYPES list:
    # 0 = GMG, 1 = MOG, 2 = MOG2, 3 = KNN, 4 = CNT
    BGS_TYPE = BGS_TYPES[2]
    
    # Kernel: structuring element used by the morphological filters
    def getKernel(KERNEL_TYPE):
        """Return the 3x3 structuring element for the given morphological op.

        Parameters:
            KERNEL_TYPE: one of 'dilation', 'opening', 'closing'.

        Returns:
            A 3x3 uint8 kernel (elliptical for 'dilation', all-ones otherwise).

        Raises:
            ValueError: for an unknown KERNEL_TYPE. (BUG FIX: the original
            fell through to `return kernel` with `kernel` unbound, raising
            an accidental UnboundLocalError.)
        """
        if KERNEL_TYPE == "dilation":
            # Elliptical kernel; the other operations use a solid square
            return cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        if KERNEL_TYPE == "opening":
            return np.ones((3, 3), np.uint8)
        if KERNEL_TYPE == "closing":
            return np.ones((3, 3), np.uint8)
        raise ValueError(f"Unknown kernel type: {KERNEL_TYPE}")
    
    # Morphological filters for noise reduction
    def getFilter(img, filter):
        '''
            Apply a morphological noise-reduction filter to a BGS mask.

            Parameters:
                img: binary foreground mask (from a background subtractor).
                filter: one of 'closing', 'opening', 'dilation', 'combine'.
                    NOTE(review): the name shadows the builtin `filter`;
                    kept unchanged for backward compatibility with callers.

            Returns the filtered mask.

            Raises ValueError for an unknown filter name. (BUG FIX: the
            original silently returned None, which only blows up later
            inside cv2.bitwise_and.)

            These filters were hand-picked, based only on visual tests.
        '''
        if filter == 'closing':
            return cv2.morphologyEx(img, cv2.MORPH_CLOSE, getKernel("closing"), iterations=2)

        if filter == 'opening':
            return cv2.morphologyEx(img, cv2.MORPH_OPEN, getKernel("opening"), iterations=2)

        if filter == 'dilation':
            return cv2.dilate(img, getKernel("dilation"), iterations=2)

        if filter == 'combine':
            # Closing -> opening -> dilation, applied in sequence
            closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, getKernel("closing"), iterations=2)
            opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, getKernel("opening"), iterations=2)
            return cv2.dilate(opening, getKernel("dilation"), iterations=2)

        raise ValueError(f"Unknown filter: {filter}")
    
    def getBGSubtractor(BGS_TYPE):
        """Instantiate the requested background subtractor.

        Parameters:
            BGS_TYPE: one of "GMG", "MOG", "MOG2", "KNN", "CNT".
                GMG, MOG and CNT live in the opencv-contrib `cv2.bgsegm`
                module; MOG2 and KNN are in the main cv2 namespace.

        Returns a ready-to-use background-subtractor object.

        Raises:
            ValueError: for an unknown BGS_TYPE. (IMPROVEMENT: the original
            printed a message and called sys.exit(1); raising lets callers
            handle the error and still terminates an unattended script.)
        """
        if BGS_TYPE == "GMG":
            return cv2.bgsegm.createBackgroundSubtractorGMG(initializationFrames=120, decisionThreshold=.8)
        if BGS_TYPE == "MOG":
            return cv2.bgsegm.createBackgroundSubtractorMOG(history=200, nmixtures=5, backgroundRatio=.7, noiseSigma=0)
        if BGS_TYPE == "MOG2":
            return cv2.createBackgroundSubtractorMOG2(history=500, detectShadows=True, varThreshold=100)
        if BGS_TYPE == "KNN":
            return cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400, detectShadows=True)
        if BGS_TYPE == "CNT":
            return cv2.bgsegm.createBackgroundSubtractorCNT(minPixelStability=15, useHistory=True, maxPixelStability=15*60, isParallel=True)
        raise ValueError("Unknown createBackgroundSubtractor type")
    
    # Open the input video file
    cap = cv2.VideoCapture(VIDEO_SOURCE)
    
    bg_subtractor = getBGSubtractor(BGS_TYPE)
    
    def SubtractorTracker():
        """Track foreground motion: background subtraction + optical flow.

        Reads frames from the global `cap`, applies the global
        `bg_subtractor` plus morphological filtering, then tracks
        Shi-Tomasi corners on the foreground-only image with Lucas-Kanade
        optical flow, drawing motion trails. Runs until the video ends or
        the Enter key (code 13) is pressed in the display window.
        """
        # TRACKER initialization: Shi-Tomasi corner-detection parameters
        parameters_shitomasi = dict(maxCorners=100,
                                    qualityLevel=0.3,
                                    minDistance=7)
        # Lucas-Kanade optical-flow parameters
        parameters_lucas_kanade = dict(winSize=(15, 15),
                                       maxLevel=2,
                                       criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
        # One random color per tracked point (up to maxCorners entries)
        colors = np.random.randint(0, 255, (100, 3))

        # BUG FIX: the original never checked whether the first read succeeded
        ret, frame = cap.read()
        if not ret:
            print("Frame capture failed, stopping...")
            return
        frame_gray_init = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        edges = cv2.goodFeaturesToTrack(frame_gray_init, mask=None, **parameters_shitomasi)
        mask = np.zeros_like(frame)

        # BUG FIX: the original tested `cap.isOpened` (the bound method,
        # always truthy) instead of calling it
        while cap.isOpened():

            ok, frame = cap.read()

            if not ok:
                print("Frame capture failed, stopping...")
                break

            bg_mask = bg_subtractor.apply(frame)
            fg_mask = getFilter(bg_mask, 'combine')

            # Background-subtraction result: background pixels zeroed out
            result = cv2.bitwise_and(frame, frame, mask=fg_mask)

            # Feed the BGS result (not the raw frame) to the tracker
            frame_gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
            new_edges, status, errors = cv2.calcOpticalFlowPyrLK(
                frame_gray_init, frame_gray, edges, None, **parameters_lucas_kanade)
            news = new_edges[status == 1]
            olds = edges[status == 1]

            for i, (new, old) in enumerate(zip(news, olds)):
                # BUG FIX: cv2.line/cv2.circle require integer pixel
                # coordinates; optical flow returns float32 points
                a, b = (int(v) for v in new.ravel())
                c, d = (int(v) for v in old.ravel())

                mask = cv2.line(mask, (a, b), (c, d), colors[i].tolist(), 2)

                # Draws in place on `result` (the original assigned the
                # return value to `frame`, which was never used again)
                cv2.circle(result, (a, b), 5, colors[i].tolist(), -1)

            img = cv2.add(result, mask)

            cv2.imshow('BGS + Optical flow', img)
            if cv2.waitKey(1) == 13:  # Enter key stops playback
                break

            frame_gray_init = frame_gray.copy()
            edges = news.reshape(-1, 1, 2)

        # Release the capture and close windows when finished
        cap.release()
        cv2.destroyAllWindows()
    
    # Run the combined BGS + optical-flow tracker (blocks until the video ends)
    SubtractorTracker()
    • Esta resposta foi modificada 2 anos, 9 meses atrás por Dalton Vargas.