Reply To: Turning the "mask" into a video


#33716
Dalton Vargas
Moderator
    import numpy as np
    import cv2
    import sys
    from random import randint
    
    ##################################################################################
    
    # Video source
    VIDEO_SOURCE = "videos/persons.mp4"
    
    # Randomly chosen text/tracker colors (B, G, R) and the display font

    TEXT_COLOR = (randint(0, 255), randint(0, 255), randint(0, 255))
    TRACKER_COLOR = (randint(0, 255), randint(0, 255), randint(0, 255))
    FONT = cv2.FONT_HERSHEY_SIMPLEX
    
    # Background subtraction algorithm
    BGS_TYPES = ["MOG2", "KNN"]
    BGS_TYPE = BGS_TYPES[1]  # KNN
    
    ##################################################################################
    
    # Kernel used by each morphological operation
    def getKernel(KERNEL_TYPE):
        if KERNEL_TYPE == "dilation":
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        elif KERNEL_TYPE == "opening":
            kernel = np.ones((3, 3), np.uint8)
        elif KERNEL_TYPE == "closing":
            kernel = np.ones((3, 3), np.uint8)
        else:
            raise ValueError("Unknown kernel type: " + KERNEL_TYPE)

        return kernel
    
    ##################################################################################
    
    # Morphological filters used to clean up the foreground mask
    def getFilter(img, filter):
        if filter == 'closing':
            return cv2.morphologyEx(img, cv2.MORPH_CLOSE, getKernel("closing"), iterations=2)
    
        if filter == 'opening':
            return cv2.morphologyEx(img, cv2.MORPH_OPEN, getKernel("opening"), iterations=2)
    
        if filter == 'dilation':
            return cv2.dilate(img, getKernel("dilation"), iterations=2)
    
        if filter == 'combine':
            # Chain the three operations: closing -> opening -> dilation
            closing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, getKernel("closing"), iterations=2)
            opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, getKernel("opening"), iterations=2)
            dilation = cv2.dilate(opening, getKernel("dilation"), iterations=2)
            return dilation
    
    ##################################################################################
    
    # Background subtractor selection
    def getBGSubtractor(BGS_TYPE):
        if BGS_TYPE == "MOG2":
            return cv2.createBackgroundSubtractorMOG2()
        if BGS_TYPE == "KNN":
            return cv2.createBackgroundSubtractorKNN()
        print("Detector inválido")
        sys.exit(1)
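
    # Both constructors also accept optional tuning arguments, e.g.
    # cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400.0, detectShadows=True);
    # the values shown are OpenCV's defaults, given here only as an illustration.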
    
    ##################################################################################
    
    # Load the video and create the background subtractor
    cap = cv2.VideoCapture(VIDEO_SOURCE)
    bg_subtractor = getBGSubtractor(BGS_TYPE)
    minArea = 150  # minimum blob area (not used in this snippet)
    
    ##################################################################################
    
    # Shi-Tomasi and Lucas-Kanade parameter setup
    def SK():
        parameters_shitomasi = dict(maxCorners=100,    # maximum number of corners to detect
                                    qualityLevel=0.05, # minimum corner quality; lower it to detect more points
                                    minDistance=30)    # minimum distance between detected points
        parameters_lucas_kanade = dict(winSize=(15, 15),
                                       maxLevel=2,
                                       criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.05))
        colors = np.random.randint(0, 255, (100, 3))  # one random color per tracked point

        ret, frame = cap.read()
        frame_gray_init = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        edges = cv2.goodFeaturesToTrack(frame_gray_init, mask=None, **parameters_shitomasi)
        mask = np.zeros_like(frame)  # blank image used to draw the motion trails

        # print(edges)           # initial positions of the detected points
        # print(len(edges))      # number of detected points
        # print(mask)
        # print(np.shape(mask))  # frame dimensions
    
        while cap.isOpened():
            ok, frame = cap.read()
    
            if not ok:
                print("ERRO...")
                break
    
            bg_mask = bg_subtractor.apply(frame)     # raw foreground mask from the background subtractor
            fg_mask = getFilter(bg_mask, 'combine')  # cleaned mask (closing + opening + dilation)

            result = cv2.bitwise_and(frame, frame, mask=fg_mask)  # keep only the moving pixels
    
            frame_gray = cv2.cvtColor(result, cv2.COLOR_BGR2GRAY)
            new_edges, status, errors = cv2.calcOpticalFlowPyrLK(frame_gray_init, frame_gray, edges, None, **parameters_lucas_kanade)
            news = new_edges[status == 1]  # points found in the current frame
            olds = edges[status == 1]      # their positions in the previous frame
    
            for i, (new, old) in enumerate(zip(news, olds)):
                a, b = new.ravel()
                c, d = old.ravel()

                # The cast below is needed because ravel() returns floats, while
                # cv2.line/cv2.circle expect integer pixel coordinates (recent OpenCV versions reject floats).
                a, b, c, d = int(a), int(b), int(c), int(d)

                mask = cv2.line(mask, (a, b), (c, d), colors[i].tolist(), 2)   # draw the motion trail
                frame = cv2.circle(result, (a, b), 5, colors[i].tolist(), -1)  # mark the current point
    
            img = cv2.add(result, mask)
    
            cv2.imshow('BGS + Optical flow', img)
            if cv2.waitKey(1) == 13:  # Enter key stops the loop
                break
    
            frame_gray_init = frame_gray.copy()  # the current frame becomes the next reference
            edges = news.reshape(-1, 1, 2)       # keep only the successfully tracked points
    
        cap.release()
        cv2.destroyAllWindows()

    # Run the tracker
    SK()
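
Since the thread is about turning the "mask" into a video: if you also want to save the processed result to a file instead of only displaying it with cv2.imshow, a minimal sketch using cv2.VideoWriter could look like the one below. The output name "output_mask.avi" and the MJPG codec are just illustrative choices, not part of the course code:

    import cv2

    cap = cv2.VideoCapture("videos/persons.mp4")
    fps = cap.get(cv2.CAP_PROP_FPS) or 30.0          # fall back to 30 fps if the property is missing
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Example output file and codec (illustrative choices)
    writer = cv2.VideoWriter("output_mask.avi",
                             cv2.VideoWriter_fourcc(*"MJPG"),
                             fps, (width, height))

    bgs = cv2.createBackgroundSubtractorKNN()
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        fg_mask = bgs.apply(frame)                                 # single-channel foreground mask
        writer.write(cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR))    # the writer expects 3-channel BGR frames

    cap.release()
    writer.release()

To record the tracking visualization itself, you can instead call writer.write(img) inside the while loop of SK(); img is already a BGR image, so no color conversion is needed there.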