Application Example | A Step-by-Step Guide to a Tray and Fruit Recognition & Pricing Program with OpenCV (Code Included)
Test Images and Notes
The test images are shown below. The capture conditions leave room for improvement (uneven lighting and the camera angle both affect the results):






Common approaches to tray/dish recognition:
(1) Tray recognition: traditional methods, or machine learning / deep learning methods;
(2) Dish recognition: machine learning / deep learning methods;
This article uses a traditional method to recognize the trays.
Demo of the results:
Algorithm Approach and Implementation Steps
Approach: recognize the trays with a traditional method, distinguishing them by color and shape.
Specific steps:
(1) The trays come in three colors (white, green, orange) and two shapes (round and square). Colors are separated with HSV threshold ranges; round vs. square is decided by the ratio of the contour area to the area of its minimum enclosing circle: round if the ratio >= 0.9, square if it is < 0.9 (a short code sketch of these tests follows this list);
(2) There are three fruits: apple, banana, and orange. Apples and oranges can be told apart by color, and bananas can be told apart from oranges by the width-to-height ratio of the contour's minimum-area bounding rectangle;
(3) Pricing: multiply the count of each tray and fruit by its unit price and sum the results;
(4) Design the UI and display a payment QR code at checkout.
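As a quick illustration of the shape tests in steps (1) and (2), here is a minimal sketch. The helper names classify_tray_shape and looks_like_banana and the 1.8 aspect-ratio threshold are assumptions introduced for illustration only; the thresholds actually used in this project appear in the full code further down.

import math
import cv2

def classify_tray_shape(contour):
    # Ratio of contour area to the area of its minimum enclosing circle:
    # close to 1.0 for round trays, noticeably smaller for square ones.
    area = cv2.contourArea(contour)
    (_, _), radius = cv2.minEnclosingCircle(contour)
    rate = area / (math.pi * radius * radius)
    return 'circle' if rate >= 0.9 else 'rect'

def looks_like_banana(contour):
    # Bananas are elongated, so the minimum-area bounding rectangle has a
    # large long-side / short-side ratio, unlike round apples and oranges.
    (_, _), (w, h), _ = cv2.minAreaRect(contour)
    long_side, short_side = max(w, h), min(w, h)
    return short_side > 0 and long_side / short_side > 1.8  # assumed threshold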
The core Python-OpenCV implementation and its results are shown below:
# Core recognition routine (a method of the UI class). It assumes the usual
# imports elsewhere in the module (cv2, numpy as np, math, and the Qt classes
# QTableWidgetItem / QtCore, e.g. from PyQt5) and a module-level `font`
# such as cv2.FONT_HERSHEY_SIMPLEX.
def Recognize_Dish(self, img):
    # ------------------- banana detection ----------------- #
    banana_num = 0
    hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    lower_yellow = np.array([15, 30, 145])    # lower HSV bound
    upper_yellow = np.array([35, 255, 255])   # upper HSV bound
    mask = cv2.inRange(hsv_img, lower_yellow, upper_yellow)  # keep pixels in the color range
    mask = cv2.medianBlur(mask, 5)            # median filter
    # cv2.imshow('mask_banana', mask)
    contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt in contours:
        rect = cv2.minAreaRect(cnt)
        box = cv2.boxPoints(rect)
        box = np.int0(box)
        width = max(rect[1][0], rect[1][1])
        height = min(rect[1][0], rect[1][1])
        center = (int(rect[0][0]), int(rect[0][1]))
        if width > 180 and height > 80 and height < 130:
            # print(width, height)
            img = cv2.drawContours(img, [box], 0, (0, 0, 255), 2)
            cv2.putText(img, 'banana', center, font, 1, (255, 0, 255), 2)
            banana_num += 1
    item_0 = QTableWidgetItem("%d" % banana_num)
    self.tableWidget.setItem(8, 0, item_0)

    # ------------------- apple detection ----------------- #
    apple_num = 0
    lower_apple = np.array([0, 50, 50])       # lower HSV bound
    upper_apple = np.array([30, 255, 255])    # upper HSV bound
    mask_apple = cv2.inRange(hsv_img, lower_apple, upper_apple)  # keep pixels in the color range
    mask_apple = cv2.medianBlur(mask_apple, 9)  # median filter
    # cv2.imshow('mask_apple', mask_apple)
    # cv2.imwrite('mask_apple.jpg', mask_apple)
    contours2, hierarchy2 = cv2.findContours(mask_apple, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt2 in contours2:
        center, radius = cv2.minEnclosingCircle(cnt2)
        area = cv2.contourArea(cnt2)
        # print(radius)
        rate = area / (math.pi * radius * radius)
        if radius > 50 and radius < 75 and rate < 0.91:
            # print(radius)
            cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
            cv2.putText(img, 'apple', (int(center[0]), int(center[1])), font, 1, (255, 0, 0), 2)
            apple_num += 1
    item_1 = QTableWidgetItem("%d" % apple_num)
    self.tableWidget.setItem(6, 0, item_1)

    # ------------------- orange detection ----------------- #
    orange_num = 0
    lower_orange = np.array([0, 90, 60])      # lower HSV bound
    upper_orange = np.array([60, 255, 255])   # upper HSV bound
    mask_orange = cv2.inRange(hsv_img, lower_orange, upper_orange)  # keep pixels in the color range
    mask_orange = cv2.medianBlur(mask_orange, 5)  # median filter
    # cv2.imshow('mask_orange', mask_orange)
    # cv2.imwrite('mask_orange.jpg', mask_orange)
    contours3, hierarchy3 = cv2.findContours(mask_orange, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt3 in contours3:
        center, radius = cv2.minEnclosingCircle(cnt3)
        area = cv2.contourArea(cnt3)
        # print(radius)
        rate = area / (math.pi * radius * radius)
        if radius > 50 and radius < 75 and rate > 0.85:
            # print(radius)
            cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (255, 0, 255), 2)
            cv2.putText(img, 'orange', (int(center[0]), int(center[1])), font, 1, (255, 255, 0), 2)
            orange_num += 1
    item_2 = QTableWidgetItem("%d" % orange_num)
    self.tableWidget.setItem(7, 0, item_2)

    # ------------------- white tray detection ----------------- #
    white_circle_num = 0
    white_rect_num = 0
    lower_white = np.array([0, 0, 150])       # lower HSV bound
    upper_white = np.array([100, 55, 255])    # upper HSV bound
    mask_white = cv2.inRange(hsv_img, lower_white, upper_white)  # keep pixels in the color range
    mask_white = cv2.medianBlur(mask_white, 5)  # median filter
    # cv2.imshow('mask_white', mask_white)
    # cv2.imwrite('mask_white.jpg', mask_white)
    contours4, hierarchy4 = cv2.findContours(mask_white, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt4 in contours4:
        area = cv2.contourArea(cnt4)
        center, radius = cv2.minEnclosingCircle(cnt4)
        # print(radius)
        rate = area / (math.pi * radius * radius)
        if radius > 100 and radius < 160:
            if rate >= 0.9:
                cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (255, 255, 0), 2)
                cv2.putText(img, 'white_circle', (int(center[0]), int(center[1])), font, 1, (0, 255, 0), 2)
                white_circle_num += 1
            elif rate > 0.6 and rate < 0.9:
                rect = cv2.minAreaRect(cnt4)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                # cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (255, 0, 255), 5)
                img = cv2.drawContours(img, [box], 0, (255, 255, 0), 2)
                cv2.putText(img, 'white_rect', (int(center[0]), int(center[1])), font, 1, (0, 255, 0), 2)
                white_rect_num += 1
    item_3 = QTableWidgetItem("%d" % white_circle_num)
    self.tableWidget.setItem(0, 0, item_3)
    item_4 = QTableWidgetItem("%d" % white_rect_num)
    self.tableWidget.setItem(1, 0, item_4)

    # ------------------- green tray detection ----------------- #
    green_circle_num = 0
    green_rect_num = 0
    lower_green = np.array([30, 65, 65])      # lower HSV bound
    upper_green = np.array([80, 255, 255])    # upper HSV bound
    mask_green = cv2.inRange(hsv_img, lower_green, upper_green)  # keep pixels in the color range
    mask_green = cv2.medianBlur(mask_green, 5)  # median filter
    # cv2.imshow('mask_green', mask_green)
    # cv2.imwrite('mask_green.jpg', mask_green)
    contours5, hierarchy5 = cv2.findContours(mask_green, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt5 in contours5:
        area = cv2.contourArea(cnt5)
        center, radius = cv2.minEnclosingCircle(cnt5)
        # print(radius)
        rate = area / (math.pi * radius * radius)
        if radius > 100 and radius < 160:
            if rate >= 0.9:
                cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
                cv2.putText(img, 'green_circle', (int(center[0]), int(center[1])), font, 1, (0, 255, 255), 2)
                green_circle_num += 1
            elif rate > 0.6 and rate < 0.9:
                rect = cv2.minAreaRect(cnt5)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                # cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (255, 0, 255), 5)
                img = cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
                cv2.putText(img, 'green_rect', (int(center[0]), int(center[1])), font, 1, (0, 255, 255), 2)
                green_rect_num += 1
    item_5 = QTableWidgetItem("%d" % green_circle_num)
    self.tableWidget.setItem(4, 0, item_5)
    item_6 = QTableWidgetItem("%d" % green_rect_num)
    self.tableWidget.setItem(5, 0, item_6)

    # ------------------- orange tray detection ----------------- #
    orange_circle_num = 0
    orange_rect_num = 0
    lower_orange_dish = np.array([0, 100, 100])   # lower HSV bound
    upper_orange_dish = np.array([15, 255, 255])  # upper HSV bound
    mask_orange_dish = cv2.inRange(hsv_img, lower_orange_dish, upper_orange_dish)  # keep pixels in the color range
    mask_orange_dish = cv2.medianBlur(mask_orange_dish, 5)  # median filter
    # cv2.imshow('mask_orange_dish', mask_orange_dish)
    # cv2.imwrite('mask_orange_dish.jpg', mask_orange_dish)
    contours6, hierarchy6 = cv2.findContours(mask_orange_dish, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    for cnt6 in contours6:
        area = cv2.contourArea(cnt6)
        center, radius = cv2.minEnclosingCircle(cnt6)
        # print(radius)
        rate = area / (math.pi * radius * radius)
        if radius > 100 and radius < 160:
            # print(rate)
            if rate >= 0.8:
                cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (0, 255, 0), 2)
                cv2.putText(img, 'orange_circle', (int(center[0]), int(center[1])), font, 1, (255, 0, 255), 2)
                orange_circle_num += 1
            elif rate > 0.3 and rate < 0.8:
                rect = cv2.minAreaRect(cnt6)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                # cv2.circle(img, (int(center[0]), int(center[1])), int(radius), (255, 0, 255), 5)
                img = cv2.drawContours(img, [box], 0, (0, 255, 0), 2)
                cv2.putText(img, 'orange_rect', (int(center[0]), int(center[1])), font, 1, (255, 0, 255), 2)
                orange_rect_num += 1
    item_7 = QTableWidgetItem("%d" % orange_circle_num)
    self.tableWidget.setItem(2, 0, item_7)
    item_8 = QTableWidgetItem("%d" % orange_rect_num)
    self.tableWidget.setItem(3, 0, item_8)

    # Center-align the count and price columns of the table.
    for i in range(0, 9):
        self.tableWidget.item(i, 0).setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
        self.tableWidget.item(i, 1).setTextAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)

    # ---------------- compute total price -------------- #
    self.price = self.price_white_circle * white_circle_num + \
                 self.price_white_rect * white_rect_num + \
                 self.price_orange_circle * orange_circle_num + \
                 self.price_orange_rect * orange_rect_num + \
                 self.price_green_circle * green_circle_num + \
                 self.price_green_rect * green_rect_num + \
                 self.price_apple * apple_num + \
                 self.price_orange * orange_num + \
                 self.price_banana * banana_num
    print(self.price)
    return img
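The method above is tied to the Qt table widget (self.tableWidget), the per-item price attributes (self.price_*), and a module-level font defined elsewhere in the project. To experiment with just the detection part, a standalone driver along the following lines can exercise the same HSV-mask plus contour pipeline; the image path, the helper count_in_radius_range, and the omission of the circle-vs-rectangle ratio check are assumptions made here for brevity, not part of the original code.

import cv2
import numpy as np

def count_in_radius_range(hsv_img, lower, upper, r_min, r_max):
    """Threshold the HSV image with a color range, clean the mask, and count
    external contours whose minimum-enclosing-circle radius lies in (r_min, r_max)."""
    mask = cv2.inRange(hsv_img, np.array(lower), np.array(upper))
    mask = cv2.medianBlur(mask, 5)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    count = 0
    for cnt in contours:
        (_, _), radius = cv2.minEnclosingCircle(cnt)
        if r_min < radius < r_max:
            count += 1
    return count

if __name__ == '__main__':
    img = cv2.imread('tray_test.jpg')              # assumed test image path
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Same HSV range and radius bounds the article uses for white trays
    # (the circle-vs-rectangle ratio check is omitted here for brevity).
    white_trays = count_in_radius_range(hsv, [0, 0, 150], [100, 55, 255], 100, 160)
    print('white trays found:', white_trays)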






Closing Remarks
(1) This algorithm only targets the simple case with few kinds and shapes of fruits and trays; take it as a reference;
(2) Real applications are more complex and more demanding, and even common open-source object detection networks struggle to meet the requirements;
(3) A typical real-world dish recognition product requires training (or templating) each dish from a single image, short training and recognition times, and the ability to update promptly in production. Products of this kind are genuinely hard to build; if you have a good approach, feel free to leave a comment.
— Copyright Notice —
Source: OpenCV與AI深度學(xué)習(xí)
For academic sharing only; the copyright belongs to the original author.
In case of infringement, please contact WeChat ID: yiyang-sy for deletion or modification.
