# import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2

def midpoint(ptA, ptB):
    return ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True,
    help="path to the input image")
ap.add_argument("-w", "--width", type=float, required=True,
    help="width of the left-most object in the image (in inches)")
args = vars(ap.parse_args())
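The loop that follows references image, cnts, colors, and refObj, which the rest of the script sets up between the argument parsing above and the contour loop below. For readers jumping in here, a minimal sketch of that setup, assuming the usual blur/Canny/contour-sorting pipeline (the kernel size, Canny thresholds, and color tuple are illustrative, not prescriptive):

# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 0)

# edge detection, followed by a dilation + erosion to close gaps between edges
edged = cv2.Canny(gray, 50, 100)
edged = cv2.dilate(edged, None, iterations=1)
edged = cv2.erode(edged, None, iterations=1)

# find external contours in the edge map and sort them left-to-right so the
# reference object is encountered first
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
(cnts, _) = contours.sort_contours(cnts)

# one BGR color per drawn point pair (four corners plus the centroid), and the
# not-yet-initialized reference object
colors = ((0, 0, 255), (240, 0, 159), (0, 165, 255), (255, 255, 0), (255, 0, 255))
refObj = None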
# loop over the contours individually
for c in cnts:
    # if the contour is not sufficiently large, ignore it
    if cv2.contourArea(c) < 100:
        continue

    # compute the rotated bounding box of the contour
    box = cv2.minAreaRect(c)
    box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
    box = np.array(box, dtype="int")

    # order the points in the contour such that they appear
    # in top-left, top-right, bottom-right, and bottom-left
    # order
    box = perspective.order_points(box)

    # compute the center of the bounding box
    cX = np.average(box[:, 0])
    cY = np.average(box[:, 1])
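As a quick aside, order_points takes the four corners in whatever order cv2.boxPoints returns them and rearranges them into top-left, top-right, bottom-right, bottom-left order. A standalone check with toy coordinates (illustrative only; np and perspective are already imported at the top of the script):

# toy 4-point box, deliberately out of order
pts = np.array([[50, 10], [10, 10], [10, 60], [50, 60]], dtype="float32")
print(perspective.order_points(pts))
# expected output (approximately):
# [[10. 10.]    <- top-left
#  [50. 10.]    <- top-right
#  [50. 60.]    <- bottom-right
#  [10. 60.]]   <- bottom-left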
    # if this is the first contour we are examining (i.e.,
    # the left-most contour), we presume this is the
    # reference object
    if refObj is None:
        # unpack the ordered bounding box, then compute the
        # midpoint between the top-left and bottom-left points,
        # followed by the midpoint between the top-right and
        # bottom-right
        (tl, tr, br, bl) = box
        (tlblX, tlblY) = midpoint(tl, bl)
        (trbrX, trbrY) = midpoint(tr, br)

        # compute the Euclidean distance between the midpoints,
        # then construct the reference object
        D = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
        refObj = (box, (cX, cY), D / args["width"])
        continue
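The third element of refObj is the calibration ratio: the reference object's width in pixels divided by its known width in inches, i.e. pixels per inch. With made-up numbers, the conversion works like this:

# illustrative numbers only: suppose the reference object spans 150 pixels
# and --width was given as 0.955 (roughly the diameter of a US quarter)
D_pixels = 150.0
known_width_in = 0.955
pixels_per_inch = D_pixels / known_width_in    # ~157.1 pixels per inch

# a 400-pixel distance measured later in the image then converts to inches
print(400.0 / pixels_per_inch)                 # ~2.55 inches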
    # draw the contours on the image
    orig = image.copy()
    cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
    cv2.drawContours(orig, [refObj[0].astype("int")], -1, (0, 255, 0), 2)

    # stack the reference coordinates and the object coordinates
    # to include the object center
    refCoords = np.vstack([refObj[0], refObj[1]])
    objCoords = np.vstack([box, (cX, cY)])
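Both stacks end up as 5x2 arrays: the four ordered corners followed by the centroid. That is what lets the next loop zip them against the five-entry colors tuple, one color per corresponding point pair. A toy check of the shape (made-up coordinates):

# made-up box corners and centroid, just to confirm what np.vstack produces
demo_box = np.array([[10, 10], [50, 10], [50, 60], [10, 60]])
demo_center = (30.0, 35.0)
print(np.vstack([demo_box, demo_center]).shape)   # (5, 2)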
Now we can start computing the centroid of each object in the image, along with the distances between those centroids:
    # loop over the original points
    for ((xA, yA), (xB, yB), color) in zip(refCoords, objCoords, colors):
        # draw circles corresponding to the current points and
        # connect them with a line
        cv2.circle(orig, (int(xA), int(yA)), 5, color, -1)
        cv2.circle(orig, (int(xB), int(yB)), 5, color, -1)
        cv2.line(orig, (int(xA), int(yA)), (int(xB), int(yB)),
            color, 2)

        # compute the Euclidean distance between the coordinates,
        # and then convert the distance in pixels to distance in
        # units
        D = dist.euclidean((xA, yA), (xB, yB)) / refObj[2]
        (mX, mY) = midpoint((xA, yA), (xB, yB))
        cv2.putText(orig, "{:.1f}in".format(D), (int(mX), int(mY - 10)),
            cv2.FONT_HERSHEY_SIMPLEX, 0.55, color, 2)

        # show the output image
        cv2.imshow("Image", orig)
        cv2.waitKey(0)
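Assuming the script is saved as distance_between.py and you have a test image handy (both names below are placeholders), it would be invoked along these lines, with --width set to the real-world width in inches of the left-most (reference) object:

$ python distance_between.py --image images/example_01.png --width 0.955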