<ruby id="bdb3f"></ruby>

    <p id="bdb3f"><cite id="bdb3f"></cite></p>

      <p id="bdb3f"><cite id="bdb3f"><th id="bdb3f"></th></cite></p><p id="bdb3f"></p>
        <p id="bdb3f"><cite id="bdb3f"></cite></p>

          <pre id="bdb3f"></pre>
          <pre id="bdb3f"><del id="bdb3f"><thead id="bdb3f"></thead></del></pre>

          <ruby id="bdb3f"><mark id="bdb3f"></mark></ruby><ruby id="bdb3f"></ruby>
          <pre id="bdb3f"><pre id="bdb3f"><mark id="bdb3f"></mark></pre></pre><output id="bdb3f"></output><p id="bdb3f"></p><p id="bdb3f"></p>

          <pre id="bdb3f"><del id="bdb3f"><progress id="bdb3f"></progress></del></pre>

                <ruby id="bdb3f"></ruby>

                ThinkChat2.0新版上線,更智能更精彩,支持會話、畫圖、視頻、閱讀、搜索等,送10W Token,即刻開啟你的AI之旅 廣告
Stitching two images that share an overlapping region into one

Scheme 1:

Reference: https://blog.csdn.net/qq878594585/article/details/81901703#commentBox

Code:

```
import numpy as np
import cv2

leftgray = cv2.imread('1.jpg')
rightgray = cv2.imread('2.jpg')

hessian = 400
# OpenCV 3.x with opencv-contrib; in OpenCV 2.x this was cv2.SURF(hessian).
# Hessian threshold 400: the larger the threshold, the fewer features are detected.
surf = cv2.xfeatures2d.SURF_create(hessian)
kp1, des1 = surf.detectAndCompute(leftgray, None)    # detect keypoints and compute descriptors
kp2, des2 = surf.detectAndCompute(rightgray, None)

# FLANN matcher parameters
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)    # index: 5 kd-trees
searchParams = dict(checks=50)                               # number of recursive checks per query

# FlannBasedMatcher: fast approximate nearest-neighbour matching
flann = cv2.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)    # two nearest matches per descriptor

good = []    # keep only the good matches
for m, n in matches:
    # Lowe's ratio test: keep the match if the best distance is
    # less than 0.7x the second-best distance
    if m.distance < 0.7 * n.distance:
        good.append(m)

src_pts = np.array([kp1[m.queryIdx].pt for m in good])    # matched points in the query (left) image
dst_pts = np.array([kp2[m.trainIdx].pt for m in good])    # matched points in the train (right) image

H, status = cv2.findHomography(src_pts, dst_pts)    # estimate the homography

h, w = leftgray.shape[:2]
h1, w1 = rightgray.shape[:2]
shft = np.array([[1.0, 0, w], [0, 1.0, 0], [0, 0, 1.0]])
M = np.dot(shft, H)    # map the left image into the right image's frame, shifted right by w
dst_corners = cv2.warpPerspective(leftgray, M, (w * 2, h))    # canvas wide enough for both images

cv2.imshow('tiledImg1', dst_corners)     # the first image is already in position
dst_corners[0:h, w:w * 2] = rightgray    # paste the second image on the right half
# cv2.imwrite('tiled.jpg', dst_corners)
cv2.imshow('tiledImg', dst_corners)
cv2.imshow('leftgray', leftgray)
cv2.imshow('rightgray', rightgray)
cv2.waitKey()
cv2.destroyAllWindows()
```
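Note that Scheme 1 fits the homography to every ratio-test survivor with no outlier rejection. If the match set is noisy, cv2.findHomography can run RANSAC instead, which is what Scheme 2 below does; a minimal sketch, reusing src_pts and dst_pts from the code above:

```
# RANSAC with a 5.0 px reprojection threshold; `mask` marks the inlier matches.
H, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
print('inliers: {}/{}'.format(int(mask.sum()), len(mask)))
```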
Scheme 2:

```
import numpy as np
import cv2 as cv
from matplotlib import pyplot as plt

if __name__ == '__main__':
    top, bot, left, right = 100, 100, 0, 500
    img1 = cv.imread('F:/licheng/six.jpg')
    img2 = cv.imread('F:/licheng/five.jpg')
    img1 = img1.astype('uint8')
    img2 = img2.astype('uint8')
    # pad both images so the stitched result has room to grow
    srcImg = cv.copyMakeBorder(img1, top, bot, left, right,
                               cv.BORDER_CONSTANT, value=(0, 0, 0))
    testImg = cv.copyMakeBorder(img2, top, bot, left, right,
                                cv.BORDER_CONSTANT, value=(0, 0, 0))
    img1gray = cv.cvtColor(srcImg, cv.COLOR_BGR2GRAY)
    img2gray = cv.cvtColor(testImg, cv.COLOR_BGR2GRAY)
    sift = cv.xfeatures2d.SIFT_create()    # cv.SIFT_create() in OpenCV >= 4.4
    # find the keypoints and descriptors with SIFT; detect on the padded
    # grayscale images so coordinates match the frames warped below
    kp1, des1 = sift.detectAndCompute(img1gray, None)
    kp2, des2 = sift.detectAndCompute(img2gray, None)

    # FLANN parameters
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Need to draw only good matches, so create a mask
    matchesMask = [[0, 0] for i in range(len(matches))]

    good = []
    pts1 = []
    pts2 = []
    # ratio test as per Lowe's paper
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.7 * n.distance:
            good.append(m)
            pts2.append(kp2[m.trainIdx].pt)
            pts1.append(kp1[m.queryIdx].pt)
            matchesMask[i] = [1, 0]

    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=(255, 0, 0),
                       matchesMask=matchesMask,
                       flags=0)
    img3 = cv.drawMatchesKnn(img1gray, kp1, img2gray, kp2, matches, None, **draw_params)
    plt.imshow(img3)
    plt.show()

    rows, cols = srcImg.shape[:2]
    MIN_MATCH_COUNT = 10
    if len(good) > MIN_MATCH_COUNT:
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 5.0)
        # warp testImg into srcImg's frame (WARP_INVERSE_MAP applies the inverse of M)
        warpImg = cv.warpPerspective(testImg, M, (testImg.shape[1], testImg.shape[0]),
                                     flags=cv.WARP_INVERSE_MAP)

        # find the left and right bounds of the overlap region
        for col in range(0, cols):
            if srcImg[:, col].any() and warpImg[:, col].any():
                left = col
                break
        for col in range(cols - 1, 0, -1):
            if srcImg[:, col].any() and warpImg[:, col].any():
                right = col
                break

        # blend: where only one image has data, copy it; in the overlap,
        # mix linearly with an alpha that ramps from left to right
        res = np.zeros([rows, cols, 3], np.uint8)
        for row in range(0, rows):
            for col in range(0, cols):
                if not srcImg[row, col].any():
                    res[row, col] = warpImg[row, col]
                elif not warpImg[row, col].any():
                    res[row, col] = srcImg[row, col]
                else:
                    srcImgLen = float(abs(col - left))
                    testImgLen = float(abs(col - right))
                    alpha = srcImgLen / (srcImgLen + testImgLen)
                    res[row, col] = np.clip(srcImg[row, col] * (1 - alpha)
                                            + warpImg[row, col] * alpha, 0, 255)

        # opencv is bgr, matplotlib is rgb
        res = cv.cvtColor(res, cv.COLOR_BGR2RGB)
        # show the result
        plt.figure()
        plt.imshow(res)
        plt.show()
    else:
        print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
        matchesMask = None
```
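The per-pixel blending loop is easy to follow but slow in pure Python. Inside the overlap band the same linear alpha blend can be vectorized with NumPy; a minimal sketch, assuming srcImg, warpImg, rows, cols, left and right as defined above (for the rare both-nonzero pixels outside [left, right] it uses a clipped alpha rather than the loop's exact formula):

```
# Column-wise alpha: 0 at the left edge of the overlap, 1 at the right edge.
col_idx = np.arange(cols, dtype=np.float64)
alpha = np.clip((col_idx - left) / max(right - left, 1), 0.0, 1.0)
alpha = alpha[np.newaxis, :, np.newaxis]        # shape (1, cols, 1); broadcasts over rows and channels

src_has = srcImg.any(axis=2, keepdims=True)     # True where srcImg has a pixel
warp_has = warpImg.any(axis=2, keepdims=True)   # True where warpImg has a pixel

blend = np.clip(srcImg * (1 - alpha) + warpImg * alpha, 0, 255).astype(np.uint8)
res = np.where(src_has & warp_has, blend,                  # overlap: blended
               np.where(src_has, srcImg, warpImg))         # otherwise: whichever image has data
```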
                  <ruby id="bdb3f"></ruby>

                  <p id="bdb3f"><cite id="bdb3f"></cite></p>

                    <p id="bdb3f"><cite id="bdb3f"><th id="bdb3f"></th></cite></p><p id="bdb3f"></p>
                      <p id="bdb3f"><cite id="bdb3f"></cite></p>

                        <pre id="bdb3f"></pre>
                        <pre id="bdb3f"><del id="bdb3f"><thead id="bdb3f"></thead></del></pre>

                        <ruby id="bdb3f"><mark id="bdb3f"></mark></ruby><ruby id="bdb3f"></ruby>
                        <pre id="bdb3f"><pre id="bdb3f"><mark id="bdb3f"></mark></pre></pre><output id="bdb3f"></output><p id="bdb3f"></p><p id="bdb3f"></p>

                        <pre id="bdb3f"><del id="bdb3f"><progress id="bdb3f"></progress></del></pre>

                              <ruby id="bdb3f"></ruby>

                              哎呀哎呀视频在线观看