
Image DPI conversion (tif -> jpg), histogram equalization, erosion and dilation, watershed, template matching, line detection


1. Image DPI conversion (tif -> jpg)

1.1 Example 1

import numpy as np
from PIL import Image
import cv2

def test_dp():
    path = './gt_1.tif'
    # img = Image.open(path)
    # print(img.size)
    # print(img.info)
    img = cv2.imread(path)
    img = Image.fromarray(img)
    print(img.size)
    print(img.info)
    img.save('test.jpg', dpi=(300.0, 300.0), quality=40)

def test_dp2():
    path = './test.jpg'
    img = Image.open(path)
    print(img.size)
    print(img.info)

if __name__ == '__main__':
    # test_dp()
    test_dp2()
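One thing to watch in test_dp: cv2.imread returns the pixel array in BGR channel order, so wrapping it directly with Image.fromarray writes a colour-swapped JPEG for colour TIFFs (grayscale input is unaffected). A minimal sketch of the conversion step, assuming a colour input:

import cv2
from PIL import Image

img = cv2.imread('./gt_1.tif')
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # cv2 loads BGR, PIL expects RGB
Image.fromarray(img).save('test.jpg', dpi=(300.0, 300.0), quality=40)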

1.2 Example 2

#coding:utf-8
"""
Keep the generated jpg between 0.95 and 1.0 MB by adjusting the JPEG quality.
"""
import os
from PIL import Image

jpg_min_size = 0.95
jpg_max_size = 1.0
jpg_init_quality = 15
jpg_adjust_step = 3

tif_list_path = './test/data/tif/002.tif'
jpg_list_path = './002.jpg'
img = Image.open(tif_list_path)
img.convert("L").save(jpg_list_path, quality=jpg_init_quality, dpi=(300.0, 300.0))

cur_quality = jpg_init_quality
op_cnt = 0
while (os.path.getsize(jpg_list_path) * 1.0 / 1024 / 1024) < jpg_min_size:  # unit: megabytes
    cur_quality += jpg_adjust_step
    img.convert("L").save(jpg_list_path, quality=cur_quality, dpi=(300.0, 300.0))
    op_cnt += 1
while (os.path.getsize(jpg_list_path) * 1.0 / 1024 / 1024) > jpg_max_size:  # unit: megabytes
    cur_quality -= jpg_adjust_step
    img.convert("L").save(jpg_list_path, quality=cur_quality, dpi=(300.0, 300.0))
    op_cnt += 1

print('tif:{}->jpg:{}, adjustments:{}, final quality:{}, final size:{} MB'.format(
    tif_list_path, jpg_list_path, op_cnt, cur_quality, os.path.getsize(jpg_list_path) / 1024 / 1024))

2. Histogram equalization

Histogram equalization applies a non-linear stretch that redistributes the pixel values so that each gray-level range contains roughly the same number of pixels. The output histogram is approximately flat (piecewise), which increases the contrast of the image.
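To make the mapping concrete, here is a minimal NumPy sketch of the classic equalization transform: every gray level is mapped through the normalized cumulative histogram. cv2.equalizeHist, used in the examples below, does essentially this for 8-bit images.

import numpy as np

def equalize_gray(img):
    """Histogram-equalize a uint8 grayscale image via its cumulative histogram."""
    hist = np.bincount(img.ravel(), minlength=256)          # gray-level histogram
    cdf = hist.cumsum().astype(np.float64)                  # cumulative distribution
    cdf_min = cdf[cdf > 0][0]                               # first occupied bin
    denom = max(cdf[-1] - cdf_min, 1)                       # guard against constant images
    lut = np.clip(np.round((cdf - cdf_min) / denom * 255.0), 0, 255).astype(np.uint8)
    return lut[img]                                         # apply the lookup table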

2.1 Grayscale histogram equalization

import cv2
import matplotlib.pyplot as plt

def Hist():
    img = cv2.imread('./data/timg.jpg', 0)  # read directly as a grayscale image
    res = cv2.equalizeHist(img)
    clahe = cv2.createCLAHE(clipLimit=2, tileGridSize=(10, 10))
    cl1 = clahe.apply(img)
    plt.subplot(131), plt.imshow(img, 'gray')
    plt.subplot(132), plt.imshow(res, 'gray')
    plt.subplot(133), plt.imshow(cl1, 'gray')
    plt.show()
    plt.hist(img.ravel(), 256, [0, 256])
    plt.hist(res.ravel(), 256, [0, 256])
    plt.hist(cl1.ravel(), 256, [0, 256])
    plt.show()

(Figure omitted: histograms of the gray values for the original, equalized and CLAHE results.)

2.2 Color histogram equalization

import cv2

image_src = cv2.imread('./lena.png')
b_image, g_image, r_image = cv2.split(image_src)   # cv2.imread returns channels in B, G, R order

b_image_eq = cv2.equalizeHist(b_image)
g_image_eq = cv2.equalizeHist(g_image)
r_image_eq = cv2.equalizeHist(r_image)

image_eq = cv2.merge((b_image_eq, g_image_eq, r_image_eq))
cv2.imwrite('./image_eq.png', image_eq)
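Equalizing B, G and R independently, as above, can noticeably shift the colour balance. A common alternative, sketched here as an option rather than a drop-in replacement, is to equalize only the luminance channel in YCrCb and leave the chroma untouched:

import cv2

image_src = cv2.imread('./lena.png')
ycrcb = cv2.cvtColor(image_src, cv2.COLOR_BGR2YCrCb)
y, cr, cb = cv2.split(ycrcb)
y_eq = cv2.equalizeHist(y)                            # equalize brightness only
image_eq = cv2.cvtColor(cv2.merge((y_eq, cr, cb)), cv2.COLOR_YCrCb2BGR)
cv2.imwrite('./image_eq_ycrcb.png', image_eq)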

3. Erosion and dilation

Erosion replaces each pixel by the minimum value under the structuring element (thinning bright regions); dilation replaces it by the maximum (thickening them). The example below binarizes an image, dilates it with a 7x7 kernel and then erodes it back, i.e. a morphological closing.

import cv2
import numpy as np
import matplotlib.pyplot as plt

img_path = './image1200/small_disk800/result_result2_800/3272.jpg'
image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
im = cv2.threshold(image, 127, 255, cv2.THRESH_BINARY)[1]
plt.imshow(im)
plt.show()
kernel = np.ones((7, 7), np.uint8)
dilation = cv2.dilate(im, kernel, iterations=1)
plt.imshow(dilation)
plt.show()
im = cv2.erode(dilation, kernel, iterations=1)
plt.imshow(im)
plt.show()

A concrete example on a small array:

a = np.array([[1, 1, 0, 1, 1],
              [1, 1, 0, 1, 1],
              [0, 0, 0, 0, 0],
              [1, 1, 0, 1, 1],
              [1, 1, 0, 1, 1]], dtype=np.float32)
kernel = np.array([[0, 1, 0],
                   [1, 1, 1],
                   [0, 1, 0]], dtype=np.uint8)
b = cv2.erode(a, kernel)
print(b)
c = cv2.dilate(a, kernel)
print(c)
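As a cross-check, the same min/max filtering can be written out by hand. This is a minimal sketch that reuses `a` and `kernel` from the block above and assumes OpenCV's default constant-border handling (the padding never changes the result); for this symmetric cross kernel, reflecting the kernel for dilation makes no difference.

import numpy as np

def morph_manual(a, kernel, op='erode'):
    # erosion = minimum over the kernel footprint, dilation = maximum
    kh, kw = kernel.shape
    ph, pw = kh // 2, kw // 2
    pad_val = np.inf if op == 'erode' else -np.inf       # padding that never wins
    padded = np.pad(a.astype(np.float32), ((ph, ph), (pw, pw)), constant_values=pad_val)
    out = np.empty(a.shape, dtype=np.float32)
    for i in range(a.shape[0]):
        for j in range(a.shape[1]):
            window = padded[i:i + kh, j:j + kw][kernel > 0]
            out[i, j] = window.min() if op == 'erode' else window.max()
    return out

print(morph_manual(a, kernel, 'erode'))    # should match cv2.erode(a, kernel)
print(morph_manual(a, kernel, 'dilate'))   # should match cv2.dilate(a, kernel)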

4. Watershed

1. cv2.connectedComponents(gray_img, connectivity=8): label the connected components of a binary image.
# connectivity is 4 or 8: whether only a pixel's 4-neighbourhood or its full 8-neighbourhood counts as connected (8 is the default)

import cv2
import numpy as np

img = np.array([[0, 255, 0, 0],
                [0, 0, 0, 255],
                [0, 0, 0, 255],
                [255, 0, 0, 0]], np.uint8)
# label the connected components
ret, markers = cv2.connectedComponents(img)
print('===markers:\n', markers)
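If the area, bounding box and centroid of each component are needed as well, cv2.connectedComponentsWithStats returns them in one call; a minimal sketch on the same toy image:

num_labels, labels, stats, centroids = cv2.connectedComponentsWithStats(img, connectivity=8)
for label in range(1, num_labels):            # label 0 is the background
    x, y, w, h, area = stats[label]           # columns: left, top, width, height, area
    print('label {}: bbox=({}, {}, {}, {}), area={}, centroid={}'.format(
        label, x, y, w, h, area, centroids[label]))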

The whole watershed procedure breaks down into the following steps (the full script comes right after the list):

  • Convert the image to grayscale and binarize it.
  • Dilate to obtain the sure background; apply a distance transform to obtain the sure foreground; what remains is the unknown region.
  • Run connected-component labelling on the sure foreground to obtain the marker image.
  • Apply the watershed algorithm to the original image using the markers; the marker image is updated accordingly.
import cv2
import numpy as np
import matplotlib.pyplot as plt

src = cv2.imread("./1.png")
# cv2.imshow("input", src)
gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
plt.hist(gray.ravel(), bins=100, range=[0, 255])
plt.show()
# cv2.THRESH_OTSU picks the threshold automatically
# cv2.THRESH_BINARY_INV inverts black and white
binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# cv2.imshow("binary", binary)
cv2.imwrite('binary.jpg', binary)

# morphology: 3x3 structuring element
se = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
print('se=', se)
# opening removes small white noise
open_img = cv2.morphologyEx(binary, cv2.MORPH_OPEN, se, iterations=2)
print('open_img=', open_img)
# dilation gives the sure background
sure_bg = cv2.dilate(open_img, se, iterations=3)
cv2.imwrite('sure_bg.jpg', sure_bg)
# distance transform: coin centres get the largest values (farthest from the background),
# so thresholding it keeps regions that are definitely coins and already separated -> sure foreground
dist_transform = cv2.distanceTransform(open_img, cv2.DIST_L2, 5)
sure_fg = cv2.threshold(dist_transform, 0.7 * dist_transform.max(), 255, 0)[1]
cv2.imwrite('dist_transform.jpg', dist_transform)
cv2.imwrite('sure_fg.jpg', sure_fg)
# subtracting the foreground from the background leaves the unknown region
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg, sure_fg)
cv2.imwrite('./unknown.jpg', unknown)

# treat each foreground blob as a component: connected-component labelling -> markers
ret, markers = cv2.connectedComponents(sure_fg)
markers = markers + 1
# print('===markers:\n', markers)
# mark the region still to be segmented
markers[unknown == 255] = 0

markers_copy = markers.copy()
markers_copy[markers == 0] = 150  # gray: unknown region
markers_copy[markers == 1] = 0    # black: background
markers_copy[markers > 1] = 255   # white: foreground
markers_copy = np.uint8(markers_copy)
cv2.imwrite('./markers_copy.jpg', markers_copy)

# watershed segmentation
markers = cv2.watershed(src, markers)
src[markers == -1] = [0, 0, 255]
cv2.imwrite('result.jpg', src)

(Figures omitted. The intermediate results, in order: 1. original image; 2. binarized image; 3. histogram; 4. distance transform; 5. sure background; 6. sure foreground; 7. unknown region (background minus foreground); 8. markers image (the pixels from which flooding starts); 9. final result.)

5. Template matching

cv2.matchTemplate returns a single-channel score map in which the brightest location corresponds to the best match. cv2.minMaxLoc() gives the coordinates of that maximum; taking the point as the top-left corner and drawing a rectangle with the template's width and height marks the matched region.

import cv2

lena_path = './lena.png'
face_path = './face.png'
img = cv2.imread(lena_path, 0)
print(img.shape)
template = cv2.imread(face_path, 0)
print(template.shape)
h, w = template.shape[:2]
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
print('res=', res.shape)
cv2.imshow('res', res)
cv2.waitKey(0)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
print('max_val=', max_val)
left_top = max_loc                                   # top-left corner of the best match
right_bottom = (left_top[0] + w, left_top[1] + h)    # bottom-right corner
cv2.rectangle(img, left_top, right_bottom, 255, 2)   # draw the matched rectangle
cv2.imwrite('template.jpg', img)

(Figure omitted: the matchTemplate response map res.)
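When the template can occur more than once, a common extension is to keep every location whose normalized score exceeds a threshold instead of only the maximum. A minimal sketch, with 0.8 as an assumed threshold:

import cv2
import numpy as np

img = cv2.imread('./lena.png', 0)
template = cv2.imread('./face.png', 0)
h, w = template.shape[:2]
res = cv2.matchTemplate(img, template, cv2.TM_CCOEFF_NORMED)
threshold = 0.8                                # assumed score threshold
ys, xs = np.where(res >= threshold)            # all positions scoring above the threshold
for x, y in zip(xs, ys):
    cv2.rectangle(img, (int(x), int(y)), (int(x) + w, int(y) + h), 255, 2)
cv2.imwrite('template_multi.jpg', img)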

6. Line detection

import time
import cv2
import numpy as np
import imutils

def compute_time_deco(function):
    def warpper(*args, **kwargs):
        st = time.time()
        res = function(*args, **kwargs)
        print('{}:spend time:{}'.format(function.__name__, time.time() - st))
        return res
    return warpper

@compute_time_deco
def line_detection_center(image, name='./tmp.jpg'):
    """
    Find the centre of the longest horizontal line.
    Input: image
    Output: the y coordinate of the centre of the longest line (None on failure)
    """
    if image.shape[-1] == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    else:
        gray = image
    try:
        img_bin = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
        kernel_length = max(np.array(img_bin).shape[1] // 20, 1)
        hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
        # morphological operation to detect horizontal lines in the image
        img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=1)
        horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=2)
        # find the longest line
        cnts = cv2.findContours(horizontal_lines_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        contours = cnts[0] if imutils.is_cv2() else cnts[1]
        c_ = sorted(contours, key=cv2.contourArea, reverse=True)
        long_line_cnt = np.squeeze(c_[0])
        x, y, w, h = cv2.boundingRect(long_line_cnt)
        debug = True
        if debug:
            # cv2.imwrite(name, horizontal_lines_img)
            # cv2.rectangle(image, (x, y), (x + w, y + h), color=(0, 0, 255), thickness=3)
            cv2.circle(image, (x + w // 2, y + h // 2), radius=10, color=(255, 0, 0), thickness=5)
            cv2.imwrite(name, image)
            # print('y+h//2:', y + h // 2)
        return y + h // 2
    except:
        return None
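A minimal usage sketch (the input path here is only a placeholder):

image = cv2.imread('./page.jpg')                                   # hypothetical input image
center_y = line_detection_center(image, name='./line_center.jpg')
print('y of the longest horizontal line:', center_y)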

Example: finding the ruled lines of an invoice

#coding:utf-8
"""
fzh created on 2020/04/24
"""
import cv2
import numpy as np
import imutils
import os
import pyzbar.pyzbar as pyzbar
import shutil
from scipy.signal import find_peaks, peak_widths
import time

debug_show = True

def compute_time_deco(function):
    def warpper(*args, **kwargs):
        st = time.time()
        res = function(*args, **kwargs)
        print('{}:spend time:{}'.format(function.__name__, time.time() - st))
        return res
    return warpper

def get_contours_point(thresh, mode=0):
    """
    mode 0: horizontal lines
    mode 1: vertical lines
    """
    points = []
    cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    contours = cnts[0] if imutils.is_cv2() else cnts[1]
    contours = sorted(contours, key=cv2.contourArea, reverse=True)
    for contour in contours:
        x, y, w, h = cv2.boundingRect(contour)
        x1, y1, x2, y2 = x, y, x + w, y + h
        points.append([x1, y1, x2, y2])
    points = np.array(points)
    if len(points):
        if mode == 1:   # vertical
            return points
        else:           # horizontal: sort by y
            return points[points[:, 1].argsort()]
    else:
        return points

# horizontal lines whose centres lie within 10 pixels of each other are treated as the same line and merged
def merge_lines(points):
    centers = []
    for point in points:
        x1, y1, x2, y2 = point
        centers.append([(x1 + x2) // 2, (y1 + y2) // 2])
    opt = [0] * len(centers)
    for i in range(1, len(centers)):
        if abs(centers[i][-1] - centers[i - 1][-1]) <= 10:
            opt[i] = 1
    opt.append(0)
    index = [j for j in range(len(opt)) if opt[j] == 0]
    # print('index:', index)
    new_points = []
    for k in range(len(index) - 1):
        x1, y1, x2, y2 = min(points[index[k]:index[k + 1]][:, 0]), min(points[index[k]:index[k + 1]][:, 1]), \
                         max(points[index[k]:index[k + 1]][:, -2]), max(points[index[k]:index[k + 1]][:, -1])
        new_points.append([x1, y1, x2, y2])
    # print(new_points)
    new_points = np.array(new_points)
    max_length_point = new_points[(new_points[:, -2] - new_points[:, 0]).argsort()[::-1]][0]
    fin_points = new_points[(new_points[:, -2] - new_points[:, 0]).argsort()[::-1]][:5]
    fin_points = fin_points[fin_points[:, 1].argsort()]
    fix_points = []
    # give every kept line the horizontal span of the longest one
    for point in fin_points:
        _, y1, _, y2 = point
        fix_points.append([max_length_point[0], (y1 + y2) // 2, max_length_point[2], (y1 + y2) // 2])
    return fix_points

@compute_time_deco
def get_lines(img, name):
    """
    Input: image.
    Returns horizontal line boxes, vertical line boxes and their intersection points.
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # grayscale
    # ret, binary = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)  # global Otsu threshold
    dst = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 101, 20)

    veri_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 30))
    veri_dilated = cv2.dilate(dst, veri_kernel, iterations=2)
    veri_eroded = cv2.erode(veri_dilated, veri_kernel, iterations=2)
    veri_thresh = cv2.adaptiveThreshold(veri_eroded, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 5)
    # cv2.imwrite('./veri_thresh.jpg', veri_thresh)

    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (30, 1))
    hori_dilated = cv2.dilate(dst, hori_kernel, iterations=2)
    hori_eroded = cv2.erode(hori_dilated, hori_kernel, iterations=2)
    hori_thresh = cv2.adaptiveThreshold(hori_eroded, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, 11, 5)
    # cv2.imwrite('./hori_thresh.jpg', hori_thresh)

    hori_points = get_contours_point(hori_thresh, mode=0)  # 0: horizontal
    veri_points = get_contours_point(veri_thresh, mode=1)  # 1: vertical
    hori_points = merge_lines(hori_points)

    cross_points = []
    delta = 10
    for veri_point in veri_points:
        vx1, vy1, vx2, vy2 = veri_point
        for hori_point in hori_points:
            hx1, hy1, hx2, hy2 = hori_point
            if (hx1 - delta) <= vx1 <= (hx2 + delta) and (vy1 - delta) <= hy1 <= (vy2 + delta):
                cross_points.append((int(vx1), int(hy1)))
    cross_points = np.array(cross_points)
    # print('all intersection points:', cross_points)
    print('total intersections:', len(cross_points))

    if debug_show:
        for i, point in enumerate(hori_points):
            # if i < 1:
            x1, y1, x2, y2 = point
            cv2.rectangle(img, (x1, y1), (x2, y2), color=(255, 0, 0), thickness=2)
        for i, point in enumerate(veri_points):
            # if i < 1:
            x1, y1, x2, y2 = point
            cv2.rectangle(img, (x1, y1), (x2, y2), color=(0, 255, 0), thickness=2)
        if len(cross_points):
            s = np.sum(cross_points, axis=1)
            table_x1, table_y1 = cross_points[np.argmin(s)]
            table_x2, table_y2 = cross_points[np.argmax(s)]
            cv2.circle(img, (table_x1, table_y1), 10, (0, 0, 255), thickness=2)
            cv2.circle(img, (table_x2, table_y2), 10, (0, 0, 255), thickness=2)
        cv2.imwrite(name, img)
    return hori_points, veri_points, cross_points

def test_lines():
    path = './電子發票方向矯正后的'
    output_path = './電子發票畫邊和角點'
    if not os.path.exists(output_path):
        os.mkdir(output_path)
    imgs_list_path = [os.path.join(path, i) for i in os.listdir(path)]
    for i, img_list_path in enumerate(imgs_list_path):
        # if i < 1:
        # img_list_path = './電子發票方向矯正后的/test.png'
        img = cv2.imread(img_list_path)
        name = os.path.join(output_path, img_list_path.split('/')[-1])
        get_lines(img, name)

if __name__ == '__main__':
    test_lines()
