
Image Enhancement: Summary and Code



Contrast Adjustment

  1. Histogram equalization / contrast-limited adaptive histogram equalization (CLAHE) / gamma transform / log transform
    1. Principle of contrast-limited adaptive histogram equalization
    2. Implementation of contrast-limited adaptive histogram equalization
  2. Code implementation
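For reference, the point transforms used in the code below are: gamma correction, out = 255·(in/255)^γ with γ = 0.4 here (γ < 1 brightens mid-tones, γ > 1 darkens them), and the log transform, out = 255·log10(1 + 9·in/255), which compresses highlights and lifts dark regions. Histogram equalization and CLAHE instead redistribute the lightness histogram, globally or per tile.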
import cv2
import matplotlib.pyplot as plt
import numpy as np


def equalizeHist(im):
    # Equalizing the B, G, R channels separately gives poor results
    # (b, g, r) = cv2.split(im)
    # bH = cv2.equalizeHist(b)
    # gH = cv2.equalizeHist(g)
    # rH = cv2.equalizeHist(r)
    # dst = cv2.merge((bH, gH, rH))

    # Work in HLS space; the L channel (index 1) carries lightness
    hls = cv2.cvtColor(im, cv2.COLOR_BGR2HLS)
    l = hls[:, :, 1]
    l_ = cv2.equalizeHist(l)
    hls[:, :, 1] = l_
    dst = cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)

    # Alternative: work in Lab space, where L is the lightness channel
    # lab = cv2.cvtColor(im, cv2.COLOR_BGR2Lab)
    # l = lab[:, :, 0]
    # l_ = cv2.equalizeHist(l)
    # lab[:, :, 0] = l_
    # dst = cv2.cvtColor(lab, cv2.COLOR_Lab2BGR)

    return dst


def clahe(im):
    '''
    Contrast-limited adaptive histogram equalization (CLAHE) for a color image
    '''
    hls = cv2.cvtColor(im, cv2.COLOR_BGR2HLS)
    l = hls[:, :, 1]
    clahe_op = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(16, 16))
    l_ = clahe_op.apply(l)

    hls[:, :, 1] = l_
    dst = cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)

    return dst


def logTransform(im):
    img_norm = im/255.0
    img_log = np.log10(1+9*img_norm)*255
    img_log = img_log.astype(np.uint8)

    return img_log


def gammaTransform(im):

    # hls = cv2.cvtColor(im, cv2.COLOR_BGR2HLS)
    # l = hls[:, :, 1]

    # l_ = l/255.0  # divide by 255.0 (float) to avoid integer truncation
    # l_gamma = np.power(l_, 0.4)*255.0
    # l_gamma = l_gamma.astype(np.uint8)
    # hls[:, :, 1] = l_gamma
    # dst = cv2.cvtColor(hls, cv2.COLOR_HLS2BGR)

    img_norm = im/255.0  # divide by 255.0 (float) to avoid integer truncation
    dst = np.power(img_norm, 0.4)*255.0
    dst = dst.astype(np.uint8)

    return dst


# im = cv2.imread("test4.jpg")
# dst = gammaTransform(im)
# cv2.imshow("dst", dst)
# cv2.imshow("ori", im)
# cv2.waitKey()
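
A minimal comparison sketch, assuming test4.jpg is available locally (the same file used in the commented-out test above); it uses the matplotlib import to show the four adjustments next to the original:

im = cv2.imread("test4.jpg")
results = {
    "original": im,
    "equalizeHist": equalizeHist(im),
    "clahe": clahe(im),
    "log": logTransform(im),
    "gamma": gammaTransform(im),
}
for i, (name, out) in enumerate(results.items(), 1):
    plt.subplot(1, 5, i)
    plt.imshow(cv2.cvtColor(out, cv2.COLOR_BGR2RGB))  # matplotlib expects RGB order
    plt.title(name)
    plt.axis("off")
plt.show()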


Dehazing

  1. Dark channel prior dehazing
  2. Code implementation
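The code below follows the standard haze imaging model I(x) = J(x)·t(x) + A·(1 − t(x)), where I is the observed image, J the haze-free scene, t the transmission and A the atmospheric light. The quantity V1 estimated below is the atmospheric veil A·(1 − t), refined with a guided filter, so the recovery step reduces to J = (I − V1) / (1 − V1/A), which is exactly the color-correction line in deHaze.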
import cv2
import numpy as np
from adjustContrast import logTransform, gammaTransform


def zmMinFilterGray(src, r=7):
    '''Minimum filter; r is the filter radius.'''
    '''A recursive pure-NumPy version (Python 2 style, kept for reference):
    if r <= 0:
        return src
    h, w = src.shape[:2]
    I = src
    res = np.minimum(I  , I[[0]+range(h-1)  , :])
    res = np.minimum(res, I[range(1,h)+[h-1], :])
    I = res
    res = np.minimum(I  , I[:, [0]+range(w-1)])
    res = np.minimum(res, I[:, range(1,w)+[w-1]])
    return zmMinFilterGray(res, r-1)'''
    return cv2.erode(src, np.ones((2*r+1, 2*r+1)))  # OpenCV's erode is more efficient


def guidedfilter(I, p, r, eps):
    '''Guided filter; adapted from reference MATLAB code found online.'''
    height, width = I.shape
    m_I = cv2.boxFilter(I, -1, (r, r))
    m_p = cv2.boxFilter(p, -1, (r, r))
    m_Ip = cv2.boxFilter(I*p, -1, (r, r))
    cov_Ip = m_Ip-m_I*m_p

    m_II = cv2.boxFilter(I*I, -1, (r, r))
    var_I = m_II-m_I*m_I

    a = cov_Ip/(var_I+eps)
    b = m_p-a*m_I

    m_a = cv2.boxFilter(a, -1, (r, r))
    m_b = cv2.boxFilter(b, -1, (r, r))
    return m_a*I+m_b


def getV1(m, r, eps, w, maxV1):  # input: color image with values in [0, 1]
    '''Estimate the atmospheric veil V1 and the atmospheric light A; V1 = A*(1-t), i.e. t = 1 - V1/A.'''
    V1 = np.min(m, 2)  # dark channel image
    V1 = guidedfilter(V1, zmMinFilterGray(V1, 7), r, eps)  # refine the veil with the guided filter
    bins = 2000
    ht = np.histogram(V1, bins)  # estimate the atmospheric light A from the brightest veil pixels
    d = np.cumsum(ht[0])/float(V1.size)
    for lmax in range(bins-1, 0, -1):
        if d[lmax] <= 0.999:
            break
    A = np.mean(m, 2)[V1 >= ht[1][lmax]].max()

    V1 = np.minimum(V1*w, maxV1)  # limit the value range of the veil

    return V1, A


def deHaze(m, r=81, eps=0.001, w=0.95, maxV1=0.80, bGamma=False):
    Y = np.zeros(m.shape)
    V1, A = getV1(m, r, eps, w, maxV1)  # atmospheric veil and atmospheric light
    for k in range(3):
        Y[:, :, k] = (m[:, :, k]-V1)/(1-V1/A)  # recover the scene radiance (color correction)
    Y = np.clip(Y, 0, 1)
    if bGamma:
        Y = Y**(np.log(0.5)/np.log(Y.mean()))  # optional gamma correction, off by default
    return Y


if __name__ == '__main__':
    src = cv2.imread('test2.jpg')
    dst = deHaze(src/255.0)*255
    dst = dst.astype(np.uint8)

    dst2 = logTransform(dst)
    cv2.imshow("src", src)
    cv2.imshow("dehaze", dst)
    cv2.imshow("dehaze2", dst2)
    cv2.waitKey(0)
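
If opencv-contrib-python is installed, the hand-written guidedfilter can also be swapped for OpenCV's built-in guided filter. A sketch of this optional substitution for the refinement line inside getV1, assuming the cv2.ximgproc module is available (not part of the original code):

    V1 = cv2.ximgproc.guidedFilter(V1.astype(np.float32),
                                   zmMinFilterGray(V1, 7).astype(np.float32),
                                   r, eps)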

Homomorphic Filtering and Retinex Enhancement

  1. Homomorphic filtering
  2. Retinex enhancement
  3. Code implementation
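In short: single-scale retinex keeps the log-domain residual after removing a Gaussian estimate of the illumination, R(x, y) = log I(x, y) − log(G_sigma * I)(x, y); multi-scale retinex averages this over the sigma_list in the config below, and MSRCR/MSRCP add color restoration or color balancing on top. Homomorphic filtering pursues the same goal in the frequency domain, attenuating low frequencies (slowly varying illumination) while boosting high frequencies (reflectance detail).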
import numpy as np
import cv2

config = {
    "sigma_list": [15, 80, 250],
    "G": 5.0,
    "b": 25.0,
    "alpha": 125.0,
    "beta": 46.0,
    "low_clip": 0.01,
    "high_clip": 0.99
}

def homomorphic_filter(src, d0=10, r1=0.5, rh=2, c=4, h=2.0, l=0.5):
    gray = src.copy()
    if len(src.shape) > 2:
        gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
    gray = np.float64(gray) 
    rows, cols = gray.shape
    gray_fft = np.fft.fft2(gray)
    gray_fftshift = np.fft.fftshift(gray_fft)
    dst_fftshift = np.zeros_like(gray_fftshift)
    M, N = np.meshgrid(np.arange(-cols // 2, cols // 2), np.arange(-rows//2, rows//2))
    D = np.sqrt(M ** 2 + N ** 2)
    Z = (rh - r1) * (1 - np.exp(-c * (D ** 2 / d0 ** 2))) + r1
    dst_fftshift = Z * gray_fftshift
    dst_fftshift = (h - l) * dst_fftshift + l
    dst_ifftshift = np.fft.ifftshift(dst_fftshift)
    dst_ifft = np.fft.ifft2(dst_ifftshift)
    dst = np.real(dst_ifft)
    dst = np.uint8(np.clip(dst, 0, 255))
    return dst

def singleScaleRetinex(img, sigma):

    retinex = np.log10(img) - np.log10(cv2.GaussianBlur(img, (0, 0), sigma))

    return retinex


def multiScaleRetinex(img, sigma_list):

    retinex = np.zeros_like(img)
    for sigma in sigma_list:
        retinex += singleScaleRetinex(img, sigma)

    retinex = retinex / len(sigma_list)

    return retinex


def colorRestoration(img, alpha, beta):

    img_sum = np.sum(img, axis=2, keepdims=True)  # (h,w,1)
    color_restoration = beta * (np.log10(alpha * img) - np.log10(img_sum))  # (h,w,3)

    return color_restoration


def simplestColorBalance(img, low_clip, high_clip):

    total = img.shape[0] * img.shape[1]
    for i in range(img.shape[2]):
        unique, counts = np.unique(img[:, :, i], return_counts=True)
        current = 0
        for u, c in zip(unique, counts):
            if float(current) / total < low_clip:
                low_val = u
            if float(current) / total < high_clip:
                high_val = u
            current += c

        img[:, :, i] = np.maximum(np.minimum(img[:, :, i], high_val), low_val)

    return img


def MSRCR(img, sigma_list, G, b, alpha, beta, low_clip, high_clip):

    img = np.float64(img) + 1.0

    img_retinex = multiScaleRetinex(img, sigma_list)
    img_color = colorRestoration(img, alpha, beta)
    img_msrcr = G * (img_retinex * img_color + b)

    for i in range(img_msrcr.shape[2]):
        img_msrcr[:, :, i] = (img_msrcr[:, :, i] - np.min(img_msrcr[:, :, i])) / \
                             (np.max(img_msrcr[:, :, i]) - np.min(img_msrcr[:, :, i])) * 255

    img_msrcr = np.uint8(np.minimum(np.maximum(img_msrcr, 0), 255))
    img_msrcr = simplestColorBalance(img_msrcr, low_clip, high_clip)

    return img_msrcr


def automatedMSRCR(img, sigma_list):

    img = np.float64(img) + 1.0
    img_retinex = multiScaleRetinex(img, sigma_list)

    for i in range(img_retinex.shape[2]):
        unique, count = np.unique(
            np.int32(img_retinex[:, :, i] * 100), return_counts=True)
        zero_count = 0  # fallback so the thresholds below stay inactive if no bin is exactly zero
        for u, c in zip(unique, count):
            if u == 0:
                zero_count = c
                break

        low_val = unique[0] / 100.0
        high_val = unique[-1] / 100.0
        for u, c in zip(unique, count):
            if u < 0 and c < zero_count * 0.1:
                low_val = u / 100.0
            if u > 0 and c < zero_count * 0.1:
                high_val = u / 100.0
                break

        img_retinex[:, :, i] = np.maximum(np.minimum(
            img_retinex[:, :, i], high_val), low_val)

        img_retinex[:, :, i] = (img_retinex[:, :, i] - np.min(img_retinex[:, :, i])) / \
                               (np.max(img_retinex[:, :, i]) - np.min(img_retinex[:, :, i])) * 255

    img_retinex = np.uint8(img_retinex)

    return img_retinex


def MSRCP(img, sigma_list, low_clip, high_clip):

    img = np.float64(img) + 1.0

    intensity = np.sum(img, axis=2) / img.shape[2]

    retinex = multiScaleRetinex(intensity, sigma_list)

    intensity = np.expand_dims(intensity, 2)
    retinex = np.expand_dims(retinex, 2)

    intensity1 = simplestColorBalance(retinex, low_clip, high_clip)

    intensity1 = (intensity1 - np.min(intensity1)) / \
                 (np.max(intensity1) - np.min(intensity1)) * 255.0 + 1.0

    img_msrcp = np.zeros_like(img)

    for y in range(img_msrcp.shape[0]):
        for x in range(img_msrcp.shape[1]):
            B = np.max(img[y, x])
            A = np.minimum(256.0 / B, intensity1[y, x, 0] / intensity[y, x, 0])
            img_msrcp[y, x, 0] = A * img[y, x, 0]
            img_msrcp[y, x, 1] = A * img[y, x, 1]
            img_msrcp[y, x, 2] = A * img[y, x, 2]

    img_msrcp = np.uint8(img_msrcp - 1.0)

    return img_msrcp


im = cv2.imread("test3.jpg")
msrcr = MSRCR(im,
              config['sigma_list'],
              config['G'],
              config['b'],
              config['alpha'],
              config['beta'],
              config['low_clip'],
              config['high_clip'])

amsrcr = automatedMSRCR(
    im,
    config['sigma_list']
)

msrcp = MSRCP(im,
              config['sigma_list'],
              config['low_clip'],
              config['high_clip'])
cv2.imshow("msrcr", msrcr)
cv2.imshow("amsrcr", amsrcr)
cv2.imshow("msrcp", msrcp)
cv2.waitKey()
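
homomorphic_filter is defined above but never exercised by the test block; a minimal call on the same test image (the output is single-channel, and d0/rh/r1 typically need per-image tuning):

homo = homomorphic_filter(im)
cv2.imshow("homomorphic", homo)
cv2.waitKey()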

Auto Levels

  1. Auto-levels dehazing
  2. Code implementation
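The approach implemented below: for each channel, find from the histogram the levels at which the darkest minrate% and brightest maxrate% of pixels are clipped (ComputeMinLevel/ComputeMaxLevel), then linearly stretch the remaining range via newmap[i] = (i − minlevel) / (maxlevel − minlevel) · 255 (LinearMap), which removes the grey veil typical of hazy images.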
import numpy as np
import cv2

'''
autoLevel: auto-levels based dehazing
'''


def nothing(*arg):
    pass


MAX_VALUE = 30
cv2.namedWindow("autoLevel", cv2.WINDOW_NORMAL)
# trackbars for the clipping rates
cv2.createTrackbar("min", "autoLevel", 0, MAX_VALUE, nothing)
cv2.createTrackbar("max", "autoLevel", 0, MAX_VALUE, nothing)


def ComputeHist(img):
    h, w = img.shape
    hist, bin_edge = np.histogram(img.reshape(1, w*h), bins=list(range(257)))
    return hist


def ComputeMinLevel(hist, rate, pnum):
    sum = 0
    for i in range(256):
        sum += hist[i]
        if (sum >= (pnum * rate * 0.01)):
            return i


def ComputeMaxLevel(hist, rate, pnum):
    sum = 0
    for i in range(256):
        sum += hist[255-i]
        if (sum >= (pnum * rate * 0.01)):
            return 255-i


def LinearMap(minlevel, maxlevel):
    if (minlevel >= maxlevel):
        return np.array([])  # empty array so the caller's .size check works (a plain list has no .size)
    else:
        newmap = np.zeros(256)
        for i in range(256):
            if (i < minlevel):
                newmap[i] = 0
            elif (i > maxlevel):
                newmap[i] = 255
            else:
                newmap[i] = (i-minlevel)/(maxlevel-minlevel) * 255

        return newmap


def autoLevel(img, minrate, maxrate):
    h, w, d = img.shape
    newimg = np.zeros([h, w, d])
    for i in range(d):
        imgmin = np.min(img[:, :, i])
        imgmax = np.max(img[:, :, i])
        imghist = ComputeHist(img[:, :, i])
        minlevel = ComputeMinLevel(imghist, minrate, h*w)
        maxlevel = ComputeMaxLevel(imghist, maxrate, h*w)
        newmap = LinearMap(minlevel, maxlevel)
        if (newmap.size == 0):
            continue
        for j in range(h):
            newimg[j, :, i] = newmap[img[j, :, i]]

    return newimg


img = cv2.imread('test3.jpg')
while True:
    minval = cv2.getTrackbarPos('min', 'autoLevel')
    maxval = cv2.getTrackbarPos('max', 'autoLevel')
    newimg = autoLevel(img, minval, maxval)

    cv2.imshow('autoLevel', newimg/255)
    ch = cv2.waitKey(5)
    # press ESC to exit
    if ch == 27:
        break

Color Enhancement

  1. Saturation adjustment
  2. Code implementation
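colorEnhance below blends each pixel with its luminance, dst = (1 − α)·Y + α·I with Y = 0.2125·R + 0.7154·G + 0.0721·B (the weight vector W is stored in BGR order to match OpenCV), where α is the slider value divided by 100: α = 1 leaves the image unchanged, α < 1 desaturates towards grey, and α > 1 pushes colors away from grey, i.e. increases saturation.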
import numpy as np
import cv2



image = cv2.imread('test.jpg')
alpha = 150
MAX_VALUE = 200
# window for the saturation / color-enhancement adjustment
cv2.namedWindow("ColorEnhance", cv2.WINDOW_NORMAL)


def nothing(*arg):
    pass


def adjustSaturation(image, alpha):

    # normalize the image to [0, 1] as float
    image2 = image.astype(np.float32)
    image2 = image2 / 255.0
    # convert BGR to HLS; saturation is easier to adjust in HLS space
    hlsImg = cv2.cvtColor(image2, cv2.COLOR_BGR2HLS)
    hlsImg[:, :, 2] = pow(hlsImg[:, :, 2], alpha/100)  # power curve on the S channel
    hlsImg[:, :, 2][hlsImg[:, :, 2] > 1] = 1
    dst = cv2.cvtColor(hlsImg, cv2.COLOR_HLS2BGR)

    return dst


def colorEnhance(image, alpha):
    image2 = image.astype(np.float32)
    image2 = image2 / 255.0

    W = np.array([0.0721, 0.7154, 0.2125])
    intensity = image2.dot(W)
    intensity = np.expand_dims(intensity, 2)
    dst = intensity*(1-alpha/100)+image2*(alpha/100)

    dst = np.clip(dst, 0, 1)
    dst = dst*255
    dst = dst.astype(np.uint8)

    '''
        With W = [0.2125, 0.7154, 0.0721] (RGB order) this is equivalent to the matrix transform
        M = [[alpha+w1*(1-alpha), w2*(1-alpha),       w3*(1-alpha)],
             [w1*(1-alpha),       alpha+w2*(1-alpha), w3*(1-alpha)],
             [w1*(1-alpha),       w2*(1-alpha),       alpha+w3*(1-alpha)]]
        applied as [R', G', B']^T = M * [R, G, B]^T
        (alpha here means the blend factor alpha/100 used above)
    '''

    return dst


# trackbar for alpha
cv2.createTrackbar("alpha", "ColorEnhance", alpha, MAX_VALUE, nothing)
dst = np.zeros(image.shape, np.float32)
while True:
    alpha = cv2.getTrackbarPos('alpha', 'ColorEnhance')
    dst = colorEnhance(image, alpha)
    cv2.imshow("ColorEnhance", dst)

    ch = cv2.waitKey(5)
    # press ESC to exit
    if ch == 27:
        break
    elif ch == ord('s'):
        # press s to save and exit; colorEnhance already returns uint8 in [0, 255],
        # so no further scaling is needed before writing
        cv2.imwrite("dst.jpg", dst)
        break

# close all windows
cv2.destroyAllWindows()
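
adjustSaturation is defined above but not wired to the trackbar loop; a minimal one-off call to compare it against colorEnhance (alpha is whatever value the slider last held):

sat = adjustSaturation(image, alpha)  # float result in [0, 1]
cv2.imshow("adjustSaturation", (sat * 255).astype(np.uint8))
cv2.waitKey()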


'''
Taken from the Python standard library colorsys module: rgb_to_hls and hls_to_rgb
'''
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0


def rgb_to_hls(r, g, b):
    maxc = max(r, g, b)
    minc = min(r, g, b)
    #  Can optimize (maxc+minc) and (maxc-minc)
    l = (minc+maxc)/2.0
    if minc == maxc:
        return 0.0, l, 0.0
    if l <= 0.5:
        s = (maxc-minc) / (maxc+minc)
    else:
        s = (maxc-minc) / (2.0-maxc-minc)
    rc = (maxc-r) / (maxc-minc)
    gc = (maxc-g) / (maxc-minc)
    bc = (maxc-b) / (maxc-minc)
    if r == maxc:
        h = bc-gc
    elif g == maxc:
        h = 2.0+rc-bc
    else:
        h = 4.0+gc-rc
    h = (h/6.0) % 1.0
    return h, l, s


def hls_to_rgb(h, l, s):
    if s == 0.0:
        return l, l, l
    if l <= 0.5:
        m2 = l * (1.0+s)
    else:
        m2 = l+s-(l*s)
    m1 = 2.0*l - m2
    return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))


def _v(m1, m2, hue):
    hue = hue % 1.0
    if hue < ONE_SIXTH:
        return m1 + (m2-m1)*hue*6.0
    if hue < 0.5:
        return m2
    if hue < TWO_THIRD:
        return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
    return m1

Image Sharpening

USM (unsharp mask) sharpening / Laplacian enhancement; a brief sketch of both follows below.
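
A minimal sketch of the two techniques in the same style as the snippets above; sigma, amount and kernel size are illustrative defaults rather than tuned values:

import cv2
import numpy as np


def usmSharpen(im, sigma=3, amount=1.5):
    # Unsharp masking: blur to isolate detail, then add the detail back scaled by `amount`:
    # out = im + amount * (im - blur) = (1 + amount) * im - amount * blur
    blur = cv2.GaussianBlur(im, (0, 0), sigma)
    return cv2.addWeighted(im, 1 + amount, blur, -amount, 0)


def laplacianSharpen(im):
    # Laplacian enhancement: subtract the second-derivative response to emphasize edges
    lap = cv2.Laplacian(im, cv2.CV_16S, ksize=3)
    sharp = np.int16(im) - lap
    return np.uint8(np.clip(sharp, 0, 255))


# im = cv2.imread("test.jpg")  # any of the test images used above
# cv2.imshow("usm", usmSharpen(im))
# cv2.imshow("laplacian", laplacianSharpen(im))
# cv2.waitKey()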


Original article: https://www.cnblogs.com/buyizhiyou/p/13224010.html
