import math
import sys

import cv2
import numpy as np


class NormalizeImage(object):
    """Normalize an image: scale the pixel values, then subtract the mean
    and divide by the std.
    """

    def __init__(self, scale=None, mean=None, std=None, order='chw', **kwargs):
        if isinstance(scale, str):
            # The scale may be passed as an expression string from a config
            # file, e.g. "1./255.".
            scale = eval(scale)
        self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
        mean = mean if mean is not None else [0.485, 0.456, 0.406]
        std = std if std is not None else [0.229, 0.224, 0.225]

        shape = (3, 1, 1) if order == 'chw' else (1, 1, 3)
        self.mean = np.array(mean).reshape(shape).astype('float32')
        self.std = np.array(std).reshape(shape).astype('float32')

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)
        assert isinstance(
            img, np.ndarray), "invalid input 'img' in NormalizeImage"
        data['image'] = (
            img.astype('float32') * self.scale - self.mean) / self.std
        return data


class DetResizeForTest(object):
    def __init__(self, **kwargs):
        super(DetResizeForTest, self).__init__()
        self.resize_type = 0
        self.keep_ratio = False
        if 'image_shape' in kwargs:
            # resize_type 1: resize to a fixed image_shape, optionally
            # keeping the aspect ratio.
            self.image_shape = kwargs['image_shape']
            self.resize_type = 1
            if 'keep_ratio' in kwargs:
                self.keep_ratio = kwargs['keep_ratio']
        elif 'limit_side_len' in kwargs:
            # resize_type 0: constrain the min/max side to limit_side_len.
            self.limit_side_len = kwargs['limit_side_len']
            self.limit_type = kwargs.get('limit_type', 'min')
        elif 'resize_long' in kwargs:
            # resize_type 2: scale so the longer side matches resize_long,
            # then round up to a stride of 128.
            self.resize_type = 2
            self.resize_long = kwargs.get('resize_long', 960)
        else:
            self.limit_side_len = 736
            self.limit_type = 'min'

    def __call__(self, data):
        img = data['image']
        src_h, src_w, _ = img.shape
        if sum([src_h, src_w]) < 64:
            # Pad images whose sides sum to less than 64 up to at least 32x32.
            img = self.image_padding(img)

        if self.resize_type == 0:
            # img, shape = self.resize_image_type0(img)
            img, [ratio_h, ratio_w] = self.resize_image_type0(img)
        elif self.resize_type == 2:
            img, [ratio_h, ratio_w] = self.resize_image_type2(img)
        else:
            # img, shape = self.resize_image_type1(img)
            img, [ratio_h, ratio_w] = self.resize_image_type1(img)
        data['image'] = img
        data['shape'] = np.array([src_h, src_w, ratio_h, ratio_w])
        return data

    def image_padding(self, im, value=0):
        h, w, c = im.shape
        im_pad = np.zeros((max(32, h), max(32, w), c), np.uint8) + value
        im_pad[:h, :w, :] = im
        return im_pad

    def resize_image_type1(self, img):
        resize_h, resize_w = self.image_shape
        ori_h, ori_w = img.shape[:2]  # (h, w, c)
        if self.keep_ratio is True:
            resize_w = ori_w * resize_h / ori_h
            N = math.ceil(resize_w / 32)
            resize_w = N * 32
        ratio_h = float(resize_h) / ori_h
        ratio_w = float(resize_w) / ori_w
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        # return img, np.array([ori_h, ori_w])
        return img, [ratio_h, ratio_w]

    def resize_image_type0(self, img):
        """
        Resize the image to a size that is a multiple of 32, as required by the network.
        args:
            img(array): array with shape [h, w, c]
        return(tuple):
            img, (ratio_h, ratio_w)
        """
        limit_side_len = self.limit_side_len
        h, w, c = img.shape

        # limit the max side
        if self.limit_type == 'max':
            if max(h, w) > limit_side_len:
                if h > w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'min':
            if min(h, w) < limit_side_len:
                if h < w:
                    ratio = float(limit_side_len) / h
                else:
                    ratio = float(limit_side_len) / w
            else:
                ratio = 1.
        elif self.limit_type == 'resize_long':
            ratio = float(limit_side_len) / max(h, w)
        else:
            raise ValueError(
                "unsupported limit_type: {}".format(self.limit_type))
        resize_h = int(h * ratio)
        resize_w = int(w * ratio)

        # Round both sides to multiples of 32, with a floor of 32.
        resize_h = max(int(round(resize_h / 32) * 32), 32)
        resize_w = max(int(round(resize_w / 32) * 32), 32)

        if int(resize_w) <= 0 or int(resize_h) <= 0:
            return None, (None, None)
        try:
            img = cv2.resize(img, (int(resize_w), int(resize_h)))
        except cv2.error:
            print(img.shape, resize_w, resize_h)
            sys.exit(1)
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)
        return img, [ratio_h, ratio_w]

    def resize_image_type2(self, img):
        h, w, _ = img.shape

        resize_w = w
        resize_h = h

        if resize_h > resize_w:
            ratio = float(self.resize_long) / resize_h
        else:
            ratio = float(self.resize_long) / resize_w

        resize_h = int(resize_h * ratio)
        resize_w = int(resize_w * ratio)

        max_stride = 128
        resize_h = (resize_h + max_stride - 1) // max_stride * max_stride
        resize_w = (resize_w + max_stride - 1) // max_stride * max_stride
        img = cv2.resize(img, (int(resize_w), int(resize_h)))
        ratio_h = resize_h / float(h)
        ratio_w = resize_w / float(w)

        return img, [ratio_h, ratio_w]


class ToCHWImage(object):
    """Convert an HWC image to CHW layout."""

    def __init__(self, **kwargs):
        pass

    def __call__(self, data):
        img = data['image']
        from PIL import Image
        if isinstance(img, Image.Image):
            img = np.array(img)
        data['image'] = img.transpose((2, 0, 1))
        return data


class KeepKeys(object):
    """Keep only the listed keys, returned as a list in the given order."""

    def __init__(self, keep_keys, **kwargs):
        self.keep_keys = keep_keys

    def __call__(self, data):
        data_list = []
        for key in self.keep_keys:
            data_list.append(data[key])
        return data_list
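

# Usage sketch (not part of the original module): chains the operators above
# into a simple detection preprocessing pipeline. The dummy image, the config
# values (limit_side_len=960, limit_type='max', the ImageNet mean/std), and
# the __main__ guard are illustrative assumptions, not something the code
# above prescribes.
if __name__ == "__main__":
    ops = [
        DetResizeForTest(limit_side_len=960, limit_type='max'),
        NormalizeImage(scale=1.0 / 255.0,
                       mean=[0.485, 0.456, 0.406],
                       std=[0.229, 0.224, 0.225],
                       order='hwc'),
        ToCHWImage(),
        KeepKeys(keep_keys=['image', 'shape']),
    ]

    # A random HWC uint8 image stands in for a real photo.
    data = {'image': np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)}
    for op in ops:
        data = op(data)
    image, shape = data
    # image: CHW float32 tensor; shape: [src_h, src_w, ratio_h, ratio_w].
    print(image.shape, image.dtype, shape)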