update for pylint

This commit is contained in:
yzx835
2023-11-12 19:30:00 +08:00
parent 5c5d8f923f
commit 5ac824bee4
9 changed files with 106 additions and 100 deletions

View File

@@ -26,7 +26,6 @@ import mindspore as ms
from mindspore import Tensor
from mindspore import context
from mindspore import dtype as mstype
-from mindspore.context import ParallelMode
from mindspore.dataset import GeneratorDataset
import mindspore.dataset.vision as vision
from model_utils.config import config
@@ -251,11 +250,11 @@ class DetectionEngine:
try:
self.file_path = self.save_prefix + "/predict" + t + ".json"
with os.fdopen(
os.open(
"testfile.txt", flags=os.O_WRONLY, modes=stat.S_IWUSR, mode=0o777
),
"w",
) as f:
json.dump(self.det_boxes, f)
except IOError as e:
raise RuntimeError(
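A note on this hunk: the `os.fdopen(os.open(...))` construction exists so the output file is created with explicit, restrictive permissions rather than a bare `open()`. As written, though, `os.open` has no `modes` keyword (its signature is `os.open(path, flags, mode=0o777)`), so the call would raise a TypeError. A minimal sketch of the intended pattern, with a hypothetical helper name:

```python
import json
import os
import stat

def dump_json(obj, path):
    # O_WRONLY | O_CREAT | O_TRUNC creates or overwrites the file;
    # the third argument restricts a newly created file to owner
    # read/write instead of a umask-masked 0o777.
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC,
                 stat.S_IWUSR | stat.S_IRUSR)
    with os.fdopen(fd, "w") as f:
        json.dump(obj, f)
```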
@@ -373,8 +372,8 @@ class DetectionEngine:
confidence = cls_emb[flag] * conf
for x_lefti, y_lefti, wi, hi, confi, clsi in zip(
x_top_left, y_top_left, w, h, confidence, cls_argmax
):
if confi < self.eval_ignore_threshold:
continue
if img_id not in self.results:
@@ -404,8 +403,8 @@ def modelarts_pre_process():
s_time = time.time()
if not os.path.exists(
os.path.join(save_dir, config.modelarts_dataset_unzip_name)
):
zip_isexist = zipfile.is_zipfile(zip_file)
if zip_isexist:
fz = zipfile.ZipFile(zip_file, "r")
@@ -444,8 +443,8 @@ def modelarts_pre_process():
# Each server contains 8 devices as most.
if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(
sync_lock
):
print("Zip file path: ", zip_file_1)
print("Unzip file save dir: ", save_dir_1)
unzip(zip_file_1, save_dir_1)
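The sync-lock logic above elects one device per server (each server hosts at most 8 devices) to unzip the dataset while the other devices wait. A condensed sketch of the pattern; `unzip`, `get_device_id`, and `get_device_num` are the script's own helpers, and signalling completion by creating the lock file is an assumption about the truncated tail of the function:

```python
import os
import time

def unzip_on_one_device(zip_file, save_dir, sync_lock):
    # only the first device of each server performs the unzip
    if get_device_id() % min(get_device_num(), 8) == 0 and not os.path.exists(sync_lock):
        unzip(zip_file, save_dir)
        open(sync_lock, "w").close()  # signal completion to the other devices
    while not os.path.exists(sync_lock):
        time.sleep(1)  # everyone else polls until the lock file appears
```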
@@ -470,6 +469,7 @@ def modelarts_pre_process():
def patch_transform(patch, data_shape, patch_shape):
"""Transform adversarial patch."""
# get dummy image
x = np.zeros(data_shape)
# get shape
@@ -508,6 +508,7 @@ def patch_transform(patch, data_shape, patch_shape):
class Iterable:
"""Iterable data."""
def __init__(self, img_path):
self.img_path = img_path
self.imgs = os.listdir(img_path)
@@ -578,7 +579,7 @@ def run_test():
input_shape = Tensor(tuple(config.test_img_shape), mstype.float32)
config.logger.info("Start inference....")
-for image_index, data in enumerate(dataset.create_dict_iterator()):
+for data in dataset.create_dict_iterator():
image = data["image"].asnumpy()
image = Tensor(image)
image_shape_ = input_shape

View File

@@ -70,7 +70,7 @@ class GradCam(nn.Cell):
)
def construct(self, model_input, in_shape):
"""Calculate grad cam."""
fea1, fea2, weights1, weights2 = self.get_feature_and_weights(model_input, in_shape)
cam1 = (weights1.expand_dims(-1).expand_dims(-1) * fea1).squeeze().sum(0)
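The `cam1` line is the core Grad-CAM combination: every feature map is scaled by its pooled gradient and the channels are summed, i.e. CAM = ReLU(sum_k alpha_k * A_k). An equivalent NumPy sketch (the ReLU and min-max normalization are the usual Grad-CAM conventions, assumed here):

```python
import numpy as np

def grad_cam_map(features, weights):
    # features: (C, H, W) activations; weights: (C,) pooled gradients
    cam = (weights[:, None, None] * features).sum(axis=0)
    cam = np.maximum(cam, 0)  # keep only positively contributing regions
    return (cam - cam.min()) / (cam.max() - cam.min() + 1e-8)
```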
@@ -95,6 +95,7 @@ class VisualCAM:
self.show_cam_on_image(raw_img, cam_map[1], img_id=1)
def show_cam_on_image(self, img, cam_map, img_id=0):
"""Show image grad cam."""
mask_np = cam_map.asnumpy()
heatmap = cv2.applyColorMap(np.uint8(255 * mask_np), cv2.COLORMAP_JET)[
@@ -129,18 +130,19 @@ def init_patch_square(patch_size):
return patch_ini, patch_ini.shape
-def patch_transform(pattern, data_shape, patch_shape):
+def patch_transform(pattern, data_shape, input_patch_shape):
+"""Transform adversarial patch for different samples."""
# get dummy image
x = np.zeros(data_shape)
# get shape
-m_size = patch_shape[-1]
+m_size = input_patch_shape[-1]
for i in range(x.shape[0]):
# random rotation
rot = np.random.choice(4)
-for j in range(pattern[i].shape[0]):
-pattern[i][j] = np.rot90(pattern[i][j], rot)
+for j_idx in range(pattern[i].shape[0]):
+pattern[i][j_idx] = np.rot90(pattern[i][j_idx], rot)
# random location
random_x = np.random.choice(x.shape[-2])
if random_x + m_size > x.shape[-2]:
@@ -153,13 +155,13 @@ def patch_transform(pattern, data_shape, patch_shape):
# apply patch to dummy image
x[i][0][
-random_x : random_x + patch_shape[-1], random_y : random_y + patch_shape[-1]
+random_x : random_x + input_patch_shape[-1], random_y : random_y + input_patch_shape[-1]
] = pattern[i][0]
x[i][1][
-random_x : random_x + patch_shape[-1], random_y : random_y + patch_shape[-1]
+random_x : random_x + input_patch_shape[-1], random_y : random_y + input_patch_shape[-1]
] = pattern[i][1]
x[i][2][
-random_x : random_x + patch_shape[-1], random_y : random_y + patch_shape[-1]
+random_x : random_x + input_patch_shape[-1], random_y : random_y + input_patch_shape[-1]
] = pattern[i][2]
masks = np.copy(x)
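For reference, this hunk rotates each patch by a random multiple of 90 degrees, picks a random top-left corner clamped to the image bounds, and writes the patch into each channel. A vectorized sketch; the clamping body is truncated above, so the `min` form is an assumption:

```python
import numpy as np

def place_patch(pattern, data_shape, patch_size):
    # pattern: (N, C, patch_size, patch_size); data_shape: (N, C, H, W)
    x = np.zeros(data_shape)
    for i in range(x.shape[0]):
        rot = np.random.choice(4)  # random 90-degree rotation
        for c in range(pattern[i].shape[0]):
            pattern[i][c] = np.rot90(pattern[i][c], rot)
        rx = min(np.random.choice(x.shape[-2]), x.shape[-2] - patch_size)
        ry = min(np.random.choice(x.shape[-1]), x.shape[-1] - patch_size)
        # one slice assignment over all channels replaces the three
        # per-channel assignments in the hunk above
        x[i, :, rx:rx + patch_size, ry:ry + patch_size] = pattern[i]
    return x, np.copy(x)
```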
@@ -197,10 +199,10 @@ def loss_sum(img, img_ori, m, canny):
)
-def attack(x, model, adv_patch, mask, iters=25):
+def attack(x, adv_patch, input_mask, iters=25):
"""Generate attack sample."""
adv_x = x
-adv_x = ms.ops.mul((1 - mask), adv_x) + ms.ops.mul(mask, adv_patch)
+adv_x = ms.ops.mul((1 - input_mask), adv_x) + ms.ops.mul(input_mask, adv_patch)
adv_x = ms.ops.clip_by_value(adv_x, 0, 1)
adv_x_np = np.uint8(255 * adv_x.asnumpy()[0].transpose((1, 2, 0)))
cv2.imwrite("adv_x.png", adv_x_np)
@@ -213,11 +215,11 @@ def attack(x, model, adv_patch, mask, iters=25):
while True:
count += 1
grad_fn = ms.ops.value_and_grad(loss_sum)
-loss, adv_grad = grad_fn(adv_x, adv_x_ori, mask, canny)
+loss, adv_grad = grad_fn(adv_x, adv_x_ori, input_mask, canny)
print("Loss:", loss)
adv_patch = adv_patch - STEP_SIZE * adv_grad / adv_grad.max()
-adv_x = ms.ops.mul((1 - mask), adv_x) + ms.ops.mul(mask, adv_patch)
+adv_x = ms.ops.mul((1 - input_mask), adv_x) + ms.ops.mul(input_mask, adv_patch)
adv_x = ms.ops.clip_by_value(adv_x, 0, 1)
if count >= iters:
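Taken together, the loop is an iterative patch attack: differentiate the loss with respect to the current adversarial image, step the patch against the max-normalized gradient, recompose the image through the mask, and clip to the valid pixel range. A condensed sketch of one iteration using the same MindSpore ops; `loss_sum`, `canny`, and `STEP_SIZE` come from the surrounding script:

```python
import mindspore as ms

def attack_step(adv_x, adv_x_ori, adv_patch, input_mask, canny):
    grad_fn = ms.ops.value_and_grad(loss_sum)  # loss_sum defined in this script
    loss, adv_grad = grad_fn(adv_x, adv_x_ori, input_mask, canny)
    # normalized gradient step applied to the patch only
    adv_patch = adv_patch - STEP_SIZE * adv_grad / adv_grad.max()
    # original pixels outside the mask, patch pixels inside it
    adv_x = ms.ops.mul(1 - input_mask, adv_x) + ms.ops.mul(input_mask, adv_patch)
    return ms.ops.clip_by_value(adv_x, 0, 1), adv_patch, loss
```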
@@ -225,10 +227,11 @@ def attack(x, model, adv_patch, mask, iters=25):
adv_x = ms.Parameter(Tensor(adv_x.asnumpy(), ms.float32), requires_grad=True)
-return adv_x, mask, adv_patch
+return adv_x, input_mask, adv_patch
class Iterable:
"""Iterable dataset."""
def __init__(self, img_path):
self.img_path = img_path
self.imgs = os.listdir(img_path)
@@ -275,7 +278,7 @@ if __name__ == "__main__":
patch, mask = patch_transform(patch, img_data[0].shape, patch_shape)
patch, mask = Tensor(patch), Tensor(mask)
x_adv, mask, patch = attack(
-img_data[0], network, patch, mask, iters=ATTACK_ITERS
+img_data[0], patch, mask, iters=ATTACK_ITERS
)
masked_patch = ms.ops.mul(mask, patch)
patch_ori = masked_patch.asnumpy()

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Split dataset."""
import os
import random
import shutil

View File

@@ -67,10 +67,10 @@ def calculate_gain(nonlinearity, param=None):
if param is None:
negative_slope = 0.01
elif (
not isinstance(param, bool)
and isinstance(param, int)
or isinstance(param, float)
):
# True/False are instances of int, hence check above
negative_slope = param
else:
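The `isinstance` chain reads oddly but is deliberate: `bool` is a subclass of `int` in Python, so `isinstance(True, int)` is `True` and booleans must be rejected before integers are accepted as a slope. With the implicit precedence (`and` binds tighter than `or`) spelled out, the condition is:

```python
valid_slope = (
    (not isinstance(param, bool) and isinstance(param, int))
    or isinstance(param, float)
)
```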

View File

@@ -66,8 +66,8 @@ def step_lr(lr, epoch_size, steps_per_epoch, max_epoch, gamma=0.1):
def warmup_cosine_annealing_lr(
lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0
):
"""Cosine annealing learning rate."""
base_lr = lr
warmup_init_lr = 0
@@ -92,8 +92,8 @@ def warmup_cosine_annealing_lr(
def warmup_cosine_annealing_lr_v2(
lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0
):
"""Cosine annealing learning rate V2."""
base_lr = lr
warmup_init_lr = 0
@@ -135,8 +135,8 @@ def warmup_cosine_annealing_lr_v2(
def warmup_cosine_annealing_lr_sample(
lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0
):
"""Warmup cosine annealing learning rate."""
start_sample_epoch = 60
step_sample = 2
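All three helpers build a per-step array for the standard warmup-plus-cosine-annealing schedule: a linear ramp up to the base rate over `warmup_epochs`, then lr(t) = eta_min + (base_lr - eta_min) * (1 + cos(pi * t / t_max)) / 2. A minimal sketch of the plain variant (per-epoch annealing is an assumption; the V2 and sample variants adjust the tail of the schedule):

```python
import math
import numpy as np

def warmup_cosine_lr(lr, steps_per_epoch, warmup_epochs, max_epoch, t_max, eta_min=0):
    warmup_steps = warmup_epochs * steps_per_epoch
    lr_each_step = []
    for i in range(max_epoch * steps_per_epoch):
        if i < warmup_steps:
            lr_each_step.append(lr * (i + 1) / max(1, warmup_steps))  # linear warmup
        else:
            epoch = i // steps_per_epoch
            lr_each_step.append(
                eta_min + (lr - eta_min) * (1 + math.cos(math.pi * epoch / t_max)) / 2
            )
    return np.array(lr_each_step, dtype=np.float32)
```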

View File

@@ -132,15 +132,15 @@ def pil_image_reshape(interp):
def _preprocess_true_boxes(
true_boxes,
anchors,
in_shape,
num_classes,
max_boxes,
label_smooth,
label_smooth_factor=0.1,
iou_threshold=0.213,
):
"""
Introduction
------------
@@ -362,8 +362,8 @@ def _is_iou_satisfied_constraint(min_iou, max_iou, box, crop_box):
def _choose_candidate_by_constraints(
max_trial, input_w, input_h, image_w, image_h, jitter, box, use_constraints
):
"""Choose candidate by constraints."""
if use_constraints:
constraints = (
@@ -410,8 +410,8 @@ def _choose_candidate_by_constraints(
crop_box = np.array((0, 0, input_w, input_h))
if not _is_iou_satisfied_constraint(
min_iou, max_iou, t_box, crop_box[np.newaxis]
):
continue
else:
candidates.append((dx, dy, nw, nh))
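`_is_iou_satisfied_constraint` keeps a crop candidate only when its IoU against the ground-truth boxes falls inside the sampled (min_iou, max_iou) window. A minimal IoU sketch; the corner-coordinate (x1, y1, x2, y2) convention is an assumption:

```python
import numpy as np

def bbox_iou(boxes, crop_box):
    # boxes: (N, 4), crop_box: (1, 4), both as x1, y1, x2, y2
    lt = np.maximum(boxes[:, :2], crop_box[:, :2])
    rb = np.minimum(boxes[:, 2:], crop_box[:, 2:])
    wh = np.clip(rb - lt, 0, None)
    inter = wh[:, 0] * wh[:, 1]
    area_a = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    area_b = (crop_box[:, 2] - crop_box[:, 0]) * (crop_box[:, 3] - crop_box[:, 1])
    return inter / (area_a + area_b - inter)
```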
@@ -421,17 +421,17 @@ def _choose_candidate_by_constraints(
def _correct_bbox_by_candidates(
candidates,
input_w,
input_h,
image_w,
image_h,
flip,
box,
box_data,
allow_outside_center,
max_boxes,
):
"""Calculate correct boxes."""
while candidates:
if len(candidates) > 1:
@@ -480,17 +480,17 @@ def _correct_bbox_by_candidates(
def _data_aug(
image,
box,
jitter,
hue,
sat,
val,
image_input_size,
max_boxes,
max_trial=10,
device_num=1,
):
"""Crop an image randomly with bounding box constraints.
This data augmentation is used in training of
@@ -600,8 +600,8 @@ class MultiScaleTrans:
self.label_smooth_factor = config.label_smooth_factor
def generate_seed_list(
self, init_seed=1234, seed_num=int(1e6), seed_range=(1, 1000)
):
"""Generate seed list"""
seed_list = []
random.seed(init_seed)
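`generate_seed_list` pre-computes a reproducible pool of random seeds so multi-scale selection stays deterministic across runs and workers. The body is truncated above; a sketch consistent with the visible lines:

```python
import random

def generate_seed_list(init_seed=1234, seed_num=int(1e6), seed_range=(1, 1000)):
    random.seed(init_seed)  # a fixed seed makes the whole list reproducible
    return [random.randint(seed_range[0], seed_range[1]) for _ in range(seed_num)]
```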

View File

@@ -31,6 +31,7 @@ if not os.path.exists(save_dir):
def convert(xml_list, json_file):
"""Convert."""
json_dict = {"images": [], "type": "instances", "annotations": [], "categories": []}
categories = define_categories.copy()
bnd_id = 1
@@ -84,8 +85,8 @@ def convert(xml_list, json_file):
cat = {"supercategory": "none", "id": cid, "name": cate_name}
json_dict["categories"].append(cat)
with os.fdopen(
os.open(json_file, flags=os.O_WRONLY, modes=stat.S_IWUSR, mode=0o777), "w"
) as json_fp:
json_str = json.dumps(json_dict)
json_fp.write(json_str)
print("------------create {} done--------------".format(json_file))

View File

@@ -241,6 +241,7 @@ class YOLOLossBlock(nn.Cell):
object_mask = y_true[:, :, :, :, 4:5]
class_probs = y_true[:, :, :, :, 5:]
true_boxes = y_true[:, :, :, :, :4]
test = input_shape
pred_boxes = self.concat((pred_xy, pred_wh))
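For orientation, the slicing above follows the usual YOLO target layout along the last axis of `y_true` (length 5 + num_classes):

```python
true_boxes = y_true[..., 0:4]    # x, y, w, h
object_mask = y_true[..., 4:5]   # 1 where an anchor is responsible for a box
class_probs = y_true[..., 5:]    # one-hot (optionally label-smoothed) classes
```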
@@ -369,9 +370,9 @@ class TrainingWrapper(nn.Cell):
self.grad_reducer = None
self.parallel_mode = context.get_auto_parallel_context("parallel_mode")
if self.parallel_mode in [
ParallelMode.DATA_PARALLEL,
ParallelMode.HYBRID_PARALLEL,
]:
self.reducer_flag = True
if self.reducer_flag:
mean = context.get_auto_parallel_context("gradients_mean")
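This branch enables gradient all-reduce only under data or hybrid parallelism. A sketch of how the wrapper usually finishes the setup; constructing a `DistributedGradReducer` from the `gradients_mean` and `device_num` context values is an assumption based on the visible lines:

```python
from mindspore import context
from mindspore.context import ParallelMode
from mindspore.nn import DistributedGradReducer

def build_grad_reducer(optimizer):
    parallel_mode = context.get_auto_parallel_context("parallel_mode")
    if parallel_mode in [ParallelMode.DATA_PARALLEL, ParallelMode.HYBRID_PARALLEL]:
        mean = context.get_auto_parallel_context("gradients_mean")
        degree = context.get_auto_parallel_context("device_num")
        return DistributedGradReducer(optimizer.parameters, mean, degree)
    return None  # single device: use gradients as-is
```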

View File

@@ -64,14 +64,14 @@ class COCOYoloDataset:
"""YOLO Dataset for COCO."""
def __init__(
self,
root,
ann_file,
input_size,
remove_images_without_annotations=True,
filter_crowd_anno=True,
is_training=True,
):
self.coco = COCO(ann_file)
self.root = root
self.img_ids = list(sorted(self.coco.imgs.keys()))
@@ -254,17 +254,17 @@ class COCOYoloDataset:
def create_yolo_dataset(
image_dir,
anno_path,
batch_size,
max_epoch,
device_num,
rank,
num_parallel_workers=8,
config=None,
is_training=True,
shuffle=True,
):
"""Create dataset for YOLO."""
cv2.setNumThreads(0)
if config.is_distributed:
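`create_yolo_dataset` wraps the `COCOYoloDataset` generator in a `GeneratorDataset` and shards it across devices. A hedged sketch of that wiring; the column names, input size, and batching step are assumptions, and the real function also attaches the multi-scale transform:

```python
from mindspore.dataset import GeneratorDataset

yolo_gen = COCOYoloDataset(
    root=image_dir,
    ann_file=anno_path,
    input_size=[416, 416],  # assumed input size
)
ds = GeneratorDataset(
    yolo_gen,
    column_names=["image", "annotation"],  # assumed column names
    num_parallel_workers=8,
    shuffle=shuffle,
    num_shards=device_num,
    shard_id=rank,
)
ds = ds.batch(batch_size, drop_remainder=True)
```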