torch.squeeze is a popular PyTorch function that returns a tensor with all dimensions of size 1 removed from the input. When the optional dim argument is given, only that dimension is squeezed, and only if it has size 1.
This article collects 50 examples of how open-source developers use torch.squeeze in their projects. You can safely reference or copy them, since they are all released under the MIT, Apache, or BSD licenses.
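Before the examples, here is a minimal sketch of the basic behavior (the shapes are arbitrary, chosen only for illustration):

import torch

x = torch.zeros(2, 1, 3, 1)
print(torch.squeeze(x).shape)         # torch.Size([2, 3])    -- all size-1 dims removed
print(torch.squeeze(x, dim=1).shape)  # torch.Size([2, 3, 1]) -- only dim 1 removed
print(torch.squeeze(x, dim=0).shape)  # torch.Size([2, 1, 3, 1]) -- dim 0 has size 2, so this is a no-op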
Example 1: The code snippet below is referenced from open-source project biaffineparser by chantera.
def forward(self, pretrained_word_tokens, word_tokens, pos_tokens):
    lengths = np.array([len(tokens) for tokens in word_tokens])
    X = self.forward_embed(
        pretrained_word_tokens, word_tokens, pos_tokens, lengths)
    indices = np.argsort(-np.array(lengths)).astype(np.int64)
    lengths = lengths[indices]
    X = torch.stack([X[idx] for idx in indices])
    X = nn.utils.rnn.pack_padded_sequence(X, lengths, batch_first=True)
    R = self.blstm(X)[0]
    R = nn.utils.rnn.pad_packed_sequence(R, batch_first=True)[0]
    R = R.index_select(dim=0, index=_model_var(
        self, torch.from_numpy(np.argsort(indices).astype(np.int64))))
    H_arc_head = self.mlp_arc_head(R)
    H_arc_dep = self.mlp_arc_dep(R)
    arc_logits = self.arc_biaffine(H_arc_dep, H_arc_head)
    arc_logits = torch.squeeze(arc_logits, dim=3)
    H_label_dep = self.mlp_label_dep(R)
    H_label_head = self.mlp_label_head(R)
    label_logits = self.label_biaffine(H_label_dep, H_label_head)
    return arc_logits, label_logits
Example 2: The code snippet below is referenced from open-source project pytorch_RFCN by PureDiors.
def build_loss(self, rpn_cls_score_reshape, rpn_bbox_pred, rpn_data):
    # classification loss
    rpn_cls_score = rpn_cls_score_reshape.permute(0, 2, 3, 1).contiguous().view(-1, 2)
    rpn_label = rpn_data[0].view(-1)
    rpn_keep = Variable(rpn_label.data.ne(-1).nonzero().squeeze()).cuda()
    rpn_cls_score = torch.index_select(rpn_cls_score, 0, rpn_keep)
    rpn_label = torch.index_select(rpn_label, 0, rpn_keep)
    fg_cnt = torch.sum(rpn_label.data.ne(0))
    rpn_cross_entropy = F.cross_entropy(rpn_cls_score, rpn_label)
    # box loss
    rpn_bbox_targets, rpn_bbox_inside_weights, rpn_bbox_outside_weights = rpn_data[1:]
    rpn_bbox_targets = torch.mul(rpn_bbox_targets, rpn_bbox_inside_weights)
    rpn_bbox_pred = torch.mul(rpn_bbox_pred, rpn_bbox_inside_weights)
    rpn_loss_box = F.smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, size_average=False) / (fg_cnt + 1e-4)
    return rpn_cross_entropy, rpn_loss_box
Example 3: The code snippet below is referenced from open-source project pytorch_RFCN by PureDiors.
def forward(self, im_data, im_info, gt_boxes=None, gt_ishard=None, dontcare_areas=None):
    features, rois = self.rpn(im_data, im_info, gt_boxes, gt_ishard, dontcare_areas)
    if self.training:
        roi_data = self.proposal_target_layer(rois, gt_boxes, gt_ishard, dontcare_areas, self.n_classes)
        rois = roi_data[0]
    # roi pool
    conv_new1 = self.new_conv(features)
    r_score_map = self.rfcn_score(conv_new1)
    r_bbox_map = self.rfcn_bbox(conv_new1)
    psroi_pooled_cls = self.psroi_pool_cls(r_score_map, rois)
    psroi_pooled_loc = self.psroi_pool_loc(r_bbox_map, rois)
    bbox_pred = self.bbox_pred(psroi_pooled_loc)
    bbox_pred = torch.squeeze(bbox_pred)
    cls_score = self.cls_score(psroi_pooled_cls)
    cls_score = torch.squeeze(cls_score)
    cls_prob = F.softmax(cls_score)
    if self.training:
        self.cross_entropy, self.loss_box = self.build_loss(cls_score, bbox_pred, roi_data)
    return cls_prob, bbox_pred, rois
Example 4: The code snippet below is referenced from open-source project treelstm-pytorch by pklfz.
def node_forward(self, inputs, child_c, child_h):
    child_h_sum = F.torch.sum(torch.squeeze(child_h, 1), 0)
    i = F.sigmoid(self.ix(inputs) + self.ih(child_h_sum))
    o = F.sigmoid(self.ox(inputs) + self.oh(child_h_sum))
    u = F.tanh(self.ux(inputs) + self.uh(child_h_sum))
    # add extra singleton dimension
    fx = F.torch.unsqueeze(self.fx(inputs), 1)
    f = F.torch.cat([self.fh(child_hi) + fx for child_hi in child_h], 0)
    f = F.sigmoid(f)
    # re-add the singleton dimension, multiply, then squeeze it away again
    f = F.torch.unsqueeze(f, 1)
    fc = F.torch.squeeze(F.torch.mul(f, child_c), 1)
    c = F.torch.mul(i, u) + F.torch.sum(fc, 0)
    h = F.torch.mul(o, F.tanh(c))
    return c, h
Example 5: The code snippet below is referenced from open-source project nmp_qc by priba.
def r_duvenaud(self, h):
    # layers
    aux = []
    for l in range(len(h)):
        param_sz = self.learn_args[l].size()
        parameter_mat = torch.t(self.learn_args[l])[None, ...].expand(h[l].size(0), param_sz[1],
                                                                      param_sz[0])
        aux.append(torch.transpose(torch.bmm(parameter_mat, torch.transpose(h[l], 1, 2)), 1, 2))
        for j in range(0, aux[l].size(1)):
            # Mask whole 0 vectors
            aux[l][:, j, :] = nn.Softmax()(aux[l][:, j, :].clone()) * (torch.sum(aux[l][:, j, :] != 0, 1) > 0).expand_as(aux[l][:, j, :]).type_as(aux[l])
    aux = torch.sum(torch.sum(torch.stack(aux, 3), 3), 1)
    return self.learn_modules[0](torch.squeeze(aux))
Example 6: The code snippet below is referenced from open-source project nmp_qc by priba.
def m_ggnn(self, h_v, h_w, e_vw, opt={}):
    m = Variable(torch.zeros(h_w.size(0), h_w.size(1), self.args['out']).type_as(h_w.data))
    for w in range(h_w.size(1)):
        if torch.nonzero(e_vw[:, w, :].data).size():
            for i, el in enumerate(self.args['e_label']):
                ind = (el == e_vw[:, w, :]).type_as(self.learn_args[0][i])
                parameter_mat = self.learn_args[0][i][None, ...].expand(h_w.size(0), self.learn_args[0][i].size(0),
                                                                        self.learn_args[0][i].size(1))
                m_w = torch.transpose(torch.bmm(torch.transpose(parameter_mat, 1, 2),
                                                torch.transpose(torch.unsqueeze(h_w[:, w, :], 1),
                                                                1, 2)), 1, 2)
                m_w = torch.squeeze(m_w)
                m[:, w, :] = ind.expand_as(m_w) * m_w
    return m
Example 7: The code snippet below is referenced from open-source project kdnet.pytorch by fxia22.
def split_ps(point_set):
    # print(point_set.size())
    num_points = point_set.size()[0] // 2
    diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0]
    dim = torch.max(diff, dim=1)[1][0, 0]
    cut = torch.median(point_set[:, dim])[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:, dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:, dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:, dim] == cut))
    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)
    left_ps = torch.index_select(point_set, dim=0, index=left_idx)
    right_ps = torch.index_select(point_set, dim=0, index=right_idx)
    return left_ps, right_ps, dim
Example 8: The code snippet below is referenced from open-source project kdnet.pytorch by fxia22.
def split_ps(point_set):
    # print(point_set.size())
    num_points = point_set.size()[0] // 2
    diff = point_set.max(dim=0, keepdim=True)[0] - point_set.min(dim=0, keepdim=True)[0]
    dim = torch.max(diff, dim=1, keepdim=True)[1][0, 0]
    cut = torch.median(point_set[:, dim], keepdim=True)[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:, dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:, dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:, dim] == cut))
    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)
    left_ps = torch.index_select(point_set, dim=0, index=left_idx)
    right_ps = torch.index_select(point_set, dim=0, index=right_idx)
    return left_ps, right_ps, dim
Example 9: The code snippet below is referenced from open-source project kdnet.pytorch by fxia22.
def split_ps(point_set):
    # print(point_set.size())
    num_points = point_set.size()[0] // 2
    diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0]
    diff = diff[:3]
    dim = torch.max(diff, dim=1)[1][0, 0]
    cut = torch.median(point_set[:, dim])[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:, dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:, dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:, dim] == cut))
    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)
    left_ps = torch.index_select(point_set, dim=0, index=left_idx)
    right_ps = torch.index_select(point_set, dim=0, index=right_idx)
    return left_ps, right_ps, dim
Example 10: The code snippet below is referenced from open-source project kdnet.pytorch by fxia22.
def split_ps(point_set):
    # print(point_set.size())
    num_points = point_set.size()[0] // 2
    diff = point_set.max(dim=0)[0] - point_set.min(dim=0)[0]
    dim = torch.max(diff, dim=1)[1][0, 0]
    cut = torch.median(point_set[:, dim])[0][0]
    left_idx = torch.squeeze(torch.nonzero(point_set[:, dim] > cut))
    right_idx = torch.squeeze(torch.nonzero(point_set[:, dim] < cut))
    middle_idx = torch.squeeze(torch.nonzero(point_set[:, dim] == cut))
    if torch.numel(left_idx) < num_points:
        left_idx = torch.cat([left_idx, middle_idx[0:1].repeat(num_points - torch.numel(left_idx))], 0)
    if torch.numel(right_idx) < num_points:
        right_idx = torch.cat([right_idx, middle_idx[0:1].repeat(num_points - torch.numel(right_idx))], 0)
    left_ps = torch.index_select(point_set, dim=0, index=left_idx)
    right_ps = torch.index_select(point_set, dim=0, index=right_idx)
    return left_ps, right_ps, dim
Example 11: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def max(x, axis=None, keepdims=False):
    def _max(x, axis, keepdims):
        y = torch.max(x, axis)[0]
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_max, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
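The squeeze here works around the limitation noted in the snippet's own comment: at the time, torch's keepdim flag was not functional, so ktorch removed the reduced axis by hand to emulate Keras' keepdims semantics. The same pattern repeats in Examples 12 through 18. In current PyTorch the flag works directly, e.g.:

import torch

x = torch.rand(4, 7)
print(torch.max(x, dim=1, keepdim=True)[0].shape)   # torch.Size([4, 1])
print(torch.max(x, dim=1, keepdim=False)[0].shape)  # torch.Size([4])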
Example 12: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def min(x, axis=None, keepdims=False):
    def _min(x, axis, keepdims):
        y = torch.min(x, axis)[0]
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_min, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 13: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def sum(x, axis=None, keepdims=False):
    def _sum(x, axis, keepdims):
        y = torch.sum(x, axis)
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_sum, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 14: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def prod(x, axis=None, keepdims=False):
    def _prod(x, axis, keepdims):
        y = torch.prod(x, axis)
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_prod, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 15: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def std(x, axis=None, keepdims=False):
    def _std(x, axis, keepdims):
        y = torch.std(x, axis)
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis, keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_std, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 16: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def mean(x, axis=None, keepdims=False):
    def _mean(x, axis=axis, keepdims=keepdims):
        y = torch.mean(x, axis)
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis=axis, keepdims=keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_mean, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 17: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def any(x, axis=None, keepdims=False):
    def _any(x, axis=axis, keepdims=keepdims):
        y = torch.sum(x != 0, axis) != 0
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis=axis, keepdims=keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_any, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 18: The code snippet below is referenced from open-source project ktorch by farizrahman4u.
def all(x, axis=None, keepdims=False):
    def _all(x, axis=axis, keepdims=keepdims):
        y = torch.sum(x == False, axis) == 0
        # Since keepdims argument of torch not functional
        return y if keepdims else torch.squeeze(y, axis)
    def _compute_output_shape(x, axis=axis, keepdims=keepdims):
        if axis is None:
            return ()
        shape = list(_get_shape(x))
        if keepdims:
            shape[axis] = 1
        else:
            del shape[axis]
        return tuple(shape)
    return get_op(_all, output_shape=_compute_output_shape, arguments=[axis, keepdims])(x)
Example 19: The code snippet below is referenced from open-source project torchbiomed by mattmacy.
def dice_error(input, target):
    eps = 0.000001
    _, result_ = input.max(1)
    result_ = torch.squeeze(result_)
    if input.is_cuda:
        result = torch.cuda.FloatTensor(result_.size())
        target_ = torch.cuda.FloatTensor(target.size())
    else:
        result = torch.FloatTensor(result_.size())
        target_ = torch.FloatTensor(target.size())
    result.copy_(result_.data)
    target_.copy_(target.data)
    target = target_
    intersect = torch.dot(result, target)
    result_sum = torch.sum(result)
    target_sum = torch.sum(target)
    union = result_sum + target_sum + 2 * eps
    intersect = np.max([eps, intersect])
    # the target volume can be empty - so we still want to
    # end up with a score of 1 if the result is 0/0
    IoU = intersect / union
    # print('union: {:.3f}\t intersect: {:.6f}\t target_sum: {:.0f} IoU: result_sum: {:.0f} IoU {:.7f}'.format(
    #     union, intersect, target_sum, result_sum, 2*IoU))
    return 2 * IoU
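For context, the score above is the Sørensen-Dice coefficient, 2|A ∩ B| / (|A| + |B|). A minimal sketch of the same metric in current PyTorch (the function and argument names here are illustrative, not taken from torchbiomed):

import torch

def dice_coefficient(pred, target, eps=1e-6):
    # pred and target are binary tensors of the same shape
    pred = pred.float().flatten()
    target = target.float().flatten()
    intersect = torch.dot(pred, target)
    # eps in both places yields a score of 1 when both volumes are empty (0/0)
    return (2 * intersect + eps) / (pred.sum() + target.sum() + eps)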
Example 20: The code snippet below is referenced from open-source project ResNeXt-DenseNet by D-X-Y.
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
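A caveat worth flagging for this pattern: torch.squeeze without a dim argument removes every size-1 dimension, so when the batch contains a single sample the batch dimension is squeezed away as well, and self.fc then receives a 1-D tensor. A small illustration (the shapes are hypothetical):

import torch

feat = torch.zeros(1, 64, 1, 1)          # batch of one after global average pooling
print(torch.squeeze(feat).shape)         # torch.Size([64])    -- batch dim lost
print(feat.squeeze(3).squeeze(2).shape)  # torch.Size([1, 64]) -- batch dim kept

Squeezing explicit dimensions (or using x.flatten(1)) avoids the surprise.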
Example 21: The code snippet below is referenced from open-source project Tree-LSTM-LM by vgene.
def run_epoch(model, reader, criterion, is_train=False, use_cuda=False, lr=0.01):
    """
    reader: data provider
    criterion: loss calculation
    """
    # if is_train:
    #     model.train()
    # else:
    #     model.eval()
    epoch_size = ((reader.file_length // model.batch_size) - 1) // model.seq_length
    hidden = model.init_hidden()
    iters = 0
    costs = 0
    for steps, (inputs, targets) in tqdm.tqdm(enumerate(reader.iterator_char(model.batch_size, model.seq_length))):
        # print(len(inputs))
        model.optimizer.zero_grad()
        inputs = Variable(torch.from_numpy(inputs.astype(np.int64)).transpose(0, 1).contiguous())
        targets = Variable(torch.from_numpy(targets.astype(np.int64)).transpose(0, 1).contiguous())
        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        targets = torch.squeeze(targets.view(-1, model.batch_size * model.seq_length))
        hidden = repackage_hidden(hidden, use_cuda=use_cuda)
        outputs, hidden = model(inputs, hidden)
        loss = criterion(outputs.view(-1, model.vocab_size), targets)
        costs += loss.data[0] * model.seq_length
        perplexity = np.exp(costs / ((steps + 1) * model.seq_length))
        # print("Iter {}/{},Perplexity:{}".format(steps+1, epoch_size, perplexity))
        if is_train:
            loss.backward()
            model.optimizer.step()
    return perplexity
Example 22: The code snippet below is referenced from open-source project Tree-LSTM-LM by vgene.
def run_epoch(model, provider, criterion, is_train=False, use_cuda=False, lr=0.01):
    """
    reader: data provider
    criterion: loss calculation
    """
    # if is_train:
    #     model.train()
    # else:
    #     model.eval()
    # epoch_size = ((provider.file_length // model.batch_size)-1) // model.seq_length
    hidden = model.init_hidden()
    iters = 0
    costs = 0
    for steps, (inputs, targets) in enumerate(provider.iterator(model.batch_size, model.seq_length)):
        # print(inputs)
        model.optimizer.zero_grad()
        inputs = Variable(torch.from_numpy(inputs.astype(np.int64)).transpose(0, 1).contiguous())
        targets = Variable(torch.from_numpy(targets.astype(np.int64)).transpose(0, 1).contiguous())
        if use_cuda:
            inputs = inputs.cuda()
            targets = targets.cuda()
        targets = torch.squeeze(targets.view(-1, model.batch_size * model.seq_length))
        hidden = repackage_hidden(hidden, use_cuda=use_cuda)
        outputs, hidden = model(inputs, hidden)
        loss = criterion(outputs.view(-1, model.node_size), targets)
        costs += loss.data[0] * model.seq_length
        perplexity = np.exp(costs / ((steps + 1) * model.seq_length))
        # print("Iter {}/{},Perplexity:{}".format(steps+1, epoch_size, perplexity))
        if is_train:
            loss.backward()
            model.optimizer.step()
    return perplexity
Example 23: The code snippet below is referenced from open-source project MMdnn by Microsoft.
def emit_Squeeze(self, IR_node):
    self.add_body(2, "{:<15} = torch.squeeze({})".format(
        IR_node.variable_name, self.parent_variable_name(IR_node)
    ))
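To make the emitter concrete: assuming a hypothetical IR node whose variable name is squeeze_1 and whose parent variable resolves to conv5, the generated line would be

squeeze_1       = torch.squeeze(conv5)

where the {:<15} specifier left-justifies the variable name in a 15-character field so consecutive generated assignments line up, and add_body's first argument controls the indentation level of the emitted line.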
Example 24: The code snippet below is referenced from open-source project MMdnn by Microsoft.
def _layer_LRN(self):
    self.add_body(0, """
class LRN(nn.Module):
    def __init__(self, size=1, alpha=1.0, beta=0.75, ACROSS_CHANNELS=False):
        super(KitModel.LRN, self).__init__()
        self.ACROSS_CHANNELS = ACROSS_CHANNELS
        if self.ACROSS_CHANNELS:
            self.average = nn.AvgPool3d(kernel_size=(size, 1, 1),
                                        stride=1,
                                        padding=(int((size - 1.0) / 2), 0, 0))
        else:
            self.average = nn.AvgPool2d(kernel_size=size,
                                        stride=1,
                                        padding=int((size - 1.0) / 2))
        self.alpha = alpha
        self.beta = beta

    def forward(self, x):
        if self.ACROSS_CHANNELS:
            div = x.pow(2).unsqueeze(1)
            div = self.average(div).squeeze(1)
            div = div.mul(self.alpha).add(1.0).pow(self.beta)
        else:
            div = x.pow(2)
            div = self.average(div)
            div = div.mul(self.alpha).add(1.0).pow(self.beta)
        x = x.div(div)
        return x""")
Example 25: The code snippet below is referenced from open-source project treehopper by tomekkorbak.
def node_forward(self, inputs, child_c, child_h, training):
    child_h_sum = F.torch.sum(torch.squeeze(child_h, 1), 0, keepdim=True)
    i = F.sigmoid(self.ix(inputs) + self.ih(child_h_sum))
    o = F.sigmoid(self.ox(inputs) + self.oh(child_h_sum))
    u = F.tanh(self.ux(inputs) + self.uh(child_h_sum))
    # add extra singleton dimension
    fx = F.torch.unsqueeze(self.fx(inputs), 1)
    f = F.torch.cat([self.fh(child_hi) + torch.squeeze(fx, 1) for child_hi in child_h], 0)
    # f = torch.squeeze(f, 0)
    f = F.sigmoid(f)
    # re-add the singleton dimension, multiply, then squeeze it away again
    f = F.torch.unsqueeze(f, 1)
    fc = F.torch.squeeze(F.torch.mul(f, child_c), 1)
    idx = Var(torch.multinomial(torch.ones(child_c.size(0)), 1), requires_grad=False)
    if self.cuda_flag:
        idx = idx.cuda()
    c = zoneout(
        current_input=F.torch.mul(i, u) + F.torch.sum(fc, 0, keepdim=True),
        previous_input=F.torch.squeeze(child_c.index_select(0, idx), 0) if self.zoneout_choose_child else F.torch.sum(torch.squeeze(child_c, 1), 0, keepdim=True),
        p=self.recurrent_dropout_c,
        training=training,
        mask=self.mask if self.commons_mask else None
    )
    h = zoneout(
        current_input=F.torch.mul(o, F.tanh(c)),
        previous_input=F.torch.squeeze(child_h.index_select(0, idx), 0) if self.zoneout_choose_child else child_h_sum,
        p=self.recurrent_dropout_h,
        training=training,
        mask=self.mask if self.commons_mask else None
    )
    return c, h
Example 26: The code snippet below is referenced from open-source project vnet.pytorch by mattmacy.
def inference(args, loader, model, transforms):
    src = args.inference
    dst = args.save
    model.eval()
    nvols = reduce(operator.mul, target_split, 1)
    # assume single GPU / batch size 1
    for data in loader:
        data, series, origin, spacing = data[0]
        shape = data.size()
        # convert names to batch tensor
        if args.cuda:
            data.pin_memory()
            data = data.cuda()
        data = Variable(data, volatile=True)
        output = model(data)
        _, output = output.max(1)
        output = output.view(shape)
        output = output.cpu()
        # merge subvolumes and save
        results = output.chunk(nvols)
        results = map(lambda var: torch.squeeze(var.data).numpy().astype(np.int16), results)
        volume = utils.merge_image([*results], target_split)
        print("save {}".format(series))
        utils.save_updated_image(volume, os.path.join(dst, series + ".mhd"), origin, spacing)

# performing post-train inference:
# train.py --resume <model checkpoint> --i <input directory (*.mhd)> --save <output directory>
Example 27: The code snippet below is referenced from open-source project biaffineparser by chantera.
def extract_best_label_logits(self, arc_logits, label_logits, lengths):
    pred_arcs = torch.squeeze(
        torch.max(arc_logits, dim=1)[1], dim=1).data.cpu().numpy()
    size = label_logits.size()
    output_logits = _model_var(
        self.model,
        torch.zeros(size[0], size[1], size[3]))
    for batch_index, (_logits, _arcs, _length) \
            in enumerate(zip(label_logits, pred_arcs, lengths)):
        for i in range(_length):
            output_logits[batch_index] = _logits[_arcs[i]]
    return output_logits
Example 28: The code snippet below is referenced from open-source project densenet.pytorch by bamos.
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
Example 29: The code snippet below is referenced from open-source project FreezeOut by ajbrock.
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
Example 30: The code snippet below is referenced from open-source project optnet by locuslab.
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
Example 31: The code snippet below is referenced from open-source project foolbox by bethgelab.
def test_pytorch_backward(num_classes):
    bounds = (0, 255)
    channels = num_classes

    class Net(nn.Module):
        def __init__(self):
            super(Net, self).__init__()

        def forward(self, x):
            x = torch.mean(x, 3)
            x = torch.squeeze(x, dim=3)
            x = torch.mean(x, 2)
            x = torch.squeeze(x, dim=2)
            logits = x
            return logits

    model = Net()
    model = PyTorchModel(
        model,
        bounds=bounds,
        num_classes=num_classes,
        cuda=False)
    test_image = np.random.rand(channels, 5, 5).astype(np.float32)
    test_grad_pre = np.random.rand(num_classes).astype(np.float32)
    test_grad = model.backward(test_grad_pre, test_image)
    assert test_grad.shape == test_image.shape
    manual_grad = np.repeat(np.repeat(
        (test_grad_pre / 25.).reshape((-1, 1, 1)),
        5, axis=1), 5, axis=2)
    np.testing.assert_almost_equal(
        test_grad,
        manual_grad)
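The mean-then-squeeze pairing in Net.forward appears to target older PyTorch releases, in which reduction ops such as torch.mean kept the reduced dimension by default; the explicit squeeze with a dim argument then dropped it. In current PyTorch the reduction already removes the dimension unless keepdim=True is passed, e.g. (a sketch, not foolbox code):

import torch

x = torch.rand(10, 3, 5, 5)
print(torch.mean(x, dim=3).shape)                # torch.Size([10, 3, 5]) -- dim already gone
print(torch.mean(x, dim=3, keepdim=True).shape)  # torch.Size([10, 3, 5, 1]) -- squeeze(dim=3) would remove it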
Example 32: The code snippet below is referenced from open-source project foolbox by bethgelab.
def bn_model_pytorch():
    """Same as bn_model but with PyTorch."""
    import torch
    import torch.nn as nn

    bounds = (0, 1)
    num_classes = 10

    class Net(nn.Module):
        def forward(self, x):
            assert isinstance(x.data, torch.FloatTensor)
            x = torch.mean(x, 3)
            x = torch.squeeze(x, dim=3)
            x = torch.mean(x, 2)
            x = torch.squeeze(x, dim=2)
            logits = x
            return logits

    model = Net()
    model = PyTorchModel(
        model,
        bounds=bounds,
        num_classes=num_classes,
        cuda=False)
    return model
Example 33: The code snippet below is referenced from open-source project pytorch_crowd_count by BingzheWu.
def demo(img_path):
    net = predict_net()
    net.load_state_dict(torch.load('checkpoint/crowd_net2.pth'))
    input_img = read_gray_img(img_path)
    input_img = torch.autograd.Variable(torch.Tensor(input_img / 255.0))
    print(input_img.size())
    # input_image = input_image.view(1, 3, 255, 255)
    heat_map = net.forward(input_img)
    print(heat_map.size())
    heat_map = torch.squeeze(heat_map)
    heat_map = heat_map.data.numpy()
    plt.imshow(heat_map, cmap='hot')
    plt.show()
Example 34: The code snippet below is referenced from open-source project pytorch_RFCN by PureDiors.
def build_loss(self, cls_score, bbox_pred, roi_data):
    # classification loss
    label = roi_data[1].squeeze()
    fg_cnt = torch.sum(label.data.ne(0))
    bg_cnt = label.data.numel() - fg_cnt
    # for log
    if self.debug:
        maxv, predict = cls_score.data.max(1)
        self.tp = torch.sum(predict[:fg_cnt].eq(label.data[:fg_cnt])) if fg_cnt > 0 else 0
        self.tf = torch.sum(predict[fg_cnt:].eq(label.data[fg_cnt:]))
        self.fg_cnt = fg_cnt
        self.bg_cnt = bg_cnt
    ce_weights = torch.ones(cls_score.size()[1])
    ce_weights[0] = float(fg_cnt) / bg_cnt
    ce_weights = ce_weights.cuda()
    cross_entropy = F.cross_entropy(cls_score, label, weight=ce_weights)
    # bounding box regression L1 loss
    bbox_targets, bbox_inside_weights, bbox_outside_weights = roi_data[2:]
    bbox_targets = torch.mul(bbox_targets, bbox_inside_weights)
    bbox_pred = torch.mul(bbox_pred, bbox_inside_weights)
    loss_box = F.smooth_l1_loss(bbox_pred, bbox_targets, size_average=False) / (fg_cnt + 1e-4)
    return cross_entropy, loss_box
Example 35: The code snippet below is referenced from open-source project SMASH by ajbrock.
def forward(self, x):
    # Stem convolution
    out = self.conv1(x)
    # Allocate memory banks
    m = [[None for _ in range(d)] for d in self.D]
    module_index = 0
    for i, (incoming_channels, outgoing_channels, g_values, bs, trans) in enumerate(zip(
            self.incoming, self.outgoing, self.G, self.bank_sizes, [self.trans1, self.trans2, None])):
        # Write to initial memory banks
        for j in range(out.size(1) // (bs * self.N)):
            m[i][j] = out[:, j * bs * self.N: (j + 1) * bs * self.N]
        for read, write, g in zip(incoming_channels, outgoing_channels, g_values):
            # Cat read tensors
            inp = torch.cat([m[i][index] for index in read], 1)
            # Apply module and increment op index
            out = self.mod[module_index](inp)
            module_index += 1
            for j, w in enumerate(write):
                # Allocate dat memory if it's None
                if m[i][w] is None:
                    m[i][w] = out[:, (j % (g // bs)) * (bs * self.N): (j % (g // bs) + 1) * (bs * self.N)]
                # Else, if already written, add to it.
                else:
                    m[i][w] = m[i][w] + out[:, (j % (g // bs)) * (bs * self.N): (j % (g // bs) + 1) * (bs * self.N)]
        if trans is not None:
            out = trans(torch.cat(m[i], 1))
        else:
            out = torch.cat(m[i], 1)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), out.size(2)))
    out = F.log_softmax(self.fc(out))
    return out
Example 36: The code snippet below is referenced from open-source project pyscatwave by edouardoyallon.
def testModulus(self):
    for jit in [True, False]:
        modulus = sl.Modulus(jit=jit)
        x = torch.cuda.FloatTensor(100, 10, 4, 2).copy_(torch.rand(100, 10, 4, 2))
        y = modulus(x)
        u = torch.squeeze(torch.sqrt(torch.sum(x * x, 3)))
        v = y.narrow(3, 0, 1)
        self.assertLess((u - v).abs().max(), 1e-6)
Example 37: The code snippet below is referenced from open-source project pytorch by pytorch.
def __bool__(self):
    if self.numel() == 0:
        return False
    elif self.numel() == 1:
        return torch.squeeze(self)[0] != 0
    raise RuntimeError("bool value of " + torch.typename(self) +
                       " containing more than one value is ambiguous")
Example 38: The code snippet below is referenced from open-source project nmp_qc by priba.
def u_intnet(self, h_v, m_v, opt):
    if opt['x_v'].ndimension():
        input_tensor = torch.cat([h_v, opt['x_v'], torch.squeeze(m_v)], 1)
    else:
        input_tensor = torch.cat([h_v, torch.squeeze(m_v)], 1)
    return self.learn_modules[0](input_tensor)
Example 39: The code snippet below is referenced from open-source project nmp_qc by priba.
def u_mpnn(self, h_v, m_v, opt={}):
    h_in = h_v.view(-1, h_v.size(2))
    m_in = m_v.view(-1, m_v.size(2))
    h_new = self.learn_modules[0](m_in[None, ...], h_in[None, ...])[0]  # 0 or 1???
    return torch.squeeze(h_new).view(h_v.size())
Example 40: The code snippet below is referenced from open-source project nmp_qc by priba.
def forward(self, g, h_in, e):
    h = []
    # Padding to some larger dimension d
    h_t = torch.cat([h_in, Variable(
        torch.zeros(h_in.size(0), h_in.size(1), self.args['out'] - h_in.size(2)).type_as(h_in.data))], 2)
    h.append(h_t.clone())
    # Layer
    for t in range(0, self.n_layers):
        e_aux = e.view(-1, e.size(3))
        h_aux = h[t].view(-1, h[t].size(2))
        m = self.m[0].forward(h[t], h_aux, e_aux)
        m = m.view(h[0].size(0), h[0].size(1), -1, m.size(1))
        # Nodes without edge set message to 0
        m = torch.unsqueeze(g, 3).expand_as(m) * m
        m = torch.squeeze(torch.sum(m, 1))
        h_t = self.u[0].forward(h[t], m)
        # Delete virtual nodes
        h_t = (torch.sum(h_in, 2).expand_as(h_t) > 0).type_as(h_t) * h_t
        h.append(h_t)
    # Readout
    res = self.r.forward(h)
    if self.type == 'classification':
        res = nn.LogSoftmax()(res)
    return res
Example 41: The code snippet below is referenced from open-source project nmp_qc by priba.
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
Example 42: The code snippet below is referenced from open-source project nmp_qc by priba.
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode
    model.eval()
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
Example 43: The code snippet below is referenced from open-source project nmp_qc by priba.
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode
    model.eval()
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
Example 44: The code snippet below is referenced from open-source project nmp_qc by priba.
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode
    model.eval()
    end = time.time()
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
Example 45: The code snippet below is referenced from open-source project nmp_qc by priba.
def validate(val_loader, model, criterion, evaluation, logger=None):
    losses = AverageMeter()
    accuracies = AverageMeter()
    # switch to evaluate mode
    model.eval()
    for i, (g, h, e, target) in enumerate(val_loader):
        # Prepare input data
        target = torch.squeeze(target).type(torch.LongTensor)
        if args.cuda:
            g, h, e, target = g.cuda(), h.cuda(), e.cuda(), target.cuda()
        g, h, e, target = Variable(g), Variable(h), Variable(e), Variable(target)
        # Compute output
        output = model(g, h, e)
        # Logs
        test_loss = criterion(output, target)
        acc = Variable(evaluation(output.data, target.data, topk=(1,))[0])
        losses.update(test_loss.data[0], g.size(0))
        accuracies.update(acc.data[0], g.size(0))
    print(' * Average Accuracy {acc.avg:.3f}; Average Loss {loss.avg:.3f}'
          .format(acc=accuracies, loss=losses))
    if logger is not None:
        logger.log_value('test_epoch_loss', losses.avg)
        logger.log_value('test_epoch_accuracy', accuracies.avg)
    return accuracies.avg
Example 46: The code snippet below is referenced from open-source project nmp_qc by priba.
def m_mpnn(self, h_v, h_w, e_vw, opt={}):
    # Matrices for each edge
    edge_output = self.learn_modules[0](e_vw)
    edge_output = edge_output.view(-1, self.args['out'], self.args['in'])
    h_w_rows = h_w[..., None].expand(h_w.size(0), h_v.size(1), h_w.size(1)).contiguous()
    h_w_rows = h_w_rows.view(-1, self.args['in'])
    h_multiply = torch.bmm(edge_output, torch.unsqueeze(h_w_rows, 2))
    m_new = torch.squeeze(h_multiply)
    return m_new
Example 47: The code snippet below is referenced from open-source project pytorch_resnet by taokong.
def forward(self, x):
    out = self.conv1(x)
    out = self.trans1(self.dense1(out))
    out = self.trans2(self.dense2(out))
    out = self.dense3(out)
    out = torch.squeeze(F.avg_pool2d(F.relu(self.bn1(out)), 8))
    out = F.log_softmax(self.fc(out))
    return out
Example 48: The code snippet below is referenced from open-source project pytorch_resnet by taokong.
def forward(self, x):
    x = self.conv_1_3x3(x)
    x = F.relu(self.bn_1(x), inplace=True)
    x = self.stage_1(x)
    x = self.stage_2(x)
    x = self.stage_3(x)
    x = self.avgpool(x)
    x = torch.squeeze(x)
    x = F.log_softmax(x)
    return x
Example 49: The code snippet below is referenced from open-source project pytorch_resnet by taokong.
def forward(self, x):
    x = self.conv_1_3x3(x)
    x = F.relu(self.bn_1(x), inplace=True)
    x = self.stage_1(x)
    x = self.stage_2(x)
    x = self.stage_3(x)
    x = self.avgpool(x)
    x = torch.squeeze(x)
    x = F.log_softmax(x)
    return x
Example 50: The code snippet below is referenced from open-source project pytorch_resnet by taokong.
def forward(self, x):
    x = self.conv_1_3x3.forward(x)
    x = F.relu(self.bn_1.forward(x), inplace=True)
    x = self.stage_1.forward(x)
    x = self.stage_2.forward(x)
    x = self.stage_3.forward(x)
    x = F.avg_pool2d(x, 8, 1)
    x = torch.squeeze(x)
    x = F.log_softmax(x)
    return x