Advanced PyTorch tensor operations
2022-06-10 11:37:00 【MallocLu】
masked_*
masked_select
# ~ inverts a Boolean mask
# torch.masked_select(x, mask) is equivalent to x[mask]
# Returns a new 1-D tensor which indexes the input tensor according to the boolean mask mask which is a BoolTensor.
x = torch.randn(3, 4)
mask = x.ge(0.5)
x
Out[5]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 0.5413, 1.3651, 2.1112, -0.5613],
[-0.2118, -0.8192, 1.1267, 1.2863]])
mask
Out[6]:
tensor([[False, False, False, False],
[ True, True, True, False],
[False, False, True, True]])
torch.masked_select(x, mask)
Out[7]: tensor([0.5413, 1.3651, 2.1112, 1.1267, 1.2863])
torch.masked_select(x, ~mask)
Out[16]: tensor([-0.3873, -0.4278, 0.1722, -0.9274, -0.5613, -0.2118, -0.8192])
x[mask]
Out[8]: tensor([0.5413, 1.3651, 2.1112, 1.1267, 1.2863])
x[~mask]
Out[15]: tensor([-0.3873, -0.4278, 0.1722, -0.9274, -0.5613, -0.2118, -0.8192])
masked_fill
# Fills elements of self tensor with value where mask is True.
x
Out[18]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 0.5413, 1.3651, 2.1112, -0.5613],
[-0.2118, -0.8192, 1.1267, 1.2863]])
mask
Out[19]:
tensor([[False, False, False, False],
[ True, True, True, False],
[False, False, True, True]])
x.masked_fill(mask, 0)
Out[20]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 0.0000, 0.0000, 0.0000, -0.5613],
[-0.2118, -0.8192, 0.0000, 0.0000]])
torch.masked_fill(x, mask, 0)
Out[21]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 0.0000, 0.0000, 0.0000, -0.5613],
[-0.2118, -0.8192, 0.0000, 0.0000]])
masked_scatter
# Copies elements from source into self tensor at positions where the mask is True
x
Out[25]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 0.5413, 1.3651, 2.1112, -0.5613],
[-0.2118, -0.8192, 1.1267, 1.2863]])
mask
Out[26]:
tensor([[False, False, False, False],
[ True, True, True, False],
[False, False, True, True]])
x.masked_scatter(mask, torch.ones_like(x))
Out[27]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 1.0000, 1.0000, 1.0000, -0.5613],
[-0.2118, -0.8192, 1.0000, 1.0000]])
torch.masked_scatter(x, mask, torch.ones_like(x))
Out[28]:
tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[ 1.0000, 1.0000, 1.0000, -0.5613],
[-0.2118, -0.8192, 1.0000, 1.0000]])
Important: gradient propagation
# None of the following three indexing styles break gradient propagation
# out = out[mask]
# out = torch.masked_select(out, mask)
# out = out[0:2, 0:2]
import torch
from torch import nn
import torch.optim as optim
from torch.nn import functional as F
mask = torch.tensor([[True, True, False, False],
[True, True, False, False],
[False, False, False, False]])
x = torch.tensor([[-0.3873, -0.4278, 0.1722, -0.9274],
[0.5413, 1.3651, 2.1112, -0.5613],
[-0.2118, -0.8192, 1.1267, 1.2863]])
# [1, 1, 3, 4] -> [1, 1, 3, 4]
net = nn.Conv2d(1, 1, 3, 1, 1)
optimizer = optim.SGD(net.parameters(), lr=0.002, momentum=0.99)
for i in range(10):
    out = net(x.view(1, 1, 3, 4))  # [1, 1, 3, 4]
    out = torch.squeeze(out)  # [3, 4]
    # out = out[mask] and out = torch.masked_select(out, mask) are equivalent alternatives to:
    out = out[0:2, 0:2]
    loss = F.mse_loss(out, torch.zeros(out.shape))  # scalar
    print(loss)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
# tensor(0.2077, grad_fn=<MseLossBackward>)
# tensor(0.2038, grad_fn=<MseLossBackward>)
# tensor(0.1963, grad_fn=<MseLossBackward>)
# tensor(0.1856, grad_fn=<MseLossBackward>)
# tensor(0.1720, grad_fn=<MseLossBackward>)
# tensor(0.1561, grad_fn=<MseLossBackward>)
# tensor(0.1387, grad_fn=<MseLossBackward>)
# tensor(0.1205, grad_fn=<MseLossBackward>)
# tensor(0.1021, grad_fn=<MseLossBackward>)
# tensor(0.0843, grad_fn=<MseLossBackward>)
gather & scatter
# gather
out[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
out[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
out[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
# scatter
self[index[i][j][k]][j][k] = src[i][j][k] # if dim == 0
self[i][index[i][j][k]][k] = src[i][j][k] # if dim == 1
self[i][j][index[i][j][k]] = src[i][j][k] # if dim == 2
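To make the index formulas concrete, here is a minimal 2-D sketch (values and indices are made up for illustration; scatter_ writes in place):
import torch

src = torch.tensor([[1, 2, 3]])
idx = torch.tensor([[1, 2, 0]])
# gather along dim=1 reads src at the indices: out[i][j] = src[i][idx[i][j]]
torch.gather(src, 1, idx)
# tensor([[2, 3, 1]])
# scatter along dim=1 writes src to the indices: self[i][idx[i][j]] = src[i][j]
torch.zeros(1, 3, dtype=src.dtype).scatter_(1, idx, src)
# tensor([[3, 1, 2]])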
torch.index_select
>>> x = torch.randn(3, 4)
>>> x
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-0.4664, 0.2647, -0.1228, -1.1068],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> indices = torch.tensor([0, 2])
>>> torch.index_select(x, 0, indices)
tensor([[ 0.1427, 0.0231, -0.5414, -1.0009],
[-1.1734, -0.6571, 0.7230, -0.6004]])
>>> torch.index_select(x, 1, indices)
tensor([[ 0.1427, -0.5414],
[-0.4664, -0.1228],
[-1.1734, 0.7230]])
torch.take
# Index a multi-dimensional tensor with flat (1-D) positions
>>> src = torch.tensor([[4, 3, 5],
... [6, 7, 8]])
>>> torch.take(src, torch.tensor([0, 2, 5]))
tensor([ 4, 5, 8])
torch.where
By negating the condition, torch.where can be rewritten with torch.masked_scatter, with the caveat sketched below.
# True positions take the first tensor, False positions take the second
>>> x = torch.randn(3, 2)
>>> y = torch.ones(3, 2)
>>> x
tensor([[-0.4620, 0.3139],
[ 0.3898, -0.7197],
[ 0.0478, -0.1657]])
>>> torch.where(x > 0, x, y) # equivalent to torch.masked_scatter(x, x<=0, y) here, since y is all ones
tensor([[ 1.0000, 0.3139],
[ 0.3898, 1.0000],
[ 0.0478, 1.0000]])
>>> x = torch.randn(2, 2, dtype=torch.double)
>>> x
tensor([[ 1.0779, 0.0383],
[-0.8785, -1.1089]], dtype=torch.float64)
>>> torch.where(x > 0, x, 0.)
tensor([[1.0779, 0.0383],
[0.0000, 0.0000]], dtype=torch.float64)
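A small sketch (made-up values) of why the rewrite above needs a constant y: torch.where picks y at the same position, while masked_scatter consumes the source's elements sequentially, one per True in the mask.
x = torch.tensor([[1., -2.], [-3., 4.]])
y = torch.tensor([[10., 20.], [30., 40.]])
torch.where(x > 0, x, y)            # positional: tensor([[ 1., 20.], [30.,  4.]])
torch.masked_scatter(x, x <= 0, y)  # sequential: tensor([[ 1., 10.], [20.,  4.]])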
cat & stack
a1 = torch.rand(4,3,32,32)
a2 = torch.rand(5,3,32,32)
# cat
t = torch.cat([a1, a2, a2], dim=0)
t.shape
Out[76]: torch.Size([14, 3, 32, 32])
# stack
torch.stack([a1, a1], dim=0).shape
Out[79]: torch.Size([2, 4, 3, 32, 32])
torch.stack([a1, a1], dim=1).shape
Out[80]: torch.Size([4, 2, 3, 32, 32])
torch.stack([a1, a1], dim=3).shape
Out[81]: torch.Size([4, 3, 32, 2, 32])
chunk & split & unbind
a1 = torch.rand(4,3,32,32)
a2 = torch.rand(5,3,32,32)
# split: specify the size of each chunk
t1, t2 = a1.split(2, dim=0)
t1.shape, t2.shape
Out[86]: (torch.Size([2, 3, 32, 32]), torch.Size([2, 3, 32, 32]))
t1, t2 = a1.split([1,3], dim=0)
t1.shape, t2.shape
Out[88]: (torch.Size([1, 3, 32, 32]), torch.Size([3, 3, 32, 32]))
# chunk: specify the number of chunks
t1, t2 = a1.chunk(2, dim=0)
t1.shape, t2.shape
Out[90]: (torch.Size([2, 3, 32, 32]), torch.Size([2, 3, 32, 32]))
t1, t2, t3, t4 = a1.chunk(4, dim=0)
t1.shape, t2.shape, t3.shape, t4.shape
Out[93]:
(torch.Size([1, 3, 32, 32]),
torch.Size([1, 3, 32, 32]),
torch.Size([1, 3, 32, 32]),
torch.Size([1, 3, 32, 32]))
# torch.unbind(input, dim=0) → seq Removes a tensor dimension.
# a1.unbind(dim=i) is like a1.chunk(a1.shape[i], dim=i) with dim i squeezed away
t1, t2, t3, t4 = a1.unbind(dim=0)
t1.shape, t2.shape, t3.shape, t4.shape
Out[61]:
(torch.Size([3, 32, 32]),
torch.Size([3, 32, 32]),
torch.Size([3, 32, 32]),
torch.Size([3, 32, 32]))
squeeze & unsqueeze
# torch.squeeze(input, dim=None, *, out=None) lowers the rank by removing size-1 dims (at the given position, or all of them)
# torch.unsqueeze(input, dim) raises the rank by inserting a size-1 dim at the given position
t = torch.randn(5, 1, 4, 1, 10)
t.squeeze().shape
Out[50]: torch.Size([5, 4, 10])
t.squeeze(dim=3).shape
Out[51]: torch.Size([5, 1, 4, 10])
t.unsqueeze(dim=0).shape
Out[52]: torch.Size([1, 5, 1, 4, 1, 10])
reshape & view
Me: in practice, just use reshape everywhere.
- When the tensor is contiguous, reshape() behaves like view() and shares memory with the original tensor
- When the tensor is not contiguous, reshape() behaves like contiguous() + view(): it produces a tensor with a new storage that does not share memory with the original
A multi-dimensional tensor is backed by one contiguous 1-D array in memory; elements are located through the indexing metadata (strides).
is_contiguous: whether the storage order of the underlying 1-D array (inspect with storage()) matches the tensor's row-major flattened order (inspect with flatten())
view(): on a contiguous tensor, changes the shape by editing only the metadata; the storage is untouched;
on a non-contiguous tensor, raises an error;
contiguous(): builds a new contiguous tensor from a non-contiguous one (both storage and metadata change)
reshape(): on a contiguous tensor, same as view(): only the metadata changes, the storage is untouched;
on a non-contiguous tensor, builds a new tensor (both storage and metadata change)
# A freshly created tensor a is contiguous
a = torch.arange(12).reshape(3,4)
a.is_contiguous()
Out[19]: True
a.flatten()
Out[22]: tensor([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
a.storage()
Out[25]:
0
1
2
3
4
5
6
7
8
9
10
11
[torch.LongStorage of size 12]
a.storage().data_ptr()
Out[27]: 2214873611520
# After transposing, b still uses a's storage, but it is no longer contiguous
b = a.transpose(0, 1)
b.is_contiguous()
Out[21]: False
b.flatten()
Out[23]: tensor([ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11])
b.storage()
Out[26]:
0
1
2
3
4
5
6
7
8
9
10
11
[torch.LongStorage of size 12]
b.storage().data_ptr()
Out[28]: 2214873611520
# view on the contiguous tensor a
c = a.view(-1)
c.is_contiguous()
Out[30]: True
c.storage().data_ptr()
Out[33]: 2214873611520
# reshape on the contiguous tensor a
d = a.reshape(-1)
d.is_contiguous()
Out[32]: True
d.storage().data_ptr()
Out[34]: 2214873611520
# view on the non-contiguous tensor b raises an error
e = b.view(-1)
Traceback (most recent call last):
File "E:\anaconda\envs\py37torch\lib\site-packages\IPython\core\interactiveshell.py", line 3457, in run_code
exec(code_obj, self.user_global_ns, self.user_ns)
File "<ipython-input-36-858eec869bd0>", line 1, in <module>
e = b.view(-1)
RuntimeError: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Use .reshape(...) instead.
# reshape on the non-contiguous tensor b builds a new tensor (the storage changes)
e = b.reshape(-1)
e.storage().data_ptr()
Out[38]: 2214875150592
# make the non-contiguous tensor b contiguous first (a new tensor with new storage), then view
f = b.contiguous().view(-1)
f.storage().data_ptr()
Out[40]: 2214875145472
expand & repeat
expand: broadcasting; the arguments are the target shape
repeat: copies memory; the arguments are repeat counts per dimension
# -1 leaves that dimension unchanged
b = torch.randn(1, 32, 1, 1)
b.expand(4, 32, 14, 14).shape
Out[42]: torch.Size([4, 32, 14, 14])
b.expand(-1, 32, -1, -1).shape
Out[43]: torch.Size([1, 32, 1, 1])
# Note: passing an extra leading dimension replicates the whole tensor, as in example 2
# Example 1
b.repeat(4, 32, 1, 1).shape
Out[44]: torch.Size([4, 1024, 1, 1])
# Example 2: an extra leading 5 replicates the whole tensor five times
b.repeat(5, 4, 32, 1, 1).shape
Out[46]: torch.Size([5, 4, 1024, 1, 1])
t & transpose & permute
a = torch.randn(2, 3)
b = torch.randn(4, 5, 6)
# t() transposes 1-D/2-D tensors
# transposing a 1-D tensor returns it unchanged
a.t()
Out[47]:
tensor([[-0.5892, 0.5083],
[-1.5597, 1.2658],
[ 0.8393, 1.2811]])
# swap two dimensions
a.transpose(0, 1).shape
Out[48]: torch.Size([3, 2])
# reorder multiple dimensions
b.permute(2,0,1).shape
Out[49]: torch.Size([6, 4, 5])
norm
norm computes a norm; normalize performs normalization
# norms
a = torch.full([8], 1).float()
b = a.view(2, 4)
c = a.view(2, 2, 2)
# L1 norm: sum of absolute values
a.norm(1), b.norm(1), c.norm(1)
Out[13]: (tensor(8.), tensor(8.), tensor(8.))
# L2 norm: square root of the sum of squares
a.norm(2), b.norm(2), c.norm(2)
Out[14]: (tensor(2.8284), tensor(2.8284), tensor(2.8284))
b.norm(1, dim=1)
Out[15]: tensor([4., 4.])
b.norm(2, dim=1)
Out[16]: tensor([2., 2.])
normalize
$v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}$
# Performs L_p normalization of inputs over specified dimension.
# torch.nn.functional.normalize(input, p=2.0, dim=1, eps=1e-12, out=None)
# defaults to the L2 norm
t = torch.randn(2, 2, 3)
t
Out[65]:
tensor([[[-1.6034, 2.3001, 0.8102],
[-0.3921, 0.1690, 1.9939]],
[[ 0.9074, -0.6176, -0.1238],
[ 0.0509, 0.1963, -0.3056]]])
F.normalize(t, dim=2)
Out[67]:
tensor([[[-0.5494, 0.7881, 0.2776],
[-0.1923, 0.0829, 0.9778]],
[[ 0.8215, -0.5591, -0.1121],
[ 0.1387, 0.5354, -0.8332]]])
max min mean prod
# min, max, mean, prod (product of all elements)
a
Out[25]:
tensor([[ 0.2903, 0.3979, -0.1571, 2.8208, 0.3580, 0.3855],
[-1.3406, 2.1643, -0.8738, -0.4514, 0.5275, -1.8139],
[ 0.8159, 0.2201, 0.7153, 3.1825, 1.5754, -1.1163],
[ 1.0856, 0.1158, -0.3165, -0.6676, 2.2467, -0.3533]])
a.max(dim=1)
Out[26]:
torch.return_types.max(
values=tensor([2.8208, 2.1643, 3.1825, 2.2467]),
indices=tensor([3, 1, 3, 4]))
a.max(dim=1).values
Out[27]: tensor([2.8208, 2.1643, 3.1825, 2.2467])
a.max(dim=1, keepdim=True).values
Out[28]:
tensor([[2.8208],
[2.1643],
[3.1825],
[2.2467]])
argmax argmin
# argmax argmin
a
Out[25]:
tensor([[ 0.2903, 0.3979, -0.1571, 2.8208, 0.3580, 0.3855],
[-1.3406, 2.1643, -0.8738, -0.4514, 0.5275, -1.8139],
[ 0.8159, 0.2201, 0.7153, 3.1825, 1.5754, -1.1163],
[ 1.0856, 0.1158, -0.3165, -0.6676, 2.2467, -0.3533]])
a.argmax(dim=1)
Out[29]: tensor([3, 1, 3, 4])
a.argmax(dim=1, keepdim=True)
Out[30]:
tensor([[3],
[1],
[3],
[4]])
topk kthvalue
# topk
a
Out[25]:
tensor([[ 0.2903, 0.3979, -0.1571, 2.8208, 0.3580, 0.3855],
[-1.3406, 2.1643, -0.8738, -0.4514, 0.5275, -1.8139],
[ 0.8159, 0.2201, 0.7153, 3.1825, 1.5754, -1.1163],
[ 1.0856, 0.1158, -0.3165, -0.6676, 2.2467, -0.3533]])
a.topk(3, dim=1)
Out[31]:
torch.return_types.topk(
values=tensor([[ 2.8208, 0.3979, 0.3855],
[ 2.1643, 0.5275, -0.4514],
[ 3.1825, 1.5754, 0.8159],
[ 2.2467, 1.0856, 0.1158]]),
indices=tensor([[3, 1, 5],
[1, 4, 3],
[3, 4, 0],
[4, 0, 1]]))
# kthvalue returns the k-th smallest value along the dim (here the 3rd smallest)
a.kthvalue(3, dim=1)
Out[32]:
torch.return_types.kthvalue(
values=tensor([ 0.3580, -0.8738, 0.7153, -0.3165]),
indices=tensor([4, 2, 2, 2]))
eq vs. equal
# > >= < <= != ==
a > 0
Out[35]:
tensor([[ True, True, False, True, True, True],
[False, True, False, False, True, False],
[ True, True, True, True, True, False],
[ True, True, False, False, True, False]])
# torch.eq(a, b) compares element-wise; torch.equal(a, b) returns one bool for the whole tensor
b = a
torch.eq(a, b)
Out[37]:
tensor([[True, True, True, True, True, True],
[True, True, True, True, True, True],
[True, True, True, True, True, True],
[True, True, True, True, True, True]])
torch.equal(a, b)
Out[38]: True
Math operations
element-wise (a short sketch follows the list):
+ add
- sub
* mul
/ div
** pow
sqrt
rsqrt
exp
log (base e)
floor ceil round trunc frac
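A quick sketch exercising the operators above (values are arbitrary):
a = torch.tensor([1.5, -2.3, 4.0])
b = torch.tensor([2.0, 2.0, 2.0])
a + b, a - b, a * b, a / b  # same as a.add(b), a.sub(b), a.mul(b), a.div(b)
a ** 2                      # same as a.pow(2)
b.sqrt(), b.rsqrt()         # square root and its reciprocal 1/sqrt
a.exp().log()               # log is base e, so this recovers a
a.floor(), a.ceil(), a.round(), a.trunc(), a.frac()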
Matrix operations
# mm (2-D only), matmul (N-D), @ (same as matmul)
# For N-D inputs, matmul is a batched multiply over the last two dims (see the sketch after the vector examples)
# A 1-D vector on the left of @ (vector @ matrix) is treated as a row vector
(torch.ones(3)@torch.ones(3, 4)).shape
Out[61]: torch.Size([4])
# A 1-D vector on the right of @ (matrix @ vector) is treated as a column vector
(torch.ones(3, 4)@torch.ones(4)).shape
Out[62]: torch.Size([3])
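A minimal sketch of the batched case noted above (shapes are made up):
x = torch.randn(10, 3, 4)
y = torch.randn(10, 4, 5)
torch.matmul(x, y).shape  # torch.Size([10, 3, 5]); same as (x @ y).shape
# torch.mm(x, y)          # would raise an error: mm accepts only 2-D matrices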