Creating Tensors
import math
import torch

x = torch.empty(3, 4)
# only allocates memory; the values are not initialized
zeros = torch.zeros(2, 3)
# initialized with zeros
ones = torch.ones(2, 3)
# initialized with ones
torch.manual_seed(1828)
random = torch.rand(2, 3)
# initialized with random values
random2 = torch.rand(2, 3)
# different values from random: the seed fixes the whole sequence, not each individual call
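A minimal sketch of what the seed actually controls (random3 is a name assumed here): re-seeding with the same value replays the same sequence of random tensors.
torch.manual_seed(1828)
random3 = torch.rand(2, 3)
# re-seeding restarts the generator, so random3 repeats the values of random
print(torch.eq(random, random3).all()) # tensor(True)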
Tensor Shapes
The torch.*_like() functions create a tensor with the same shape as an existing tensor
empty_like_x = torch.empty_like(x)
zeros_like_x = torch.zeros_like(x)
...
torch.tensor() lets you define a tensor directly from data
some_constants = torch.tensor([[3.12, 2.8], [1.28, 0.0038]])
some_integers = torch.tensor(((2, 4, 5), [3, 65, 4]))
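torch.tensor() infers the dtype from the data: Python floats default to float32 and Python ints to int64, as this quick check shows.
print(some_constants.dtype) # torch.float32, inferred from the float literals
print(some_integers.dtype) # torch.int64, inferred from the int literals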
Tensor Data Types
a = torch.ones((2, 3), dtype=torch.int16)
b = torch.rand((2, 3), dtype=torch.float64)
c = b.to(torch.int32)
# use the to() method to convert b to another dtype
- When passing other arguments along with the shape, pass the shape as a tuple
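A quick demonstration of the tuple rule, assuming nothing beyond the calls above: both forms are accepted, the tuple just keeps the shape visually grouped next to the other arguments.
a1 = torch.ones(2, 3, dtype=torch.int16) # bare dimensions also work here
a2 = torch.ones((2, 3), dtype=torch.int16) # tuple groups the shape
print(torch.eq(a1, a2).all()) # tensor(True)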
Math & Logic with PyTorch Tensors
ones = torch.zeros(2, 2) + 1
twos = torch.ones(2, 2) * 2
threes = (torch.ones(2, 2) * 7 - 1) / 2
fours = twos ** 2
sqrt2s = twos ** 0.5
- Operations between a tensor and a scalar are applied element-wise
powers2 = twos ** torch.tensor([[1, 2], [3, 4]])
fives = ones + fours
dozens = threes * fours
- Operations between tensors of the same shape are likewise applied element-wise
- Tensors with different shapes raise a run-time error (unless they can be broadcast, below)
Tensor Broadcasting
rand = torch.rand(2, 4)
doubled = rand * (torch.ones(1, 4) * 2)
# This works! Why?
- Conditions on dimension sizes for broadcasting, compared from the last dimension backwards:
- the dimensions are equal,
- or one of the dimensions is 1,
- or the dimension does not exist in one of the tensors
a = torch.ones(4, 3, 2)
b = a * torch.rand( 3, 2) # 3rd & 2nd dims identical to a, dim 1 absent
# broadcast over every layer of a
c = a * torch.rand( 3, 1) # 3rd dim = 1, 2nd dim identical to a
# broadcast over every layer and row of a
d = a * torch.rand( 1, 2) # 3rd dim identical to a, 2nd dim = 1
# broadcast over every layer and column of a
- Output
tensor([[[0.6493, 0.2633],
[0.4762, 0.0548],
[0.2024, 0.5731]],
[[0.6493, 0.2633],
[0.4762, 0.0548],
[0.2024, 0.5731]],
[[0.6493, 0.2633],
[0.4762, 0.0548],
[0.2024, 0.5731]],
[[0.6493, 0.2633],
[0.4762, 0.0548],
[0.2024, 0.5731]]])
tensor([[[0.7191, 0.7191],
[0.4067, 0.4067],
[0.7301, 0.7301]],
[[0.7191, 0.7191],
[0.4067, 0.4067],
[0.7301, 0.7301]],
[[0.7191, 0.7191],
[0.4067, 0.4067],
[0.7301, 0.7301]],
[[0.7191, 0.7191],
[0.4067, 0.4067],
[0.7301, 0.7301]]])
tensor([[[0.6276, 0.7357],
[0.6276, 0.7357],
[0.6276, 0.7357]],
[[0.6276, 0.7357],
[0.6276, 0.7357],
[0.6276, 0.7357]],
[[0.6276, 0.7357],
[0.6276, 0.7357],
[0.6276, 0.7357]],
[[0.6276, 0.7357],
[0.6276, 0.7357],
[0.6276, 0.7357]]])
Runtime errors
a = torch.ones(4, 3, 2)
b = a * torch.rand(4, 3) # dimensions must match last-to-first
c = a * torch.rand( 2, 3) # both 3rd & 2nd dims different
d = a * torch.rand((0, )) # can't broadcast with an empty tensor
More Math with Tensors
# common functions
a = torch.rand(2, 4) * 2 - 1
print('Common functions:')
print(torch.abs(a))
print(torch.ceil(a))
print(torch.floor(a))
print(torch.clamp(a, -0.5, 0.5))
# trigonometric functions and their inverses
angles = torch.tensor([0, math.pi / 4, math.pi / 2, 3 * math.pi / 4])
sines = torch.sin(angles)
inverses = torch.asin(sines)
print('\nSine and arcsine:')
print(angles)
print(sines)
print(inverses)
# bitwise operations
print('\nBitwise XOR:')
b = torch.tensor([1, 5, 11])
c = torch.tensor([2, 7, 10])
print(torch.bitwise_xor(b, c))
# comparisons:
print('\nBroadcasted, element-wise equality comparison:')
d = torch.tensor([[1., 2.], [3., 4.]])
e = torch.ones(1, 2) # many comparison ops support broadcasting!
print(torch.eq(d, e)) # returns a tensor of type bool
# reductions:
print('\nReduction ops:')
print(torch.max(d)) # returns a single-element tensor
print(torch.max(d).item()) # extracts the value from the returned tensor
print(torch.mean(d)) # average
print(torch.std(d)) # standard deviation
print(torch.prod(d)) # product of all numbers
print(torch.unique(torch.tensor([1, 2, 1, 2, 1, 2]))) # filter unique elements
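The reductions above collapse the whole tensor; most of them also accept a dim argument to reduce along a single axis instead. A small sketch reusing d from above:
print(torch.mean(d, dim=0)) # column means: tensor([2., 3.])
print(torch.max(d, dim=1)) # per-row maxima plus their indices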
# vector and linear algebra operations
v1 = torch.tensor([1., 0., 0.]) # x unit vector
v2 = torch.tensor([0., 1., 0.]) # y unit vector
m1 = torch.rand(2, 2) # random matrix
m2 = torch.tensor([[3., 0.], [0., 3.]]) # three times identity matrix
print('\nVectors & Matrices:')
print(torch.linalg.cross(v2, v1)) # negative of z unit vector (v1 x v2 == -v2 x v1)
print(m1)
m3 = torch.linalg.matmul(m1, m2)
print(m3) # 3 times m1
print(torch.linalg.svd(m3)) # singular value decomposition
Altering Tensors in Place
Append an underscore (_) to the method name to perform the operation in place:
a = torch.tensor([0, math.pi / 4, math.pi / 2, 3 * math.pi / 4])
print('a:')
print(a)
print(torch.sin(a)) # this operation creates a new tensor in memory
print(a) # a has not changed
b = torch.tensor([0, math.pi / 4, math.pi / 2, 3 * math.pi / 4])
print('\nb:')
print(b)
print(torch.sin_(b)) # note the underscore
print(b) # b has changed
Alternatively, use the out argument:
a = torch.rand(2, 2)
b = torch.rand(2, 2)
c = torch.zeros(2, 2)
old_id = id(c)
print(c)
d = torch.matmul(a, b, out=c)
print(c) # contents of c have changed
assert c is d # test c & d are same object, not just containing equal values
assert id(c) == old_id # make sure that our new c is the same object as the old one
torch.rand(2, 2, out=c) # works for creation too!
print(c) # c has changed again
assert id(c) == old_id # still the same object!
Copying Tensors
A plain assignment is a shallow copy: both names refer to the same tensor
a = torch.ones(2, 2)
b = a
a[0][1] = 561 # we change a...
print(b) # ...and b is also altered
Use clone() to create a separate copy:
a = torch.ones(2, 2)
b = a.clone()
assert b is not a # different objects in memory...
print(torch.eq(a, b)) # ...but still with the same contents!
a[0][1] = 561 # a changes...
print(b) # ...but b is still all ones
- If autograd is enabled on the source tensor, the clone also has it enabled and inherits the computation history
- detach() → copies without carrying the computation history over
a = torch.rand(2, 2, requires_grad=True) # turn on autograd
c = a.detach().clone() # detached copy: no autograd history
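A quick check of the difference, reusing a and c from the snippet above (b is a new name here): a plain clone() keeps autograd tracking, while the detached clone does not.
b = a.clone() # cloned while autograd was tracking a
print(b.requires_grad) # True: b also has a grad_fn linking it back to a
print(c.requires_grad) # False: detach() stripped the history before the copy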
Moving to GPU
If a GPU is available, tensors can be created on it ('cuda'):
if torch.cuda.is_available():
    gpu_rand = torch.rand(2, 2, device='cuda')
    print(gpu_rand)
else:
    print('Sorry, CPU only.')
Good practice: create a device handle and use it throughout:
if torch.cuda.is_available():
    my_device = torch.device('cuda')
else:
    my_device = torch.device('cpu')
print('Device: {}'.format(my_device))
x = torch.rand(2, 2, device=my_device)
print(x)
As with changing the datatype, the to() method can also move a tensor to a device:
y = torch.rand(2, 2)
y = y.to(my_device)
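One rule to keep in mind: all operands of an operation must live on the same device. A minimal sketch, assuming my_device from above (u, v, w are names introduced here):
u = torch.rand(2, 2) # lives on the CPU
v = torch.rand(2, 2, device=my_device)
# u + v # would raise a RuntimeError if my_device is a GPU
u = u.to(my_device)
w = u + v # fine: both operands now live on my_device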
Manipulating Tensor Shapes
The unsqueeze() method adds a dimension of size 1:
a = torch.rand(3, 226, 226)
b = a.unsqueeze(0) # add a dimension at index 0
# [1, 3, 226, 226]
Conversely, the squeeze() method removes dimensions of size 1:
a = torch.rand(1, 20)
print(a.shape)
# [1, 20]
b = a.squeeze(0)
print(b.shape)
# [20]
c = torch.rand(2, 2)
print(c.shape)
# [2, 2]
d = c.squeeze(0)
print(d.shape)
# [2, 2] Same!
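A common reason to unsqueeze() is to line shapes up for broadcasting, e.g. scaling each channel of an image-like tensor. A sketch with assumed per-channel weights:
img = torch.rand(3, 226, 226) # e.g. a 3-channel image
weights = torch.tensor([0.5, 1.0, 2.0]) # one scale factor per channel
scaled = img * weights.unsqueeze(-1).unsqueeze(-1) # (3,) -> (3, 1, 1) broadcasts over (3, 226, 226)
print(scaled.shape) # [3, 226, 226]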
The reshape() method can flatten a tensor into one dimension:
output3d = torch.rand(6, 20, 20)
print(output3d.shape)
# [6, 20, 20]
input1d = output3d.reshape(6 * 20 * 20)
print(input1d.shape)
# [2400]
# can also call it as a method on the torch module:
print(torch.reshape(output3d, (6 * 20 * 20,)).shape)
# [2400]
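reshape() can also infer one dimension if you pass -1, which avoids hard-coding the product:
print(output3d.reshape(-1).shape) # [2400], size inferred
print(output3d.reshape(6, -1).shape) # [6, 400]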
NumPy Bridge
The from_numpy() and numpy() methods convert a NumPy array to a tensor and a tensor to a NumPy array:
import numpy as np
numpy_array = np.ones((2, 3))
pytorch_tensor = torch.from_numpy(numpy_array)
pytorch_rand = torch.rand(2, 3)
numpy_rand = pytorch_rand.numpy()
- The tensor and the array share the same memory space
- Changing one changes the other
numpy_array[1, 1] = 23
print(pytorch_tensor)
# Changed
pytorch_rand[1, 1] = 17
print(numpy_rand)
# Changed as well
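If sharing is not wanted, make a copy instead: torch.tensor() copies the NumPy data (unlike torch.from_numpy()), and clone() does the same on the tensor side. A sketch (independent is a name assumed here):
independent = torch.tensor(numpy_array) # copies, no memory sharing
numpy_array[0, 0] = 99
print(independent[0, 0]) # unchanged: tensor(1., dtype=torch.float64)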