Changes from all commits
4 changes: 2 additions & 2 deletions 2.ASimpleNeuralNetwork/fizbuz.py
@@ -77,8 +77,8 @@ def forward(self, batch):
for i in range(len(teX)):
num = decoder(teX[i])
print(
-                'Number: {} -- Actual: {} -- Prediction: {}'.format(
-                    num, check_fizbuz(num), outli[hyp[i].max(0)[1].item()]))
+                f'Number: {num} -- Actual: {check_fizbuz(num)} -- Prediction: {outli[hyp[i].max(0)[1].item()]}'
+            )
Comment on lines -80 to +81
Lines 80-81 refactored with the following changes: replace the str.format call with an f-string.

print('Test loss: ', output.item() / len(x))
accuracy = hyp.max(1)[1] == y
print('accuracy: ', accuracy.sum().item() / len(accuracy))
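
A quick equivalence check for the refactor above — an illustration added here, not part of the diff; the values are made up. Both formatting styles render identically:

num, actual, pred = 7, 'Fiz', 'Fiz'   # made-up values for the check
old = 'Number: {} -- Actual: {} -- Prediction: {}'.format(num, actual, pred)
new = f'Number: {num} -- Actual: {actual} -- Prediction: {pred}'
assert old == new  # 'Number: 7 -- Actual: Fiz -- Prediction: Fiz'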
6 changes: 3 additions & 3 deletions 2.ASimpleNeuralNetwork/numpy_like_fizbuz.py
@@ -32,7 +32,7 @@
b1 = torch.zeros(1, hidden_size, requires_grad=True, device=device, dtype=dtype)
b2 = torch.zeros(1, output_size, requires_grad=True, device=device, dtype=dtype)

- no_of_batches = int(len(trX) / batches)
+ no_of_batches = len(trX) // batches
Lines 35-110 refactored with the following changes: use floor division instead of int(x / y), and replace str.format calls with f-strings.

for epoch in range(epochs):
for batch in range(no_of_batches):
start = batch * batches
@@ -106,8 +106,8 @@
for i in range(len(teX)):
num = decoder(teX[i])
print(
-                'Number: {} -- Actual: {} -- Prediction: {}'.format(
-                    num, check_fizbuz(num), outli[hyp[i].max(0)[1].item()]))
+                f'Number: {num} -- Actual: {check_fizbuz(num)} -- Prediction: {outli[hyp[i].max(0)[1].item()]}'
+            )
print('Test loss: ', output.item() / len(x))
accuracy = hyp.max(1)[1] == y.max(1)[1]
print('accuracy: ', accuracy.sum().item() / len(accuracy))
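
A note on the floor-division change above (illustration only, not from the diff): for the non-negative sizes used here, int(a / b) and a // b agree, but // stays in integer arithmetic and avoids the float round-trip. The two differ for negative operands (int() truncates toward zero, // floors), which cannot happen with dataset lengths:

trX_len, batches = 900, 64            # made-up sizes
assert int(trX_len / batches) == trX_len // batches == 14
assert int(-7 / 2) == -3 and -7 // 2 == -4   # the negative-operand caveat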
4 changes: 2 additions & 2 deletions 3.DLWorkFlow/ModelImplementation/bottleneck_support.py
@@ -89,8 +89,8 @@ def forward(self, batch):
net = FizBuzNet(input_size, 4)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=lr)
- x_ = x[0:10]
- y_ = y[0:10]
+ x_ = x[:10]
+ y_ = y[:10]
Comment on lines -92 to +93
Lines 92-93 refactored with the following changes: remove the redundant 0 from the slice start.

hyp = net(x_)
loss = loss_fn(hyp, y_)
loss.backward()
4 changes: 2 additions & 2 deletions 3.DLWorkFlow/ModelImplementation/profile_support.py
@@ -91,8 +91,8 @@ def forward(self, batch):
net = FizBuzNet(input_size, 4)
loss_fn = nn.MSELoss()
optimizer = optim.Adam(net.parameters(), lr=lr)
- x_ = x[0:10]
- y_ = y[0:10]
+ x_ = x[:10]
+ y_ = y[:10]
Comment on lines -94 to +95
Lines 94-95 refactored with the following changes: remove the redundant 0 from the slice start.


with torch.autograd.profiler.profile() as prof:
hyp = net(x_)
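
For context (a sketch, not part of the diff): a profile block like the one above is typically followed by printing the aggregated table, along these lines, assuming the legacy torch.autograd.profiler API this file uses:

with torch.autograd.profiler.profile() as prof:
    hyp = net(x_)                     # the forward pass being measured
print(prof.key_averages().table(sort_by='cpu_time_total'))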
4 changes: 2 additions & 2 deletions 4.ComputerVision/SemSeg/dataset.py
@@ -22,12 +22,12 @@ def __init__(self, split, path):
try:
input_files.remove('labels')
except ValueError:
-             raise FileNotFoundError("Couldn't find 'labels' folder in {}".format(path))
+             raise FileNotFoundError(f"Couldn't find 'labels' folder in {path}")
self.files = []
for file in input_files:
name, ext = os.path.splitext(file)
input_file = os.path.join(inputdir_path, file)
-         label_file = os.path.join(labledir_path, '{}_L{}'.format(name, ext))
+         label_file = os.path.join(labledir_path, f'{name}_L{ext}')
Comment on lines -25 to +30
Function CamvidDataSet.__init__ refactored with the following changes: replace str.format calls with f-strings.

self.files.append({'input': input_file, 'label': label_file})
mean = [104.00699, 116.66877, 122.67892] # found from meetshah1995/pytorch-semseg
std = [255, 255, 255]
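
An illustration (not from the diff) of what the f-string builds: CamVid stores each label image next to its input with an _L suffix before the extension, so the filename assembly is:

import os

name, ext = os.path.splitext('0001TP_006690.png')   # made-up CamVid filename
assert f'{name}_L{ext}' == '0001TP_006690_L.png'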
4 changes: 2 additions & 2 deletions 4.ComputerVision/SemSeg/segmentation.py
@@ -35,11 +35,11 @@
def create_image(out):
""" Creating image from the outbatch """
img = out[0].max(0)[1].data.cpu().numpy()
-     misc.imsave('{}.png'.format(time.time()), img)
+     misc.imsave(f'{time.time()}.png', img)
Function create_image refactored with the following changes: replace the str.format call with an f-string.



def save_model(model):
-     torch.save(model.state_dict(), '{}.pth'.format(time.time()))
+     torch.save(model.state_dict(), f'{time.time()}.pth')
Function save_model refactored with the following changes: replace the str.format call with an f-string.



for epoch in range(epochs):
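
A sketch of the save/load round trip implied by save_model above — illustration only, with a stand-in module since the real segmentation net is defined elsewhere. The float timestamp keeps successive checkpoints from overwriting one another:

import time
import torch
from torch import nn

model = nn.Linear(4, 2)               # stand-in for the segmentation net
path = f'{time.time()}.pth'           # e.g. '1717171717.123456.pth'
torch.save(model.state_dict(), path)
model.load_state_dict(torch.load(path))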
3 changes: 1 addition & 2 deletions 5.SequentialDataProcessing/AdvancedRNN/model.py
@@ -65,5 +65,4 @@ def forward(self, batch):
hypo_embed = self.embed(batch.hypothesis)
premise = self.encoder(prem_embed)
hypothesis = self.encoder(hypo_embed)
-     scores = self.classifier((premise, hypothesis))
-     return scores
+     return self.classifier((premise, hypothesis))
Function RNNClassifier.forward refactored with the following changes: inline the variable that was immediately returned.

6 changes: 3 additions & 3 deletions 5.SequentialDataProcessing/AdvancedRNN/train.py
@@ -63,10 +63,10 @@
best_dev_acc = -1
train_iter.repeat = False

- for epoch in range(epochs):
+ for _ in range(epochs):
train_iter.init_epoch()
n_correct, n_total = 0, 0
-     for batch_idx, batch in enumerate(train_iter):
+     for batch in train_iter:
Comment on lines -66 to +69
Lines 66-86 refactored with the following changes: rename unused loop variables to _ and drop the unneeded enumerate() calls.

model.train()
opt.zero_grad()
iterations += 1
@@ -83,7 +83,7 @@
model.eval()
dev_iter.init_epoch()
n_dev_correct, dev_loss = 0, 0
-     for dev_batch_idx, dev_batch in enumerate(dev_iter):
+     for dev_batch in dev_iter:
answer = model(dev_batch)
n_dev_correct += (torch.max(answer, 1)
[1].view(dev_batch.label.size()) == dev_batch.label).sum()
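
An illustration (not from the diff) of how the n_dev_correct line counts hits: torch.max(answer, 1)[1] is the argmax over classes, and the equality with the label tensor is summed:

import torch

answer = torch.tensor([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # made-up logits
label = torch.tensor([1, 0, 0])
predicted = torch.max(answer, 1)[1]   # -> tensor([1, 0, 1])
assert (predicted.view(label.size()) == label).sum().item() == 2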
12 changes: 4 additions & 8 deletions 5.SequentialDataProcessing/RecursiveNet/model.py
@@ -15,9 +15,7 @@ def bundle(lstm_iter):
if lstm_iter is None:
return None
lstm_iter = tuple(lstm_iter)
-     if lstm_iter[0] is None:
-         return None
-     return torch.cat(lstm_iter, 0).chunk(2, 1)
+     return None if lstm_iter[0] is None else torch.cat(lstm_iter, 0).chunk(2, 1)
Function bundle refactored with the following changes: collapse the if/else returns into a single conditional expression.
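
The pattern in miniature (illustration only, with a toy stand-in for bundle): a conditional expression returns exactly what the guarded if/else returned, None case included:

def guarded_old(pair):                # toy stand-in, not the real bundle
    if pair[0] is None:
        return None
    return sum(pair)

def guarded_new(pair):
    return None if pair[0] is None else sum(pair)

assert guarded_old((None, 1)) is None and guarded_new((None, 1)) is None
assert guarded_old((2, 3)) == guarded_new((2, 3)) == 5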



class Bottle(nn.Module):
@@ -60,8 +58,7 @@ def forward(self, left_in, right_in, tracking=None):
lstm_in += self.right(right[0])
if hasattr(self, 'track'):
lstm_in += self.track(tracking[0])
-         out = unbundle(tree_lstm(left[1], right[1], lstm_in))
-         return out
+         return unbundle(tree_lstm(left[1], right[1], lstm_in))
Comment on lines -63 to +61
Function Reduce.forward refactored with the following changes: inline the variable that was immediately returned.



class Tracker(nn.Module):
@@ -172,7 +169,7 @@ def __init__(self, config):
mlp_in_size = 4 * feat_in_size
mlp = [nn.Linear(mlp_in_size, config.d_mlp), self.relu,
nn.BatchNorm1d(config.d_mlp), self.mlp_dropout]
-     for i in range(config.n_mlp_layers - 1):
+     for _ in range(config.n_mlp_layers - 1):
Comment on lines -175 to +172
Function SNLIClassifier.__init__ refactored with the following changes: rename the unused loop variable to _.

mlp.extend([nn.Linear(config.d_mlp, config.d_mlp), self.relu,
nn.BatchNorm1d(config.d_mlp), self.mlp_dropout])
mlp.append(nn.Linear(config.d_mlp, config.d_out))
@@ -190,5 +187,4 @@ def forward(self, batch):
prem_trans = hypo_trans = None
premise = self.encoder(prem_embed, prem_trans)
hypothesis = self.encoder(hypo_embed, hypo_trans)
-     scores = self.out(self.merger(premise, hypothesis))
-     return scores
+     return self.out(self.merger(premise, hypothesis))
Comment on lines -193 to +190
Function SNLIClassifier.forward refactored with the following changes: inline the variable that was immediately returned.

2 changes: 1 addition & 1 deletion 5.SequentialDataProcessing/RecursiveNet/train.py
@@ -112,7 +112,7 @@ class Config:
model.eval()
dev_iter.init_epoch()
n_dev_correct, dev_loss = 0, 0
-     for dev_batch_idx, dev_batch in enumerate(dev_iter):
+     for dev_batch in dev_iter:
Line 115 refactored with the following changes: drop the unneeded enumerate() call and its unused index.

answer = model(dev_batch)
n_dev_correct += (torch.max(
answer, 1)[1].view(dev_batch.label.size()).data == dev_batch.label.data).sum()
3 changes: 1 addition & 2 deletions 5.SequentialDataProcessing/SimpleRNN/model.py
@@ -79,5 +79,4 @@ def forward(self, batch):
hypo_embed = self.embed(batch.hypothesis)
premise = self.encoder(prem_embed)
hypothesis = self.encoder(hypo_embed)
-     scores = self.classifier((premise, hypothesis))
-     return scores
+     return self.classifier((premise, hypothesis))
Function RNNClassifier.forward refactored with the following changes: inline the variable that was immediately returned.

4 changes: 2 additions & 2 deletions 5.SequentialDataProcessing/SimpleRNN/train.py
@@ -73,7 +73,7 @@ def init_weights(m):
train_iter.repeat = False

model.train()
- for epoch in range(epochs):
+ for _ in range(epochs):
Lines 76-101 refactored with the following changes: rename unused loop variables to _ and drop the unneeded enumerate() calls.

train_iter.init_epoch()
n_correct, n_total = 0, 0
for batch_idx, batch in enumerate(train_iter):
@@ -98,7 +98,7 @@ def init_weights(m):
model.eval()
dev_iter.init_epoch()
n_dev_correct, dev_loss = 0, 0
-     for dev_batch_idx, dev_batch in enumerate(dev_iter):
+     for dev_batch in dev_iter:
answer = model(dev_batch)
n_dev_correct += (torch.max(
answer, 1)[1].view(
2 changes: 1 addition & 1 deletion 6.GenerativeNetworks/AutoRegressive/wavenet.py
@@ -29,7 +29,7 @@ class ResidualStack(torch.nn.Module):
def __init__(self, layer_size, stack_size, res_channels, skip_channels):
super().__init__()
self.res_blocks = torch.nn.ModuleList()
-         for s in range(stack_size):
+         for _ in range(stack_size):
Function ResidualStack.__init__ refactored with the following changes: rename the unused loop variable to _.

for l in range(layer_size):
dilation = 2 ** l
block = ResidualBlock(res_channels, skip_channels, dilation)
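
An illustration (not from the diff) of the dilation schedule this double loop produces: each stack repeats the same exponentially growing dilations, which is what gives WaveNet its large receptive field:

layer_size, stack_size = 5, 2         # made-up sizes
dilations = [2 ** l for _ in range(stack_size) for l in range(layer_size)]
assert dilations == [1, 2, 4, 8, 16, 1, 2, 4, 8, 16]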
13 changes: 4 additions & 9 deletions 6.GenerativeNetworks/AutoRegressive/wavenet_data.py
@@ -28,9 +28,7 @@ def one_hot_encode(data, channels=256):


def one_hot_decode(data, axis=1):
-     decoded = np.argmax(data, axis=axis)
-
-     return decoded
+     return np.argmax(data, axis=axis)
Function one_hot_decode refactored with the following changes: inline the variable that was immediately returned.



def mu_law_encode(audio, quantization_channels=256):
@@ -55,9 +53,7 @@ def mu_law_decode(output, quantization_channels=256):
mu = float(quantization_channels - 1)

expanded = (output / quantization_channels) * 2. - 1
-     waveform = np.sign(expanded) * (np.exp(np.abs(expanded) * np.log(mu + 1)) - 1) / mu
-
-     return waveform
+     return np.sign(expanded) * (np.exp(np.abs(expanded) * np.log(mu + 1)) - 1) / mu
Comment on lines -58 to +56
Function mu_law_decode refactored with the following changes: inline the variable that was immediately returned.
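
A round-trip sketch for the decode above — illustration only; the encode here is the usual mu-law companding formula and is assumed to match this file's mu_law_encode. The decode divides by quantization_channels rather than mu, exactly as in the diff, so the reconstruction error stays small but nonzero:

import numpy as np

def mu_law_encode(audio, quantization_channels=256):   # assumed encode
    mu = float(quantization_channels - 1)
    companded = np.sign(audio) * np.log1p(mu * np.abs(audio)) / np.log1p(mu)
    return ((companded + 1) / 2 * mu + 0.5).astype(np.int64)

def mu_law_decode(output, quantization_channels=256):  # decode as in the diff
    mu = float(quantization_channels - 1)
    expanded = (output / quantization_channels) * 2. - 1
    return np.sign(expanded) * (np.exp(np.abs(expanded) * np.log(mu + 1)) - 1) / mu

x = np.linspace(-1, 1, 11)
assert np.max(np.abs(mu_law_decode(mu_law_encode(x)) - x)) < 0.05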



class Dataset(data.Dataset):
Expand All @@ -69,7 +65,7 @@ def __init__(self, data_dir, sample_rate=16000, in_channels=256, trim=True):
self.trim = trim

self.root_path = data_dir
-         self.filenames = [x for x in sorted(os.listdir(data_dir))]
+         self.filenames = list(sorted(os.listdir(data_dir)))
Comment on lines -72 to +68
Function Dataset.__init__ refactored with the following changes: replace the identity list comprehension with a list() call.


def __getitem__(self, index):
filepath = os.path.join(self.root_path, self.filenames[index])
@@ -116,8 +112,7 @@ def __init__(self, data_dir, receptive_fields,
self.collate_fn = self._collate_fn

def calc_sample_size(self, audio):
-         return self.sample_size if len(audio[0]) >= self.sample_size\
-             else len(audio[0])
+         return min(len(audio[0]), self.sample_size)
Comment on lines -119 to +115
Function DataLoader.calc_sample_size refactored with the following changes: simplify the conditional expression to a min() call.
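
The equivalence in one line (illustration only): returning sample_size when the clip is at least that long, and the clip length otherwise, is exactly min():

sample_size = 16000                   # made-up cutoff
assert min(12000, sample_size) == 12000   # short clip keeps its own length
assert min(20000, sample_size) == 16000   # long clip is capped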


@staticmethod
def _variable(data):
19 changes: 9 additions & 10 deletions 6.GenerativeNetworks/GAN/CycleGAN/mode.py
@@ -26,8 +26,8 @@ def __init__(self, root, transforms_=None, unaligned=False, mode='train'):
self.transform = transforms.Compose(transforms_)
self.unaligned = unaligned

-         self.files_A = sorted(glob.glob(os.path.join(root, '%sA' % mode) + '/*.*'))
-         self.files_B = sorted(glob.glob(os.path.join(root, '%sB' % mode) + '/*.*'))
+         self.files_A = sorted(glob.glob(f"{os.path.join(root, f'{mode}A')}/*.*"))
+         self.files_B = sorted(glob.glob(f"{os.path.join(root, f'{mode}B')}/*.*"))
Comment on lines -29 to +30
Function ImageDataset.__init__ refactored with the following changes: replace %-interpolation and string concatenation with f-strings.


def __getitem__(self, index):
item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
@@ -223,13 +222,12 @@ def push_and_pop(self, data):
if len(self.data) < self.max_size:
self.data.append(element)
to_return.append(element)
+            elif random.uniform(0, 1) > 0.5:
+                i = random.randint(0, self.max_size - 1)
+                to_return.append(self.data[i].clone())
+                self.data[i] = element
-            else:
-                if random.uniform(0, 1) > 0.5:
-                    i = random.randint(0, self.max_size - 1)
-                    to_return.append(self.data[i].clone())
-                    self.data[i] = element
-                else:
-                    to_return.append(element)
+            else:
+                to_return.append(element)
Comment on lines +226 to +231
Function ReplayBuffer.push_and_pop refactored with the following changes: merge the if nested under else into an elif.

return torch.cat(to_return)
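
The control-flow equivalence behind this refactor, in miniature (illustration only, with the buffer's three outcomes as strings): an if nested directly under else is the same as elif:

def branch_old(full, coin):           # toy stand-in for push_and_pop's branches
    if not full:
        return 'append'
    else:
        if coin:
            return 'swap'
        else:
            return 'reuse'

def branch_new(full, coin):
    if not full:
        return 'append'
    elif coin:
        return 'swap'
    else:
        return 'reuse'

for full in (False, True):
    for coin in (False, True):
        assert branch_old(full, coin) == branch_new(full, coin)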


@@ -316,8 +315,8 @@ def weights_init_normal(m):


###### Training ######
- for epoch in range(opt.epoch, opt.n_epochs):
-     for i, batch in enumerate(dataloader):
+ for _ in range(opt.epoch, opt.n_epochs):
+     for batch in dataloader:
Lines 319-320 refactored with the following changes: rename the unused loop variables and drop the unneeded enumerate() call.

# Set model input
real_A = input_A.copy_(batch['A'])
real_B = input_B.copy_(batch['B'])
3 changes: 1 addition & 2 deletions 6.GenerativeNetworks/GAN/CycleGAN/util.py
@@ -26,5 +26,4 @@ def get_args():
"--cuda", type=bool, default=torch.cuda.is_available(),
help='CUDA availability check')
parser.add_argument('--size', type=int, default=256, help='crop to this size')
-     args = parser.parse_args(args=[])
-     return args
+     return parser.parse_args(args=[])
Function get_args refactored with the following changes: inline the variable that was immediately returned.
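
Why parse_args(args=[]) (illustration only): passing an empty list makes argparse ignore sys.argv and return pure defaults, which is what lets this helper run inside notebooks without choking on the kernel's own flags:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--size', type=int, default=256)
args = parser.parse_args(args=[])     # ignores sys.argv, keeps defaults
assert args.size == 256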

7 changes: 3 additions & 4 deletions 6.GenerativeNetworks/GAN/SimpleGAN/model.py
@@ -11,7 +11,7 @@ def mnist_data():
compose = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5,), (0.5,))])
-     out_dir = '{}/dataset'.format(DATA_FOLDER)
+     out_dir = f'{DATA_FOLDER}/dataset'
Function mnist_data refactored with the following changes: replace the str.format call with an f-string.

return datasets.MNIST(root=out_dir, train=True, transform=compose, download=True)


@@ -182,9 +182,8 @@ def train_generator(optimizer, fake_data):
test_noise = noise(num_test_samples)


- for epoch in range(num_epochs):
-     for n_batch, (real_batch, _) in enumerate(data_loader):
-
+ for _ in range(num_epochs):
+     for real_batch, _ in data_loader:
Comment on lines -185 to +186
Lines 185-187 refactored with the following changes: rename the unused loop variables, drop the unneeded enumerate() call, and remove a blank line.

# 1. Train Discriminator
real_data = images_to_vectors(real_batch).to(device)
# Generate fake data
20 changes: 6 additions & 14 deletions 7.ReinforcementLearning/reinforcement_learning.py
@@ -87,16 +87,12 @@ def select_action(state):
steps_done += 1

sample = random.random()
- if sample > eps_threshold:
-
-     # freeze the network and get predictions
-     with torch.no_grad():
-         return policy_net(state).max(1)[1].view(1, 1)
-
- else:
-
+ if sample <= eps_threshold:
      # select random action
      return torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long)
+ # freeze the network and get predictions
+ with torch.no_grad():
+     return policy_net(state).max(1)[1].view(1, 1)
Comment on lines -90 to +95
Function select_action refactored with the following changes: handle the random-action case first and return early, removing the else branch.
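
The guard-clause shape of the refactored epsilon-greedy policy, sketched with the network call stubbed out (illustration only; select_action_shape and its return values are made up):

import random

def select_action_shape(eps_threshold, n_actions=2):
    if random.random() <= eps_threshold:     # explore: early return
        return random.randrange(n_actions)
    return 'greedy'                          # exploit: stub for policy_net(state)

assert select_action_shape(1.0) in (0, 1)    # threshold 1.0: always explores
assert select_action_shape(-1.0) == 'greedy' # negative threshold: never explores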



Transition = namedtuple('Transition',
@@ -179,7 +175,7 @@ def optimize_model():
current_screen = get_screen()
state = current_screen - last_screen

-     for t in count(): # for each timestep in an episode
+     for _ in count():
Comment on lines -182 to +178
Lines 182-195 refactored with the following changes: rename the unused loop variable to _ and collapse the if/else assignment of next_state into a conditional expression.

This removes the following comment (why?):

# for each timestep in an episode

# Select action for the given state and get rewards
action = select_action(state)
_, reward, done, _ = env.step(action.item())
Expand All @@ -188,11 +184,7 @@ def optimize_model():
# Observe new state
last_screen = current_screen
current_screen = get_screen()
-         if not done:
-             next_state = current_screen - last_screen
-         else:
-             next_state = None
-
+         next_state = current_screen - last_screen if not done else None
# Store the transition in memory
memory.push(state, action, next_state, reward)

5 changes: 1 addition & 4 deletions 8.PyTorchInProduction/FizBuzFlask/controller.py
@@ -11,10 +11,7 @@ def get_readable_output(input_num, prediction):
0: 'FizBuz',
1: 'Buz',
2: 'Fiz'}
-     if prediction == 3:
-         return input_num
-     else:
-         return input_output_map[prediction]
+     return input_num if prediction == 3 else input_output_map[prediction]
Function get_readable_output refactored with the following changes: replace the if/else returns with a conditional expression.
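
The same function, runnable in isolation (illustration only, reconstructed from this hunk): prediction 3 means "just the number", anything else indexes the map:

input_output_map = {0: 'FizBuz', 1: 'Buz', 2: 'Fiz'}

def get_readable_output(input_num, prediction):
    return input_num if prediction == 3 else input_output_map[prediction]

assert get_readable_output(15, 0) == 'FizBuz'
assert get_readable_output(7, 3) == 7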



def binary_encoder():