Sourcery refactored master branch #2
```diff
@@ -32,7 +32,7 @@
 b1 = torch.zeros(1, hidden_size, requires_grad=True, device=device, dtype=dtype)
 b2 = torch.zeros(1, output_size, requires_grad=True, device=device, dtype=dtype)

-no_of_batches = int(len(trX) / batches)
+no_of_batches = len(trX) // batches

 for epoch in range(epochs):
     for batch in range(no_of_batches):
         start = batch * batches
```
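Why the floor-division change is safe: for non-negative operands, `a // b` equals `int(a / b)`, but it stays in integer arithmetic instead of routing through a float. A minimal sketch with illustrative values (not taken from the repository):

```python
trX_len, batches = 1000, 64            # illustrative values
assert int(trX_len / batches) == trX_len // batches == 15

# Beyond 2**53 the float detour starts rounding; floor division does not:
big = 10**17 + 1
print(int(big / 1))   # 100000000000000000 -- the +1 is lost to float rounding
print(big // 1)       # 100000000000000001 -- exact
```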
```diff
@@ -106,8 +106,8 @@
 for i in range(len(teX)):
     num = decoder(teX[i])
     print(
-        'Number: {} -- Actual: {} -- Prediction: {}'.format(
-            num, check_fizbuz(num), outli[hyp[i].max(0)[1].item()]))
+        f'Number: {num} -- Actual: {check_fizbuz(num)} -- Prediction: {outli[hyp[i].max(0)[1].item()]}'
+    )
 print('Test loss: ', output.item() / len(x))
 accuracy = hyp.max(1)[1] == y.max(1)[1]
 print('accuracy: ', accuracy.sum().item() / len(accuracy))
```
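The f-string rewrite is behavior-preserving: both spellings format the same values in the same order. A small check with hypothetical stand-ins for the variables in the diff:

```python
num, actual, pred = 15, 'FizBuz', 'FizBuz'   # stand-ins, not repo values

old = 'Number: {} -- Actual: {} -- Prediction: {}'.format(num, actual, pred)
new = f'Number: {num} -- Actual: {actual} -- Prediction: {pred}'
assert old == new   # identical output; the f-string just inlines the values
```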
```diff
@@ -89,8 +89,8 @@ def forward(self, batch):
 net = FizBuzNet(input_size, 4)
 loss_fn = nn.MSELoss()
 optimizer = optim.Adam(net.parameters(), lr=lr)
-x_ = x[0:10]
-y_ = y[0:10]
+x_ = x[:10]
+y_ = y[:10]

 hyp = net(x_)
 loss = loss_fn(hyp, y_)
 loss.backward()
```
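Dropping the explicit `0` start index changes nothing: slices default to the start of the sequence. Quick illustration:

```python
x = list(range(20))
assert x[0:10] == x[:10]        # omitted start defaults to 0
assert x[10:len(x)] == x[10:]   # likewise, omitted stop defaults to the end
```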
```diff
@@ -91,8 +91,8 @@ def forward(self, batch):
 net = FizBuzNet(input_size, 4)
 loss_fn = nn.MSELoss()
 optimizer = optim.Adam(net.parameters(), lr=lr)
-x_ = x[0:10]
-y_ = y[0:10]
+x_ = x[:10]
+y_ = y[:10]

 with torch.autograd.profiler.profile() as prof:
     hyp = net(x_)
```
```diff
@@ -22,12 +22,12 @@ def __init__(self, split, path):
         try:
             input_files.remove('labels')
         except ValueError:
-            raise FileNotFoundError("Couldn't find 'labels' folder in {}".format(path))
+            raise FileNotFoundError(f"Couldn't find 'labels' folder in {path}")
         self.files = []
         for file in input_files:
             name, ext = os.path.splitext(file)
             input_file = os.path.join(inputdir_path, file)
-            label_file = os.path.join(labledir_path, '{}_L{}'.format(name, ext))
+            label_file = os.path.join(labledir_path, f'{name}_L{ext}')
             self.files.append({'input': input_file, 'label': label_file})
         mean = [104.00699, 116.66877, 122.67892]  # found from meetshah1995/pytorch-semseg
         std = [255, 255, 255]
```
```diff
@@ -35,11 +35,11 @@
 def create_image(out):
     """ Creating image from the outbatch """
     img = out[0].max(0)[1].data.cpu().numpy()
-    misc.imsave('{}.png'.format(time.time()), img)
+    misc.imsave(f'{time.time()}.png', img)


 def save_model(model):
-    torch.save(model.state_dict(), '{}.pth'.format(time.time()))
+    torch.save(model.state_dict(), f'{time.time()}.pth')


 for epoch in range(epochs):
```
```diff
@@ -65,5 +65,4 @@ def forward(self, batch):
         hypo_embed = self.embed(batch.hypothesis)
         premise = self.encoder(prem_embed)
         hypothesis = self.encoder(hypo_embed)
-        scores = self.classifier((premise, hypothesis))
-        return scores
+        return self.classifier((premise, hypothesis))
```
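This is the inline-immediately-returned-variable pattern: a temporary that is returned on the next line adds a name without adding information. A toy sketch (the `classifier` here is a hypothetical stand-in, not the repo's module):

```python
def forward_old(premise, hypothesis, classifier):
    scores = classifier((premise, hypothesis))   # named, then returned
    return scores

def forward_new(premise, hypothesis, classifier):
    return classifier((premise, hypothesis))     # same value, one line

identity = lambda pair: pair
assert forward_old(1, 2, identity) == forward_new(1, 2, identity)
```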
```diff
@@ -63,10 +63,10 @@
 best_dev_acc = -1
 train_iter.repeat = False

-for epoch in range(epochs):
+for _ in range(epochs):
     train_iter.init_epoch()
     n_correct, n_total = 0, 0
-    for batch_idx, batch in enumerate(train_iter):
+    for batch in train_iter:
         model.train()
         opt.zero_grad()
         iterations += 1
@@ -83,7 +83,7 @@
             model.eval()
             dev_iter.init_epoch()
             n_dev_correct, dev_loss = 0, 0
-            for dev_batch_idx, dev_batch in enumerate(dev_iter):
+            for dev_batch in dev_iter:
                 answer = model(dev_batch)
                 n_dev_correct += (torch.max(answer, 1)
                                   [1].view(dev_batch.label.size()) == dev_batch.label).sum()
```
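Two related cleanups in this file: `enumerate()` is dropped where the index was never read, and an unused loop variable becomes `_` to signal that only the repetition count matters. Sketch with illustrative data:

```python
batches = ['b0', 'b1', 'b2']   # illustrative stand-ins

# enumerate() only earns its keep when the index is used:
for batch in batches:          # was: for batch_idx, batch in enumerate(batches)
    pass

# `_` marks a counter that the body never touches:
for _ in range(3):             # was: for epoch in range(3)
    pass
```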
```diff
@@ -15,9 +15,7 @@ def bundle(lstm_iter):
     if lstm_iter is None:
         return None
     lstm_iter = tuple(lstm_iter)
-    if lstm_iter[0] is None:
-        return None
-    return torch.cat(lstm_iter, 0).chunk(2, 1)
+    return None if lstm_iter[0] is None else torch.cat(lstm_iter, 0).chunk(2, 1)


 class Bottle(nn.Module):
@@ -60,8 +58,7 @@ def forward(self, left_in, right_in, tracking=None):
         lstm_in += self.right(right[0])
         if hasattr(self, 'track'):
             lstm_in += self.track(tracking[0])
-        out = unbundle(tree_lstm(left[1], right[1], lstm_in))
-        return out
+        return unbundle(tree_lstm(left[1], right[1], lstm_in))


 class Tracker(nn.Module):
@@ -172,7 +169,7 @@ def __init__(self, config):
         mlp_in_size = 4 * feat_in_size
         mlp = [nn.Linear(mlp_in_size, config.d_mlp), self.relu,
                nn.BatchNorm1d(config.d_mlp), self.mlp_dropout]
-        for i in range(config.n_mlp_layers - 1):
+        for _ in range(config.n_mlp_layers - 1):
             mlp.extend([nn.Linear(config.d_mlp, config.d_mlp), self.relu,
                         nn.BatchNorm1d(config.d_mlp), self.mlp_dropout])
         mlp.append(nn.Linear(config.d_mlp, config.d_out))
@@ -190,5 +187,4 @@ def forward(self, batch):
         prem_trans = hypo_trans = None
         premise = self.encoder(prem_embed, prem_trans)
         hypothesis = self.encoder(hypo_embed, hypo_trans)
-        scores = self.out(self.merger(premise, hypothesis))
-        return scores
+        return self.out(self.merger(premise, hypothesis))
```
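The `bundle` change folds a guard-and-return pair into one conditional expression; the control flow is unchanged. A self-contained check of the two spellings:

```python
import torch

def bundle_old(lstm_iter):
    if lstm_iter is None:
        return None
    lstm_iter = tuple(lstm_iter)
    if lstm_iter[0] is None:
        return None
    return torch.cat(lstm_iter, 0).chunk(2, 1)

def bundle_new(lstm_iter):
    if lstm_iter is None:
        return None
    lstm_iter = tuple(lstm_iter)
    return None if lstm_iter[0] is None else torch.cat(lstm_iter, 0).chunk(2, 1)

states = [torch.zeros(1, 4), torch.ones(1, 4)]   # illustrative (h, c) halves
assert all(torch.equal(a, b)
           for a, b in zip(bundle_old(states), bundle_new(states)))
assert bundle_old(None) is bundle_new(None) is None
```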
```diff
@@ -112,7 +112,7 @@ class Config:
             model.eval()
             dev_iter.init_epoch()
             n_dev_correct, dev_loss = 0, 0
-            for dev_batch_idx, dev_batch in enumerate(dev_iter):
+            for dev_batch in dev_iter:
                 answer = model(dev_batch)
                 n_dev_correct += (torch.max(
                     answer, 1)[1].view(dev_batch.label.size()).data == dev_batch.label.data).sum()
```
```diff
@@ -79,5 +79,4 @@ def forward(self, batch):
         hypo_embed = self.embed(batch.hypothesis)
         premise = self.encoder(prem_embed)
         hypothesis = self.encoder(hypo_embed)
-        scores = self.classifier((premise, hypothesis))
-        return scores
+        return self.classifier((premise, hypothesis))
```
```diff
@@ -73,7 +73,7 @@ def init_weights(m):
 train_iter.repeat = False

 model.train()
-for epoch in range(epochs):
+for _ in range(epochs):
     train_iter.init_epoch()
     n_correct, n_total = 0, 0
     for batch_idx, batch in enumerate(train_iter):
@@ -98,7 +98,7 @@ def init_weights(m):
             model.eval()
             dev_iter.init_epoch()
             n_dev_correct, dev_loss = 0, 0
-            for dev_batch_idx, dev_batch in enumerate(dev_iter):
+            for dev_batch in dev_iter:
                 answer = model(dev_batch)
                 n_dev_correct += (torch.max(
                     answer, 1)[1].view(
```
```diff
@@ -29,7 +29,7 @@ class ResidualStack(torch.nn.Module):
     def __init__(self, layer_size, stack_size, res_channels, skip_channels):
         super().__init__()
         self.res_blocks = torch.nn.ModuleList()
-        for s in range(stack_size):
+        for _ in range(stack_size):
             for l in range(layer_size):
                 dilation = 2 ** l
                 block = ResidualBlock(res_channels, skip_channels, dilation)
```
```diff
@@ -28,9 +28,7 @@ def one_hot_encode(data, channels=256):


 def one_hot_decode(data, axis=1):
-    decoded = np.argmax(data, axis=axis)
-
-    return decoded
+    return np.argmax(data, axis=axis)


 def mu_law_encode(audio, quantization_channels=256):
@@ -55,9 +53,7 @@ def mu_law_decode(output, quantization_channels=256):
     mu = float(quantization_channels - 1)

     expanded = (output / quantization_channels) * 2. - 1
-    waveform = np.sign(expanded) * (np.exp(np.abs(expanded) * np.log(mu + 1)) - 1) / mu
-
-    return waveform
+    return np.sign(expanded) * (np.exp(np.abs(expanded) * np.log(mu + 1)) - 1) / mu


 class Dataset(data.Dataset):
@@ -69,7 +65,7 @@ def __init__(self, data_dir, sample_rate=16000, in_channels=256, trim=True):
         self.trim = trim

         self.root_path = data_dir
-        self.filenames = [x for x in sorted(os.listdir(data_dir))]
+        self.filenames = list(sorted(os.listdir(data_dir)))

     def __getitem__(self, index):
         filepath = os.path.join(self.root_path, self.filenames[index])
@@ -116,8 +112,7 @@ def __init__(self, data_dir, receptive_fields,
         self.collate_fn = self._collate_fn

     def calc_sample_size(self, audio):
-        return self.sample_size if len(audio[0]) >= self.sample_size\
-            else len(audio[0])
+        return min(len(audio[0]), self.sample_size)

     @staticmethod
     def _variable(data):
```
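`min()` states the clamping intent of `calc_sample_size` directly; the conditional and the builtin agree on every input. (As an aside, `sorted()` already returns a new list, so even the new `list(sorted(...))` could simply be `sorted(os.listdir(data_dir))`.) Equivalence sketch with an illustrative cap:

```python
sample_size = 16000   # illustrative cap

def calc_old(audio):
    return sample_size if len(audio[0]) >= sample_size else len(audio[0])

def calc_new(audio):
    return min(len(audio[0]), sample_size)

for n in (100, 16000, 20000):          # below, at, and above the cap
    audio = [[0] * n]
    assert calc_old(audio) == calc_new(audio) == min(n, sample_size)
```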
```diff
@@ -26,8 +26,8 @@ def __init__(self, root, transforms_=None, unaligned=False, mode='train'):
         self.transform = transforms.Compose(transforms_)
         self.unaligned = unaligned

-        self.files_A = sorted(glob.glob(os.path.join(root, '%sA' % mode) + '/*.*'))
-        self.files_B = sorted(glob.glob(os.path.join(root, '%sB' % mode) + '/*.*'))
+        self.files_A = sorted(glob.glob(f"{os.path.join(root, f'{mode}A')}/*.*"))
+        self.files_B = sorted(glob.glob(f"{os.path.join(root, f'{mode}B')}/*.*"))

     def __getitem__(self, index):
         item_A = self.transform(Image.open(self.files_A[index % len(self.files_A)]))
@@ -223,13 +223,12 @@ def push_and_pop(self, data):
             if len(self.data) < self.max_size:
                 self.data.append(element)
                 to_return.append(element)
-            else:
-                if random.uniform(0, 1) > 0.5:
-                    i = random.randint(0, self.max_size - 1)
-                    to_return.append(self.data[i].clone())
-                    self.data[i] = element
-                else:
-                    to_return.append(element)
+            elif random.uniform(0, 1) > 0.5:
+                i = random.randint(0, self.max_size - 1)
+                to_return.append(self.data[i].clone())
+                self.data[i] = element
+            else:
+                to_return.append(element)
         return torch.cat(to_return)
@@ -316,8 +315,8 @@ def weights_init_normal(m):

 ###### Training ######
-for epoch in range(opt.epoch, opt.n_epochs):
-    for i, batch in enumerate(dataloader):
+for _ in range(opt.epoch, opt.n_epochs):
+    for batch in dataloader:
         # Set model input
         real_A = input_A.copy_(batch['A'])
         real_B = input_B.copy_(batch['B'])
```
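The buffer rewrite in `push_and_pop` hangs the nested `if` directly off the `else` as an `elif`, removing one indentation level while keeping the three branches and their probabilities identical. A runnable sketch of the same branch structure with toy state:

```python
import random
import torch

max_size = 2
buffer = [torch.zeros(1)]          # toy replay-buffer contents
element = torch.ones(1)
to_return = []

if len(buffer) < max_size:         # buffer not full: always keep and return
    buffer.append(element)
    to_return.append(element)
elif random.uniform(0, 1) > 0.5:   # full: half the time, swap a stored item out
    i = random.randint(0, max_size - 1)
    to_return.append(buffer[i].clone())
    buffer[i] = element
else:                              # otherwise pass the new element through
    to_return.append(element)
```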
```diff
@@ -26,5 +26,4 @@ def get_args():
         "--cuda", type=bool, default=torch.cuda.is_available(),
         help='CUDA availability check')
     parser.add_argument('--size', type=int, default=256, help='crop to this size')
-    args = parser.parse_args(args=[])
-    return args
+    return parser.parse_args(args=[])
```
```diff
@@ -11,7 +11,7 @@ def mnist_data():
     compose = transforms.Compose(
         [transforms.ToTensor(),
          transforms.Normalize((0.5,), (0.5,))])
-    out_dir = '{}/dataset'.format(DATA_FOLDER)
+    out_dir = f'{DATA_FOLDER}/dataset'
     return datasets.MNIST(root=out_dir, train=True, transform=compose, download=True)
@@ -182,9 +182,8 @@ def train_generator(optimizer, fake_data):
 test_noise = noise(num_test_samples)

-for epoch in range(num_epochs):
-    for n_batch, (real_batch, _) in enumerate(data_loader):
+for _ in range(num_epochs):
+    for real_batch, _ in data_loader:
         # 1. Train Discriminator
         real_data = images_to_vectors(real_batch).to(device)
         # Generate fake data
```
```diff
@@ -87,16 +87,12 @@ def select_action(state):
     steps_done += 1

     sample = random.random()
-    if sample > eps_threshold:
-
-        # freeze the network and get predictions
-        with torch.no_grad():
-            return policy_net(state).max(1)[1].view(1, 1)
-
-    else:
-
+    if sample <= eps_threshold:
         # select random action
         return torch.tensor([[random.randrange(2)]], device=device, dtype=torch.long)
+    # freeze the network and get predictions
+    with torch.no_grad():
+        return policy_net(state).max(1)[1].view(1, 1)


 Transition = namedtuple('Transition',
@@ -179,7 +175,7 @@ def optimize_model():
     current_screen = get_screen()
     state = current_screen - last_screen

-    for t in count():  # for each timestep in an episode
+    for _ in count():
         # Select action for the given state and get rewards
         action = select_action(state)
         _, reward, done, _ = env.step(action.item())
```

Review comment on lines -182 to +178: this removes the following comment (why?): `# for each timestep in an episode`.

```diff
@@ -188,11 +184,7 @@ def optimize_model():
         # Observe new state
         last_screen = current_screen
         current_screen = get_screen()
-        if not done:
-            next_state = current_screen - last_screen
-        else:
-            next_state = None
-
+        next_state = current_screen - last_screen if not done else None
         # Store the transition in memory
         memory.push(state, action, next_state, reward)
```
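Two patterns here: `select_action` becomes a guard clause (handle the early-return case first, so no `else` is needed around the main path), and the `next_state` assignment collapses to a conditional expression. A sketch with hypothetical stand-in values:

```python
def select_action_sketch(sample, eps_threshold):
    if sample <= eps_threshold:      # guard clause: random action, return early
        return 'random'
    return 'greedy'                  # former else branch, now un-indented

assert select_action_sketch(0.01, 0.05) == 'random'
assert select_action_sketch(0.90, 0.05) == 'greedy'

# next_state as a conditional expression:
done, current_screen, last_screen = False, 5, 3   # illustrative numbers
next_state = current_screen - last_screen if not done else None
assert next_state == 2
```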
```diff
@@ -11,10 +11,7 @@ def get_readable_output(input_num, prediction):
         0: 'FizBuz',
         1: 'Buz',
         2: 'Fiz'}
-    if prediction == 3:
-        return input_num
-    else:
-        return input_output_map[prediction]
+    return input_num if prediction == 3 else input_output_map[prediction]


 def binary_encoder():
```
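Same conditional-expression fold as above, shown end to end since the function is tiny; this mirrors the diff directly:

```python
def get_readable_output(input_num, prediction):
    input_output_map = {0: 'FizBuz', 1: 'Buz', 2: 'Fiz'}
    return input_num if prediction == 3 else input_output_map[prediction]

assert get_readable_output(7, 3) == 7          # class 3 means "print the number"
assert get_readable_output(15, 0) == 'FizBuz'
```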
Review comment: Lines 80-81 refactored with the following changes: (`use-fstring-for-formatting`).