18 changes: 18 additions & 0 deletions NAM/convnet.cpp
@@ -8,6 +8,7 @@
#include <unordered_set>

#include "dsp.h"
#include "registry.h"
#include "convnet.h"

nam::convnet::BatchNorm::BatchNorm(const int dim, std::vector<float>::iterator& weights)
@@ -184,3 +185,20 @@ void nam::convnet::ConvNet::_rewind_buffers_()
// Now we can do the rest of the rewind
this->Buffer::_rewind_buffers_();
}

// Factory
std::unique_ptr<nam::DSP> nam::convnet::Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate)
{
const int channels = config["channels"];
const std::vector<int> dilations = config["dilations"];
const bool batchnorm = config["batchnorm"];
const std::string activation = config["activation"];
return std::make_unique<nam::convnet::ConvNet>(
channels, dilations, batchnorm, activation, weights, expectedSampleRate);
}

namespace
{
static nam::factory::Helper _register_ConvNet("ConvNet", nam::convnet::Factory);
}
5 changes: 5 additions & 0 deletions NAM/convnet.h
@@ -86,5 +86,10 @@ class ConvNet : public Buffer
int mPrewarmSamples = 0; // Pre-compute during initialization
int PrewarmSamples() override { return mPrewarmSamples; };
};

// Factory
std::unique_ptr<DSP> Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate);

}; // namespace convnet
}; // namespace nam
10 changes: 10 additions & 0 deletions NAM/dsp.cpp
@@ -7,6 +7,7 @@
#include <unordered_set>

#include "dsp.h"
#include "registry.h"

#define tanh_impl_ std::tanh
// #define tanh_impl_ fast_tanh_
@@ -192,6 +193,15 @@ void nam::Linear::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const int num_f
nam::Buffer::_advance_input_buffer_(num_frames);
}

// Factory
std::unique_ptr<nam::DSP> nam::linear::Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate)
{
const int receptive_field = config["receptive_field"];
const bool bias = config["bias"];
return std::make_unique<nam::Linear>(receptive_field, bias, weights, expectedSampleRate);
}

// NN modules =================================================================

void nam::Conv1D::set_weights_(std::vector<float>::iterator& weights)
6 changes: 6 additions & 0 deletions NAM/dsp.h
@@ -164,6 +164,12 @@ class Linear : public Buffer
float _bias;
};

namespace linear
{
std::unique_ptr<DSP> Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate);
} // namespace linear

// NN modules =================================================================

// TODO conv could take care of its own ring buffer.
62 changes: 15 additions & 47 deletions NAM/get_dsp.cpp
@@ -4,10 +4,12 @@
#include <unordered_set>

#include "dsp.h"
#include "registry.h"
#include "json.hpp"
#include "lstm.h"
#include "convnet.h"
#include "wavenet.h"
#include "get_dsp.h"

namespace nam
{
@@ -102,12 +104,7 @@ std::unique_ptr<DSP> get_dsp(const std::filesystem::path config_filename, dspDat
returnedConfig.config = j["config"];
returnedConfig.metadata = j["metadata"];
returnedConfig.weights = weights;
if (j.find("sample_rate") != j.end())
returnedConfig.expected_sample_rate = j["sample_rate"];
else
{
returnedConfig.expected_sample_rate = -1.0;
}
returnedConfig.expected_sample_rate = nam::get_sample_rate_from_nam_file(j);

/*Copy to a new dsp_config object for get_dsp below,
since not sure if weights actually get modified as being non-const references on some
@@ -152,47 +149,9 @@
}
const double expectedSampleRate = conf.expected_sample_rate;

std::unique_ptr<DSP> out = nullptr;
if (architecture == "Linear")
{
const int receptive_field = config["receptive_field"];
const bool _bias = config["bias"];
out = std::make_unique<Linear>(receptive_field, _bias, weights, expectedSampleRate);
}
else if (architecture == "ConvNet")
{
const int channels = config["channels"];
const bool batchnorm = config["batchnorm"];
std::vector<int> dilations = config["dilations"];
const std::string activation = config["activation"];
out = std::make_unique<convnet::ConvNet>(channels, dilations, batchnorm, activation, weights, expectedSampleRate);
}
else if (architecture == "LSTM")
{
const int num_layers = config["num_layers"];
const int input_size = config["input_size"];
const int hidden_size = config["hidden_size"];
out = std::make_unique<lstm::LSTM>(num_layers, input_size, hidden_size, weights, expectedSampleRate);
}
else if (architecture == "WaveNet")
{
std::vector<wavenet::LayerArrayParams> layer_array_params;
for (size_t i = 0; i < config["layers"].size(); i++)
{
nlohmann::json layer_config = config["layers"][i];
layer_array_params.push_back(
wavenet::LayerArrayParams(layer_config["input_size"], layer_config["condition_size"], layer_config["head_size"],
layer_config["channels"], layer_config["kernel_size"], layer_config["dilations"],
layer_config["activation"], layer_config["gated"], layer_config["head_bias"]));
}
const bool with_head = !config["head"].is_null();
const float head_scale = config["head_scale"];
out = std::make_unique<wavenet::WaveNet>(layer_array_params, head_scale, with_head, weights, expectedSampleRate);
}
else
{
throw std::runtime_error("Unrecognized architecture");
}
// Initialize using registry-based factory
std::unique_ptr<DSP> out =
nam::factory::FactoryRegistry::instance().create(architecture, config, weights, expectedSampleRate);
if (loudness.have)
{
out->SetLoudness(loudness.value);
@@ -212,4 +171,13 @@

return out;
}

double get_sample_rate_from_nam_file(const nlohmann::json& j)
{
if (j.find("sample_rate") != j.end())
return j["sample_rate"];
else
return -1.0;
}

}; // namespace nam
6 changes: 6 additions & 0 deletions NAM/get_dsp.h
@@ -1,3 +1,5 @@
#pragma once

#include <fstream>

#include "dsp.h"
@@ -12,4 +14,8 @@ std::unique_ptr<DSP> get_dsp(dspData& conf);

// Get NAM from a provided .nam file path and store its configuration in the provided conf
std::unique_ptr<DSP> get_dsp(const std::filesystem::path config_filename, dspData& returnedConfig);

// Get sample rate from a .nam file
// Returns -1 if not known (Really old .nam files)
double get_sample_rate_from_nam_file(const nlohmann::json& j);
}; // namespace nam
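
As a usage note, the new helper can be called on any parsed .nam JSON before (or instead of) building the model. A minimal caller-side sketch follows; the PeekSampleRate wrapper and the file handling around it are illustrative and not part of this PR.

// Illustrative caller only; PeekSampleRate is not part of this PR.
#include <filesystem>
#include <fstream>

#include "json.hpp"
#include "get_dsp.h"

double PeekSampleRate(const std::filesystem::path& namPath)
{
  std::ifstream in(namPath);
  nlohmann::json j;
  in >> j; // a .nam file is plain JSON
  return nam::get_sample_rate_from_nam_file(j); // -1.0 for old files with no "sample_rate"
}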
18 changes: 18 additions & 0 deletions NAM/lstm.cpp
@@ -1,7 +1,9 @@
#include <algorithm>
#include <string>
#include <vector>
#include <memory>

#include "registry.h"
#include "lstm.h"

nam::lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size, std::vector<float>::iterator& weights)
@@ -102,3 +104,19 @@ float nam::lstm::LSTM::_process_sample(const float x)
this->_layers[i].process_(this->_layers[i - 1].get_hidden_state());
return this->_head_weight.dot(this->_layers[this->_layers.size() - 1].get_hidden_state()) + this->_head_bias;
}

// Factory to instantiate from nlohmann json
std::unique_ptr<nam::DSP> nam::lstm::Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate)
{
const int num_layers = config["num_layers"];
const int input_size = config["input_size"];
const int hidden_size = config["hidden_size"];
return std::make_unique<nam::lstm::LSTM>(num_layers, input_size, hidden_size, weights, expectedSampleRate);
}

// Register the factory
namespace
{
static nam::factory::Helper _register_LSTM("LSTM", nam::lstm::Factory);
}
6 changes: 6 additions & 0 deletions NAM/lstm.h
@@ -3,6 +3,7 @@

#include <map>
#include <vector>
#include <memory>

#include <Eigen/Dense>

@@ -69,5 +70,10 @@ class LSTM : public DSP
// Since this is assumed to not be a parametric model, its shape should be (1,)
Eigen::VectorXf _input;
};

// Factory to instantiate from nlohmann json
std::unique_ptr<DSP> Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate);

}; // namespace lstm
}; // namespace nam
63 changes: 63 additions & 0 deletions NAM/registry.h
@@ -0,0 +1,63 @@
#pragma once

// Registry for DSP objects

#include <string>
#include <memory>
#include <unordered_map>
#include <functional>

#include "dsp.h"

namespace nam
{
namespace factory
{
// TODO get rid of weights and expectedSampleRate
using FactoryFunction = std::function<std::unique_ptr<DSP>(const nlohmann::json&, std::vector<float>&, const double)>;

// Register factories for instantiating DSP objects
class FactoryRegistry
{
public:
static FactoryRegistry& instance()
{
static FactoryRegistry inst;
return inst;
}

void registerFactory(const std::string& key, FactoryFunction func)
{
// Assert that the key is not already registered
if (factories_.find(key) != factories_.end())
{
throw std::runtime_error("Factory already registered for key: " + key);
}
factories_[key] = func;
}

std::unique_ptr<DSP> create(const std::string& name, const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate) const
{
auto it = factories_.find(name);
if (it != factories_.end())
{
return it->second(config, weights, expectedSampleRate);
}
throw std::runtime_error("Factory not found for name: " + name);
}

private:
std::unordered_map<std::string, FactoryFunction> factories_;
};

// Registration helper. Use this to register your factories.
struct Helper
{
Helper(const std::string& name, FactoryFunction factory)
{
FactoryRegistry::instance().registerFactory(name, std::move(factory));
}
};
} // namespace factory
} // namespace nam
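
For context, here is a minimal sketch (not part of this PR) of how an additional architecture could hook into the new registry. The nam::myarch::MyArch class, its header, and its "size" config field are hypothetical; the pattern mirrors the registrations added in convnet.cpp, lstm.cpp, and wavenet.cpp above.

// Hypothetical example only: registering a custom architecture "MyArch".
// MyArch, myarch.h, and the "size" config field are assumptions, not part of this PR.
#include <memory>
#include <vector>

#include "registry.h"
#include "myarch.h" // hypothetical header defining nam::myarch::MyArch

namespace nam
{
namespace myarch
{
std::unique_ptr<DSP> Factory(const nlohmann::json& config, std::vector<float>& weights,
                             const double expectedSampleRate)
{
  const int size = config["size"]; // hypothetical config field
  return std::make_unique<MyArch>(size, weights, expectedSampleRate);
}
} // namespace myarch
} // namespace nam

// Static registration at load time, same pattern as the built-in architectures.
namespace
{
static nam::factory::Helper _register_MyArch("MyArch", nam::myarch::Factory);
}

With that registration in place, a model whose config reports "MyArch" as its architecture would be instantiated by the same FactoryRegistry::instance().create(...) call that get_dsp.cpp now uses for the built-in architectures, with no further changes to get_dsp.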
5 changes: 1 addition & 4 deletions NAM/version.h
@@ -1,9 +1,6 @@
#ifndef version_h
#define version_h
#pragma once

// Make sure this matches NAM version in ../CMakeLists.txt!
#define NEURAL_AMP_MODELER_DSP_VERSION_MAJOR 0
#define NEURAL_AMP_MODELER_DSP_VERSION_MINOR 3
#define NEURAL_AMP_MODELER_DSP_VERSION_PATCH 0

#endif
26 changes: 26 additions & 0 deletions NAM/wavenet.cpp
@@ -4,6 +4,7 @@

#include <Eigen/Dense>

#include "registry.h"
#include "wavenet.h"

nam::wavenet::_DilatedConv::_DilatedConv(const int in_channels, const int out_channels, const int kernel_size,
@@ -397,3 +398,28 @@ void nam::wavenet::WaveNet::process(NAM_SAMPLE* input, NAM_SAMPLE* output, const
// Finalize to prepare for the next call:
this->_advance_buffers_(num_frames);
}

// Factory to instantiate from nlohmann json
std::unique_ptr<nam::DSP> nam::wavenet::Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate)
{
std::vector<nam::wavenet::LayerArrayParams> layer_array_params;
for (size_t i = 0; i < config["layers"].size(); i++)
{
nlohmann::json layer_config = config["layers"][i];
layer_array_params.push_back(nam::wavenet::LayerArrayParams(
layer_config["input_size"], layer_config["condition_size"], layer_config["head_size"], layer_config["channels"],
layer_config["kernel_size"], layer_config["dilations"], layer_config["activation"], layer_config["gated"],
layer_config["head_bias"]));
}
const bool with_head = !config["head"].is_null();
const float head_scale = config["head_scale"];
return std::make_unique<nam::wavenet::WaveNet>(
layer_array_params, head_scale, with_head, weights, expectedSampleRate);
}

// Register the factory
namespace
{
static nam::factory::Helper _register_WaveNet("WaveNet", nam::wavenet::Factory);
}
4 changes: 4 additions & 0 deletions NAM/wavenet.h
@@ -220,5 +220,9 @@ class WaveNet : public DSP
int mPrewarmSamples = 0; // Pre-compute during initialization
int PrewarmSamples() override { return mPrewarmSamples; };
};

// Factory to instantiate from nlohmann json
std::unique_ptr<DSP> Factory(const nlohmann::json& config, std::vector<float>& weights,
const double expectedSampleRate);
}; // namespace wavenet
}; // namespace nam