Enhance/rtalloc (#319)
* Wrapper: Add allocator support

* Use allocator

* `rt::string` handling; fix overload for tuples from messages

* Wrapper: Correctly update RT context in DSP
weefuzzy authored Sep 28, 2022
1 parent 1cbe8f5 commit fb8b292
Showing 3 changed files with 53 additions and 39 deletions.
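
The changes below thread a real-time allocator through the Max wrapper: the `dsp` method now builds a `FluidContext` from the host vector size and `FluidDefaultAllocator()`, and that context is passed whenever the client is (re)constructed, so client state can draw on the wrapper-supplied allocator. For orientation only, here is a self-contained sketch of that pattern using stand-in types (`Context`, `Client`, `Wrapper` and the `std::pmr` pool are hypothetical illustrations, not the real FluCoMa classes):

```cpp
// Sketch only: stand-in types showing the allocator-through-context pattern.
// None of these are the real FluCoMa classes; std::pmr stands in for the RT allocator.
#include <cstddef>
#include <iostream>
#include <memory_resource>
#include <vector>

struct Context // plays the role of FluidContext
{
  Context() = default;
  Context(std::size_t vectorSize, std::pmr::memory_resource* alloc)
      : hostVectorSize{vectorSize}, allocator{alloc}
  {}
  std::size_t                hostVectorSize{0};
  std::pmr::memory_resource* allocator{std::pmr::get_default_resource()};
};

struct Client // plays the role of Wrapper::ClientType
{
  explicit Client(Context c) : mContext{c}, mScratch{c.allocator}
  {
    mScratch.resize(mContext.hostVectorSize); // working memory from the supplied allocator
  }
  Context                  mContext;
  std::pmr::vector<double> mScratch;
};

struct Wrapper
{
  // Analogue of the Max dsp64 callback: rebuild the context with the real
  // vector size, then reconstruct the client against it.
  void dsp(std::size_t maxVectorSize)
  {
    mContext = Context(maxVectorSize, &mPool);
    mClient  = Client(mContext);
  }

  std::pmr::unsynchronized_pool_resource mPool; // stand-in for FluidDefaultAllocator()
  Context                                mContext;
  Client                                 mClient{mContext};
};

int main()
{
  Wrapper w;
  w.dsp(64); // host announces a 64-sample signal vector
  std::cout << "scratch frames: " << w.mClient.mScratch.size() << '\n'; // prints 64
}
```

Rebuilding the context inside the dsp callback reflects the last commit-message bullet: the host's vector size is only known once `dsp` is called, so that is where the RT context gets refreshed.
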
1 change: 1 addition & 0 deletions .gitignore
@@ -22,3 +22,4 @@ interfaces/flucoma-obj-qlookup.json
release-packaging/**
winbuild/*
.DS_Store
+ .vscode/*
2 changes: 1 addition & 1 deletion README.md
@@ -8,7 +8,7 @@ Minimal build steps below. For detailed guidance see https://github.com/flucoma/

## Prerequisites

- * C++14 compliant compiler (clang, GCC or MSVC)
+ * C++17 compliant compiler (clang, GCC or MSVC)
* cmake
* make (or Ninja or XCode or VisualStudio)
* git
89 changes: 51 additions & 38 deletions source/include/FluidMaxWrapper.hpp
@@ -24,6 +24,8 @@ under the European Union’s Horizon 2020 research and innovation programme
#include <clients/common/ParameterTypes.hpp>
#include <clients/nrt/FluidSharedInstanceAdaptor.hpp>

+ #include <data/FluidMemory.hpp>
+
#include "MaxBufferAdaptor.hpp"

#include <FluidVersion.hpp>
@@ -117,12 +119,14 @@ class RealTime
x->perform(dsp64, ins, numins, outs, numouts, vec_size, flags, userparam);
}

- void dsp(t_object* dsp64, short* count, double samplerate,
- long /*maxvectorsize*/, long /*flags*/)
+ void dsp(t_object* dsp64, short* count, double samplerate, long maxvectorsize,
+ long /*flags*/)
{
Wrapper* wrapper = static_cast<Wrapper*>(this);
+ mContext = FluidContext(maxvectorsize, FluidDefaultAllocator());
if (!Wrapper::template IsModel_t<typename Wrapper::ClientType>::value)
- wrapper->mClient = typename Wrapper::ClientType{wrapper->mParams};
+ wrapper->mClient =
+ typename Wrapper::ClientType{wrapper->mParams, mContext};

auto& client = wrapper->client();

@@ -636,7 +640,7 @@ class FluidMaxWrapper
template <size_t N>
static constexpr auto makeValue()
{
- return Client::getParameterDescriptors().template makeValue<N>();
+ return Client::getParameterDescriptors().template makeValue<N>(FluidDefaultAllocator());
}

bool checkResult(Result& res)
@@ -712,7 +716,7 @@ class FluidMaxWrapper
template <size_t N>
struct Fetcher<N, StringT>
{
- std::string operator()(const long ac, t_atom* av, long& currentCount)
+ rt::string operator()(const long ac, t_atom* av, long& currentCount)
{
auto defaultValue = paramDescriptor<N>().defaultValue;
return {currentCount < ac ? atom_getsym(av + currentCount++)->s_name
@@ -726,12 +730,12 @@
struct ParamAtomConverter
{

- static std::string getString(t_atom* a)
+ static rt::string getString(t_atom* a)
{
switch (atom_gettype(a))
{
- case A_LONG: return std::to_string(atom_getlong(a));
- case A_FLOAT: return std::to_string(atom_getfloat(a));
+ case A_LONG: return rt::string{std::to_string(atom_getlong(a))};
+ case A_FLOAT: return rt::string{std::to_string(atom_getfloat(a))};
default: return {atom_getsym(a)->s_name};
}
}
@@ -766,9 +770,10 @@ class FluidMaxWrapper
return InputBufferT::type(new MaxBufferAdaptor(x, atom_getsym(a)));
}

- static auto fromAtom(t_object*, t_atom* a, StringT::type)
+ template <typename Allocator>
+ static auto fromAtom(t_object*, t_atom* a, std::basic_string<char,std::char_traits<char>, Allocator>)
{
- return getString(a);
+ return std::basic_string<char,std::char_traits<char>, Allocator>{getString(a)};
}

template <typename T>
@@ -803,12 +808,14 @@ class FluidMaxWrapper
atom_setsym(a, b ? b->name() : nullptr);
}

- static auto toAtom(t_atom* a, StringT::type v)
+ template<typename Allocator>
+ static auto toAtom(t_atom* a, std::basic_string<char,std::char_traits<char>,Allocator> v)
{
atom_setsym(a, gensym(v.c_str()));
}

- static auto toAtom(t_atom* a, FluidTensor<std::string, 1> v)
+ template<typename Allocator>
+ static auto toAtom(t_atom* a, FluidTensor<std::basic_string<char,std::char_traits<char>,Allocator>, 1> v)
{
for (auto& s : v) atom_setsym(a++, gensym(s.c_str()));
}
@@ -836,7 +843,7 @@ class FluidMaxWrapper
}

template <typename... Ts, size_t... Is>
- static void toAtom(t_atom* a, std::tuple<Ts...>&& x,
+ static void toAtom(t_atom* a, std::tuple<Ts...> const& x,
std::index_sequence<Is...>,
std::array<size_t, sizeof...(Ts)> offsets)
{
@@ -1124,8 +1131,8 @@ class FluidMaxWrapper

for (index i = 0, arg = 0; i < desc.numOptions; i++)
{
- if(a[i])
- ParamAtomConverter::toAtom(*av + arg++,desc.strings[i]);
+ if (a[i])
+ ParamAtomConverter::toAtom(*av + arg++, std::string{desc.strings[i]});
}

return MAX_ERR_NONE;
@@ -1185,9 +1192,9 @@ class FluidMaxWrapper
static constexpr index NumOutputBuffers = ParamDescType::template NumOfType<BufferT>;

FluidMaxWrapper(t_symbol*, long ac, t_atom* av)
- : mListSize{32}, mMessages{}, mParams(Client::getParameterDescriptors()),
+ : mListSize{32}, mMessages{}, mParams(Client::getParameterDescriptors(), FluidDefaultAllocator()),
mParamSnapshot{mParams.toTuple()}, mAutosize{true},
- mClient{initParamsFromArgs(ac, av)}, mDumpDictionary{nullptr}
+ mClient{initParamsFromArgs(ac, av), FluidContext()}, mDumpDictionary{nullptr}
{
if (mClient.audioChannelsIn())
{
@@ -1432,7 +1439,9 @@ class FluidMaxWrapper
getClass(class_new(className, (method) create, (method) destroy,
sizeof(FluidMaxWrapper), 0, A_GIMME, 0));
WrapperBase::setup(getClass());

+ (void)FFTSetup();
+
if (isControlIn<typename Client::Client>)
{
class_addmethod(getClass(), (method) handleList, "list", A_GIMME, 0);
@@ -1569,8 +1578,8 @@ class FluidMaxWrapper

static void doSharedClientRefer(FluidMaxWrapper* x, t_symbol* newName)
{
- std::string name(newName->s_name);
- if (std::string(name) != x->mParams.template get<0>())
+ rt::string name(newName->s_name);
+ if (name != x->mParams.template get<0>())
{
// auto newParams = ParamSetType(Client::getParameterDescriptors());
Result r = x->mParams.lookup(name);
@@ -1579,7 +1588,7 @@
Client::getParameterDescriptors().template iterate<RemoveListener>(
x, x->mParams);
x->mParams.refer(name);
- x->mClient = Client(x->mParams);
+ x->mClient = Client(x->mParams, FluidContext());
Client::getParameterDescriptors().template iterate<AddListener>(
x, x->mParams);
}
@@ -1858,14 +1867,14 @@ class FluidMaxWrapper
}

template <template <typename, size_t> class Tensor, typename T>
- static size_t ResultSize(Tensor<T, 1>&& x)
+ static size_t ResultSize(Tensor<T, 1> const& x)
{
return static_cast<FluidTensor<T, 1>>(x).size();
}

template <typename... Ts, size_t... Is>
static std::tuple<std::array<size_t, sizeof...(Ts)>, size_t>
- ResultSize(std::tuple<Ts...>&& x, std::index_sequence<Is...>)
+ ResultSize(std::tuple<Ts...> const& x, std::index_sequence<Is...>)
{
size_t size = 0;
std::array<size_t, sizeof...(Ts)> offsets;
@@ -1885,21 +1894,24 @@
outlet_anything(x->mDataOutlets[0],s,static_cast<long>(resultSize), out.data());
}

- template <typename... Ts>
- static void messageOutput(FluidMaxWrapper* x, t_symbol* s, std::vector<t_atom>& outputTokens,
- MessageResult<std::tuple<Ts...>> r)
+ template <typename Tuple>
+ static std::enable_if_t<isSpecialization<Tuple, std::tuple>::value>
+ messageOutput(FluidMaxWrapper* x, t_symbol* s,
+ std::vector<t_atom>& outputTokens, MessageResult<Tuple> r)
{
- auto indices = std::index_sequence_for<Ts...>();
- size_t resultSize;
- std::array<size_t, sizeof...(Ts)> offsets;
- std::tie(offsets, resultSize) =
- ResultSize(static_cast<std::tuple<Ts...>>(r), indices);
+ constexpr auto N = std::tuple_size_v<Tuple>;
+ auto indices = std::make_index_sequence<N>();
+
+ size_t resultSize;
+ std::array<size_t, N> offsets;
+ std::tie(offsets, resultSize) = ResultSize(r.value(), indices);
resultSize += outputTokens.size();
std::vector<t_atom> out(resultSize);
- std::copy_n(outputTokens.begin(), outputTokens.size(),out.begin());
- ParamAtomConverter::toAtom(out.data() + outputTokens.size(), static_cast<std::tuple<Ts...>>(r),
+ std::copy_n(outputTokens.begin(), outputTokens.size(), out.begin());
+ ParamAtomConverter::toAtom(out.data() + outputTokens.size(), r.value(),
indices, offsets);
- outlet_anything(x->mDataOutlets[0],s,static_cast<long>(resultSize), out.data());
+ outlet_anything(x->mDataOutlets[0], s, static_cast<long>(resultSize),
+ out.data());
}

static void messageOutput(FluidMaxWrapper* x, t_symbol* s, std::vector<t_atom>& outputTokens,
@@ -2050,10 +2062,11 @@ class FluidMaxWrapper
object_obex_dumpout(x, gensym("load"), 0, nullptr);
}

- static void updateParams(FluidMaxWrapper* x,
- MessageResult<typename ParamSetType::ValueTuple> v)
+ static void
+ updateParams(FluidMaxWrapper* x,
+ MessageResult<typename ParamSetType::ValueTuple> const& v)
{
- x->mParams.fromTuple(typename ParamSetType::ValueTuple(v));
+ x->mParams.fromTuple(typename ParamSetType::ValueTuple(v.value()));
}

static void updateParams(FluidMaxWrapper*, MessageResult<void>) {}
@@ -2418,7 +2431,7 @@ class FluidMaxWrapper
static t_symbol* maxAttrType(LongArrayT) { return gensym("atom"); }
static t_symbol* maxAttrType(LongRuntimeMaxT) { return USESYM(atom_long); }
static t_symbol* maxAttrType(ChoicesT) { return gensym("atom"); }

template <typename T>
static std::enable_if_t<IsSharedClient<typename T::type>::value, t_symbol*>
maxAttrType(T)
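
The `rt::string` changes above replace overloads fixed to `StringT::type` with versions templated on the string's allocator, so one conversion path serves both `std::string` and the real-time-allocated `rt::string`. A minimal sketch of that overloading technique, assuming `rt::string` is essentially a `std::basic_string<char>` over a custom allocator (the `toSymbolName` helper and the `std::pmr` types are hypothetical stand-ins, not part of the wrapper):

```cpp
// Sketch: one function template serves any std::basic_string, whatever its allocator,
// mirroring the templated fromAtom/toAtom overloads in the diff above.
#include <iostream>
#include <memory_resource>
#include <string>

// Hypothetical stand-in for rt::string: a char string over a polymorphic allocator.
using rt_string = std::pmr::string;

template <typename Allocator>
const char* toSymbolName(
    std::basic_string<char, std::char_traits<char>, Allocator> const& s)
{
  return s.c_str(); // a real wrapper would hand this to gensym()
}

int main()
{
  std::string ordinary = "fluid.dataset~";
  std::pmr::monotonic_buffer_resource pool;
  rt_string realtime{"fluid.pitch~", &pool};

  std::cout << toSymbolName(ordinary) << '\n'; // deduces Allocator = std::allocator<char>
  std::cout << toSymbolName(realtime) << '\n'; // deduces Allocator = std::pmr::polymorphic_allocator<char>
}
```

The commit applies the same idea to `FluidTensor` of strings, and reworks the tuple `messageOutput` overload to accept any `std::tuple` specialisation, expanding it with `std::tuple_size_v` and `std::make_index_sequence` rather than a deduced parameter pack.
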
