First Commit

2025-02-06 22:24:29 +08:00
parent ed7df4c81e
commit 7539e6a53c
18116 changed files with 6181499 additions and 0 deletions

4125
externals/openal-soft/alc/alc.cpp vendored Normal file

File diff suppressed because it is too large

528
externals/openal-soft/alc/alconfig.cpp vendored Normal file

@@ -0,0 +1,528 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "alconfig.h"
#include <cstdlib>
#include <cctype>
#include <cstring>
#ifdef _WIN32
#include <windows.h>
#include <shlobj.h>
#endif
#ifdef __APPLE__
#include <CoreFoundation/CoreFoundation.h>
#endif
#include <algorithm>
#include <cstdio>
#include <string>
#include <utility>
#include "alfstream.h"
#include "alstring.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "strutils.h"
#include "vector.h"
namespace {
struct ConfigEntry {
std::string key;
std::string value;
};
al::vector<ConfigEntry> ConfOpts;
std::string &lstrip(std::string &line)
{
size_t pos{0};
while(pos < line.length() && std::isspace(line[pos]))
++pos;
line.erase(0, pos);
return line;
}
bool readline(std::istream &f, std::string &output)
{
while(f.good() && f.peek() == '\n')
f.ignore();
return std::getline(f, output) && !output.empty();
}
std::string expdup(const char *str)
{
std::string output;
std::string envval;
while(*str != '\0')
{
const char *addstr;
size_t addstrlen;
if(str[0] != '$')
{
const char *next = std::strchr(str, '$');
addstr = str;
addstrlen = next ? static_cast<size_t>(next-str) : std::strlen(str);
str += addstrlen;
}
else
{
str++;
if(*str == '$')
{
const char *next = std::strchr(str+1, '$');
addstr = str;
addstrlen = next ? static_cast<size_t>(next-str) : std::strlen(str);
str += addstrlen;
}
else
{
const bool hasbraces{(*str == '{')};
if(hasbraces) str++;
const char *envstart = str;
while(std::isalnum(*str) || *str == '_')
++str;
if(hasbraces && *str != '}')
continue;
const std::string envname{envstart, str};
if(hasbraces) str++;
envval = al::getenv(envname.c_str()).value_or(std::string{});
addstr = envval.data();
addstrlen = envval.length();
}
}
if(addstrlen == 0)
continue;
output.append(addstr, addstrlen);
}
return output;
}
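/* For example, with HOME=/home/user in the environment, expdup expands values
 * as follows (hypothetical inputs):
 *   "$HOME/openal"   -> "/home/user/openal"
 *   "${HOME}/openal" -> "/home/user/openal"
 *   "$$HOME"         -> "$HOME"  (a doubled '$' passes the text through literally)
 *   "${HOME"         -> ""       (a malformed brace reference is dropped)
 */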
void LoadConfigFromFile(std::istream &f)
{
std::string curSection;
std::string buffer;
while(readline(f, buffer))
{
if(lstrip(buffer).empty())
continue;
if(buffer[0] == '[')
{
auto line = const_cast<char*>(buffer.data());
char *section = line+1;
char *endsection;
endsection = std::strchr(section, ']');
if(!endsection || section == endsection)
{
ERR(" config parse error: bad line \"%s\"\n", line);
continue;
}
if(endsection[1] != 0)
{
char *end = endsection+1;
while(std::isspace(*end))
++end;
if(*end != 0 && *end != '#')
{
ERR(" config parse error: bad line \"%s\"\n", line);
continue;
}
}
*endsection = 0;
curSection.clear();
if(al::strcasecmp(section, "general") != 0)
{
do {
char *nextp = std::strchr(section, '%');
if(!nextp)
{
curSection += section;
break;
}
curSection.append(section, nextp);
section = nextp;
if(((section[1] >= '0' && section[1] <= '9') ||
(section[1] >= 'a' && section[1] <= 'f') ||
(section[1] >= 'A' && section[1] <= 'F')) &&
((section[2] >= '0' && section[2] <= '9') ||
(section[2] >= 'a' && section[2] <= 'f') ||
(section[2] >= 'A' && section[2] <= 'F')))
{
int b{0};
if(section[1] >= '0' && section[1] <= '9')
b = (section[1]-'0') << 4;
else if(section[1] >= 'a' && section[1] <= 'f')
b = (section[1]-'a'+0xa) << 4;
else if(section[1] >= 'A' && section[1] <= 'F')
b = (section[1]-'A'+0x0a) << 4;
if(section[2] >= '0' && section[2] <= '9')
b |= (section[2]-'0');
else if(section[2] >= 'a' && section[2] <= 'f')
b |= (section[2]-'a'+0xa);
else if(section[2] >= 'A' && section[2] <= 'F')
b |= (section[2]-'A'+0x0a);
curSection += static_cast<char>(b);
section += 3;
}
else if(section[1] == '%')
{
curSection += '%';
section += 2;
}
else
{
curSection += '%';
section += 1;
}
} while(*section != 0);
}
continue;
}
auto cmtpos = std::min(buffer.find('#'), buffer.size());
while(cmtpos > 0 && std::isspace(buffer[cmtpos-1]))
--cmtpos;
if(!cmtpos) continue;
buffer.erase(cmtpos);
auto sep = buffer.find('=');
if(sep == std::string::npos)
{
ERR(" config parse error: malformed option line: \"%s\"\n", buffer.c_str());
continue;
}
auto keyend = sep++;
while(keyend > 0 && std::isspace(buffer[keyend-1]))
--keyend;
if(!keyend)
{
ERR(" config parse error: malformed option line: \"%s\"\n", buffer.c_str());
continue;
}
while(sep < buffer.size() && std::isspace(buffer[sep]))
sep++;
std::string fullKey;
if(!curSection.empty())
{
fullKey += curSection;
fullKey += '/';
}
fullKey += buffer.substr(0u, keyend);
std::string value{(sep < buffer.size()) ? buffer.substr(sep) : std::string{}};
if(value.size() > 1)
{
if((value.front() == '"' && value.back() == '"')
|| (value.front() == '\'' && value.back() == '\''))
{
value.pop_back();
value.erase(value.begin());
}
}
TRACE(" found '%s' = '%s'\n", fullKey.c_str(), value.c_str());
/* Check if we already have this option set */
auto find_key = [&fullKey](const ConfigEntry &entry) -> bool
{ return entry.key == fullKey; };
auto ent = std::find_if(ConfOpts.begin(), ConfOpts.end(), find_key);
if(ent != ConfOpts.end())
{
if(!value.empty())
ent->value = expdup(value.c_str());
else
ConfOpts.erase(ent);
}
else if(!value.empty())
ConfOpts.emplace_back(ConfigEntry{std::move(fullKey), expdup(value.c_str())});
}
ConfOpts.shrink_to_fit();
}
const char *GetConfigValue(const char *devName, const char *blockName, const char *keyName)
{
if(!keyName)
return nullptr;
std::string key;
if(blockName && al::strcasecmp(blockName, "general") != 0)
{
key = blockName;
if(devName)
{
key += '/';
key += devName;
}
key += '/';
key += keyName;
}
else
{
if(devName)
{
key = devName;
key += '/';
}
key += keyName;
}
auto iter = std::find_if(ConfOpts.cbegin(), ConfOpts.cend(),
[&key](const ConfigEntry &entry) -> bool
{ return entry.key == key; });
if(iter != ConfOpts.cend())
{
TRACE("Found %s = \"%s\"\n", key.c_str(), iter->value.c_str());
if(!iter->value.empty())
return iter->value.c_str();
return nullptr;
}
if(!devName)
{
TRACE("Key %s not found\n", key.c_str());
return nullptr;
}
return GetConfigValue(nullptr, blockName, keyName);
}
} // namespace
#ifdef _WIN32
void ReadALConfig()
{
WCHAR buffer[MAX_PATH];
if(SHGetSpecialFolderPathW(nullptr, buffer, CSIDL_APPDATA, FALSE) != FALSE)
{
std::string filepath{wstr_to_utf8(buffer)};
filepath += "\\alsoft.ini";
TRACE("Loading config %s...\n", filepath.c_str());
al::ifstream f{filepath};
if(f.is_open())
LoadConfigFromFile(f);
}
std::string ppath{GetProcBinary().path};
if(!ppath.empty())
{
ppath += "\\alsoft.ini";
TRACE("Loading config %s...\n", ppath.c_str());
al::ifstream f{ppath};
if(f.is_open())
LoadConfigFromFile(f);
}
if(auto confpath = al::getenv(L"ALSOFT_CONF"))
{
TRACE("Loading config %s...\n", wstr_to_utf8(confpath->c_str()).c_str());
al::ifstream f{*confpath};
if(f.is_open())
LoadConfigFromFile(f);
}
}
#else
void ReadALConfig()
{
const char *str{"/etc/openal/alsoft.conf"};
TRACE("Loading config %s...\n", str);
al::ifstream f{str};
if(f.is_open())
LoadConfigFromFile(f);
f.close();
std::string confpaths{al::getenv("XDG_CONFIG_DIRS").value_or("/etc/xdg")};
/* Go through the list in reverse, since "the order of base directories
* denotes their importance; the first directory listed is the most
* important". Ergo, we need to load the settings from the later dirs
* first so that the settings in the earlier dirs override them.
*/
std::string fname;
while(!confpaths.empty())
{
auto next = confpaths.find_last_of(':');
if(next < confpaths.length())
{
fname = confpaths.substr(next+1);
confpaths.erase(next);
}
else
{
fname = confpaths;
confpaths.clear();
}
if(fname.empty() || fname.front() != '/')
WARN("Ignoring XDG config dir: %s\n", fname.c_str());
else
{
if(fname.back() != '/') fname += "/alsoft.conf";
else fname += "alsoft.conf";
TRACE("Loading config %s...\n", fname.c_str());
f = al::ifstream{fname};
if(f.is_open())
LoadConfigFromFile(f);
}
fname.clear();
}
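/* For example, with XDG_CONFIG_DIRS="/etc/xdg:/opt/etc/xdg" (hypothetical),
 * the loop above walks the list from the end: it loads
 * /opt/etc/xdg/alsoft.conf first and /etc/xdg/alsoft.conf second, so options
 * from the earlier, more important directory overwrite those loaded before.
 */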
#ifdef __APPLE__
CFBundleRef mainBundle = CFBundleGetMainBundle();
if(mainBundle)
{
unsigned char fileName[PATH_MAX];
CFURLRef configURL;
if((configURL=CFBundleCopyResourceURL(mainBundle, CFSTR(".alsoftrc"), CFSTR(""), nullptr)) &&
CFURLGetFileSystemRepresentation(configURL, true, fileName, sizeof(fileName)))
{
f = al::ifstream{reinterpret_cast<char*>(fileName)};
if(f.is_open())
LoadConfigFromFile(f);
}
}
#endif
if(auto homedir = al::getenv("HOME"))
{
fname = *homedir;
if(fname.back() != '/') fname += "/.alsoftrc";
else fname += ".alsoftrc";
TRACE("Loading config %s...\n", fname.c_str());
f = al::ifstream{fname};
if(f.is_open())
LoadConfigFromFile(f);
}
if(auto configdir = al::getenv("XDG_CONFIG_HOME"))
{
fname = *configdir;
if(fname.back() != '/') fname += "/alsoft.conf";
else fname += "alsoft.conf";
}
else
{
fname.clear();
if(auto homedir = al::getenv("HOME"))
{
fname = *homedir;
if(fname.back() != '/') fname += "/.config/alsoft.conf";
else fname += ".config/alsoft.conf";
}
}
if(!fname.empty())
{
TRACE("Loading config %s...\n", fname.c_str());
f = al::ifstream{fname};
if(f.is_open())
LoadConfigFromFile(f);
}
std::string ppath{GetProcBinary().path};
if(!ppath.empty())
{
if(ppath.back() != '/') ppath += "/alsoft.conf";
else ppath += "alsoft.conf";
TRACE("Loading config %s...\n", ppath.c_str());
f = al::ifstream{ppath};
if(f.is_open())
LoadConfigFromFile(f);
}
if(auto confname = al::getenv("ALSOFT_CONF"))
{
TRACE("Loading config %s...\n", confname->c_str());
f = al::ifstream{*confname};
if(f.is_open())
LoadConfigFromFile(f);
}
}
#endif
al::optional<std::string> ConfigValueStr(const char *devName, const char *blockName, const char *keyName)
{
if(const char *val{GetConfigValue(devName, blockName, keyName)})
return val;
return al::nullopt;
}
al::optional<int> ConfigValueInt(const char *devName, const char *blockName, const char *keyName)
{
if(const char *val{GetConfigValue(devName, blockName, keyName)})
return static_cast<int>(std::strtol(val, nullptr, 0));
return al::nullopt;
}
al::optional<unsigned int> ConfigValueUInt(const char *devName, const char *blockName, const char *keyName)
{
if(const char *val{GetConfigValue(devName, blockName, keyName)})
return static_cast<unsigned int>(std::strtoul(val, nullptr, 0));
return al::nullopt;
}
al::optional<float> ConfigValueFloat(const char *devName, const char *blockName, const char *keyName)
{
if(const char *val{GetConfigValue(devName, blockName, keyName)})
return std::strtof(val, nullptr);
return al::nullopt;
}
al::optional<bool> ConfigValueBool(const char *devName, const char *blockName, const char *keyName)
{
if(const char *val{GetConfigValue(devName, blockName, keyName)})
return al::strcasecmp(val, "on") == 0 || al::strcasecmp(val, "yes") == 0
|| al::strcasecmp(val, "true")==0 || atoi(val) != 0;
return al::nullopt;
}
bool GetConfigValueBool(const char *devName, const char *blockName, const char *keyName, bool def)
{
if(const char *val{GetConfigValue(devName, blockName, keyName)})
return (al::strcasecmp(val, "on") == 0 || al::strcasecmp(val, "yes") == 0
|| al::strcasecmp(val, "true") == 0 || atoi(val) != 0);
return def;
}
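A minimal usage sketch of the API above, assuming a program is built against these alconfig sources and that an alsoft.ini provides the illustrative options shown in the comment; the "frequency", "decoder" and "hq-mode" names are examples only.

#include <cstdio>
#include "alconfig.h"

/* Hypothetical alsoft.ini for this sketch:
 *   [general]
 *   frequency = 48000
 *   [decoder]
 *   hq-mode = yes
 * Keys from [general] are stored without a section prefix; other sections
 * become a "section/" prefix on the key, with %XX hex escapes decoded.
 */
int main()
{
    ReadALConfig();
    if(auto freq = ConfigValueInt(nullptr, nullptr, "frequency"))
        std::printf("frequency = %d\n", *freq);
    /* Looks up "decoder/hq-mode"; with a device name it would first try
     * "decoder/<device>/hq-mode", then fall back to "decoder/hq-mode".
     */
    const bool hq{GetConfigValueBool(nullptr, "decoder", "hq-mode", false)};
    std::printf("hq-mode = %s\n", hq ? "yes" : "no");
    return 0;
}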

18
externals/openal-soft/alc/alconfig.h vendored Normal file

@@ -0,0 +1,18 @@
#ifndef ALCONFIG_H
#define ALCONFIG_H
#include <string>
#include "aloptional.h"
void ReadALConfig();
bool GetConfigValueBool(const char *devName, const char *blockName, const char *keyName, bool def);
al::optional<std::string> ConfigValueStr(const char *devName, const char *blockName, const char *keyName);
al::optional<int> ConfigValueInt(const char *devName, const char *blockName, const char *keyName);
al::optional<unsigned int> ConfigValueUInt(const char *devName, const char *blockName, const char *keyName);
al::optional<float> ConfigValueFloat(const char *devName, const char *blockName, const char *keyName);
al::optional<bool> ConfigValueBool(const char *devName, const char *blockName, const char *keyName);
#endif /* ALCONFIG_H */

2210
externals/openal-soft/alc/alu.cpp vendored Normal file

File diff suppressed because it is too large

38
externals/openal-soft/alc/alu.h vendored Normal file

@@ -0,0 +1,38 @@
#ifndef ALU_H
#define ALU_H
#include <bitset>
#include "aloptional.h"
struct ALCcontext;
struct ALCdevice;
struct EffectSlot;
enum class StereoEncoding : unsigned char;
constexpr float GainMixMax{1000.0f}; /* +60dB */
enum CompatFlags : uint8_t {
ReverseX,
ReverseY,
ReverseZ,
Count
};
using CompatFlagBitset = std::bitset<CompatFlags::Count>;
void aluInit(CompatFlagBitset flags, const float nfcscale);
/* aluInitRenderer
*
* Set up the appropriate panning method and mixing method given the device
* properties.
*/
void aluInitRenderer(ALCdevice *device, int hrtf_id, al::optional<StereoEncoding> stereomode);
void aluInitEffectPanning(EffectSlot *slot, ALCcontext *context);
#endif
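A brief sketch of how the flags above might be used, assuming a caller built against these headers; the flag and scale chosen are arbitrary examples.

#include "alu.h"

/* Hypothetical setup: invert only the Z axis and leave the NFC scale at 1. */
void setupCompatFlags()
{
    CompatFlagBitset flags{};
    flags.set(CompatFlags::ReverseZ); /* flip the Z axis for legacy content */
    aluInit(flags, 1.0f/*nfcscale*/);
}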

File diff suppressed because it is too large


@@ -0,0 +1,19 @@
#ifndef BACKENDS_ALSA_H
#define BACKENDS_ALSA_H
#include "base.h"
struct AlsaBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_ALSA_H */


@@ -0,0 +1,202 @@
#include "config.h"
#include "base.h"
#include <algorithm>
#include <array>
#include <atomic>
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <mmreg.h>
#include "albit.h"
#include "core/logging.h"
#include "aloptional.h"
#endif
#include "atomic.h"
#include "core/devformat.h"
namespace al {
backend_exception::backend_exception(backend_error code, const char *msg, ...) : mErrorCode{code}
{
std::va_list args;
va_start(args, msg);
setMessage(msg, args);
va_end(args);
}
backend_exception::~backend_exception() = default;
} // namespace al
bool BackendBase::reset()
{ throw al::backend_exception{al::backend_error::DeviceError, "Invalid BackendBase call"}; }
void BackendBase::captureSamples(al::byte*, uint)
{ }
uint BackendBase::availableSamples()
{ return 0; }
ClockLatency BackendBase::getClockLatency()
{
ClockLatency ret;
uint refcount;
do {
refcount = mDevice->waitForMix();
ret.ClockTime = GetDeviceClockTime(mDevice);
std::atomic_thread_fence(std::memory_order_acquire);
} while(refcount != ReadRef(mDevice->MixCount));
/* NOTE: The device will generally have about all but one period filled at
* any given time during playback. Without a more accurate measurement from
* the output, this is an okay approximation.
*/
ret.Latency = std::max(std::chrono::seconds{mDevice->BufferSize-mDevice->UpdateSize},
std::chrono::seconds::zero());
ret.Latency /= mDevice->Frequency;
return ret;
}
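/* For example, with BufferSize=2048, UpdateSize=512 and Frequency=48000
 * (hypothetical values), Latency starts as seconds{1536} and the division by
 * Frequency scales it to 1536/48000 s = 32ms of audio assumed to still be
 * queued ahead of the mix point.
 */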
void BackendBase::setDefaultWFXChannelOrder()
{
mDevice->RealOut.ChannelIndex.fill(InvalidChannelIndex);
switch(mDevice->FmtChans)
{
case DevFmtMono:
mDevice->RealOut.ChannelIndex[FrontCenter] = 0;
break;
case DevFmtStereo:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
break;
case DevFmtQuad:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[BackLeft] = 2;
mDevice->RealOut.ChannelIndex[BackRight] = 3;
break;
case DevFmtX51:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[FrontCenter] = 2;
mDevice->RealOut.ChannelIndex[LFE] = 3;
mDevice->RealOut.ChannelIndex[SideLeft] = 4;
mDevice->RealOut.ChannelIndex[SideRight] = 5;
break;
case DevFmtX61:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[FrontCenter] = 2;
mDevice->RealOut.ChannelIndex[LFE] = 3;
mDevice->RealOut.ChannelIndex[BackCenter] = 4;
mDevice->RealOut.ChannelIndex[SideLeft] = 5;
mDevice->RealOut.ChannelIndex[SideRight] = 6;
break;
case DevFmtX71:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[FrontCenter] = 2;
mDevice->RealOut.ChannelIndex[LFE] = 3;
mDevice->RealOut.ChannelIndex[BackLeft] = 4;
mDevice->RealOut.ChannelIndex[BackRight] = 5;
mDevice->RealOut.ChannelIndex[SideLeft] = 6;
mDevice->RealOut.ChannelIndex[SideRight] = 7;
break;
case DevFmtX714:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[FrontCenter] = 2;
mDevice->RealOut.ChannelIndex[LFE] = 3;
mDevice->RealOut.ChannelIndex[BackLeft] = 4;
mDevice->RealOut.ChannelIndex[BackRight] = 5;
mDevice->RealOut.ChannelIndex[SideLeft] = 6;
mDevice->RealOut.ChannelIndex[SideRight] = 7;
mDevice->RealOut.ChannelIndex[TopFrontLeft] = 8;
mDevice->RealOut.ChannelIndex[TopFrontRight] = 9;
mDevice->RealOut.ChannelIndex[TopBackLeft] = 10;
mDevice->RealOut.ChannelIndex[TopBackRight] = 11;
break;
case DevFmtX3D71:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[FrontCenter] = 2;
mDevice->RealOut.ChannelIndex[LFE] = 3;
mDevice->RealOut.ChannelIndex[Aux0] = 4;
mDevice->RealOut.ChannelIndex[Aux1] = 5;
mDevice->RealOut.ChannelIndex[SideLeft] = 6;
mDevice->RealOut.ChannelIndex[SideRight] = 7;
break;
case DevFmtAmbi3D:
break;
}
}
void BackendBase::setDefaultChannelOrder()
{
mDevice->RealOut.ChannelIndex.fill(InvalidChannelIndex);
switch(mDevice->FmtChans)
{
case DevFmtX51:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[SideLeft] = 2;
mDevice->RealOut.ChannelIndex[SideRight] = 3;
mDevice->RealOut.ChannelIndex[FrontCenter] = 4;
mDevice->RealOut.ChannelIndex[LFE] = 5;
return;
case DevFmtX71:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[BackLeft] = 2;
mDevice->RealOut.ChannelIndex[BackRight] = 3;
mDevice->RealOut.ChannelIndex[FrontCenter] = 4;
mDevice->RealOut.ChannelIndex[LFE] = 5;
mDevice->RealOut.ChannelIndex[SideLeft] = 6;
mDevice->RealOut.ChannelIndex[SideRight] = 7;
return;
case DevFmtX714:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[BackLeft] = 2;
mDevice->RealOut.ChannelIndex[BackRight] = 3;
mDevice->RealOut.ChannelIndex[FrontCenter] = 4;
mDevice->RealOut.ChannelIndex[LFE] = 5;
mDevice->RealOut.ChannelIndex[SideLeft] = 6;
mDevice->RealOut.ChannelIndex[SideRight] = 7;
mDevice->RealOut.ChannelIndex[TopFrontLeft] = 8;
mDevice->RealOut.ChannelIndex[TopFrontRight] = 9;
mDevice->RealOut.ChannelIndex[TopBackLeft] = 10;
mDevice->RealOut.ChannelIndex[TopBackRight] = 11;
break;
case DevFmtX3D71:
mDevice->RealOut.ChannelIndex[FrontLeft] = 0;
mDevice->RealOut.ChannelIndex[FrontRight] = 1;
mDevice->RealOut.ChannelIndex[Aux0] = 2;
mDevice->RealOut.ChannelIndex[Aux1] = 3;
mDevice->RealOut.ChannelIndex[FrontCenter] = 4;
mDevice->RealOut.ChannelIndex[LFE] = 5;
mDevice->RealOut.ChannelIndex[SideLeft] = 6;
mDevice->RealOut.ChannelIndex[SideRight] = 7;
return;
/* Same as WFX order */
case DevFmtMono:
case DevFmtStereo:
case DevFmtQuad:
case DevFmtX61:
case DevFmtAmbi3D:
setDefaultWFXChannelOrder();
break;
}
}


@@ -0,0 +1,114 @@
#ifndef ALC_BACKENDS_BASE_H
#define ALC_BACKENDS_BASE_H
#include <chrono>
#include <cstdarg>
#include <memory>
#include <ratio>
#include <string>
#include "albyte.h"
#include "core/device.h"
#include "core/except.h"
using uint = unsigned int;
struct ClockLatency {
std::chrono::nanoseconds ClockTime;
std::chrono::nanoseconds Latency;
};
struct BackendBase {
virtual void open(const char *name) = 0;
virtual bool reset();
virtual void start() = 0;
virtual void stop() = 0;
virtual void captureSamples(al::byte *buffer, uint samples);
virtual uint availableSamples();
virtual ClockLatency getClockLatency();
DeviceBase *const mDevice;
BackendBase(DeviceBase *device) noexcept : mDevice{device} { }
virtual ~BackendBase() = default;
protected:
/** Sets the default channel order used by most non-WaveFormatEx-based APIs. */
void setDefaultChannelOrder();
/** Sets the default channel order used by WaveFormatEx. */
void setDefaultWFXChannelOrder();
};
using BackendPtr = std::unique_ptr<BackendBase>;
enum class BackendType {
Playback,
Capture
};
/* Helper to get the current clock time from the device's ClockBase, and
* SamplesDone converted from the sample rate.
*/
inline std::chrono::nanoseconds GetDeviceClockTime(DeviceBase *device)
{
using std::chrono::seconds;
using std::chrono::nanoseconds;
auto ns = nanoseconds{seconds{device->SamplesDone}} / device->Frequency;
return device->ClockBase + ns;
}
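/* For example, with SamplesDone=96000 and Frequency=48000 (hypothetical
 * values), ns = nanoseconds{seconds{96000}}/48000 = 2s, so the device clock
 * reads ClockBase plus two seconds of rendered audio.
 */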
/* Helper to get the device latency from the backend, including any fixed
* latency from post-processing.
*/
inline ClockLatency GetClockLatency(DeviceBase *device, BackendBase *backend)
{
ClockLatency ret{backend->getClockLatency()};
ret.Latency += device->FixedLatency;
return ret;
}
struct BackendFactory {
virtual bool init() = 0;
virtual bool querySupport(BackendType type) = 0;
virtual std::string probe(BackendType type) = 0;
virtual BackendPtr createBackend(DeviceBase *device, BackendType type) = 0;
protected:
virtual ~BackendFactory() = default;
};
namespace al {
enum class backend_error {
NoDevice,
DeviceError,
OutOfMemory
};
class backend_exception final : public base_exception {
backend_error mErrorCode;
public:
#ifdef __USE_MINGW_ANSI_STDIO
[[gnu::format(gnu_printf, 3, 4)]]
#else
[[gnu::format(printf, 3, 4)]]
#endif
backend_exception(backend_error code, const char *msg, ...);
~backend_exception() override;
backend_error errorCode() const noexcept { return mErrorCode; }
};
} // namespace al
#endif /* ALC_BACKENDS_BASE_H */


@ -0,0 +1,932 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "coreaudio.h"
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <cmath>
#include <memory>
#include <string>
#include "alnumeric.h"
#include "core/converter.h"
#include "core/device.h"
#include "core/logging.h"
#include "ringbuffer.h"
#include <AudioUnit/AudioUnit.h>
#include <AudioToolbox/AudioToolbox.h>
namespace {
#if TARGET_OS_IOS || TARGET_OS_TV
#define CAN_ENUMERATE 0
#else
#define CAN_ENUMERATE 1
#endif
constexpr auto OutputElement = 0;
constexpr auto InputElement = 1;
#if CAN_ENUMERATE
struct DeviceEntry {
AudioDeviceID mId;
std::string mName;
};
std::vector<DeviceEntry> PlaybackList;
std::vector<DeviceEntry> CaptureList;
OSStatus GetHwProperty(AudioHardwarePropertyID propId, UInt32 dataSize, void *propData)
{
const AudioObjectPropertyAddress addr{propId, kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster};
return AudioObjectGetPropertyData(kAudioObjectSystemObject, &addr, 0, nullptr, &dataSize,
propData);
}
OSStatus GetHwPropertySize(AudioHardwarePropertyID propId, UInt32 *outSize)
{
const AudioObjectPropertyAddress addr{propId, kAudioObjectPropertyScopeGlobal,
kAudioObjectPropertyElementMaster};
return AudioObjectGetPropertyDataSize(kAudioObjectSystemObject, &addr, 0, nullptr, outSize);
}
OSStatus GetDevProperty(AudioDeviceID devId, AudioDevicePropertyID propId, bool isCapture,
UInt32 elem, UInt32 dataSize, void *propData)
{
static const AudioObjectPropertyScope scopes[2]{kAudioDevicePropertyScopeOutput,
kAudioDevicePropertyScopeInput};
const AudioObjectPropertyAddress addr{propId, scopes[isCapture], elem};
return AudioObjectGetPropertyData(devId, &addr, 0, nullptr, &dataSize, propData);
}
OSStatus GetDevPropertySize(AudioDeviceID devId, AudioDevicePropertyID inPropertyID,
bool isCapture, UInt32 elem, UInt32 *outSize)
{
static const AudioObjectPropertyScope scopes[2]{kAudioDevicePropertyScopeOutput,
kAudioDevicePropertyScopeInput};
const AudioObjectPropertyAddress addr{inPropertyID, scopes[isCapture], elem};
return AudioObjectGetPropertyDataSize(devId, &addr, 0, nullptr, outSize);
}
std::string GetDeviceName(AudioDeviceID devId)
{
std::string devname;
CFStringRef nameRef;
/* Try to get the device name as a CFString, for Unicode name support. */
OSStatus err{GetDevProperty(devId, kAudioDevicePropertyDeviceNameCFString, false, 0,
sizeof(nameRef), &nameRef)};
if(err == noErr)
{
const CFIndex propSize{CFStringGetMaximumSizeForEncoding(CFStringGetLength(nameRef),
kCFStringEncodingUTF8)};
devname.resize(static_cast<size_t>(propSize)+1, '\0');
CFStringGetCString(nameRef, &devname[0], propSize+1, kCFStringEncodingUTF8);
CFRelease(nameRef);
}
else
{
/* If that failed, just get the C string. Hopefully there's nothing bad
* with this.
*/
UInt32 propSize{};
if(GetDevPropertySize(devId, kAudioDevicePropertyDeviceName, false, 0, &propSize))
return devname;
devname.resize(propSize+1, '\0');
if(GetDevProperty(devId, kAudioDevicePropertyDeviceName, false, 0, propSize, &devname[0]))
{
devname.clear();
return devname;
}
}
/* Clear extraneous nul chars that may have been written with the name
* string, and return it.
*/
while(!devname.back())
devname.pop_back();
return devname;
}
UInt32 GetDeviceChannelCount(AudioDeviceID devId, bool isCapture)
{
UInt32 propSize{};
auto err = GetDevPropertySize(devId, kAudioDevicePropertyStreamConfiguration, isCapture, 0,
&propSize);
if(err)
{
ERR("kAudioDevicePropertyStreamConfiguration size query failed: %u\n", err);
return 0;
}
auto buflist_data = std::make_unique<char[]>(propSize);
auto *buflist = reinterpret_cast<AudioBufferList*>(buflist_data.get());
err = GetDevProperty(devId, kAudioDevicePropertyStreamConfiguration, isCapture, 0, propSize,
buflist);
if(err)
{
ERR("kAudioDevicePropertyStreamConfiguration query failed: %u\n", err);
return 0;
}
UInt32 numChannels{0};
for(size_t i{0};i < buflist->mNumberBuffers;++i)
numChannels += buflist->mBuffers[i].mNumberChannels;
return numChannels;
}
void EnumerateDevices(std::vector<DeviceEntry> &list, bool isCapture)
{
UInt32 propSize{};
if(auto err = GetHwPropertySize(kAudioHardwarePropertyDevices, &propSize))
{
ERR("Failed to get device list size: %u\n", err);
return;
}
auto devIds = std::vector<AudioDeviceID>(propSize/sizeof(AudioDeviceID), kAudioDeviceUnknown);
if(auto err = GetHwProperty(kAudioHardwarePropertyDevices, propSize, devIds.data()))
{
ERR("Failed to get device list: %u\n", err);
return;
}
std::vector<DeviceEntry> newdevs;
newdevs.reserve(devIds.size());
AudioDeviceID defaultId{kAudioDeviceUnknown};
GetHwProperty(isCapture ? kAudioHardwarePropertyDefaultInputDevice :
kAudioHardwarePropertyDefaultOutputDevice, sizeof(defaultId), &defaultId);
if(defaultId != kAudioDeviceUnknown)
{
newdevs.emplace_back(DeviceEntry{defaultId, GetDeviceName(defaultId)});
const auto &entry = newdevs.back();
TRACE("Got device: %s = ID %u\n", entry.mName.c_str(), entry.mId);
}
for(const AudioDeviceID devId : devIds)
{
if(devId == kAudioDeviceUnknown)
continue;
auto match_devid = [devId](const DeviceEntry &entry) noexcept -> bool
{ return entry.mId == devId; };
auto match = std::find_if(newdevs.cbegin(), newdevs.cend(), match_devid);
if(match != newdevs.cend()) continue;
auto numChannels = GetDeviceChannelCount(devId, isCapture);
if(numChannels > 0)
{
newdevs.emplace_back(DeviceEntry{devId, GetDeviceName(devId)});
const auto &entry = newdevs.back();
TRACE("Got device: %s = ID %u\n", entry.mName.c_str(), entry.mId);
}
}
if(newdevs.size() > 1)
{
/* Rename entries that have matching names, by appending '#2', '#3',
* etc, as needed.
*/
for(auto curitem = newdevs.begin()+1;curitem != newdevs.end();++curitem)
{
auto check_match = [curitem](const DeviceEntry &entry) -> bool
{ return entry.mName == curitem->mName; };
if(std::find_if(newdevs.begin(), curitem, check_match) != curitem)
{
std::string name{curitem->mName};
size_t count{1};
auto check_name = [&name](const DeviceEntry &entry) -> bool
{ return entry.mName == name; };
do {
name = curitem->mName;
name += " #";
name += std::to_string(++count);
} while(std::find_if(newdevs.begin(), curitem, check_name) != curitem);
curitem->mName = std::move(name);
}
}
}
newdevs.shrink_to_fit();
newdevs.swap(list);
}
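/* For example, three devices all reporting the name "Built-in Output"
 * (hypothetical) end up listed as "Built-in Output", "Built-in Output #2" and
 * "Built-in Output #3" after the renaming pass above.
 */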
#else
static constexpr char ca_device[] = "CoreAudio Default";
#endif
struct CoreAudioPlayback final : public BackendBase {
CoreAudioPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~CoreAudioPlayback() override;
OSStatus MixerProc(AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData) noexcept;
static OSStatus MixerProcC(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData) noexcept
{
return static_cast<CoreAudioPlayback*>(inRefCon)->MixerProc(ioActionFlags, inTimeStamp,
inBusNumber, inNumberFrames, ioData);
}
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
AudioUnit mAudioUnit{};
uint mFrameSize{0u};
AudioStreamBasicDescription mFormat{}; // This is the OpenAL format as a CoreAudio ASBD
DEF_NEWDEL(CoreAudioPlayback)
};
CoreAudioPlayback::~CoreAudioPlayback()
{
AudioUnitUninitialize(mAudioUnit);
AudioComponentInstanceDispose(mAudioUnit);
}
OSStatus CoreAudioPlayback::MixerProc(AudioUnitRenderActionFlags*, const AudioTimeStamp*, UInt32,
UInt32, AudioBufferList *ioData) noexcept
{
for(size_t i{0};i < ioData->mNumberBuffers;++i)
{
auto &buffer = ioData->mBuffers[i];
mDevice->renderSamples(buffer.mData, buffer.mDataByteSize/mFrameSize,
buffer.mNumberChannels);
}
return noErr;
}
void CoreAudioPlayback::open(const char *name)
{
#if CAN_ENUMERATE
AudioDeviceID audioDevice{kAudioDeviceUnknown};
if(!name)
GetHwProperty(kAudioHardwarePropertyDefaultOutputDevice, sizeof(audioDevice),
&audioDevice);
else
{
if(PlaybackList.empty())
EnumerateDevices(PlaybackList, false);
auto find_name = [name](const DeviceEntry &entry) -> bool
{ return entry.mName == name; };
auto devmatch = std::find_if(PlaybackList.cbegin(), PlaybackList.cend(), find_name);
if(devmatch == PlaybackList.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name};
audioDevice = devmatch->mId;
}
#else
if(!name)
name = ca_device;
else if(strcmp(name, ca_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
#endif
/* open the default output unit */
AudioComponentDescription desc{};
desc.componentType = kAudioUnitType_Output;
#if CAN_ENUMERATE
desc.componentSubType = (audioDevice == kAudioDeviceUnknown) ?
kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
#else
desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
AudioComponent comp{AudioComponentFindNext(NULL, &desc)};
if(comp == nullptr)
throw al::backend_exception{al::backend_error::NoDevice, "Could not find audio component"};
AudioUnit audioUnit{};
OSStatus err{AudioComponentInstanceNew(comp, &audioUnit)};
if(err != noErr)
throw al::backend_exception{al::backend_error::NoDevice,
"Could not create component instance: %u", err};
#if CAN_ENUMERATE
if(audioDevice != kAudioDeviceUnknown)
AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, OutputElement, &audioDevice, sizeof(AudioDeviceID));
#endif
err = AudioUnitInitialize(audioUnit);
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not initialize audio unit: %u", err};
/* WARNING: I don't know if "valid" audio unit values are guaranteed to be
* non-0. If not, this logic is broken.
*/
if(mAudioUnit)
{
AudioUnitUninitialize(mAudioUnit);
AudioComponentInstanceDispose(mAudioUnit);
}
mAudioUnit = audioUnit;
#if CAN_ENUMERATE
if(name)
mDevice->DeviceName = name;
else
{
UInt32 propSize{sizeof(audioDevice)};
audioDevice = kAudioDeviceUnknown;
AudioUnitGetProperty(audioUnit, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, OutputElement, &audioDevice, &propSize);
std::string devname{GetDeviceName(audioDevice)};
if(!devname.empty()) mDevice->DeviceName = std::move(devname);
else mDevice->DeviceName = "Unknown Device Name";
}
#else
mDevice->DeviceName = name;
#endif
}
bool CoreAudioPlayback::reset()
{
OSStatus err{AudioUnitUninitialize(mAudioUnit)};
if(err != noErr)
ERR("-- AudioUnitUninitialize failed.\n");
/* retrieve default output unit's properties (output side) */
AudioStreamBasicDescription streamFormat{};
UInt32 size{sizeof(streamFormat)};
err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
OutputElement, &streamFormat, &size);
if(err != noErr || size != sizeof(streamFormat))
{
ERR("AudioUnitGetProperty failed\n");
return false;
}
#if 0
TRACE("Output streamFormat of default output unit -\n");
TRACE(" streamFormat.mFramesPerPacket = %d\n", streamFormat.mFramesPerPacket);
TRACE(" streamFormat.mChannelsPerFrame = %d\n", streamFormat.mChannelsPerFrame);
TRACE(" streamFormat.mBitsPerChannel = %d\n", streamFormat.mBitsPerChannel);
TRACE(" streamFormat.mBytesPerPacket = %d\n", streamFormat.mBytesPerPacket);
TRACE(" streamFormat.mBytesPerFrame = %d\n", streamFormat.mBytesPerFrame);
TRACE(" streamFormat.mSampleRate = %5.0f\n", streamFormat.mSampleRate);
#endif
/* Use the sample rate from the output unit's current parameters, but reset
* everything else.
*/
if(mDevice->Frequency != streamFormat.mSampleRate)
{
mDevice->BufferSize = static_cast<uint>(mDevice->BufferSize*streamFormat.mSampleRate/
mDevice->Frequency + 0.5);
mDevice->Frequency = static_cast<uint>(streamFormat.mSampleRate);
}
/* FIXME: How to tell what channels are what in the output device, and how
* to specify what we're giving? e.g. 6.0 vs 5.1
*/
streamFormat.mChannelsPerFrame = mDevice->channelsFromFmt();
streamFormat.mFramesPerPacket = 1;
streamFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kLinearPCMFormatFlagIsPacked;
streamFormat.mFormatID = kAudioFormatLinearPCM;
switch(mDevice->FmtType)
{
case DevFmtUByte:
mDevice->FmtType = DevFmtByte;
/* fall-through */
case DevFmtByte:
streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
streamFormat.mBitsPerChannel = 8;
break;
case DevFmtUShort:
mDevice->FmtType = DevFmtShort;
/* fall-through */
case DevFmtShort:
streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
streamFormat.mBitsPerChannel = 16;
break;
case DevFmtUInt:
mDevice->FmtType = DevFmtInt;
/* fall-through */
case DevFmtInt:
streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsSignedInteger;
streamFormat.mBitsPerChannel = 32;
break;
case DevFmtFloat:
streamFormat.mFormatFlags |= kLinearPCMFormatFlagIsFloat;
streamFormat.mBitsPerChannel = 32;
break;
}
streamFormat.mBytesPerFrame = streamFormat.mChannelsPerFrame*streamFormat.mBitsPerChannel/8;
streamFormat.mBytesPerPacket = streamFormat.mBytesPerFrame*streamFormat.mFramesPerPacket;
err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
OutputElement, &streamFormat, sizeof(streamFormat));
if(err != noErr)
{
ERR("AudioUnitSetProperty failed\n");
return false;
}
setDefaultWFXChannelOrder();
/* setup callback */
mFrameSize = mDevice->frameSizeFromFmt();
AURenderCallbackStruct input{};
input.inputProc = CoreAudioPlayback::MixerProcC;
input.inputProcRefCon = this;
err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_SetRenderCallback,
kAudioUnitScope_Input, OutputElement, &input, sizeof(AURenderCallbackStruct));
if(err != noErr)
{
ERR("AudioUnitSetProperty failed\n");
return false;
}
/* init the default audio unit... */
err = AudioUnitInitialize(mAudioUnit);
if(err != noErr)
{
ERR("AudioUnitInitialize failed\n");
return false;
}
return true;
}
void CoreAudioPlayback::start()
{
const OSStatus err{AudioOutputUnitStart(mAudioUnit)};
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"AudioOutputUnitStart failed: %d", err};
}
void CoreAudioPlayback::stop()
{
OSStatus err{AudioOutputUnitStop(mAudioUnit)};
if(err != noErr)
ERR("AudioOutputUnitStop failed\n");
}
struct CoreAudioCapture final : public BackendBase {
CoreAudioCapture(DeviceBase *device) noexcept : BackendBase{device} { }
~CoreAudioCapture() override;
OSStatus RecordProc(AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber,
UInt32 inNumberFrames, AudioBufferList *ioData) noexcept;
static OSStatus RecordProcC(void *inRefCon, AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData) noexcept
{
return static_cast<CoreAudioCapture*>(inRefCon)->RecordProc(ioActionFlags, inTimeStamp,
inBusNumber, inNumberFrames, ioData);
}
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
AudioUnit mAudioUnit{0};
uint mFrameSize{0u};
AudioStreamBasicDescription mFormat{}; // This is the OpenAL format as a CoreAudio ASBD
SampleConverterPtr mConverter;
al::vector<char> mCaptureData;
RingBufferPtr mRing{nullptr};
DEF_NEWDEL(CoreAudioCapture)
};
CoreAudioCapture::~CoreAudioCapture()
{
if(mAudioUnit)
AudioComponentInstanceDispose(mAudioUnit);
mAudioUnit = 0;
}
OSStatus CoreAudioCapture::RecordProc(AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp, UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList*) noexcept
{
union {
al::byte _[maxz(sizeof(AudioBufferList), offsetof(AudioBufferList, mBuffers[1]))];
AudioBufferList list;
} audiobuf{};
audiobuf.list.mNumberBuffers = 1;
audiobuf.list.mBuffers[0].mNumberChannels = mFormat.mChannelsPerFrame;
audiobuf.list.mBuffers[0].mData = mCaptureData.data();
audiobuf.list.mBuffers[0].mDataByteSize = static_cast<UInt32>(mCaptureData.size());
OSStatus err{AudioUnitRender(mAudioUnit, ioActionFlags, inTimeStamp, inBusNumber,
inNumberFrames, &audiobuf.list)};
if(err != noErr)
{
ERR("AudioUnitRender capture error: %d\n", err);
return err;
}
mRing->write(mCaptureData.data(), inNumberFrames);
return noErr;
}
void CoreAudioCapture::open(const char *name)
{
#if CAN_ENUMERATE
AudioDeviceID audioDevice{kAudioDeviceUnknown};
if(!name)
GetHwProperty(kAudioHardwarePropertyDefaultInputDevice, sizeof(audioDevice),
&audioDevice);
else
{
if(CaptureList.empty())
EnumerateDevices(CaptureList, true);
auto find_name = [name](const DeviceEntry &entry) -> bool
{ return entry.mName == name; };
auto devmatch = std::find_if(CaptureList.cbegin(), CaptureList.cend(), find_name);
if(devmatch == CaptureList.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name};
audioDevice = devmatch->mId;
}
#else
if(!name)
name = ca_device;
else if(strcmp(name, ca_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
#endif
AudioComponentDescription desc{};
desc.componentType = kAudioUnitType_Output;
#if CAN_ENUMERATE
desc.componentSubType = (audioDevice == kAudioDeviceUnknown) ?
kAudioUnitSubType_DefaultOutput : kAudioUnitSubType_HALOutput;
#else
desc.componentSubType = kAudioUnitSubType_RemoteIO;
#endif
desc.componentManufacturer = kAudioUnitManufacturer_Apple;
desc.componentFlags = 0;
desc.componentFlagsMask = 0;
// Search for component with given description
AudioComponent comp{AudioComponentFindNext(NULL, &desc)};
if(comp == NULL)
throw al::backend_exception{al::backend_error::NoDevice, "Could not find audio component"};
// Open the component
OSStatus err{AudioComponentInstanceNew(comp, &mAudioUnit)};
if(err != noErr)
throw al::backend_exception{al::backend_error::NoDevice,
"Could not create component instance: %u", err};
// Turn off AudioUnit output
UInt32 enableIO{0};
err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Output, OutputElement, &enableIO, sizeof(enableIO));
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not disable audio unit output property: %u", err};
// Turn on AudioUnit input
enableIO = 1;
err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_EnableIO,
kAudioUnitScope_Input, InputElement, &enableIO, sizeof(enableIO));
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not enable audio unit input property: %u", err};
#if CAN_ENUMERATE
if(audioDevice != kAudioDeviceUnknown)
AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, InputElement, &audioDevice, sizeof(AudioDeviceID));
#endif
// set capture callback
AURenderCallbackStruct input{};
input.inputProc = CoreAudioCapture::RecordProcC;
input.inputProcRefCon = this;
err = AudioUnitSetProperty(mAudioUnit, kAudioOutputUnitProperty_SetInputCallback,
kAudioUnitScope_Global, InputElement, &input, sizeof(AURenderCallbackStruct));
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not set capture callback: %u", err};
// Disable buffer allocation for capture
UInt32 flag{0};
err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_ShouldAllocateBuffer,
kAudioUnitScope_Output, InputElement, &flag, sizeof(flag));
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not disable buffer allocation property: %u", err};
// Initialize the device
err = AudioUnitInitialize(mAudioUnit);
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not initialize audio unit: %u", err};
// Get the hardware format
AudioStreamBasicDescription hardwareFormat{};
UInt32 propertySize{sizeof(hardwareFormat)};
err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input,
InputElement, &hardwareFormat, &propertySize);
if(err != noErr || propertySize != sizeof(hardwareFormat))
throw al::backend_exception{al::backend_error::DeviceError,
"Could not get input format: %u", err};
// Set up the requested format description
AudioStreamBasicDescription requestedFormat{};
switch(mDevice->FmtType)
{
case DevFmtByte:
requestedFormat.mBitsPerChannel = 8;
requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
break;
case DevFmtUByte:
requestedFormat.mBitsPerChannel = 8;
requestedFormat.mFormatFlags = kAudioFormatFlagIsPacked;
break;
case DevFmtShort:
requestedFormat.mBitsPerChannel = 16;
requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
| kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
break;
case DevFmtUShort:
requestedFormat.mBitsPerChannel = 16;
requestedFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
break;
case DevFmtInt:
requestedFormat.mBitsPerChannel = 32;
requestedFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger
| kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
break;
case DevFmtUInt:
requestedFormat.mBitsPerChannel = 32;
requestedFormat.mFormatFlags = kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked;
break;
case DevFmtFloat:
requestedFormat.mBitsPerChannel = 32;
requestedFormat.mFormatFlags = kLinearPCMFormatFlagIsFloat | kAudioFormatFlagsNativeEndian
| kAudioFormatFlagIsPacked;
break;
}
switch(mDevice->FmtChans)
{
case DevFmtMono:
requestedFormat.mChannelsPerFrame = 1;
break;
case DevFmtStereo:
requestedFormat.mChannelsPerFrame = 2;
break;
case DevFmtQuad:
case DevFmtX51:
case DevFmtX61:
case DevFmtX71:
case DevFmtX714:
case DevFmtX3D71:
case DevFmtAmbi3D:
throw al::backend_exception{al::backend_error::DeviceError, "%s not supported",
DevFmtChannelsString(mDevice->FmtChans)};
}
requestedFormat.mBytesPerFrame = requestedFormat.mChannelsPerFrame * requestedFormat.mBitsPerChannel / 8;
requestedFormat.mBytesPerPacket = requestedFormat.mBytesPerFrame;
requestedFormat.mSampleRate = mDevice->Frequency;
requestedFormat.mFormatID = kAudioFormatLinearPCM;
requestedFormat.mReserved = 0;
requestedFormat.mFramesPerPacket = 1;
// save requested format description for later use
mFormat = requestedFormat;
mFrameSize = mDevice->frameSizeFromFmt();
// Use intermediate format for sample rate conversion (outputFormat)
// Set sample rate to the same as hardware for resampling later
AudioStreamBasicDescription outputFormat{requestedFormat};
outputFormat.mSampleRate = hardwareFormat.mSampleRate;
// The output format should be the requested format, but using the hardware sample rate
// This is because the AudioUnit will automatically scale other properties, except for sample rate
err = AudioUnitSetProperty(mAudioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output,
InputElement, &outputFormat, sizeof(outputFormat));
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"Could not set input format: %u", err};
/* Calculate the minimum AudioUnit output format frame count for the pre-
* conversion ring buffer. Ensure at least 100ms for the total buffer.
*/
double srateScale{outputFormat.mSampleRate / mDevice->Frequency};
auto FrameCount64 = maxu64(static_cast<uint64_t>(std::ceil(mDevice->BufferSize*srateScale)),
static_cast<UInt32>(outputFormat.mSampleRate)/10);
FrameCount64 += MaxResamplerPadding;
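/* For example, with a 44100Hz device, BufferSize=2048 and a 48000Hz hardware
 * rate (hypothetical values), srateScale is about 1.088, ceil(2048*1.088) is
 * 2230 frames, the 100ms floor is 48000/10 = 4800 frames, and FrameCount64
 * becomes 4800 plus MaxResamplerPadding.
 */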
if(FrameCount64 > std::numeric_limits<int32_t>::max())
throw al::backend_exception{al::backend_error::DeviceError,
"Calculated frame count is too large: %" PRIu64, FrameCount64};
UInt32 outputFrameCount{};
propertySize = sizeof(outputFrameCount);
err = AudioUnitGetProperty(mAudioUnit, kAudioUnitProperty_MaximumFramesPerSlice,
kAudioUnitScope_Global, OutputElement, &outputFrameCount, &propertySize);
if(err != noErr || propertySize != sizeof(outputFrameCount))
throw al::backend_exception{al::backend_error::DeviceError,
"Could not get input frame count: %u", err};
mCaptureData.resize(outputFrameCount * mFrameSize);
outputFrameCount = static_cast<UInt32>(maxu64(outputFrameCount, FrameCount64));
mRing = RingBuffer::Create(outputFrameCount, mFrameSize, false);
/* Set up sample converter if needed */
if(outputFormat.mSampleRate != mDevice->Frequency)
mConverter = SampleConverter::Create(mDevice->FmtType, mDevice->FmtType,
mFormat.mChannelsPerFrame, static_cast<uint>(hardwareFormat.mSampleRate),
mDevice->Frequency, Resampler::FastBSinc24);
#if CAN_ENUMERATE
if(name)
mDevice->DeviceName = name;
else
{
UInt32 propSize{sizeof(audioDevice)};
audioDevice = kAudioDeviceUnknown;
AudioUnitGetProperty(mAudioUnit, kAudioOutputUnitProperty_CurrentDevice,
kAudioUnitScope_Global, InputElement, &audioDevice, &propSize);
std::string devname{GetDeviceName(audioDevice)};
if(!devname.empty()) mDevice->DeviceName = std::move(devname);
else mDevice->DeviceName = "Unknown Device Name";
}
#else
mDevice->DeviceName = name;
#endif
}
void CoreAudioCapture::start()
{
OSStatus err{AudioOutputUnitStart(mAudioUnit)};
if(err != noErr)
throw al::backend_exception{al::backend_error::DeviceError,
"AudioOutputUnitStart failed: %d", err};
}
void CoreAudioCapture::stop()
{
OSStatus err{AudioOutputUnitStop(mAudioUnit)};
if(err != noErr)
ERR("AudioOutputUnitStop failed\n");
}
void CoreAudioCapture::captureSamples(al::byte *buffer, uint samples)
{
if(!mConverter)
{
mRing->read(buffer, samples);
return;
}
auto rec_vec = mRing->getReadVector();
const void *src0{rec_vec.first.buf};
auto src0len = static_cast<uint>(rec_vec.first.len);
uint got{mConverter->convert(&src0, &src0len, buffer, samples)};
size_t total_read{rec_vec.first.len - src0len};
if(got < samples && !src0len && rec_vec.second.len > 0)
{
const void *src1{rec_vec.second.buf};
auto src1len = static_cast<uint>(rec_vec.second.len);
got += mConverter->convert(&src1, &src1len, buffer + got*mFrameSize, samples-got);
total_read += rec_vec.second.len - src1len;
}
mRing->readAdvance(total_read);
}
uint CoreAudioCapture::availableSamples()
{
if(!mConverter) return static_cast<uint>(mRing->readSpace());
return mConverter->availableOut(static_cast<uint>(mRing->readSpace()));
}
} // namespace
BackendFactory &CoreAudioBackendFactory::getFactory()
{
static CoreAudioBackendFactory factory{};
return factory;
}
bool CoreAudioBackendFactory::init() { return true; }
bool CoreAudioBackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback || type == BackendType::Capture; }
std::string CoreAudioBackendFactory::probe(BackendType type)
{
std::string outnames;
#if CAN_ENUMERATE
auto append_name = [&outnames](const DeviceEntry &entry) -> void
{
/* Includes null char. */
outnames.append(entry.mName.c_str(), entry.mName.length()+1);
};
switch(type)
{
case BackendType::Playback:
EnumerateDevices(PlaybackList, false);
std::for_each(PlaybackList.cbegin(), PlaybackList.cend(), append_name);
break;
case BackendType::Capture:
EnumerateDevices(CaptureList, true);
std::for_each(CaptureList.cbegin(), CaptureList.cend(), append_name);
break;
}
#else
switch(type)
{
case BackendType::Playback:
case BackendType::Capture:
/* Includes null char. */
outnames.append(ca_device, sizeof(ca_device));
break;
}
#endif
return outnames;
}
BackendPtr CoreAudioBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new CoreAudioPlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new CoreAudioCapture{device}};
return nullptr;
}
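A small sketch of consuming the probe() result above, assuming the packed "name1\0name2\0" layout produced by append_name; the helper name is illustrative.

#include <string>
#include <vector>

/* Split a packed, null-separated device-name list into individual names. */
std::vector<std::string> splitDeviceNames(const std::string &packed)
{
    std::vector<std::string> names;
    size_t pos{0};
    while(pos < packed.size())
    {
        const size_t end{packed.find('\0', pos)};
        names.emplace_back(packed.substr(pos, end-pos));
        if(end == std::string::npos) break;
        pos = end+1;
    }
    return names;
}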


@@ -0,0 +1,19 @@
#ifndef BACKENDS_COREAUDIO_H
#define BACKENDS_COREAUDIO_H
#include "base.h"
struct CoreAudioBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_COREAUDIO_H */


@ -0,0 +1,850 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "dsound.h"
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <cguid.h>
#include <mmreg.h>
#ifndef _WAVEFORMATEXTENSIBLE_
#include <ks.h>
#include <ksmedia.h>
#endif
#include <atomic>
#include <cassert>
#include <thread>
#include <string>
#include <vector>
#include <algorithm>
#include <functional>
#include "alnumeric.h"
#include "comptr.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "dynload.h"
#include "ringbuffer.h"
#include "strutils.h"
#include "threads.h"
/* MinGW-w64 needs this for some unknown reason now. */
using LPCWAVEFORMATEX = const WAVEFORMATEX*;
#include <dsound.h>
#ifndef DSSPEAKER_5POINT1
# define DSSPEAKER_5POINT1 0x00000006
#endif
#ifndef DSSPEAKER_5POINT1_BACK
# define DSSPEAKER_5POINT1_BACK 0x00000006
#endif
#ifndef DSSPEAKER_7POINT1
# define DSSPEAKER_7POINT1 0x00000007
#endif
#ifndef DSSPEAKER_7POINT1_SURROUND
# define DSSPEAKER_7POINT1_SURROUND 0x00000008
#endif
#ifndef DSSPEAKER_5POINT1_SURROUND
# define DSSPEAKER_5POINT1_SURROUND 0x00000009
#endif
/* Some headers seem to define these as macros for __uuidof, which is annoying
* since some headers don't declare them at all. Hopefully the ifdef is enough
* to tell if they need to be declared.
*/
#ifndef KSDATAFORMAT_SUBTYPE_PCM
DEFINE_GUID(KSDATAFORMAT_SUBTYPE_PCM, 0x00000001, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
#endif
#ifndef KSDATAFORMAT_SUBTYPE_IEEE_FLOAT
DEFINE_GUID(KSDATAFORMAT_SUBTYPE_IEEE_FLOAT, 0x00000003, 0x0000, 0x0010, 0x80, 0x00, 0x00, 0xaa, 0x00, 0x38, 0x9b, 0x71);
#endif
namespace {
#define DEVNAME_HEAD "OpenAL Soft on "
#ifdef HAVE_DYNLOAD
void *ds_handle;
HRESULT (WINAPI *pDirectSoundCreate)(const GUID *pcGuidDevice, IDirectSound **ppDS, IUnknown *pUnkOuter);
HRESULT (WINAPI *pDirectSoundEnumerateW)(LPDSENUMCALLBACKW pDSEnumCallback, void *pContext);
HRESULT (WINAPI *pDirectSoundCaptureCreate)(const GUID *pcGuidDevice, IDirectSoundCapture **ppDSC, IUnknown *pUnkOuter);
HRESULT (WINAPI *pDirectSoundCaptureEnumerateW)(LPDSENUMCALLBACKW pDSEnumCallback, void *pContext);
#ifndef IN_IDE_PARSER
#define DirectSoundCreate pDirectSoundCreate
#define DirectSoundEnumerateW pDirectSoundEnumerateW
#define DirectSoundCaptureCreate pDirectSoundCaptureCreate
#define DirectSoundCaptureEnumerateW pDirectSoundCaptureEnumerateW
#endif
#endif
#define MONO SPEAKER_FRONT_CENTER
#define STEREO (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT)
#define QUAD (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT)
#define X5DOT1 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT)
#define X5DOT1REAR (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT)
#define X6DOT1 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_CENTER|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT)
#define X7DOT1 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT)
#define X7DOT1DOT4 (SPEAKER_FRONT_LEFT|SPEAKER_FRONT_RIGHT|SPEAKER_FRONT_CENTER|SPEAKER_LOW_FREQUENCY|SPEAKER_BACK_LEFT|SPEAKER_BACK_RIGHT|SPEAKER_SIDE_LEFT|SPEAKER_SIDE_RIGHT|SPEAKER_TOP_FRONT_LEFT|SPEAKER_TOP_FRONT_RIGHT|SPEAKER_TOP_BACK_LEFT|SPEAKER_TOP_BACK_RIGHT)
#define MAX_UPDATES 128
struct DevMap {
std::string name;
GUID guid;
template<typename T0, typename T1>
DevMap(T0&& name_, T1&& guid_)
: name{std::forward<T0>(name_)}, guid{std::forward<T1>(guid_)}
{ }
};
al::vector<DevMap> PlaybackDevices;
al::vector<DevMap> CaptureDevices;
bool checkName(const al::vector<DevMap> &list, const std::string &name)
{
auto match_name = [&name](const DevMap &entry) -> bool
{ return entry.name == name; };
return std::find_if(list.cbegin(), list.cend(), match_name) != list.cend();
}
BOOL CALLBACK DSoundEnumDevices(GUID *guid, const WCHAR *desc, const WCHAR*, void *data) noexcept
{
if(!guid)
return TRUE;
auto& devices = *static_cast<al::vector<DevMap>*>(data);
const std::string basename{DEVNAME_HEAD + wstr_to_utf8(desc)};
int count{1};
std::string newname{basename};
while(checkName(devices, newname))
{
newname = basename;
newname += " #";
newname += std::to_string(++count);
}
devices.emplace_back(std::move(newname), *guid);
const DevMap &newentry = devices.back();
OLECHAR *guidstr{nullptr};
HRESULT hr{StringFromCLSID(*guid, &guidstr)};
if(SUCCEEDED(hr))
{
TRACE("Got device \"%s\", GUID \"%ls\"\n", newentry.name.c_str(), guidstr);
CoTaskMemFree(guidstr);
}
return TRUE;
}
struct DSoundPlayback final : public BackendBase {
DSoundPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~DSoundPlayback() override;
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
ComPtr<IDirectSound> mDS;
ComPtr<IDirectSoundBuffer> mPrimaryBuffer;
ComPtr<IDirectSoundBuffer> mBuffer;
ComPtr<IDirectSoundNotify> mNotifies;
HANDLE mNotifyEvent{nullptr};
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(DSoundPlayback)
};
DSoundPlayback::~DSoundPlayback()
{
mNotifies = nullptr;
mBuffer = nullptr;
mPrimaryBuffer = nullptr;
mDS = nullptr;
if(mNotifyEvent)
CloseHandle(mNotifyEvent);
mNotifyEvent = nullptr;
}
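/* Mixer thread: renders samples into the DirectSound buffer whenever enough
 * space opens up behind the play cursor, waiting on position notifications
 * otherwise.
 */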
FORCE_ALIGN int DSoundPlayback::mixerProc()
{
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
DSBCAPS DSBCaps{};
DSBCaps.dwSize = sizeof(DSBCaps);
HRESULT err{mBuffer->GetCaps(&DSBCaps)};
if(FAILED(err))
{
ERR("Failed to get buffer caps: 0x%lx\n", err);
mDevice->handleDisconnect("Failure retrieving playback buffer info: 0x%lx", err);
return 1;
}
const size_t FrameStep{mDevice->channelsFromFmt()};
uint FrameSize{mDevice->frameSizeFromFmt()};
DWORD FragSize{mDevice->UpdateSize * FrameSize};
bool Playing{false};
DWORD LastCursor{0u};
mBuffer->GetCurrentPosition(&LastCursor, nullptr);
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
// Get current play cursor
DWORD PlayCursor;
mBuffer->GetCurrentPosition(&PlayCursor, nullptr);
DWORD avail = (PlayCursor-LastCursor+DSBCaps.dwBufferBytes) % DSBCaps.dwBufferBytes;
if(avail < FragSize)
{
if(!Playing)
{
err = mBuffer->Play(0, 0, DSBPLAY_LOOPING);
if(FAILED(err))
{
ERR("Failed to play buffer: 0x%lx\n", err);
mDevice->handleDisconnect("Failure starting playback: 0x%lx", err);
return 1;
}
Playing = true;
}
avail = WaitForSingleObjectEx(mNotifyEvent, 2000, FALSE);
if(avail != WAIT_OBJECT_0)
ERR("WaitForSingleObjectEx error: 0x%lx\n", avail);
continue;
}
avail -= avail%FragSize;
// Lock output buffer
void *WritePtr1, *WritePtr2;
DWORD WriteCnt1{0u}, WriteCnt2{0u};
err = mBuffer->Lock(LastCursor, avail, &WritePtr1, &WriteCnt1, &WritePtr2, &WriteCnt2, 0);
// If the buffer is lost, restore it and lock
if(err == DSERR_BUFFERLOST)
{
WARN("Buffer lost, restoring...\n");
err = mBuffer->Restore();
if(SUCCEEDED(err))
{
Playing = false;
LastCursor = 0;
err = mBuffer->Lock(0, DSBCaps.dwBufferBytes, &WritePtr1, &WriteCnt1,
&WritePtr2, &WriteCnt2, 0);
}
}
if(SUCCEEDED(err))
{
mDevice->renderSamples(WritePtr1, WriteCnt1/FrameSize, FrameStep);
if(WriteCnt2 > 0)
mDevice->renderSamples(WritePtr2, WriteCnt2/FrameSize, FrameStep);
mBuffer->Unlock(WritePtr1, WriteCnt1, WritePtr2, WriteCnt2);
}
else
{
ERR("Buffer lock error: %#lx\n", err);
mDevice->handleDisconnect("Failed to lock output buffer: 0x%lx", err);
return 1;
}
// Update old write cursor location
LastCursor += WriteCnt1+WriteCnt2;
LastCursor %= DSBCaps.dwBufferBytes;
}
return 0;
}
void DSoundPlayback::open(const char *name)
{
HRESULT hr;
if(PlaybackDevices.empty())
{
/* Initialize COM to prevent name truncation */
HRESULT hrcom{CoInitialize(nullptr)};
hr = DirectSoundEnumerateW(DSoundEnumDevices, &PlaybackDevices);
if(FAILED(hr))
ERR("Error enumerating DirectSound devices (0x%lx)!\n", hr);
if(SUCCEEDED(hrcom))
CoUninitialize();
}
const GUID *guid{nullptr};
if(!name && !PlaybackDevices.empty())
{
name = PlaybackDevices[0].name.c_str();
guid = &PlaybackDevices[0].guid;
}
else
{
auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
[name](const DevMap &entry) -> bool { return entry.name == name; });
if(iter == PlaybackDevices.cend())
{
GUID id{};
hr = CLSIDFromString(utf8_to_wstr(name).c_str(), &id);
if(SUCCEEDED(hr))
iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
[&id](const DevMap &entry) -> bool { return entry.guid == id; });
if(iter == PlaybackDevices.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name};
}
guid = &iter->guid;
}
hr = DS_OK;
if(!mNotifyEvent)
{
mNotifyEvent = CreateEventW(nullptr, FALSE, FALSE, nullptr);
if(!mNotifyEvent) hr = E_FAIL;
}
//DirectSound Init code
ComPtr<IDirectSound> ds;
if(SUCCEEDED(hr))
hr = DirectSoundCreate(guid, ds.getPtr(), nullptr);
if(SUCCEEDED(hr))
hr = ds->SetCooperativeLevel(GetForegroundWindow(), DSSCL_PRIORITY);
if(FAILED(hr))
throw al::backend_exception{al::backend_error::DeviceError, "Device init failed: 0x%08lx",
hr};
mNotifies = nullptr;
mBuffer = nullptr;
mPrimaryBuffer = nullptr;
mDS = std::move(ds);
mDevice->DeviceName = name;
}
bool DSoundPlayback::reset()
{
mNotifies = nullptr;
mBuffer = nullptr;
mPrimaryBuffer = nullptr;
switch(mDevice->FmtType)
{
case DevFmtByte:
mDevice->FmtType = DevFmtUByte;
break;
case DevFmtFloat:
if(mDevice->Flags.test(SampleTypeRequest))
break;
/* fall-through */
case DevFmtUShort:
mDevice->FmtType = DevFmtShort;
break;
case DevFmtUInt:
mDevice->FmtType = DevFmtInt;
break;
case DevFmtUByte:
case DevFmtShort:
case DevFmtInt:
break;
}
WAVEFORMATEXTENSIBLE OutputType{};
DWORD speakers{};
HRESULT hr{mDS->GetSpeakerConfig(&speakers)};
if(FAILED(hr))
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to get speaker config: 0x%08lx", hr};
speakers = DSSPEAKER_CONFIG(speakers);
if(!mDevice->Flags.test(ChannelsRequest))
{
if(speakers == DSSPEAKER_MONO)
mDevice->FmtChans = DevFmtMono;
else if(speakers == DSSPEAKER_STEREO || speakers == DSSPEAKER_HEADPHONE)
mDevice->FmtChans = DevFmtStereo;
else if(speakers == DSSPEAKER_QUAD)
mDevice->FmtChans = DevFmtQuad;
else if(speakers == DSSPEAKER_5POINT1_SURROUND || speakers == DSSPEAKER_5POINT1_BACK)
mDevice->FmtChans = DevFmtX51;
else if(speakers == DSSPEAKER_7POINT1 || speakers == DSSPEAKER_7POINT1_SURROUND)
mDevice->FmtChans = DevFmtX71;
else
ERR("Unknown system speaker config: 0x%lx\n", speakers);
}
mDevice->Flags.set(DirectEar, (speakers == DSSPEAKER_HEADPHONE));
const bool isRear51{speakers == DSSPEAKER_5POINT1_BACK};
switch(mDevice->FmtChans)
{
case DevFmtMono: OutputType.dwChannelMask = MONO; break;
case DevFmtAmbi3D: mDevice->FmtChans = DevFmtStereo;
/* fall-through */
case DevFmtStereo: OutputType.dwChannelMask = STEREO; break;
case DevFmtQuad: OutputType.dwChannelMask = QUAD; break;
case DevFmtX51: OutputType.dwChannelMask = isRear51 ? X5DOT1REAR : X5DOT1; break;
case DevFmtX61: OutputType.dwChannelMask = X6DOT1; break;
case DevFmtX71: OutputType.dwChannelMask = X7DOT1; break;
case DevFmtX714: OutputType.dwChannelMask = X7DOT1DOT4; break;
case DevFmtX3D71: OutputType.dwChannelMask = X7DOT1; break;
}
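/* Build the output format. If creating a float buffer fails below, this is
 * retried with 16-bit samples.
 */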
retry_open:
hr = S_OK;
OutputType.Format.wFormatTag = WAVE_FORMAT_PCM;
OutputType.Format.nChannels = static_cast<WORD>(mDevice->channelsFromFmt());
OutputType.Format.wBitsPerSample = static_cast<WORD>(mDevice->bytesFromFmt() * 8);
OutputType.Format.nBlockAlign = static_cast<WORD>(OutputType.Format.nChannels *
OutputType.Format.wBitsPerSample / 8);
OutputType.Format.nSamplesPerSec = mDevice->Frequency;
OutputType.Format.nAvgBytesPerSec = OutputType.Format.nSamplesPerSec *
OutputType.Format.nBlockAlign;
OutputType.Format.cbSize = 0;
if(OutputType.Format.nChannels > 2 || mDevice->FmtType == DevFmtFloat)
{
OutputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
OutputType.Samples.wValidBitsPerSample = OutputType.Format.wBitsPerSample;
OutputType.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
if(mDevice->FmtType == DevFmtFloat)
OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
else
OutputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
mPrimaryBuffer = nullptr;
}
else
{
if(SUCCEEDED(hr) && !mPrimaryBuffer)
{
DSBUFFERDESC DSBDescription{};
DSBDescription.dwSize = sizeof(DSBDescription);
DSBDescription.dwFlags = DSBCAPS_PRIMARYBUFFER;
hr = mDS->CreateSoundBuffer(&DSBDescription, mPrimaryBuffer.getPtr(), nullptr);
}
if(SUCCEEDED(hr))
hr = mPrimaryBuffer->SetFormat(&OutputType.Format);
}
if(SUCCEEDED(hr))
{
uint num_updates{mDevice->BufferSize / mDevice->UpdateSize};
if(num_updates > MAX_UPDATES)
num_updates = MAX_UPDATES;
mDevice->BufferSize = mDevice->UpdateSize * num_updates;
DSBUFFERDESC DSBDescription{};
DSBDescription.dwSize = sizeof(DSBDescription);
DSBDescription.dwFlags = DSBCAPS_CTRLPOSITIONNOTIFY | DSBCAPS_GETCURRENTPOSITION2
| DSBCAPS_GLOBALFOCUS;
DSBDescription.dwBufferBytes = mDevice->BufferSize * OutputType.Format.nBlockAlign;
DSBDescription.lpwfxFormat = &OutputType.Format;
hr = mDS->CreateSoundBuffer(&DSBDescription, mBuffer.getPtr(), nullptr);
if(FAILED(hr) && mDevice->FmtType == DevFmtFloat)
{
mDevice->FmtType = DevFmtShort;
goto retry_open;
}
}
if(SUCCEEDED(hr))
{
void *ptr;
hr = mBuffer->QueryInterface(IID_IDirectSoundNotify, &ptr);
if(SUCCEEDED(hr))
{
mNotifies = ComPtr<IDirectSoundNotify>{static_cast<IDirectSoundNotify*>(ptr)};
uint num_updates{mDevice->BufferSize / mDevice->UpdateSize};
assert(num_updates <= MAX_UPDATES);
std::array<DSBPOSITIONNOTIFY,MAX_UPDATES> nots;
for(uint i{0};i < num_updates;++i)
{
nots[i].dwOffset = i * mDevice->UpdateSize * OutputType.Format.nBlockAlign;
nots[i].hEventNotify = mNotifyEvent;
}
if(mNotifies->SetNotificationPositions(num_updates, nots.data()) != DS_OK)
hr = E_FAIL;
}
}
if(FAILED(hr))
{
mNotifies = nullptr;
mBuffer = nullptr;
mPrimaryBuffer = nullptr;
return false;
}
ResetEvent(mNotifyEvent);
setDefaultWFXChannelOrder();
return true;
}
void DSoundPlayback::start()
{
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&DSoundPlayback::mixerProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void DSoundPlayback::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
mBuffer->Stop();
}
struct DSoundCapture final : public BackendBase {
DSoundCapture(DeviceBase *device) noexcept : BackendBase{device} { }
~DSoundCapture() override;
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
ComPtr<IDirectSoundCapture> mDSC;
ComPtr<IDirectSoundCaptureBuffer> mDSCbuffer;
DWORD mBufferBytes{0u};
DWORD mCursor{0u};
RingBufferPtr mRing;
DEF_NEWDEL(DSoundCapture)
};
DSoundCapture::~DSoundCapture()
{
if(mDSCbuffer)
{
mDSCbuffer->Stop();
mDSCbuffer = nullptr;
}
mDSC = nullptr;
}
void DSoundCapture::open(const char *name)
{
HRESULT hr;
if(CaptureDevices.empty())
{
/* Initialize COM to prevent name truncation */
HRESULT hrcom{CoInitialize(nullptr)};
hr = DirectSoundCaptureEnumerateW(DSoundEnumDevices, &CaptureDevices);
if(FAILED(hr))
ERR("Error enumerating DirectSound devices (0x%lx)!\n", hr);
if(SUCCEEDED(hrcom))
CoUninitialize();
}
const GUID *guid{nullptr};
if(!name && !CaptureDevices.empty())
{
name = CaptureDevices[0].name.c_str();
guid = &CaptureDevices[0].guid;
}
else
{
auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
[name](const DevMap &entry) -> bool { return entry.name == name; });
if(iter == CaptureDevices.cend())
{
GUID id{};
hr = CLSIDFromString(utf8_to_wstr(name).c_str(), &id);
if(SUCCEEDED(hr))
iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
[&id](const DevMap &entry) -> bool { return entry.guid == id; });
if(iter == CaptureDevices.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name};
}
guid = &iter->guid;
}
switch(mDevice->FmtType)
{
case DevFmtByte:
case DevFmtUShort:
case DevFmtUInt:
WARN("%s capture samples not supported\n", DevFmtTypeString(mDevice->FmtType));
throw al::backend_exception{al::backend_error::DeviceError,
"%s capture samples not supported", DevFmtTypeString(mDevice->FmtType)};
case DevFmtUByte:
case DevFmtShort:
case DevFmtInt:
case DevFmtFloat:
break;
}
WAVEFORMATEXTENSIBLE InputType{};
switch(mDevice->FmtChans)
{
case DevFmtMono: InputType.dwChannelMask = MONO; break;
case DevFmtStereo: InputType.dwChannelMask = STEREO; break;
case DevFmtQuad: InputType.dwChannelMask = QUAD; break;
case DevFmtX51: InputType.dwChannelMask = X5DOT1; break;
case DevFmtX61: InputType.dwChannelMask = X6DOT1; break;
case DevFmtX71: InputType.dwChannelMask = X7DOT1; break;
case DevFmtX714: InputType.dwChannelMask = X7DOT1DOT4; break;
case DevFmtX3D71:
case DevFmtAmbi3D:
WARN("%s capture not supported\n", DevFmtChannelsString(mDevice->FmtChans));
throw al::backend_exception{al::backend_error::DeviceError, "%s capture not supported",
DevFmtChannelsString(mDevice->FmtChans)};
}
InputType.Format.wFormatTag = WAVE_FORMAT_PCM;
InputType.Format.nChannels = static_cast<WORD>(mDevice->channelsFromFmt());
InputType.Format.wBitsPerSample = static_cast<WORD>(mDevice->bytesFromFmt() * 8);
InputType.Format.nBlockAlign = static_cast<WORD>(InputType.Format.nChannels *
InputType.Format.wBitsPerSample / 8);
InputType.Format.nSamplesPerSec = mDevice->Frequency;
InputType.Format.nAvgBytesPerSec = InputType.Format.nSamplesPerSec *
InputType.Format.nBlockAlign;
InputType.Format.cbSize = 0;
InputType.Samples.wValidBitsPerSample = InputType.Format.wBitsPerSample;
if(mDevice->FmtType == DevFmtFloat)
InputType.SubFormat = KSDATAFORMAT_SUBTYPE_IEEE_FLOAT;
else
InputType.SubFormat = KSDATAFORMAT_SUBTYPE_PCM;
if(InputType.Format.nChannels > 2 || mDevice->FmtType == DevFmtFloat)
{
InputType.Format.wFormatTag = WAVE_FORMAT_EXTENSIBLE;
InputType.Format.cbSize = sizeof(WAVEFORMATEXTENSIBLE) - sizeof(WAVEFORMATEX);
}
uint samples{mDevice->BufferSize};
samples = maxu(samples, 100 * mDevice->Frequency / 1000);
DSCBUFFERDESC DSCBDescription{};
DSCBDescription.dwSize = sizeof(DSCBDescription);
DSCBDescription.dwFlags = 0;
DSCBDescription.dwBufferBytes = samples * InputType.Format.nBlockAlign;
DSCBDescription.lpwfxFormat = &InputType.Format;
//DirectSoundCapture Init code
hr = DirectSoundCaptureCreate(guid, mDSC.getPtr(), nullptr);
if(SUCCEEDED(hr))
hr = mDSC->CreateCaptureBuffer(&DSCBDescription, mDSCbuffer.getPtr(), nullptr);
if(SUCCEEDED(hr))
mRing = RingBuffer::Create(mDevice->BufferSize, InputType.Format.nBlockAlign, false);
if(FAILED(hr))
{
mRing = nullptr;
mDSCbuffer = nullptr;
mDSC = nullptr;
throw al::backend_exception{al::backend_error::DeviceError, "Device init failed: 0x%08lx",
hr};
}
mBufferBytes = DSCBDescription.dwBufferBytes;
setDefaultWFXChannelOrder();
mDevice->DeviceName = name;
}
void DSoundCapture::start()
{
const HRESULT hr{mDSCbuffer->Start(DSCBSTART_LOOPING)};
if(FAILED(hr))
throw al::backend_exception{al::backend_error::DeviceError,
"Failure starting capture: 0x%lx", hr};
}
void DSoundCapture::stop()
{
HRESULT hr{mDSCbuffer->Stop()};
if(FAILED(hr))
{
ERR("stop failed: 0x%08lx\n", hr);
mDevice->handleDisconnect("Failure stopping capture: 0x%lx", hr);
}
}
void DSoundCapture::captureSamples(al::byte *buffer, uint samples)
{ mRing->read(buffer, samples); }
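/* Copies any newly captured data from the DirectSound buffer into the ring
 * buffer, then reports how many samples are available to read.
 */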
uint DSoundCapture::availableSamples()
{
if(!mDevice->Connected.load(std::memory_order_acquire))
return static_cast<uint>(mRing->readSpace());
const uint FrameSize{mDevice->frameSizeFromFmt()};
const DWORD BufferBytes{mBufferBytes};
const DWORD LastCursor{mCursor};
DWORD ReadCursor{};
void *ReadPtr1{}, *ReadPtr2{};
DWORD ReadCnt1{}, ReadCnt2{};
HRESULT hr{mDSCbuffer->GetCurrentPosition(nullptr, &ReadCursor)};
if(SUCCEEDED(hr))
{
const DWORD NumBytes{(BufferBytes+ReadCursor-LastCursor) % BufferBytes};
if(!NumBytes) return static_cast<uint>(mRing->readSpace());
hr = mDSCbuffer->Lock(LastCursor, NumBytes, &ReadPtr1, &ReadCnt1, &ReadPtr2, &ReadCnt2, 0);
}
if(SUCCEEDED(hr))
{
mRing->write(ReadPtr1, ReadCnt1/FrameSize);
if(ReadPtr2 != nullptr && ReadCnt2 > 0)
mRing->write(ReadPtr2, ReadCnt2/FrameSize);
hr = mDSCbuffer->Unlock(ReadPtr1, ReadCnt1, ReadPtr2, ReadCnt2);
mCursor = ReadCursor;
}
if(FAILED(hr))
{
ERR("update failed: 0x%08lx\n", hr);
mDevice->handleDisconnect("Failure retrieving capture data: 0x%lx", hr);
}
return static_cast<uint>(mRing->readSpace());
}
} // namespace
BackendFactory &DSoundBackendFactory::getFactory()
{
static DSoundBackendFactory factory{};
return factory;
}
bool DSoundBackendFactory::init()
{
#ifdef HAVE_DYNLOAD
if(!ds_handle)
{
ds_handle = LoadLib("dsound.dll");
if(!ds_handle)
{
ERR("Failed to load dsound.dll\n");
return false;
}
#define LOAD_FUNC(f) do { \
p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(ds_handle, #f)); \
if(!p##f) \
{ \
CloseLib(ds_handle); \
ds_handle = nullptr; \
return false; \
} \
} while(0)
LOAD_FUNC(DirectSoundCreate);
LOAD_FUNC(DirectSoundEnumerateW);
LOAD_FUNC(DirectSoundCaptureCreate);
LOAD_FUNC(DirectSoundCaptureEnumerateW);
#undef LOAD_FUNC
}
#endif
return true;
}
bool DSoundBackendFactory::querySupport(BackendType type)
{ return (type == BackendType::Playback || type == BackendType::Capture); }
std::string DSoundBackendFactory::probe(BackendType type)
{
std::string outnames;
auto add_device = [&outnames](const DevMap &entry) -> void
{
/* +1 to also append the null char (to ensure a null-separated list and
* double-null terminated list).
*/
outnames.append(entry.name.c_str(), entry.name.length()+1);
};
/* Initialize COM to prevent name truncation */
HRESULT hr;
HRESULT hrcom{CoInitialize(nullptr)};
switch(type)
{
case BackendType::Playback:
PlaybackDevices.clear();
hr = DirectSoundEnumerateW(DSoundEnumDevices, &PlaybackDevices);
if(FAILED(hr))
ERR("Error enumerating DirectSound playback devices (0x%lx)!\n", hr);
std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
break;
case BackendType::Capture:
CaptureDevices.clear();
hr = DirectSoundCaptureEnumerateW(DSoundEnumDevices, &CaptureDevices);
if(FAILED(hr))
ERR("Error enumerating DirectSound capture devices (0x%lx)!\n", hr);
std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
break;
}
if(SUCCEEDED(hrcom))
CoUninitialize();
return outnames;
}
BackendPtr DSoundBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new DSoundPlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new DSoundCapture{device}};
return nullptr;
}

View File

@ -0,0 +1,19 @@
#ifndef BACKENDS_DSOUND_H
#define BACKENDS_DSOUND_H
#include "base.h"
struct DSoundBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_DSOUND_H */

View File

@ -0,0 +1,744 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "jack.h"
#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <memory.h>
#include <array>
#include <thread>
#include <functional>
#include "alc/alconfig.h"
#include "alnumeric.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "dynload.h"
#include "ringbuffer.h"
#include "threads.h"
#include <jack/jack.h>
#include <jack/ringbuffer.h>
namespace {
#ifdef HAVE_DYNLOAD
#define JACK_FUNCS(MAGIC) \
MAGIC(jack_client_open); \
MAGIC(jack_client_close); \
MAGIC(jack_client_name_size); \
MAGIC(jack_get_client_name); \
MAGIC(jack_connect); \
MAGIC(jack_activate); \
MAGIC(jack_deactivate); \
MAGIC(jack_port_register); \
MAGIC(jack_port_unregister); \
MAGIC(jack_port_get_buffer); \
MAGIC(jack_port_name); \
MAGIC(jack_get_ports); \
MAGIC(jack_free); \
MAGIC(jack_get_sample_rate); \
MAGIC(jack_set_error_function); \
MAGIC(jack_set_process_callback); \
MAGIC(jack_set_buffer_size_callback); \
MAGIC(jack_set_buffer_size); \
MAGIC(jack_get_buffer_size);
void *jack_handle;
#define MAKE_FUNC(f) decltype(f) * p##f
JACK_FUNCS(MAKE_FUNC)
decltype(jack_error_callback) * pjack_error_callback;
#undef MAKE_FUNC
#ifndef IN_IDE_PARSER
#define jack_client_open pjack_client_open
#define jack_client_close pjack_client_close
#define jack_client_name_size pjack_client_name_size
#define jack_get_client_name pjack_get_client_name
#define jack_connect pjack_connect
#define jack_activate pjack_activate
#define jack_deactivate pjack_deactivate
#define jack_port_register pjack_port_register
#define jack_port_unregister pjack_port_unregister
#define jack_port_get_buffer pjack_port_get_buffer
#define jack_port_name pjack_port_name
#define jack_get_ports pjack_get_ports
#define jack_free pjack_free
#define jack_get_sample_rate pjack_get_sample_rate
#define jack_set_error_function pjack_set_error_function
#define jack_set_process_callback pjack_set_process_callback
#define jack_set_buffer_size_callback pjack_set_buffer_size_callback
#define jack_set_buffer_size pjack_set_buffer_size
#define jack_get_buffer_size pjack_get_buffer_size
#define jack_error_callback (*pjack_error_callback)
#endif
#endif
constexpr char JackDefaultAudioType[] = JACK_DEFAULT_AUDIO_TYPE;
jack_options_t ClientOptions = JackNullOption;
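/* Loads libjack at runtime and resolves the required entry points when
 * dynamic loading is enabled; otherwise the symbols are linked directly.
 */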
bool jack_load()
{
bool error{false};
#ifdef HAVE_DYNLOAD
if(!jack_handle)
{
std::string missing_funcs;
#ifdef _WIN32
#define JACKLIB "libjack.dll"
#else
#define JACKLIB "libjack.so.0"
#endif
jack_handle = LoadLib(JACKLIB);
if(!jack_handle)
{
WARN("Failed to load %s\n", JACKLIB);
return false;
}
error = false;
#define LOAD_FUNC(f) do { \
p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(jack_handle, #f)); \
if(p##f == nullptr) { \
error = true; \
missing_funcs += "\n" #f; \
} \
} while(0)
JACK_FUNCS(LOAD_FUNC);
#undef LOAD_FUNC
/* Optional symbols. These don't exist in all versions of JACK. */
#define LOAD_SYM(f) p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(jack_handle, #f))
LOAD_SYM(jack_error_callback);
#undef LOAD_SYM
if(error)
{
WARN("Missing expected functions:%s\n", missing_funcs.c_str());
CloseLib(jack_handle);
jack_handle = nullptr;
}
}
#endif
return !error;
}
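/* Deleter for port name arrays returned by jack_get_ports(). */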
struct JackDeleter {
void operator()(void *ptr) { jack_free(ptr); }
};
using JackPortsPtr = std::unique_ptr<const char*[],JackDeleter>;
struct DeviceEntry {
std::string mName;
std::string mPattern;
template<typename T, typename U>
DeviceEntry(T&& name, U&& pattern)
: mName{std::forward<T>(name)}, mPattern{std::forward<U>(pattern)}
{ }
};
al::vector<DeviceEntry> PlaybackList;
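/* Builds the playback device list from JACK's input ports (the part of a
 * port name before ':' identifies a device), then applies any entries from
 * the jack/custom-devices config option.
 */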
void EnumerateDevices(jack_client_t *client, al::vector<DeviceEntry> &list)
{
std::remove_reference_t<decltype(list)>{}.swap(list);
if(JackPortsPtr ports{jack_get_ports(client, nullptr, JackDefaultAudioType, JackPortIsInput)})
{
for(size_t i{0};ports[i];++i)
{
const char *sep{std::strchr(ports[i], ':')};
if(!sep || ports[i] == sep) continue;
const al::span<const char> portdev{ports[i], sep};
auto check_name = [portdev](const DeviceEntry &entry) -> bool
{
const size_t len{portdev.size()};
return entry.mName.length() == len
&& entry.mName.compare(0, len, portdev.data(), len) == 0;
};
if(std::find_if(list.cbegin(), list.cend(), check_name) != list.cend())
continue;
std::string name{portdev.data(), portdev.size()};
list.emplace_back(name, name+":");
const auto &entry = list.back();
TRACE("Got device: %s = %s\n", entry.mName.c_str(), entry.mPattern.c_str());
}
/* There are ports, but we couldn't get device names from them. Add a
 * generic entry.
 */
if(ports[0] && list.empty())
{
WARN("No device names found in available ports, adding a generic name.\n");
list.emplace_back("JACK", "");
}
}
if(auto listopt = ConfigValueStr(nullptr, "jack", "custom-devices"))
{
for(size_t strpos{0};strpos < listopt->size();)
{
size_t nextpos{listopt->find(';', strpos)};
size_t seppos{listopt->find('=', strpos)};
if(seppos >= nextpos || seppos == strpos)
{
const std::string entry{listopt->substr(strpos, nextpos-strpos)};
ERR("Invalid device entry: \"%s\"\n", entry.c_str());
if(nextpos != std::string::npos) ++nextpos;
strpos = nextpos;
continue;
}
const al::span<const char> name{listopt->data()+strpos, seppos-strpos};
const al::span<const char> pattern{listopt->data()+(seppos+1),
std::min(nextpos, listopt->size())-(seppos+1)};
/* Check if this custom pattern already exists in the list. */
auto check_pattern = [pattern](const DeviceEntry &entry) -> bool
{
const size_t len{pattern.size()};
return entry.mPattern.length() == len
&& entry.mPattern.compare(0, len, pattern.data(), len) == 0;
};
auto itemmatch = std::find_if(list.begin(), list.end(), check_pattern);
if(itemmatch != list.end())
{
/* If so, replace the name with this custom one. */
itemmatch->mName.assign(name.data(), name.size());
TRACE("Customized device name: %s = %s\n", itemmatch->mName.c_str(),
itemmatch->mPattern.c_str());
}
else
{
/* Otherwise, add a new device entry. */
list.emplace_back(std::string{name.data(), name.size()},
std::string{pattern.data(), pattern.size()});
const auto &entry = list.back();
TRACE("Got custom device: %s = %s\n", entry.mName.c_str(), entry.mPattern.c_str());
}
if(nextpos != std::string::npos) ++nextpos;
strpos = nextpos;
}
}
if(list.size() > 1)
{
/* Rename entries that have matching names, by appending '#2', '#3',
* etc, as needed.
*/
for(auto curitem = list.begin()+1;curitem != list.end();++curitem)
{
auto check_match = [curitem](const DeviceEntry &entry) -> bool
{ return entry.mName == curitem->mName; };
if(std::find_if(list.begin(), curitem, check_match) != curitem)
{
std::string name{curitem->mName};
size_t count{1};
auto check_name = [&name](const DeviceEntry &entry) -> bool
{ return entry.mName == name; };
do {
name = curitem->mName;
name += " #";
name += std::to_string(++count);
} while(std::find_if(list.begin(), curitem, check_name) != curitem);
curitem->mName = std::move(name);
}
}
}
}
struct JackPlayback final : public BackendBase {
JackPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~JackPlayback() override;
int processRt(jack_nframes_t numframes) noexcept;
static int processRtC(jack_nframes_t numframes, void *arg) noexcept
{ return static_cast<JackPlayback*>(arg)->processRt(numframes); }
int process(jack_nframes_t numframes) noexcept;
static int processC(jack_nframes_t numframes, void *arg) noexcept
{ return static_cast<JackPlayback*>(arg)->process(numframes); }
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
ClockLatency getClockLatency() override;
std::string mPortPattern;
jack_client_t *mClient{nullptr};
std::array<jack_port_t*,MAX_OUTPUT_CHANNELS> mPort{};
std::mutex mMutex;
std::atomic<bool> mPlaying{false};
bool mRTMixing{false};
RingBufferPtr mRing;
al::semaphore mSem;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(JackPlayback)
};
JackPlayback::~JackPlayback()
{
if(!mClient)
return;
auto unregister_port = [this](jack_port_t *port) -> void
{ if(port) jack_port_unregister(mClient, port); };
std::for_each(mPort.begin(), mPort.end(), unregister_port);
mPort.fill(nullptr);
jack_client_close(mClient);
mClient = nullptr;
}
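/* Real-time process callback (rt-mix enabled): renders samples directly into
 * the JACK port buffers.
 */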
int JackPlayback::processRt(jack_nframes_t numframes) noexcept
{
std::array<jack_default_audio_sample_t*,MAX_OUTPUT_CHANNELS> out;
size_t numchans{0};
for(auto port : mPort)
{
if(!port || numchans == mDevice->RealOut.Buffer.size())
break;
out[numchans++] = static_cast<float*>(jack_port_get_buffer(port, numframes));
}
if(mPlaying.load(std::memory_order_acquire)) LIKELY
mDevice->renderSamples({out.data(), numchans}, static_cast<uint>(numframes));
else
{
auto clear_buf = [numframes](float *outbuf) -> void
{ std::fill_n(outbuf, numframes, 0.0f); };
std::for_each(out.begin(), out.begin()+numchans, clear_buf);
}
return 0;
}
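/* Non-real-time process callback: deinterleaves pre-mixed samples from the
 * ring buffer into the per-channel port buffers, zero-filling any shortfall.
 */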
int JackPlayback::process(jack_nframes_t numframes) noexcept
{
std::array<jack_default_audio_sample_t*,MAX_OUTPUT_CHANNELS> out;
size_t numchans{0};
for(auto port : mPort)
{
if(!port) break;
out[numchans++] = static_cast<float*>(jack_port_get_buffer(port, numframes));
}
jack_nframes_t total{0};
if(mPlaying.load(std::memory_order_acquire)) LIKELY
{
auto data = mRing->getReadVector();
jack_nframes_t todo{minu(numframes, static_cast<uint>(data.first.len))};
auto write_first = [&data,numchans,todo](float *outbuf) -> float*
{
const float *RESTRICT in = reinterpret_cast<float*>(data.first.buf);
auto deinterlace_input = [&in,numchans]() noexcept -> float
{
float ret{*in};
in += numchans;
return ret;
};
std::generate_n(outbuf, todo, deinterlace_input);
data.first.buf += sizeof(float);
return outbuf + todo;
};
std::transform(out.begin(), out.begin()+numchans, out.begin(), write_first);
total += todo;
todo = minu(numframes-total, static_cast<uint>(data.second.len));
if(todo > 0)
{
auto write_second = [&data,numchans,todo](float *outbuf) -> float*
{
const float *RESTRICT in = reinterpret_cast<float*>(data.second.buf);
auto deinterlace_input = [&in,numchans]() noexcept -> float
{
float ret{*in};
in += numchans;
return ret;
};
std::generate_n(outbuf, todo, deinterlace_input);
data.second.buf += sizeof(float);
return outbuf + todo;
};
std::transform(out.begin(), out.begin()+numchans, out.begin(), write_second);
total += todo;
}
mRing->readAdvance(total);
mSem.post();
}
if(numframes > total)
{
const jack_nframes_t todo{numframes - total};
auto clear_buf = [todo](float *outbuf) -> void { std::fill_n(outbuf, todo, 0.0f); };
std::for_each(out.begin(), out.begin()+numchans, clear_buf);
}
return 0;
}
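/* Mixer thread used when rt-mix is disabled: keeps the ring buffer filled
 * for the process callback.
 */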
int JackPlayback::mixerProc()
{
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
const size_t frame_step{mDevice->channelsFromFmt()};
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
if(mRing->writeSpace() < mDevice->UpdateSize)
{
mSem.wait();
continue;
}
auto data = mRing->getWriteVector();
size_t todo{data.first.len + data.second.len};
todo -= todo%mDevice->UpdateSize;
const auto len1 = static_cast<uint>(minz(data.first.len, todo));
const auto len2 = static_cast<uint>(minz(data.second.len, todo-len1));
std::lock_guard<std::mutex> _{mMutex};
mDevice->renderSamples(data.first.buf, len1, frame_step);
if(len2 > 0)
mDevice->renderSamples(data.second.buf, len2, frame_step);
mRing->writeAdvance(todo);
}
return 0;
}
void JackPlayback::open(const char *name)
{
if(!mClient)
{
const PathNamePair &binname = GetProcBinary();
const char *client_name{binname.fname.empty() ? "alsoft" : binname.fname.c_str()};
jack_status_t status;
mClient = jack_client_open(client_name, ClientOptions, &status, nullptr);
if(mClient == nullptr)
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to open client connection: 0x%02x", status};
if((status&JackServerStarted))
TRACE("JACK server started\n");
if((status&JackNameNotUnique))
{
client_name = jack_get_client_name(mClient);
TRACE("Client name not unique, got '%s' instead\n", client_name);
}
}
if(PlaybackList.empty())
EnumerateDevices(mClient, PlaybackList);
if(!name && !PlaybackList.empty())
{
name = PlaybackList[0].mName.c_str();
mPortPattern = PlaybackList[0].mPattern;
}
else
{
auto check_name = [name](const DeviceEntry &entry) -> bool
{ return entry.mName == name; };
auto iter = std::find_if(PlaybackList.cbegin(), PlaybackList.cend(), check_name);
if(iter == PlaybackList.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name?name:""};
mPortPattern = iter->mPattern;
}
mRTMixing = GetConfigValueBool(name, "jack", "rt-mix", true);
jack_set_process_callback(mClient,
mRTMixing ? &JackPlayback::processRtC : &JackPlayback::processC, this);
mDevice->DeviceName = name;
}
bool JackPlayback::reset()
{
auto unregister_port = [this](jack_port_t *port) -> void
{ if(port) jack_port_unregister(mClient, port); };
std::for_each(mPort.begin(), mPort.end(), unregister_port);
mPort.fill(nullptr);
/* Ignore the requested buffer metrics and just keep one JACK-sized buffer
* ready for when requested.
*/
mDevice->Frequency = jack_get_sample_rate(mClient);
mDevice->UpdateSize = jack_get_buffer_size(mClient);
if(mRTMixing)
{
/* Assume only two periods when directly mixing. Should try to query
* the total port latency when connected.
*/
mDevice->BufferSize = mDevice->UpdateSize * 2;
}
else
{
const char *devname{mDevice->DeviceName.c_str()};
uint bufsize{ConfigValueUInt(devname, "jack", "buffer-size").value_or(mDevice->UpdateSize)};
bufsize = maxu(NextPowerOf2(bufsize), mDevice->UpdateSize);
mDevice->BufferSize = bufsize + mDevice->UpdateSize;
}
/* Force 32-bit float output. */
mDevice->FmtType = DevFmtFloat;
int port_num{0};
auto ports_end = mPort.begin() + mDevice->channelsFromFmt();
auto bad_port = mPort.begin();
while(bad_port != ports_end)
{
std::string name{"channel_" + std::to_string(++port_num)};
*bad_port = jack_port_register(mClient, name.c_str(), JackDefaultAudioType,
JackPortIsOutput | JackPortIsTerminal, 0);
if(!*bad_port) break;
++bad_port;
}
if(bad_port != ports_end)
{
ERR("Failed to register enough JACK ports for %s output\n",
DevFmtChannelsString(mDevice->FmtChans));
if(bad_port == mPort.begin()) return false;
if(bad_port == mPort.begin()+1)
mDevice->FmtChans = DevFmtMono;
else
{
ports_end = mPort.begin()+2;
while(bad_port != ports_end)
{
jack_port_unregister(mClient, *(--bad_port));
*bad_port = nullptr;
}
mDevice->FmtChans = DevFmtStereo;
}
}
setDefaultChannelOrder();
return true;
}
void JackPlayback::start()
{
if(jack_activate(mClient))
throw al::backend_exception{al::backend_error::DeviceError, "Failed to activate client"};
const char *devname{mDevice->DeviceName.c_str()};
if(ConfigValueBool(devname, "jack", "connect-ports").value_or(true))
{
JackPortsPtr pnames{jack_get_ports(mClient, mPortPattern.c_str(), JackDefaultAudioType,
JackPortIsInput)};
if(!pnames)
{
jack_deactivate(mClient);
throw al::backend_exception{al::backend_error::DeviceError, "No playback ports found"};
}
for(size_t i{0};i < al::size(mPort) && mPort[i];++i)
{
if(!pnames[i])
{
ERR("No physical playback port for \"%s\"\n", jack_port_name(mPort[i]));
break;
}
if(jack_connect(mClient, jack_port_name(mPort[i]), pnames[i]))
ERR("Failed to connect output port \"%s\" to \"%s\"\n", jack_port_name(mPort[i]),
pnames[i]);
}
}
/* Reconfigure buffer metrics in case the server changed it since the reset
* (it won't change again after jack_activate), then allocate the ring
* buffer with the appropriate size.
*/
mDevice->Frequency = jack_get_sample_rate(mClient);
mDevice->UpdateSize = jack_get_buffer_size(mClient);
mDevice->BufferSize = mDevice->UpdateSize * 2;
mRing = nullptr;
if(mRTMixing)
mPlaying.store(true, std::memory_order_release);
else
{
uint bufsize{ConfigValueUInt(devname, "jack", "buffer-size").value_or(mDevice->UpdateSize)};
bufsize = maxu(NextPowerOf2(bufsize), mDevice->UpdateSize);
mDevice->BufferSize = bufsize + mDevice->UpdateSize;
mRing = RingBuffer::Create(bufsize, mDevice->frameSizeFromFmt(), true);
try {
mPlaying.store(true, std::memory_order_release);
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&JackPlayback::mixerProc), this};
}
catch(std::exception& e) {
jack_deactivate(mClient);
mPlaying.store(false, std::memory_order_release);
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
}
void JackPlayback::stop()
{
if(mPlaying.load(std::memory_order_acquire))
{
mKillNow.store(true, std::memory_order_release);
if(mThread.joinable())
{
mSem.post();
mThread.join();
}
jack_deactivate(mClient);
mPlaying.store(false, std::memory_order_release);
}
}
ClockLatency JackPlayback::getClockLatency()
{
ClockLatency ret;
std::lock_guard<std::mutex> _{mMutex};
ret.ClockTime = GetDeviceClockTime(mDevice);
ret.Latency = std::chrono::seconds{mRing ? mRing->readSpace() : mDevice->UpdateSize};
ret.Latency /= mDevice->Frequency;
return ret;
}
void jack_msg_handler(const char *message)
{
WARN("%s\n", message);
}
} // namespace
bool JackBackendFactory::init()
{
if(!jack_load())
return false;
if(!GetConfigValueBool(nullptr, "jack", "spawn-server", false))
ClientOptions = static_cast<jack_options_t>(ClientOptions | JackNoStartServer);
const PathNamePair &binname = GetProcBinary();
const char *client_name{binname.fname.empty() ? "alsoft" : binname.fname.c_str()};
void (*old_error_cb)(const char*){&jack_error_callback ? jack_error_callback : nullptr};
jack_set_error_function(jack_msg_handler);
jack_status_t status;
jack_client_t *client{jack_client_open(client_name, ClientOptions, &status, nullptr)};
jack_set_error_function(old_error_cb);
if(!client)
{
WARN("jack_client_open() failed, 0x%02x\n", status);
if((status&JackServerFailed) && !(ClientOptions&JackNoStartServer))
ERR("Unable to connect to JACK server\n");
return false;
}
jack_client_close(client);
return true;
}
bool JackBackendFactory::querySupport(BackendType type)
{ return (type == BackendType::Playback); }
std::string JackBackendFactory::probe(BackendType type)
{
std::string outnames;
auto append_name = [&outnames](const DeviceEntry &entry) -> void
{
/* Includes null char. */
outnames.append(entry.mName.c_str(), entry.mName.length()+1);
};
const PathNamePair &binname = GetProcBinary();
const char *client_name{binname.fname.empty() ? "alsoft" : binname.fname.c_str()};
jack_status_t status;
switch(type)
{
case BackendType::Playback:
if(jack_client_t *client{jack_client_open(client_name, ClientOptions, &status, nullptr)})
{
EnumerateDevices(client, PlaybackList);
jack_client_close(client);
}
else
WARN("jack_client_open() failed, 0x%02x\n", status);
std::for_each(PlaybackList.cbegin(), PlaybackList.cend(), append_name);
break;
case BackendType::Capture:
break;
}
return outnames;
}
BackendPtr JackBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new JackPlayback{device}};
return nullptr;
}
BackendFactory &JackBackendFactory::getFactory()
{
static JackBackendFactory factory{};
return factory;
}

View File

@ -0,0 +1,19 @@
#ifndef BACKENDS_JACK_H
#define BACKENDS_JACK_H
#include "base.h"
struct JackBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_JACK_H */

View File

@ -0,0 +1,78 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2011 by Chris Robinson
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "loopback.h"
#include "core/device.h"
namespace {
struct LoopbackBackend final : public BackendBase {
LoopbackBackend(DeviceBase *device) noexcept : BackendBase{device} { }
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
DEF_NEWDEL(LoopbackBackend)
};
void LoopbackBackend::open(const char *name)
{
mDevice->DeviceName = name;
}
bool LoopbackBackend::reset()
{
setDefaultWFXChannelOrder();
return true;
}
void LoopbackBackend::start()
{ }
void LoopbackBackend::stop()
{ }
} // namespace
bool LoopbackBackendFactory::init()
{ return true; }
bool LoopbackBackendFactory::querySupport(BackendType)
{ return true; }
std::string LoopbackBackendFactory::probe(BackendType)
{ return std::string{}; }
BackendPtr LoopbackBackendFactory::createBackend(DeviceBase *device, BackendType)
{ return BackendPtr{new LoopbackBackend{device}}; }
BackendFactory &LoopbackBackendFactory::getFactory()
{
static LoopbackBackendFactory factory{};
return factory;
}

View File

@ -0,0 +1,19 @@
#ifndef BACKENDS_LOOPBACK_H
#define BACKENDS_LOOPBACK_H
#include "base.h"
struct LoopbackBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_LOOPBACK_H */

View File

@ -0,0 +1,179 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2010 by Chris Robinson
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "null.h"
#include <exception>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstring>
#include <functional>
#include <thread>
#include "core/device.h"
#include "almalloc.h"
#include "core/helpers.h"
#include "threads.h"
namespace {
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::nanoseconds;
constexpr char nullDevice[] = "No Output";
struct NullBackend final : public BackendBase {
NullBackend(DeviceBase *device) noexcept : BackendBase{device} { }
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(NullBackend)
};
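/* Renders samples at the configured rate against a steady clock, discarding
 * the output.
 */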
int NullBackend::mixerProc()
{
const milliseconds restTime{mDevice->UpdateSize*1000/mDevice->Frequency / 2};
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
int64_t done{0};
auto start = std::chrono::steady_clock::now();
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
auto now = std::chrono::steady_clock::now();
/* This converts from nanoseconds to nanosamples, then to samples. */
int64_t avail{std::chrono::duration_cast<seconds>((now-start) * mDevice->Frequency).count()};
if(avail-done < mDevice->UpdateSize)
{
std::this_thread::sleep_for(restTime);
continue;
}
while(avail-done >= mDevice->UpdateSize)
{
mDevice->renderSamples(nullptr, mDevice->UpdateSize, 0u);
done += mDevice->UpdateSize;
}
/* For every completed second, increment the start time and reduce the
* samples done. This prevents the difference between the start time
* and current time from growing too large, while maintaining the
* correct number of samples to render.
*/
if(done >= mDevice->Frequency)
{
seconds s{done/mDevice->Frequency};
start += s;
done -= mDevice->Frequency*s.count();
}
}
return 0;
}
void NullBackend::open(const char *name)
{
if(!name)
name = nullDevice;
else if(strcmp(name, nullDevice) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
mDevice->DeviceName = name;
}
bool NullBackend::reset()
{
setDefaultWFXChannelOrder();
return true;
}
void NullBackend::start()
{
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&NullBackend::mixerProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void NullBackend::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
}
} // namespace
bool NullBackendFactory::init()
{ return true; }
bool NullBackendFactory::querySupport(BackendType type)
{ return (type == BackendType::Playback); }
std::string NullBackendFactory::probe(BackendType type)
{
std::string outnames;
switch(type)
{
case BackendType::Playback:
/* Includes null char. */
outnames.append(nullDevice, sizeof(nullDevice));
break;
case BackendType::Capture:
break;
}
return outnames;
}
BackendPtr NullBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new NullBackend{device}};
return nullptr;
}
BackendFactory &NullBackendFactory::getFactory()
{
static NullBackendFactory factory{};
return factory;
}

View File

@ -0,0 +1,19 @@
#ifndef BACKENDS_NULL_H
#define BACKENDS_NULL_H
#include "base.h"
struct NullBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_NULL_H */

View File

@ -0,0 +1,360 @@
#include "config.h"
#include "oboe.h"
#include <cassert>
#include <cstring>
#include <stdint.h>
#include "alnumeric.h"
#include "core/device.h"
#include "core/logging.h"
#include "ringbuffer.h"
#include "oboe/Oboe.h"
namespace {
constexpr char device_name[] = "Oboe Default";
struct OboePlayback final : public BackendBase, public oboe::AudioStreamCallback {
OboePlayback(DeviceBase *device) : BackendBase{device} { }
oboe::ManagedStream mStream;
oboe::DataCallbackResult onAudioReady(oboe::AudioStream *oboeStream, void *audioData,
int32_t numFrames) override;
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
};
oboe::DataCallbackResult OboePlayback::onAudioReady(oboe::AudioStream *oboeStream, void *audioData,
int32_t numFrames)
{
assert(numFrames > 0);
const int32_t numChannels{oboeStream->getChannelCount()};
mDevice->renderSamples(audioData, static_cast<uint32_t>(numFrames),
static_cast<uint32_t>(numChannels));
return oboe::DataCallbackResult::Continue;
}
void OboePlayback::open(const char *name)
{
if(!name)
name = device_name;
else if(std::strcmp(name, device_name) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
/* Open a basic output stream, just to ensure it can work. */
oboe::ManagedStream stream;
oboe::Result result{oboe::AudioStreamBuilder{}.setDirection(oboe::Direction::Output)
->setPerformanceMode(oboe::PerformanceMode::LowLatency)
->openManagedStream(stream)};
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to create stream: %s",
oboe::convertToText(result)};
mDevice->DeviceName = name;
}
bool OboePlayback::reset()
{
oboe::AudioStreamBuilder builder;
builder.setDirection(oboe::Direction::Output);
builder.setPerformanceMode(oboe::PerformanceMode::LowLatency);
/* Don't let Oboe convert. We should be able to handle anything it gives
* back.
*/
builder.setSampleRateConversionQuality(oboe::SampleRateConversionQuality::None);
builder.setChannelConversionAllowed(false);
builder.setFormatConversionAllowed(false);
builder.setCallback(this);
if(mDevice->Flags.test(FrequencyRequest))
{
builder.setSampleRateConversionQuality(oboe::SampleRateConversionQuality::High);
builder.setSampleRate(static_cast<int32_t>(mDevice->Frequency));
}
if(mDevice->Flags.test(ChannelsRequest))
{
/* Only use mono or stereo at user request. There's no telling what
* other counts may be inferred as.
*/
builder.setChannelCount((mDevice->FmtChans==DevFmtMono) ? oboe::ChannelCount::Mono
: (mDevice->FmtChans==DevFmtStereo) ? oboe::ChannelCount::Stereo
: oboe::ChannelCount::Unspecified);
}
if(mDevice->Flags.test(SampleTypeRequest))
{
oboe::AudioFormat format{oboe::AudioFormat::Unspecified};
switch(mDevice->FmtType)
{
case DevFmtByte:
case DevFmtUByte:
case DevFmtShort:
case DevFmtUShort:
format = oboe::AudioFormat::I16;
break;
case DevFmtInt:
case DevFmtUInt:
#if OBOE_VERSION_MAJOR > 1 || (OBOE_VERSION_MAJOR == 1 && OBOE_VERSION_MINOR >= 6)
format = oboe::AudioFormat::I32;
break;
#endif
case DevFmtFloat:
format = oboe::AudioFormat::Float;
break;
}
builder.setFormat(format);
}
oboe::Result result{builder.openManagedStream(mStream)};
/* If the format failed, try asking for the defaults. */
while(result == oboe::Result::ErrorInvalidFormat)
{
if(builder.getFormat() != oboe::AudioFormat::Unspecified)
builder.setFormat(oboe::AudioFormat::Unspecified);
else if(builder.getSampleRate() != oboe::kUnspecified)
builder.setSampleRate(oboe::kUnspecified);
else if(builder.getChannelCount() != oboe::ChannelCount::Unspecified)
builder.setChannelCount(oboe::ChannelCount::Unspecified);
else
break;
result = builder.openManagedStream(mStream);
}
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to create stream: %s",
oboe::convertToText(result)};
mStream->setBufferSizeInFrames(mini(static_cast<int32_t>(mDevice->BufferSize),
mStream->getBufferCapacityInFrames()));
TRACE("Got stream with properties:\n%s", oboe::convertToText(mStream.get()));
if(static_cast<uint>(mStream->getChannelCount()) != mDevice->channelsFromFmt())
{
if(mStream->getChannelCount() >= 2)
mDevice->FmtChans = DevFmtStereo;
else if(mStream->getChannelCount() == 1)
mDevice->FmtChans = DevFmtMono;
else
throw al::backend_exception{al::backend_error::DeviceError,
"Got unhandled channel count: %d", mStream->getChannelCount()};
}
setDefaultWFXChannelOrder();
switch(mStream->getFormat())
{
case oboe::AudioFormat::I16:
mDevice->FmtType = DevFmtShort;
break;
case oboe::AudioFormat::Float:
mDevice->FmtType = DevFmtFloat;
break;
#if OBOE_VERSION_MAJOR > 1 || (OBOE_VERSION_MAJOR == 1 && OBOE_VERSION_MINOR >= 6)
case oboe::AudioFormat::I32:
mDevice->FmtType = DevFmtInt;
break;
case oboe::AudioFormat::I24:
#endif
case oboe::AudioFormat::Unspecified:
case oboe::AudioFormat::Invalid:
throw al::backend_exception{al::backend_error::DeviceError,
"Got unhandled sample type: %s", oboe::convertToText(mStream->getFormat())};
}
mDevice->Frequency = static_cast<uint32_t>(mStream->getSampleRate());
/* Ensure the period size is no less than 10ms. It's possible for FramesPerCallback to be 0
* indicating variable updates, but OpenAL should have a reasonable minimum update size set.
* FramesPerBurst may not necessarily be correct, but hopefully it can act as a minimum
* update size.
*/
mDevice->UpdateSize = maxu(mDevice->Frequency / 100,
static_cast<uint32_t>(mStream->getFramesPerBurst()));
mDevice->BufferSize = maxu(mDevice->UpdateSize * 2,
static_cast<uint32_t>(mStream->getBufferSizeInFrames()));
return true;
}
void OboePlayback::start()
{
const oboe::Result result{mStream->start()};
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to start stream: %s",
oboe::convertToText(result)};
}
void OboePlayback::stop()
{
oboe::Result result{mStream->stop()};
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to stop stream: %s",
oboe::convertToText(result)};
}
struct OboeCapture final : public BackendBase, public oboe::AudioStreamCallback {
OboeCapture(DeviceBase *device) : BackendBase{device} { }
oboe::ManagedStream mStream;
RingBufferPtr mRing{nullptr};
oboe::DataCallbackResult onAudioReady(oboe::AudioStream *oboeStream, void *audioData,
int32_t numFrames) override;
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
};
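/* Capture callback: stores the incoming frames in the ring buffer. */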
oboe::DataCallbackResult OboeCapture::onAudioReady(oboe::AudioStream*, void *audioData,
int32_t numFrames)
{
mRing->write(audioData, static_cast<uint32_t>(numFrames));
return oboe::DataCallbackResult::Continue;
}
void OboeCapture::open(const char *name)
{
if(!name)
name = device_name;
else if(std::strcmp(name, device_name) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
oboe::AudioStreamBuilder builder;
builder.setDirection(oboe::Direction::Input)
->setPerformanceMode(oboe::PerformanceMode::LowLatency)
->setSampleRateConversionQuality(oboe::SampleRateConversionQuality::High)
->setChannelConversionAllowed(true)
->setFormatConversionAllowed(true)
->setSampleRate(static_cast<int32_t>(mDevice->Frequency))
->setCallback(this);
/* Only use mono or stereo at user request. There's no telling what
* other counts may be inferred as.
*/
switch(mDevice->FmtChans)
{
case DevFmtMono:
builder.setChannelCount(oboe::ChannelCount::Mono);
break;
case DevFmtStereo:
builder.setChannelCount(oboe::ChannelCount::Stereo);
break;
case DevFmtQuad:
case DevFmtX51:
case DevFmtX61:
case DevFmtX71:
case DevFmtX714:
case DevFmtX3D71:
case DevFmtAmbi3D:
throw al::backend_exception{al::backend_error::DeviceError, "%s capture not supported",
DevFmtChannelsString(mDevice->FmtChans)};
}
/* FIXME: This really should support UByte, but Oboe doesn't. We'll need to
* convert.
*/
switch(mDevice->FmtType)
{
case DevFmtShort:
builder.setFormat(oboe::AudioFormat::I16);
break;
case DevFmtFloat:
builder.setFormat(oboe::AudioFormat::Float);
break;
case DevFmtInt:
#if OBOE_VERSION_MAJOR > 1 || (OBOE_VERSION_MAJOR == 1 && OBOE_VERSION_MINOR >= 6)
builder.setFormat(oboe::AudioFormat::I32);
break;
#endif
case DevFmtByte:
case DevFmtUByte:
case DevFmtUShort:
case DevFmtUInt:
throw al::backend_exception{al::backend_error::DeviceError,
"%s capture samples not supported", DevFmtTypeString(mDevice->FmtType)};
}
oboe::Result result{builder.openManagedStream(mStream)};
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to create stream: %s",
oboe::convertToText(result)};
TRACE("Got stream with properties:\n%s", oboe::convertToText(mStream.get()));
/* Ensure a minimum ringbuffer size of 100ms. */
mRing = RingBuffer::Create(maxu(mDevice->BufferSize, mDevice->Frequency/10),
static_cast<uint32_t>(mStream->getBytesPerFrame()), false);
mDevice->DeviceName = name;
}
void OboeCapture::start()
{
const oboe::Result result{mStream->start()};
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to start stream: %s",
oboe::convertToText(result)};
}
void OboeCapture::stop()
{
const oboe::Result result{mStream->stop()};
if(result != oboe::Result::OK)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to stop stream: %s",
oboe::convertToText(result)};
}
uint OboeCapture::availableSamples()
{ return static_cast<uint>(mRing->readSpace()); }
void OboeCapture::captureSamples(al::byte *buffer, uint samples)
{ mRing->read(buffer, samples); }
} // namespace
bool OboeBackendFactory::init() { return true; }
bool OboeBackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback || type == BackendType::Capture; }
std::string OboeBackendFactory::probe(BackendType type)
{
switch(type)
{
case BackendType::Playback:
case BackendType::Capture:
/* Includes null char. */
return std::string{device_name, sizeof(device_name)};
}
return std::string{};
}
BackendPtr OboeBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new OboePlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new OboeCapture{device}};
return BackendPtr{};
}
BackendFactory &OboeBackendFactory::getFactory()
{
static OboeBackendFactory factory{};
return factory;
}

View File

@ -0,0 +1,19 @@
#ifndef BACKENDS_OBOE_H
#define BACKENDS_OBOE_H
#include "base.h"
struct OboeBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_OBOE_H */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,19 @@
#ifndef BACKENDS_OSL_H
#define BACKENDS_OSL_H
#include "base.h"
struct OSLBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_OSL_H */


@ -0,0 +1,690 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "oss.h"
#include <fcntl.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <cstdio>
#include <cstring>
#include <exception>
#include <functional>
#include <memory>
#include <new>
#include <string>
#include <thread>
#include <utility>
#include "albyte.h"
#include "alc/alconfig.h"
#include "almalloc.h"
#include "alnumeric.h"
#include "aloptional.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "ringbuffer.h"
#include "threads.h"
#include "vector.h"
#include <sys/soundcard.h>
/*
* The OSS documentation talks about SOUND_MIXER_READ, but the header
* only contains MIXER_READ. Play safe. Same for WRITE.
*/
#ifndef SOUND_MIXER_READ
#define SOUND_MIXER_READ MIXER_READ
#endif
#ifndef SOUND_MIXER_WRITE
#define SOUND_MIXER_WRITE MIXER_WRITE
#endif
#if defined(SOUND_VERSION) && (SOUND_VERSION < 0x040000)
#define ALC_OSS_COMPAT
#endif
#ifndef SNDCTL_AUDIOINFO
#define ALC_OSS_COMPAT
#endif
/*
* FreeBSD strongly discourages the use of specific devices,
* such as those returned in oss_audioinfo.devnode
*/
#ifdef __FreeBSD__
#define ALC_OSS_DEVNODE_TRUC
#endif
namespace {
constexpr char DefaultName[] = "OSS Default";
std::string DefaultPlayback{"/dev/dsp"};
std::string DefaultCapture{"/dev/dsp"};
struct DevMap {
std::string name;
std::string device_name;
};
al::vector<DevMap> PlaybackDevices;
al::vector<DevMap> CaptureDevices;
#ifdef ALC_OSS_COMPAT
#define DSP_CAP_OUTPUT 0x00020000
#define DSP_CAP_INPUT 0x00010000
void ALCossListPopulate(al::vector<DevMap> &devlist, int type)
{
devlist.emplace_back(DevMap{DefaultName, (type==DSP_CAP_INPUT) ? DefaultCapture : DefaultPlayback});
}
#else
void ALCossListAppend(al::vector<DevMap> &list, al::span<const char> handle, al::span<const char> path)
{
#ifdef ALC_OSS_DEVNODE_TRUC
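/* If the devnode path has a '.'-prefixed suffix that also ends the handle
 * string, truncate both so the entry refers to the base device node rather
 * than a specific sub-device.
 */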
for(size_t i{0};i < path.size();++i)
{
if(path[i] == '.' && handle.size() + i >= path.size())
{
const size_t hoffset{handle.size() + i - path.size()};
if(strncmp(path.data() + i, handle.data() + hoffset, path.size() - i) == 0)
handle = handle.first(hoffset);
path = path.first(i);
}
}
#endif
if(handle.empty())
handle = path;
std::string basename{handle.data(), handle.size()};
std::string devname{path.data(), path.size()};
auto match_devname = [&devname](const DevMap &entry) -> bool
{ return entry.device_name == devname; };
if(std::find_if(list.cbegin(), list.cend(), match_devname) != list.cend())
return;
auto checkName = [&list](const std::string &name) -> bool
{
auto match_name = [&name](const DevMap &entry) -> bool { return entry.name == name; };
return std::find_if(list.cbegin(), list.cend(), match_name) != list.cend();
};
int count{1};
std::string newname{basename};
while(checkName(newname))
{
newname = basename;
newname += " #";
newname += std::to_string(++count);
}
list.emplace_back(DevMap{std::move(newname), std::move(devname)});
const DevMap &entry = list.back();
TRACE("Got device \"%s\", \"%s\"\n", entry.name.c_str(), entry.device_name.c_str());
}
void ALCossListPopulate(al::vector<DevMap> &devlist, int type_flag)
{
int fd{open("/dev/mixer", O_RDONLY)};
if(fd < 0)
{
TRACE("Could not open /dev/mixer: %s\n", strerror(errno));
goto done;
}
oss_sysinfo si;
if(ioctl(fd, SNDCTL_SYSINFO, &si) == -1)
{
TRACE("SNDCTL_SYSINFO failed: %s\n", strerror(errno));
goto done;
}
for(int i{0};i < si.numaudios;i++)
{
oss_audioinfo ai;
ai.dev = i;
if(ioctl(fd, SNDCTL_AUDIOINFO, &ai) == -1)
{
ERR("SNDCTL_AUDIOINFO (%d) failed: %s\n", i, strerror(errno));
continue;
}
if(!(ai.caps&type_flag) || ai.devnode[0] == '\0')
continue;
al::span<const char> handle;
if(ai.handle[0] != '\0')
handle = {ai.handle, strnlen(ai.handle, sizeof(ai.handle))};
else
handle = {ai.name, strnlen(ai.name, sizeof(ai.name))};
al::span<const char> devnode{ai.devnode, strnlen(ai.devnode, sizeof(ai.devnode))};
ALCossListAppend(devlist, handle, devnode);
}
done:
if(fd >= 0)
close(fd);
fd = -1;
const char *defdev{((type_flag==DSP_CAP_INPUT) ? DefaultCapture : DefaultPlayback).c_str()};
auto iter = std::find_if(devlist.cbegin(), devlist.cend(),
[defdev](const DevMap &entry) -> bool
{ return entry.device_name == defdev; }
);
if(iter == devlist.cend())
devlist.insert(devlist.begin(), DevMap{DefaultName, defdev});
else
{
DevMap entry{std::move(*iter)};
devlist.erase(iter);
devlist.insert(devlist.begin(), std::move(entry));
}
devlist.shrink_to_fit();
}
#endif
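/* Integer log base-2 of x, rounded down. */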
uint log2i(uint x)
{
uint y{0};
while(x > 1)
{
x >>= 1;
y++;
}
return y;
}
struct OSSPlayback final : public BackendBase {
OSSPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~OSSPlayback() override;
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
int mFd{-1};
al::vector<al::byte> mMixData;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(OSSPlayback)
};
OSSPlayback::~OSSPlayback()
{
if(mFd != -1)
::close(mFd);
mFd = -1;
}
int OSSPlayback::mixerProc()
{
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
const size_t frame_step{mDevice->channelsFromFmt()};
const size_t frame_size{mDevice->frameSizeFromFmt()};
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
pollfd pollitem{};
pollitem.fd = mFd;
pollitem.events = POLLOUT;
int pret{poll(&pollitem, 1, 1000)};
if(pret < 0)
{
if(errno == EINTR || errno == EAGAIN)
continue;
ERR("poll failed: %s\n", strerror(errno));
mDevice->handleDisconnect("Failed waiting for playback buffer: %s", strerror(errno));
break;
}
else if(pret == 0)
{
WARN("poll timeout\n");
continue;
}
al::byte *write_ptr{mMixData.data()};
size_t to_write{mMixData.size()};
mDevice->renderSamples(write_ptr, static_cast<uint>(to_write/frame_size), frame_step);
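/* write() may be short or interrupted, so keep going until the whole mix
 * buffer has been submitted (or we're told to stop).
 */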
while(to_write > 0 && !mKillNow.load(std::memory_order_acquire))
{
ssize_t wrote{write(mFd, write_ptr, to_write)};
if(wrote < 0)
{
if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
continue;
ERR("write failed: %s\n", strerror(errno));
mDevice->handleDisconnect("Failed writing playback samples: %s", strerror(errno));
break;
}
to_write -= static_cast<size_t>(wrote);
write_ptr += wrote;
}
}
return 0;
}
void OSSPlayback::open(const char *name)
{
const char *devname{DefaultPlayback.c_str()};
if(!name)
name = DefaultName;
else
{
if(PlaybackDevices.empty())
ALCossListPopulate(PlaybackDevices, DSP_CAP_OUTPUT);
auto iter = std::find_if(PlaybackDevices.cbegin(), PlaybackDevices.cend(),
[&name](const DevMap &entry) -> bool
{ return entry.name == name; }
);
if(iter == PlaybackDevices.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name};
devname = iter->device_name.c_str();
}
int fd{::open(devname, O_WRONLY)};
if(fd == -1)
throw al::backend_exception{al::backend_error::NoDevice, "Could not open %s: %s", devname,
strerror(errno)};
if(mFd != -1)
::close(mFd);
mFd = fd;
mDevice->DeviceName = name;
}
bool OSSPlayback::reset()
{
int ossFormat{};
switch(mDevice->FmtType)
{
case DevFmtByte:
ossFormat = AFMT_S8;
break;
case DevFmtUByte:
ossFormat = AFMT_U8;
break;
case DevFmtUShort:
case DevFmtInt:
case DevFmtUInt:
case DevFmtFloat:
mDevice->FmtType = DevFmtShort;
/* fall-through */
case DevFmtShort:
ossFormat = AFMT_S16_NE;
break;
}
uint periods{mDevice->BufferSize / mDevice->UpdateSize};
uint numChannels{mDevice->channelsFromFmt()};
uint ossSpeed{mDevice->Frequency};
uint frameSize{numChannels * mDevice->bytesFromFmt()};
/* According to the OSS spec, 16 bytes (log2(16)) is the minimum. */
uint log2FragmentSize{maxu(log2i(mDevice->UpdateSize*frameSize), 4)};
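/* SNDCTL_DSP_SETFRAGMENT takes the fragment count in the upper 16 bits and
 * the log2 of the fragment size in the lower 16 bits.
 */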
uint numFragmentsLogSize{(periods << 16) | log2FragmentSize};
audio_buf_info info{};
const char *err;
#define CHECKERR(func) if((func) < 0) { \
err = #func; \
goto err; \
}
/* Don't fail if SETFRAGMENT fails. We can handle just about anything
* that's reported back via GETOSPACE */
ioctl(mFd, SNDCTL_DSP_SETFRAGMENT, &numFragmentsLogSize);
CHECKERR(ioctl(mFd, SNDCTL_DSP_SETFMT, &ossFormat));
CHECKERR(ioctl(mFd, SNDCTL_DSP_CHANNELS, &numChannels));
CHECKERR(ioctl(mFd, SNDCTL_DSP_SPEED, &ossSpeed));
CHECKERR(ioctl(mFd, SNDCTL_DSP_GETOSPACE, &info));
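/* Only reachable via the goto in CHECKERR above. */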
if(0)
{
err:
ERR("%s failed: %s\n", err, strerror(errno));
return false;
}
#undef CHECKERR
if(mDevice->channelsFromFmt() != numChannels)
{
ERR("Failed to set %s, got %d channels instead\n", DevFmtChannelsString(mDevice->FmtChans),
numChannels);
return false;
}
if(!((ossFormat == AFMT_S8 && mDevice->FmtType == DevFmtByte) ||
(ossFormat == AFMT_U8 && mDevice->FmtType == DevFmtUByte) ||
(ossFormat == AFMT_S16_NE && mDevice->FmtType == DevFmtShort)))
{
ERR("Failed to set %s samples, got OSS format %#x\n", DevFmtTypeString(mDevice->FmtType),
ossFormat);
return false;
}
mDevice->Frequency = ossSpeed;
mDevice->UpdateSize = static_cast<uint>(info.fragsize) / frameSize;
mDevice->BufferSize = static_cast<uint>(info.fragments) * mDevice->UpdateSize;
setDefaultChannelOrder();
mMixData.resize(mDevice->UpdateSize * mDevice->frameSizeFromFmt());
return true;
}
void OSSPlayback::start()
{
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&OSSPlayback::mixerProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void OSSPlayback::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
if(ioctl(mFd, SNDCTL_DSP_RESET) != 0)
ERR("Error resetting device: %s\n", strerror(errno));
}
struct OSScapture final : public BackendBase {
OSScapture(DeviceBase *device) noexcept : BackendBase{device} { }
~OSScapture() override;
int recordProc();
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
int mFd{-1};
RingBufferPtr mRing{nullptr};
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(OSScapture)
};
OSScapture::~OSScapture()
{
if(mFd != -1)
close(mFd);
mFd = -1;
}
int OSScapture::recordProc()
{
SetRTPriority();
althrd_setname(RECORD_THREAD_NAME);
const size_t frame_size{mDevice->frameSizeFromFmt()};
while(!mKillNow.load(std::memory_order_acquire))
{
pollfd pollitem{};
pollitem.fd = mFd;
pollitem.events = POLLIN;
int sret{poll(&pollitem, 1, 1000)};
if(sret < 0)
{
if(errno == EINTR || errno == EAGAIN)
continue;
ERR("poll failed: %s\n", strerror(errno));
mDevice->handleDisconnect("Failed to check capture samples: %s", strerror(errno));
break;
}
else if(sret == 0)
{
WARN("poll timeout\n");
continue;
}
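/* Read into the ring buffer's first contiguous write segment. Anything
 * that doesn't fit stays in the device buffer for a later poll wakeup.
 */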
auto vec = mRing->getWriteVector();
if(vec.first.len > 0)
{
ssize_t amt{read(mFd, vec.first.buf, vec.first.len*frame_size)};
if(amt < 0)
{
ERR("read failed: %s\n", strerror(errno));
mDevice->handleDisconnect("Failed reading capture samples: %s", strerror(errno));
break;
}
mRing->writeAdvance(static_cast<size_t>(amt)/frame_size);
}
}
return 0;
}
void OSScapture::open(const char *name)
{
const char *devname{DefaultCapture.c_str()};
if(!name)
name = DefaultName;
else
{
if(CaptureDevices.empty())
ALCossListPopulate(CaptureDevices, DSP_CAP_INPUT);
auto iter = std::find_if(CaptureDevices.cbegin(), CaptureDevices.cend(),
[&name](const DevMap &entry) -> bool
{ return entry.name == name; }
);
if(iter == CaptureDevices.cend())
throw al::backend_exception{al::backend_error::NoDevice,
"Device name \"%s\" not found", name};
devname = iter->device_name.c_str();
}
mFd = ::open(devname, O_RDONLY);
if(mFd == -1)
throw al::backend_exception{al::backend_error::NoDevice, "Could not open %s: %s", devname,
strerror(errno)};
int ossFormat{};
switch(mDevice->FmtType)
{
case DevFmtByte:
ossFormat = AFMT_S8;
break;
case DevFmtUByte:
ossFormat = AFMT_U8;
break;
case DevFmtShort:
ossFormat = AFMT_S16_NE;
break;
case DevFmtUShort:
case DevFmtInt:
case DevFmtUInt:
case DevFmtFloat:
throw al::backend_exception{al::backend_error::DeviceError,
"%s capture samples not supported", DevFmtTypeString(mDevice->FmtType)};
}
uint periods{4};
uint numChannels{mDevice->channelsFromFmt()};
uint frameSize{numChannels * mDevice->bytesFromFmt()};
uint ossSpeed{mDevice->Frequency};
/* According to the OSS spec, 16 bytes is the minimum. */
uint log2FragmentSize{maxu(log2i(mDevice->BufferSize * frameSize / periods), 4)};
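/* As with playback, the fragment count goes in the upper 16 bits and the
 * log2 fragment size in the lower 16.
 */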
uint numFragmentsLogSize{(periods << 16) | log2FragmentSize};
audio_buf_info info{};
#define CHECKERR(func) if((func) < 0) { \
throw al::backend_exception{al::backend_error::DeviceError, #func " failed: %s", \
strerror(errno)}; \
}
CHECKERR(ioctl(mFd, SNDCTL_DSP_SETFRAGMENT, &numFragmentsLogSize));
CHECKERR(ioctl(mFd, SNDCTL_DSP_SETFMT, &ossFormat));
CHECKERR(ioctl(mFd, SNDCTL_DSP_CHANNELS, &numChannels));
CHECKERR(ioctl(mFd, SNDCTL_DSP_SPEED, &ossSpeed));
CHECKERR(ioctl(mFd, SNDCTL_DSP_GETISPACE, &info));
#undef CHECKERR
if(mDevice->channelsFromFmt() != numChannels)
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to set %s, got %d channels instead", DevFmtChannelsString(mDevice->FmtChans),
numChannels};
if(!((ossFormat == AFMT_S8 && mDevice->FmtType == DevFmtByte)
|| (ossFormat == AFMT_U8 && mDevice->FmtType == DevFmtUByte)
|| (ossFormat == AFMT_S16_NE && mDevice->FmtType == DevFmtShort)))
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to set %s samples, got OSS format %#x", DevFmtTypeString(mDevice->FmtType),
ossFormat};
mRing = RingBuffer::Create(mDevice->BufferSize, frameSize, false);
mDevice->DeviceName = name;
}
void OSScapture::start()
{
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&OSScapture::recordProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start recording thread: %s", e.what()};
}
}
void OSScapture::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
if(ioctl(mFd, SNDCTL_DSP_RESET) != 0)
ERR("Error resetting device: %s\n", strerror(errno));
}
void OSScapture::captureSamples(al::byte *buffer, uint samples)
{ mRing->read(buffer, samples); }
uint OSScapture::availableSamples()
{ return static_cast<uint>(mRing->readSpace()); }
} // namespace
BackendFactory &OSSBackendFactory::getFactory()
{
static OSSBackendFactory factory{};
return factory;
}
bool OSSBackendFactory::init()
{
if(auto devopt = ConfigValueStr(nullptr, "oss", "device"))
DefaultPlayback = std::move(*devopt);
if(auto capopt = ConfigValueStr(nullptr, "oss", "capture"))
DefaultCapture = std::move(*capopt);
return true;
}
bool OSSBackendFactory::querySupport(BackendType type)
{ return (type == BackendType::Playback || type == BackendType::Capture); }
std::string OSSBackendFactory::probe(BackendType type)
{
std::string outnames;
auto add_device = [&outnames](const DevMap &entry) -> void
{
struct stat buf;
if(stat(entry.device_name.c_str(), &buf) == 0)
{
/* Includes null char. */
outnames.append(entry.name.c_str(), entry.name.length()+1);
}
};
switch(type)
{
case BackendType::Playback:
PlaybackDevices.clear();
ALCossListPopulate(PlaybackDevices, DSP_CAP_OUTPUT);
std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
break;
case BackendType::Capture:
CaptureDevices.clear();
ALCossListPopulate(CaptureDevices, DSP_CAP_INPUT);
std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
break;
}
return outnames;
}
BackendPtr OSSBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new OSSPlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new OSScapture{device}};
return nullptr;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_OSS_H
#define BACKENDS_OSS_H
#include "base.h"
struct OSSBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_OSS_H */

File diff suppressed because it is too large


@ -0,0 +1,23 @@
#ifndef BACKENDS_PIPEWIRE_H
#define BACKENDS_PIPEWIRE_H
#include <string>
#include "base.h"
struct DeviceBase;
struct PipeWireBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_PIPEWIRE_H */


@ -0,0 +1,447 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "portaudio.h"
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include "alc/alconfig.h"
#include "alnumeric.h"
#include "core/device.h"
#include "core/logging.h"
#include "dynload.h"
#include "ringbuffer.h"
#include <portaudio.h>
namespace {
constexpr char pa_device[] = "PortAudio Default";
#ifdef HAVE_DYNLOAD
void *pa_handle;
#define MAKE_FUNC(x) decltype(x) * p##x
MAKE_FUNC(Pa_Initialize);
MAKE_FUNC(Pa_Terminate);
MAKE_FUNC(Pa_GetErrorText);
MAKE_FUNC(Pa_StartStream);
MAKE_FUNC(Pa_StopStream);
MAKE_FUNC(Pa_OpenStream);
MAKE_FUNC(Pa_CloseStream);
MAKE_FUNC(Pa_GetDefaultOutputDevice);
MAKE_FUNC(Pa_GetDefaultInputDevice);
MAKE_FUNC(Pa_GetStreamInfo);
#undef MAKE_FUNC
#ifndef IN_IDE_PARSER
#define Pa_Initialize pPa_Initialize
#define Pa_Terminate pPa_Terminate
#define Pa_GetErrorText pPa_GetErrorText
#define Pa_StartStream pPa_StartStream
#define Pa_StopStream pPa_StopStream
#define Pa_OpenStream pPa_OpenStream
#define Pa_CloseStream pPa_CloseStream
#define Pa_GetDefaultOutputDevice pPa_GetDefaultOutputDevice
#define Pa_GetDefaultInputDevice pPa_GetDefaultInputDevice
#define Pa_GetStreamInfo pPa_GetStreamInfo
#endif
#endif
struct PortPlayback final : public BackendBase {
PortPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~PortPlayback() override;
int writeCallback(const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer,
const PaStreamCallbackTimeInfo *timeInfo, const PaStreamCallbackFlags statusFlags) noexcept;
static int writeCallbackC(const void *inputBuffer, void *outputBuffer,
unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo *timeInfo,
const PaStreamCallbackFlags statusFlags, void *userData) noexcept
{
return static_cast<PortPlayback*>(userData)->writeCallback(inputBuffer, outputBuffer,
framesPerBuffer, timeInfo, statusFlags);
}
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
PaStream *mStream{nullptr};
PaStreamParameters mParams{};
uint mUpdateSize{0u};
DEF_NEWDEL(PortPlayback)
};
PortPlayback::~PortPlayback()
{
PaError err{mStream ? Pa_CloseStream(mStream) : paNoError};
if(err != paNoError)
ERR("Error closing stream: %s\n", Pa_GetErrorText(err));
mStream = nullptr;
}
int PortPlayback::writeCallback(const void*, void *outputBuffer, unsigned long framesPerBuffer,
const PaStreamCallbackTimeInfo*, const PaStreamCallbackFlags) noexcept
{
mDevice->renderSamples(outputBuffer, static_cast<uint>(framesPerBuffer),
static_cast<uint>(mParams.channelCount));
return 0;
}
void PortPlayback::open(const char *name)
{
if(!name)
name = pa_device;
else if(strcmp(name, pa_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
PaStreamParameters params{};
auto devidopt = ConfigValueInt(nullptr, "port", "device");
if(devidopt && *devidopt >= 0) params.device = *devidopt;
else params.device = Pa_GetDefaultOutputDevice();
params.suggestedLatency = mDevice->BufferSize / static_cast<double>(mDevice->Frequency);
params.hostApiSpecificStreamInfo = nullptr;
params.channelCount = ((mDevice->FmtChans == DevFmtMono) ? 1 : 2);
switch(mDevice->FmtType)
{
case DevFmtByte:
params.sampleFormat = paInt8;
break;
case DevFmtUByte:
params.sampleFormat = paUInt8;
break;
case DevFmtUShort:
/* fall-through */
case DevFmtShort:
params.sampleFormat = paInt16;
break;
case DevFmtUInt:
/* fall-through */
case DevFmtInt:
params.sampleFormat = paInt32;
break;
case DevFmtFloat:
params.sampleFormat = paFloat32;
break;
}
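/* If the device rejects float output, retry once with 16-bit samples before
 * giving up.
 */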
retry_open:
PaStream *stream{};
PaError err{Pa_OpenStream(&stream, nullptr, &params, mDevice->Frequency, mDevice->UpdateSize,
paNoFlag, &PortPlayback::writeCallbackC, this)};
if(err != paNoError)
{
if(params.sampleFormat == paFloat32)
{
params.sampleFormat = paInt16;
goto retry_open;
}
throw al::backend_exception{al::backend_error::NoDevice, "Failed to open stream: %s",
Pa_GetErrorText(err)};
}
Pa_CloseStream(mStream);
mStream = stream;
mParams = params;
mUpdateSize = mDevice->UpdateSize;
mDevice->DeviceName = name;
}
bool PortPlayback::reset()
{
const PaStreamInfo *streamInfo{Pa_GetStreamInfo(mStream)};
mDevice->Frequency = static_cast<uint>(streamInfo->sampleRate);
mDevice->UpdateSize = mUpdateSize;
if(mParams.sampleFormat == paInt8)
mDevice->FmtType = DevFmtByte;
else if(mParams.sampleFormat == paUInt8)
mDevice->FmtType = DevFmtUByte;
else if(mParams.sampleFormat == paInt16)
mDevice->FmtType = DevFmtShort;
else if(mParams.sampleFormat == paInt32)
mDevice->FmtType = DevFmtInt;
else if(mParams.sampleFormat == paFloat32)
mDevice->FmtType = DevFmtFloat;
else
{
ERR("Unexpected sample format: 0x%lx\n", mParams.sampleFormat);
return false;
}
if(mParams.channelCount >= 2)
mDevice->FmtChans = DevFmtStereo;
else if(mParams.channelCount == 1)
mDevice->FmtChans = DevFmtMono;
else
{
ERR("Unexpected channel count: %u\n", mParams.channelCount);
return false;
}
setDefaultChannelOrder();
return true;
}
void PortPlayback::start()
{
const PaError err{Pa_StartStream(mStream)};
if(err != paNoError)
throw al::backend_exception{al::backend_error::DeviceError, "Failed to start playback: %s",
Pa_GetErrorText(err)};
}
void PortPlayback::stop()
{
PaError err{Pa_StopStream(mStream)};
if(err != paNoError)
ERR("Error stopping stream: %s\n", Pa_GetErrorText(err));
}
struct PortCapture final : public BackendBase {
PortCapture(DeviceBase *device) noexcept : BackendBase{device} { }
~PortCapture() override;
int readCallback(const void *inputBuffer, void *outputBuffer, unsigned long framesPerBuffer,
const PaStreamCallbackTimeInfo *timeInfo, const PaStreamCallbackFlags statusFlags) noexcept;
static int readCallbackC(const void *inputBuffer, void *outputBuffer,
unsigned long framesPerBuffer, const PaStreamCallbackTimeInfo *timeInfo,
const PaStreamCallbackFlags statusFlags, void *userData) noexcept
{
return static_cast<PortCapture*>(userData)->readCallback(inputBuffer, outputBuffer,
framesPerBuffer, timeInfo, statusFlags);
}
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
PaStream *mStream{nullptr};
PaStreamParameters mParams;
RingBufferPtr mRing{nullptr};
DEF_NEWDEL(PortCapture)
};
PortCapture::~PortCapture()
{
PaError err{mStream ? Pa_CloseStream(mStream) : paNoError};
if(err != paNoError)
ERR("Error closing stream: %s\n", Pa_GetErrorText(err));
mStream = nullptr;
}
int PortCapture::readCallback(const void *inputBuffer, void*, unsigned long framesPerBuffer,
const PaStreamCallbackTimeInfo*, const PaStreamCallbackFlags) noexcept
{
mRing->write(inputBuffer, framesPerBuffer);
return 0;
}
void PortCapture::open(const char *name)
{
if(!name)
name = pa_device;
else if(strcmp(name, pa_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
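/* Ensure at least 100ms of capture buffering. */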
uint samples{mDevice->BufferSize};
samples = maxu(samples, 100 * mDevice->Frequency / 1000);
uint frame_size{mDevice->frameSizeFromFmt()};
mRing = RingBuffer::Create(samples, frame_size, false);
auto devidopt = ConfigValueInt(nullptr, "port", "capture");
if(devidopt && *devidopt >= 0) mParams.device = *devidopt;
else mParams.device = Pa_GetDefaultInputDevice();
mParams.suggestedLatency = 0.0f;
mParams.hostApiSpecificStreamInfo = nullptr;
switch(mDevice->FmtType)
{
case DevFmtByte:
mParams.sampleFormat = paInt8;
break;
case DevFmtUByte:
mParams.sampleFormat = paUInt8;
break;
case DevFmtShort:
mParams.sampleFormat = paInt16;
break;
case DevFmtInt:
mParams.sampleFormat = paInt32;
break;
case DevFmtFloat:
mParams.sampleFormat = paFloat32;
break;
case DevFmtUInt:
case DevFmtUShort:
throw al::backend_exception{al::backend_error::DeviceError, "%s samples not supported",
DevFmtTypeString(mDevice->FmtType)};
}
mParams.channelCount = static_cast<int>(mDevice->channelsFromFmt());
PaError err{Pa_OpenStream(&mStream, &mParams, nullptr, mDevice->Frequency,
paFramesPerBufferUnspecified, paNoFlag, &PortCapture::readCallbackC, this)};
if(err != paNoError)
throw al::backend_exception{al::backend_error::NoDevice, "Failed to open stream: %s",
Pa_GetErrorText(err)};
mDevice->DeviceName = name;
}
void PortCapture::start()
{
const PaError err{Pa_StartStream(mStream)};
if(err != paNoError)
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start recording: %s", Pa_GetErrorText(err)};
}
void PortCapture::stop()
{
PaError err{Pa_StopStream(mStream)};
if(err != paNoError)
ERR("Error stopping stream: %s\n", Pa_GetErrorText(err));
}
uint PortCapture::availableSamples()
{ return static_cast<uint>(mRing->readSpace()); }
void PortCapture::captureSamples(al::byte *buffer, uint samples)
{ mRing->read(buffer, samples); }
} // namespace
bool PortBackendFactory::init()
{
PaError err;
#ifdef HAVE_DYNLOAD
if(!pa_handle)
{
#ifdef _WIN32
# define PALIB "portaudio.dll"
#elif defined(__APPLE__) && defined(__MACH__)
# define PALIB "libportaudio.2.dylib"
#elif defined(__OpenBSD__)
# define PALIB "libportaudio.so"
#else
# define PALIB "libportaudio.so.2"
#endif
pa_handle = LoadLib(PALIB);
if(!pa_handle)
return false;
#define LOAD_FUNC(f) do { \
p##f = reinterpret_cast<decltype(p##f)>(GetSymbol(pa_handle, #f)); \
if(p##f == nullptr) \
{ \
CloseLib(pa_handle); \
pa_handle = nullptr; \
return false; \
} \
} while(0)
LOAD_FUNC(Pa_Initialize);
LOAD_FUNC(Pa_Terminate);
LOAD_FUNC(Pa_GetErrorText);
LOAD_FUNC(Pa_StartStream);
LOAD_FUNC(Pa_StopStream);
LOAD_FUNC(Pa_OpenStream);
LOAD_FUNC(Pa_CloseStream);
LOAD_FUNC(Pa_GetDefaultOutputDevice);
LOAD_FUNC(Pa_GetDefaultInputDevice);
LOAD_FUNC(Pa_GetStreamInfo);
#undef LOAD_FUNC
if((err=Pa_Initialize()) != paNoError)
{
ERR("Pa_Initialize() returned an error: %s\n", Pa_GetErrorText(err));
CloseLib(pa_handle);
pa_handle = nullptr;
return false;
}
}
#else
if((err=Pa_Initialize()) != paNoError)
{
ERR("Pa_Initialize() returned an error: %s\n", Pa_GetErrorText(err));
return false;
}
#endif
return true;
}
bool PortBackendFactory::querySupport(BackendType type)
{ return (type == BackendType::Playback || type == BackendType::Capture); }
std::string PortBackendFactory::probe(BackendType type)
{
std::string outnames;
switch(type)
{
case BackendType::Playback:
case BackendType::Capture:
/* Includes null char. */
outnames.append(pa_device, sizeof(pa_device));
break;
}
return outnames;
}
BackendPtr PortBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new PortPlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new PortCapture{device}};
return nullptr;
}
BackendFactory &PortBackendFactory::getFactory()
{
static PortBackendFactory factory{};
return factory;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_PORTAUDIO_H
#define BACKENDS_PORTAUDIO_H
#include "base.h"
struct PortBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_PORTAUDIO_H */

File diff suppressed because it is too large


@ -0,0 +1,19 @@
#ifndef BACKENDS_PULSEAUDIO_H
#define BACKENDS_PULSEAUDIO_H
#include "base.h"
class PulseBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_PULSEAUDIO_H */


@ -0,0 +1,224 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2018 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "sdl2.h"
#include <cassert>
#include <cstdlib>
#include <cstring>
#include <string>
#include "almalloc.h"
#include "alnumeric.h"
#include "core/device.h"
#include "core/logging.h"
_Pragma("GCC diagnostic push")
_Pragma("GCC diagnostic ignored \"-Wold-style-cast\"")
#include "SDL.h"
_Pragma("GCC diagnostic pop")
namespace {
#ifdef _WIN32
#define DEVNAME_PREFIX "OpenAL Soft on "
#else
#define DEVNAME_PREFIX ""
#endif
constexpr char defaultDeviceName[] = DEVNAME_PREFIX "Default Device";
struct Sdl2Backend final : public BackendBase {
Sdl2Backend(DeviceBase *device) noexcept : BackendBase{device} { }
~Sdl2Backend() override;
void audioCallback(Uint8 *stream, int len) noexcept;
static void audioCallbackC(void *ptr, Uint8 *stream, int len) noexcept
{ static_cast<Sdl2Backend*>(ptr)->audioCallback(stream, len); }
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
SDL_AudioDeviceID mDeviceID{0u};
uint mFrameSize{0};
uint mFrequency{0u};
DevFmtChannels mFmtChans{};
DevFmtType mFmtType{};
uint mUpdateSize{0u};
DEF_NEWDEL(Sdl2Backend)
};
Sdl2Backend::~Sdl2Backend()
{
if(mDeviceID)
SDL_CloseAudioDevice(mDeviceID);
mDeviceID = 0;
}
void Sdl2Backend::audioCallback(Uint8 *stream, int len) noexcept
{
const auto ulen = static_cast<unsigned int>(len);
assert((ulen % mFrameSize) == 0);
mDevice->renderSamples(stream, ulen / mFrameSize, mDevice->channelsFromFmt());
}
void Sdl2Backend::open(const char *name)
{
SDL_AudioSpec want{}, have{};
want.freq = static_cast<int>(mDevice->Frequency);
switch(mDevice->FmtType)
{
case DevFmtUByte: want.format = AUDIO_U8; break;
case DevFmtByte: want.format = AUDIO_S8; break;
case DevFmtUShort: want.format = AUDIO_U16SYS; break;
case DevFmtShort: want.format = AUDIO_S16SYS; break;
case DevFmtUInt: /* fall-through */
case DevFmtInt: want.format = AUDIO_S32SYS; break;
case DevFmtFloat: want.format = AUDIO_F32; break;
}
want.channels = (mDevice->FmtChans == DevFmtMono) ? 1 : 2;
want.samples = static_cast<Uint16>(minu(mDevice->UpdateSize, 8192));
want.callback = &Sdl2Backend::audioCallbackC;
want.userdata = this;
/* Passing nullptr to SDL_OpenAudioDevice opens a default, which isn't
* necessarily the first in the list.
*/
SDL_AudioDeviceID devid;
if(!name || strcmp(name, defaultDeviceName) == 0)
devid = SDL_OpenAudioDevice(nullptr, SDL_FALSE, &want, &have, SDL_AUDIO_ALLOW_ANY_CHANGE);
else
{
const size_t prefix_len = strlen(DEVNAME_PREFIX);
if(strncmp(name, DEVNAME_PREFIX, prefix_len) == 0)
devid = SDL_OpenAudioDevice(name+prefix_len, SDL_FALSE, &want, &have,
SDL_AUDIO_ALLOW_ANY_CHANGE);
else
devid = SDL_OpenAudioDevice(name, SDL_FALSE, &want, &have, SDL_AUDIO_ALLOW_ANY_CHANGE);
}
if(!devid)
throw al::backend_exception{al::backend_error::NoDevice, "%s", SDL_GetError()};
DevFmtChannels devchans{};
if(have.channels >= 2)
devchans = DevFmtStereo;
else if(have.channels == 1)
devchans = DevFmtMono;
else
{
SDL_CloseAudioDevice(devid);
throw al::backend_exception{al::backend_error::DeviceError,
"Unhandled SDL channel count: %d", int{have.channels}};
}
DevFmtType devtype{};
switch(have.format)
{
case AUDIO_U8: devtype = DevFmtUByte; break;
case AUDIO_S8: devtype = DevFmtByte; break;
case AUDIO_U16SYS: devtype = DevFmtUShort; break;
case AUDIO_S16SYS: devtype = DevFmtShort; break;
case AUDIO_S32SYS: devtype = DevFmtInt; break;
case AUDIO_F32SYS: devtype = DevFmtFloat; break;
default:
SDL_CloseAudioDevice(devid);
throw al::backend_exception{al::backend_error::DeviceError, "Unhandled SDL format: 0x%04x",
have.format};
}
if(mDeviceID)
SDL_CloseAudioDevice(mDeviceID);
mDeviceID = devid;
mFrameSize = BytesFromDevFmt(devtype) * have.channels;
mFrequency = static_cast<uint>(have.freq);
mFmtChans = devchans;
mFmtType = devtype;
mUpdateSize = have.samples;
mDevice->DeviceName = name ? name : defaultDeviceName;
}
bool Sdl2Backend::reset()
{
mDevice->Frequency = mFrequency;
mDevice->FmtChans = mFmtChans;
mDevice->FmtType = mFmtType;
mDevice->UpdateSize = mUpdateSize;
mDevice->BufferSize = mUpdateSize * 2; /* SDL always (tries to) use two periods. */
setDefaultWFXChannelOrder();
return true;
}
void Sdl2Backend::start()
{ SDL_PauseAudioDevice(mDeviceID, 0); }
void Sdl2Backend::stop()
{ SDL_PauseAudioDevice(mDeviceID, 1); }
} // namespace
BackendFactory &SDL2BackendFactory::getFactory()
{
static SDL2BackendFactory factory{};
return factory;
}
bool SDL2BackendFactory::init()
{ return (SDL_InitSubSystem(SDL_INIT_AUDIO) == 0); }
bool SDL2BackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback; }
std::string SDL2BackendFactory::probe(BackendType type)
{
std::string outnames;
if(type != BackendType::Playback)
return outnames;
int num_devices{SDL_GetNumAudioDevices(SDL_FALSE)};
/* Includes null char. */
outnames.append(defaultDeviceName, sizeof(defaultDeviceName));
for(int i{0};i < num_devices;++i)
{
std::string name{DEVNAME_PREFIX};
name += SDL_GetAudioDeviceName(i, SDL_FALSE);
if(!name.empty())
outnames.append(name.c_str(), name.length()+1);
}
return outnames;
}
BackendPtr SDL2BackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new Sdl2Backend{device}};
return nullptr;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_SDL2_H
#define BACKENDS_SDL2_H
#include "base.h"
struct SDL2BackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_SDL2_H */


@ -0,0 +1,540 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "sndio.h"
#include <functional>
#include <inttypes.h>
#include <poll.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <thread>
#include "alnumeric.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "ringbuffer.h"
#include "threads.h"
#include "vector.h"
#include <sndio.h>
namespace {
static const char sndio_device[] = "SndIO Default";
struct SioPar : public sio_par {
SioPar() { sio_initpar(this); }
void clear() { sio_initpar(this); }
};
struct SndioPlayback final : public BackendBase {
SndioPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~SndioPlayback() override;
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
sio_hdl *mSndHandle{nullptr};
uint mFrameStep{};
al::vector<al::byte> mBuffer;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(SndioPlayback)
};
SndioPlayback::~SndioPlayback()
{
if(mSndHandle)
sio_close(mSndHandle);
mSndHandle = nullptr;
}
int SndioPlayback::mixerProc()
{
const size_t frameStep{mFrameStep};
const size_t frameSize{frameStep * mDevice->bytesFromFmt()};
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
al::span<al::byte> buffer{mBuffer};
mDevice->renderSamples(buffer.data(), static_cast<uint>(buffer.size() / frameSize),
frameStep);
while(!buffer.empty() && !mKillNow.load(std::memory_order_acquire))
{
size_t wrote{sio_write(mSndHandle, buffer.data(), buffer.size())};
if(wrote > buffer.size() || wrote == 0)
{
ERR("sio_write failed: 0x%" PRIx64 "\n", wrote);
mDevice->handleDisconnect("Failed to write playback samples");
break;
}
buffer = buffer.subspan(wrote);
}
}
return 0;
}
void SndioPlayback::open(const char *name)
{
if(!name)
name = sndio_device;
else if(strcmp(name, sndio_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
sio_hdl *sndHandle{sio_open(nullptr, SIO_PLAY, 0)};
if(!sndHandle)
throw al::backend_exception{al::backend_error::NoDevice, "Could not open backend device"};
if(mSndHandle)
sio_close(mSndHandle);
mSndHandle = sndHandle;
mDevice->DeviceName = name;
}
bool SndioPlayback::reset()
{
SioPar par;
auto tryfmt = mDevice->FmtType;
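/* If the device rejects the requested format, fall back to signed 16-bit
 * and try again before giving up.
 */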
retry_params:
switch(tryfmt)
{
case DevFmtByte:
par.bits = 8;
par.sig = 1;
break;
case DevFmtUByte:
par.bits = 8;
par.sig = 0;
break;
case DevFmtShort:
par.bits = 16;
par.sig = 1;
break;
case DevFmtUShort:
par.bits = 16;
par.sig = 0;
break;
case DevFmtFloat:
case DevFmtInt:
par.bits = 32;
par.sig = 1;
break;
case DevFmtUInt:
par.bits = 32;
par.sig = 0;
break;
}
par.bps = SIO_BPS(par.bits);
par.le = SIO_LE_NATIVE;
par.msb = 1;
par.rate = mDevice->Frequency;
par.pchan = mDevice->channelsFromFmt();
par.round = mDevice->UpdateSize;
par.appbufsz = mDevice->BufferSize - mDevice->UpdateSize;
if(!par.appbufsz) par.appbufsz = mDevice->UpdateSize;
try {
if(!sio_setpar(mSndHandle, &par))
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to set device parameters"};
par.clear();
if(!sio_getpar(mSndHandle, &par))
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to get device parameters"};
if(par.bps > 1 && par.le != SIO_LE_NATIVE)
throw al::backend_exception{al::backend_error::DeviceError,
"%s-endian samples not supported", par.le ? "Little" : "Big"};
if(par.bits < par.bps*8 && !par.msb)
throw al::backend_exception{al::backend_error::DeviceError,
"MSB-padded samples not supported (%u of %u bits)", par.bits, par.bps*8};
if(par.pchan < 1)
throw al::backend_exception{al::backend_error::DeviceError,
"No playback channels on device"};
}
catch(al::backend_exception &e) {
if(tryfmt == DevFmtShort)
throw;
par.clear();
tryfmt = DevFmtShort;
goto retry_params;
}
if(par.bps == 1)
mDevice->FmtType = (par.sig==1) ? DevFmtByte : DevFmtUByte;
else if(par.bps == 2)
mDevice->FmtType = (par.sig==1) ? DevFmtShort : DevFmtUShort;
else if(par.bps == 4)
mDevice->FmtType = (par.sig==1) ? DevFmtInt : DevFmtUInt;
else
throw al::backend_exception{al::backend_error::DeviceError,
"Unhandled sample format: %s %u-bit", (par.sig?"signed":"unsigned"), par.bps*8};
mFrameStep = par.pchan;
if(par.pchan != mDevice->channelsFromFmt())
{
WARN("Got %u channel%s for %s\n", par.pchan, (par.pchan==1)?"":"s",
DevFmtChannelsString(mDevice->FmtChans));
if(par.pchan < 2) mDevice->FmtChans = DevFmtMono;
else mDevice->FmtChans = DevFmtStereo;
}
mDevice->Frequency = par.rate;
setDefaultChannelOrder();
mDevice->UpdateSize = par.round;
mDevice->BufferSize = par.bufsz + par.round;
mBuffer.resize(mDevice->UpdateSize * par.pchan*par.bps);
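/* Pre-fill the mix buffer with silence: zero for signed formats, the
 * unsigned mid-point otherwise.
 */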
if(par.sig == 1)
std::fill(mBuffer.begin(), mBuffer.end(), al::byte{});
else if(par.bits == 8)
std::fill_n(mBuffer.data(), mBuffer.size(), al::byte(0x80));
else if(par.bits == 16)
std::fill_n(reinterpret_cast<uint16_t*>(mBuffer.data()), mBuffer.size()/2, 0x8000);
else if(par.bits == 32)
std::fill_n(reinterpret_cast<uint32_t*>(mBuffer.data()), mBuffer.size()/4, 0x80000000u);
return true;
}
void SndioPlayback::start()
{
if(!sio_start(mSndHandle))
throw al::backend_exception{al::backend_error::DeviceError, "Error starting playback"};
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&SndioPlayback::mixerProc), this};
}
catch(std::exception& e) {
sio_stop(mSndHandle);
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void SndioPlayback::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
if(!sio_stop(mSndHandle))
ERR("Error stopping device\n");
}
/* TODO: This could be improved by avoiding the ring buffer and record thread,
* counting the available samples with the sio_onmove callback and reading
* directly from the device. However, this depends on reasonable support for
* capture buffer sizes apps may request.
*/
struct SndioCapture final : public BackendBase {
SndioCapture(DeviceBase *device) noexcept : BackendBase{device} { }
~SndioCapture() override;
int recordProc();
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
sio_hdl *mSndHandle{nullptr};
RingBufferPtr mRing;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(SndioCapture)
};
SndioCapture::~SndioCapture()
{
if(mSndHandle)
sio_close(mSndHandle);
mSndHandle = nullptr;
}
int SndioCapture::recordProc()
{
SetRTPriority();
althrd_setname(RECORD_THREAD_NAME);
const uint frameSize{mDevice->frameSizeFromFmt()};
int nfds_pre{sio_nfds(mSndHandle)};
if(nfds_pre <= 0)
{
mDevice->handleDisconnect("Incorrect return value from sio_nfds(): %d", nfds_pre);
return 1;
}
auto fds = std::make_unique<pollfd[]>(static_cast<uint>(nfds_pre));
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
/* Wait until there's some samples to read. */
const int nfds{sio_pollfd(mSndHandle, fds.get(), POLLIN)};
if(nfds <= 0)
{
mDevice->handleDisconnect("Failed to get polling fds: %d", nfds);
break;
}
int pollres{::poll(fds.get(), static_cast<uint>(nfds), 2000)};
if(pollres < 0)
{
if(errno == EINTR) continue;
mDevice->handleDisconnect("Poll error: %s", strerror(errno));
break;
}
if(pollres == 0)
continue;
const int revents{sio_revents(mSndHandle, fds.get())};
if((revents&POLLHUP))
{
mDevice->handleDisconnect("Got POLLHUP from poll events");
break;
}
if(!(revents&POLLIN))
continue;
auto data = mRing->getWriteVector();
al::span<al::byte> buffer{data.first.buf, data.first.len*frameSize};
while(!buffer.empty())
{
size_t got{sio_read(mSndHandle, buffer.data(), buffer.size())};
if(got == 0)
break;
if(got > buffer.size())
{
ERR("sio_read failed: 0x%" PRIx64 "\n", got);
mDevice->handleDisconnect("sio_read failed: 0x%" PRIx64, got);
break;
}
mRing->writeAdvance(got / frameSize);
buffer = buffer.subspan(got);
if(buffer.empty())
{
data = mRing->getWriteVector();
buffer = {data.first.buf, data.first.len*frameSize};
}
}
if(buffer.empty())
{
/* Got samples to read, but no place to store it. Drop it. */
static char junk[4096];
sio_read(mSndHandle, junk, sizeof(junk) - (sizeof(junk)%frameSize));
}
}
return 0;
}
void SndioCapture::open(const char *name)
{
if(!name)
name = sndio_device;
else if(strcmp(name, sndio_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
mSndHandle = sio_open(nullptr, SIO_REC, true);
if(mSndHandle == nullptr)
throw al::backend_exception{al::backend_error::NoDevice, "Could not open backend device"};
SioPar par;
switch(mDevice->FmtType)
{
case DevFmtByte:
par.bits = 8;
par.sig = 1;
break;
case DevFmtUByte:
par.bits = 8;
par.sig = 0;
break;
case DevFmtShort:
par.bits = 16;
par.sig = 1;
break;
case DevFmtUShort:
par.bits = 16;
par.sig = 0;
break;
case DevFmtInt:
par.bits = 32;
par.sig = 1;
break;
case DevFmtUInt:
par.bits = 32;
par.sig = 0;
break;
case DevFmtFloat:
throw al::backend_exception{al::backend_error::DeviceError,
"%s capture samples not supported", DevFmtTypeString(mDevice->FmtType)};
}
par.bps = SIO_BPS(par.bits);
par.le = SIO_LE_NATIVE;
par.msb = 1;
par.rchan = mDevice->channelsFromFmt();
par.rate = mDevice->Frequency;
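/* Request at least 100ms of buffering, with update periods of at most
 * roughly 25ms.
 */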
par.appbufsz = maxu(mDevice->BufferSize, mDevice->Frequency/10);
par.round = minu(par.appbufsz/2, mDevice->Frequency/40);
if(!sio_setpar(mSndHandle, &par) || !sio_getpar(mSndHandle, &par))
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to set device praameters"};
if(par.bps > 1 && par.le != SIO_LE_NATIVE)
throw al::backend_exception{al::backend_error::DeviceError,
"%s-endian samples not supported", par.le ? "Little" : "Big"};
if(par.bits < par.bps*8 && !par.msb)
throw al::backend_exception{al::backend_error::DeviceError,
"Padded samples not supported (got %u of %u bits)", par.bits, par.bps*8};
auto match_fmt = [](DevFmtType fmttype, const sio_par &p) -> bool
{
return (fmttype == DevFmtByte && p.bps == 1 && p.sig != 0)
|| (fmttype == DevFmtUByte && p.bps == 1 && p.sig == 0)
|| (fmttype == DevFmtShort && p.bps == 2 && p.sig != 0)
|| (fmttype == DevFmtUShort && p.bps == 2 && p.sig == 0)
|| (fmttype == DevFmtInt && p.bps == 4 && p.sig != 0)
|| (fmttype == DevFmtUInt && p.bps == 4 && p.sig == 0);
};
if(!match_fmt(mDevice->FmtType, par) || mDevice->channelsFromFmt() != par.rchan
|| mDevice->Frequency != par.rate)
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to set format %s %s %uhz, got %c%u %u-channel %uhz instead",
DevFmtTypeString(mDevice->FmtType), DevFmtChannelsString(mDevice->FmtChans),
mDevice->Frequency, par.sig?'s':'u', par.bps*8, par.rchan, par.rate};
mRing = RingBuffer::Create(mDevice->BufferSize, par.bps*par.rchan, false);
mDevice->BufferSize = static_cast<uint>(mRing->writeSpace());
mDevice->UpdateSize = par.round;
setDefaultChannelOrder();
mDevice->DeviceName = name;
}
void SndioCapture::start()
{
if(!sio_start(mSndHandle))
throw al::backend_exception{al::backend_error::DeviceError, "Error starting capture"};
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&SndioCapture::recordProc), this};
}
catch(std::exception& e) {
sio_stop(mSndHandle);
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start capture thread: %s", e.what()};
}
}
void SndioCapture::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
if(!sio_stop(mSndHandle))
ERR("Error stopping device\n");
}
void SndioCapture::captureSamples(al::byte *buffer, uint samples)
{ mRing->read(buffer, samples); }
uint SndioCapture::availableSamples()
{ return static_cast<uint>(mRing->readSpace()); }
} // namespace
BackendFactory &SndIOBackendFactory::getFactory()
{
static SndIOBackendFactory factory{};
return factory;
}
bool SndIOBackendFactory::init()
{ return true; }
bool SndIOBackendFactory::querySupport(BackendType type)
{ return (type == BackendType::Playback || type == BackendType::Capture); }
std::string SndIOBackendFactory::probe(BackendType type)
{
std::string outnames;
switch(type)
{
case BackendType::Playback:
case BackendType::Capture:
/* Includes null char. */
outnames.append(sndio_device, sizeof(sndio_device));
break;
}
return outnames;
}
BackendPtr SndIOBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new SndioPlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new SndioCapture{device}};
return nullptr;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_SNDIO_H
#define BACKENDS_SNDIO_H
#include "base.h"
struct SndIOBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_SNDIO_H */


@ -0,0 +1,303 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "solaris.h"
#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <unistd.h>
#include <errno.h>
#include <poll.h>
#include <math.h>
#include <string.h>
#include <thread>
#include <functional>
#include "albyte.h"
#include "alc/alconfig.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "threads.h"
#include "vector.h"
#include <sys/audioio.h>
namespace {
constexpr char solaris_device[] = "Solaris Default";
std::string solaris_driver{"/dev/audio"};
struct SolarisBackend final : public BackendBase {
SolarisBackend(DeviceBase *device) noexcept : BackendBase{device} { }
~SolarisBackend() override;
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
int mFd{-1};
uint mFrameStep{};
al::vector<al::byte> mBuffer;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(SolarisBackend)
};
SolarisBackend::~SolarisBackend()
{
if(mFd != -1)
close(mFd);
mFd = -1;
}
int SolarisBackend::mixerProc()
{
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
const size_t frame_step{mDevice->channelsFromFmt()};
const uint frame_size{mDevice->frameSizeFromFmt()};
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
pollfd pollitem{};
pollitem.fd = mFd;
pollitem.events = POLLOUT;
int pret{poll(&pollitem, 1, 1000)};
if(pret < 0)
{
if(errno == EINTR || errno == EAGAIN)
continue;
ERR("poll failed: %s\n", strerror(errno));
mDevice->handleDisconnect("Failed to wait for playback buffer: %s", strerror(errno));
break;
}
else if(pret == 0)
{
WARN("poll timeout\n");
continue;
}
al::byte *write_ptr{mBuffer.data()};
size_t to_write{mBuffer.size()};
mDevice->renderSamples(write_ptr, static_cast<uint>(to_write/frame_size), frame_step);
while(to_write > 0 && !mKillNow.load(std::memory_order_acquire))
{
ssize_t wrote{write(mFd, write_ptr, to_write)};
if(wrote < 0)
{
if(errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR)
continue;
ERR("write failed: %s\n", strerror(errno));
mDevice->handleDisconnect("Failed to write playback samples: %s", strerror(errno));
break;
}
to_write -= static_cast<size_t>(wrote);
write_ptr += wrote;
}
}
return 0;
}
void SolarisBackend::open(const char *name)
{
if(!name)
name = solaris_device;
else if(strcmp(name, solaris_device) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
int fd{::open(solaris_driver.c_str(), O_WRONLY)};
if(fd == -1)
throw al::backend_exception{al::backend_error::NoDevice, "Could not open %s: %s",
solaris_driver.c_str(), strerror(errno)};
if(mFd != -1)
::close(mFd);
mFd = fd;
mDevice->DeviceName = name;
}
bool SolarisBackend::reset()
{
audio_info_t info;
AUDIO_INITINFO(&info);
info.play.sample_rate = mDevice->Frequency;
info.play.channels = mDevice->channelsFromFmt();
switch(mDevice->FmtType)
{
case DevFmtByte:
info.play.precision = 8;
info.play.encoding = AUDIO_ENCODING_LINEAR;
break;
case DevFmtUByte:
info.play.precision = 8;
info.play.encoding = AUDIO_ENCODING_LINEAR8;
break;
case DevFmtUShort:
case DevFmtInt:
case DevFmtUInt:
case DevFmtFloat:
mDevice->FmtType = DevFmtShort;
/* fall-through */
case DevFmtShort:
info.play.precision = 16;
info.play.encoding = AUDIO_ENCODING_LINEAR;
break;
}
info.play.buffer_size = mDevice->BufferSize * mDevice->frameSizeFromFmt();
if(ioctl(mFd, AUDIO_SETINFO, &info) < 0)
{
ERR("ioctl failed: %s\n", strerror(errno));
return false;
}
if(mDevice->channelsFromFmt() != info.play.channels)
{
if(info.play.channels >= 2)
mDevice->FmtChans = DevFmtStereo;
else if(info.play.channels == 1)
mDevice->FmtChans = DevFmtMono;
else
throw al::backend_exception{al::backend_error::DeviceError,
"Got %u device channels", info.play.channels};
}
if(info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR8)
mDevice->FmtType = DevFmtUByte;
else if(info.play.precision == 8 && info.play.encoding == AUDIO_ENCODING_LINEAR)
mDevice->FmtType = DevFmtByte;
else if(info.play.precision == 16 && info.play.encoding == AUDIO_ENCODING_LINEAR)
mDevice->FmtType = DevFmtShort;
else if(info.play.precision == 32 && info.play.encoding == AUDIO_ENCODING_LINEAR)
mDevice->FmtType = DevFmtInt;
else
{
ERR("Got unhandled sample type: %d (0x%x)\n", info.play.precision, info.play.encoding);
return false;
}
uint frame_size{mDevice->bytesFromFmt() * info.play.channels};
mFrameStep = info.play.channels;
mDevice->Frequency = info.play.sample_rate;
mDevice->BufferSize = info.play.buffer_size / frame_size;
/* How to get the actual period size/count? */
mDevice->UpdateSize = mDevice->BufferSize / 2;
setDefaultChannelOrder();
mBuffer.resize(mDevice->UpdateSize * size_t{frame_size});
std::fill(mBuffer.begin(), mBuffer.end(), al::byte{});
return true;
}
void SolarisBackend::start()
{
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&SolarisBackend::mixerProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void SolarisBackend::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
if(ioctl(mFd, AUDIO_DRAIN) < 0)
ERR("Error draining device: %s\n", strerror(errno));
}
} // namespace
BackendFactory &SolarisBackendFactory::getFactory()
{
static SolarisBackendFactory factory{};
return factory;
}
bool SolarisBackendFactory::init()
{
if(auto devopt = ConfigValueStr(nullptr, "solaris", "device"))
solaris_driver = std::move(*devopt);
return true;
}
bool SolarisBackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback; }
std::string SolarisBackendFactory::probe(BackendType type)
{
std::string outnames;
switch(type)
{
case BackendType::Playback:
{
struct stat buf;
if(stat(solaris_driver.c_str(), &buf) == 0)
outnames.append(solaris_device, sizeof(solaris_device));
}
break;
case BackendType::Capture:
break;
}
return outnames;
}
BackendPtr SolarisBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new SolarisBackend{device}};
return nullptr;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_SOLARIS_H
#define BACKENDS_SOLARIS_H
#include "base.h"
struct SolarisBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_SOLARIS_H */

File diff suppressed because it is too large


@ -0,0 +1,19 @@
#ifndef BACKENDS_WASAPI_H
#define BACKENDS_WASAPI_H
#include "base.h"
struct WasapiBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_WASAPI_H */


@ -0,0 +1,407 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "wave.h"
#include <algorithm>
#include <atomic>
#include <cerrno>
#include <chrono>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <exception>
#include <functional>
#include <thread>
#include "albit.h"
#include "albyte.h"
#include "alc/alconfig.h"
#include "almalloc.h"
#include "alnumeric.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "opthelpers.h"
#include "strutils.h"
#include "threads.h"
#include "vector.h"
namespace {
using std::chrono::seconds;
using std::chrono::milliseconds;
using std::chrono::nanoseconds;
using ubyte = unsigned char;
using ushort = unsigned short;
constexpr char waveDevice[] = "Wave File Writer";
constexpr ubyte SUBTYPE_PCM[]{
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa,
0x00, 0x38, 0x9b, 0x71
};
constexpr ubyte SUBTYPE_FLOAT[]{
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x80, 0x00, 0x00, 0xaa,
0x00, 0x38, 0x9b, 0x71
};
constexpr ubyte SUBTYPE_BFORMAT_PCM[]{
0x01, 0x00, 0x00, 0x00, 0x21, 0x07, 0xd3, 0x11, 0x86, 0x44, 0xc8, 0xc1,
0xca, 0x00, 0x00, 0x00
};
constexpr ubyte SUBTYPE_BFORMAT_FLOAT[]{
0x03, 0x00, 0x00, 0x00, 0x21, 0x07, 0xd3, 0x11, 0x86, 0x44, 0xc8, 0xc1,
0xca, 0x00, 0x00, 0x00
};
void fwrite16le(ushort val, FILE *f)
{
ubyte data[2]{ static_cast<ubyte>(val&0xff), static_cast<ubyte>((val>>8)&0xff) };
fwrite(data, 1, 2, f);
}
void fwrite32le(uint val, FILE *f)
{
ubyte data[4]{ static_cast<ubyte>(val&0xff), static_cast<ubyte>((val>>8)&0xff),
static_cast<ubyte>((val>>16)&0xff), static_cast<ubyte>((val>>24)&0xff) };
fwrite(data, 1, 4, f);
}
struct WaveBackend final : public BackendBase {
WaveBackend(DeviceBase *device) noexcept : BackendBase{device} { }
~WaveBackend() override;
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
FILE *mFile{nullptr};
long mDataStart{-1};
al::vector<al::byte> mBuffer;
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(WaveBackend)
};
WaveBackend::~WaveBackend()
{
if(mFile)
fclose(mFile);
mFile = nullptr;
}
int WaveBackend::mixerProc()
{
const milliseconds restTime{mDevice->UpdateSize*1000/mDevice->Frequency / 2};
althrd_setname(MIXER_THREAD_NAME);
const size_t frameStep{mDevice->channelsFromFmt()};
const size_t frameSize{mDevice->frameSizeFromFmt()};
int64_t done{0};
auto start = std::chrono::steady_clock::now();
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
auto now = std::chrono::steady_clock::now();
/* This converts from nanoseconds to nanosamples, then to samples. */
int64_t avail{std::chrono::duration_cast<seconds>((now-start) *
mDevice->Frequency).count()};
if(avail-done < mDevice->UpdateSize)
{
std::this_thread::sleep_for(restTime);
continue;
}
while(avail-done >= mDevice->UpdateSize)
{
mDevice->renderSamples(mBuffer.data(), mDevice->UpdateSize, frameStep);
done += mDevice->UpdateSize;
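/* WAV sample data is little-endian; byte-swap the rendered samples in place when running on a big-endian host. */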
if(al::endian::native != al::endian::little)
{
const uint bytesize{mDevice->bytesFromFmt()};
if(bytesize == 2)
{
const size_t len{mBuffer.size() & ~size_t{1}};
for(size_t i{0};i < len;i+=2)
std::swap(mBuffer[i], mBuffer[i+1]);
}
else if(bytesize == 4)
{
const size_t len{mBuffer.size() & ~size_t{3}};
for(size_t i{0};i < len;i+=4)
{
std::swap(mBuffer[i ], mBuffer[i+3]);
std::swap(mBuffer[i+1], mBuffer[i+2]);
}
}
}
const size_t fs{fwrite(mBuffer.data(), frameSize, mDevice->UpdateSize, mFile)};
if(fs < mDevice->UpdateSize || ferror(mFile))
{
ERR("Error writing to file\n");
mDevice->handleDisconnect("Failed to write playback samples");
break;
}
}
/* For every completed second, increment the start time and reduce the
* samples done. This prevents the difference between the start time
* and current time from growing too large, while maintaining the
* correct number of samples to render.
*/
if(done >= mDevice->Frequency)
{
seconds s{done/mDevice->Frequency};
done %= mDevice->Frequency;
start += s;
}
}
return 0;
}
void WaveBackend::open(const char *name)
{
auto fname = ConfigValueStr(nullptr, "wave", "file");
if(!fname) throw al::backend_exception{al::backend_error::NoDevice,
"No wave output filename"};
if(!name)
name = waveDevice;
else if(strcmp(name, waveDevice) != 0)
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
/* There's only one "device", so if it's already open, we're done. */
if(mFile) return;
#ifdef _WIN32
{
std::wstring wname{utf8_to_wstr(fname->c_str())};
mFile = _wfopen(wname.c_str(), L"wb");
}
#else
mFile = fopen(fname->c_str(), "wb");
#endif
if(!mFile)
throw al::backend_exception{al::backend_error::DeviceError, "Could not open file '%s': %s",
fname->c_str(), strerror(errno)};
mDevice->DeviceName = name;
}
bool WaveBackend::reset()
{
uint channels{0}, bytes{0}, chanmask{0};
bool isbformat{false};
size_t val;
fseek(mFile, 0, SEEK_SET);
clearerr(mFile);
if(GetConfigValueBool(nullptr, "wave", "bformat", false))
{
mDevice->FmtChans = DevFmtAmbi3D;
mDevice->mAmbiOrder = 1;
}
switch(mDevice->FmtType)
{
case DevFmtByte:
mDevice->FmtType = DevFmtUByte;
break;
case DevFmtUShort:
mDevice->FmtType = DevFmtShort;
break;
case DevFmtUInt:
mDevice->FmtType = DevFmtInt;
break;
case DevFmtUByte:
case DevFmtShort:
case DevFmtInt:
case DevFmtFloat:
break;
}
switch(mDevice->FmtChans)
{
case DevFmtMono: chanmask = 0x04; break;
case DevFmtStereo: chanmask = 0x01 | 0x02; break;
case DevFmtQuad: chanmask = 0x01 | 0x02 | 0x10 | 0x20; break;
case DevFmtX51: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x200 | 0x400; break;
case DevFmtX61: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x100 | 0x200 | 0x400; break;
case DevFmtX71: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x010 | 0x020 | 0x200 | 0x400; break;
case DevFmtX714:
chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x010 | 0x020 | 0x200 | 0x400 | 0x1000 | 0x4000
| 0x8000 | 0x20000;
break;
/* NOTE: Same as 7.1. */
case DevFmtX3D71: chanmask = 0x01 | 0x02 | 0x04 | 0x08 | 0x010 | 0x020 | 0x200 | 0x400; break;
case DevFmtAmbi3D:
/* .amb output requires FuMa */
mDevice->mAmbiOrder = minu(mDevice->mAmbiOrder, 3);
mDevice->mAmbiLayout = DevAmbiLayout::FuMa;
mDevice->mAmbiScale = DevAmbiScaling::FuMa;
isbformat = true;
chanmask = 0;
break;
}
bytes = mDevice->bytesFromFmt();
channels = mDevice->channelsFromFmt();
rewind(mFile);
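/* Write a RIFF/WAVE header using WAVE_FORMAT_EXTENSIBLE. The RIFF and data chunk sizes are placeholders here; stop() patches them once the final length is known. */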
fputs("RIFF", mFile);
fwrite32le(0xFFFFFFFF, mFile); // 'RIFF' header len; filled in at close
fputs("WAVE", mFile);
fputs("fmt ", mFile);
fwrite32le(40, mFile); // 'fmt ' header len; 40 bytes for EXTENSIBLE
// 16-bit val, format type id (extensible: 0xFFFE)
fwrite16le(0xFFFE, mFile);
// 16-bit val, channel count
fwrite16le(static_cast<ushort>(channels), mFile);
// 32-bit val, frequency
fwrite32le(mDevice->Frequency, mFile);
// 32-bit val, bytes per second
fwrite32le(mDevice->Frequency * channels * bytes, mFile);
// 16-bit val, frame size
fwrite16le(static_cast<ushort>(channels * bytes), mFile);
// 16-bit val, bits per sample
fwrite16le(static_cast<ushort>(bytes * 8), mFile);
// 16-bit val, extra byte count
fwrite16le(22, mFile);
// 16-bit val, valid bits per sample
fwrite16le(static_cast<ushort>(bytes * 8), mFile);
// 32-bit val, channel mask
fwrite32le(chanmask, mFile);
// 16 byte GUID, sub-type format
val = fwrite((mDevice->FmtType == DevFmtFloat) ?
(isbformat ? SUBTYPE_BFORMAT_FLOAT : SUBTYPE_FLOAT) :
(isbformat ? SUBTYPE_BFORMAT_PCM : SUBTYPE_PCM), 1, 16, mFile);
(void)val;
fputs("data", mFile);
fwrite32le(0xFFFFFFFF, mFile); // 'data' header len; filled in at close
if(ferror(mFile))
{
ERR("Error writing header: %s\n", strerror(errno));
return false;
}
mDataStart = ftell(mFile);
setDefaultWFXChannelOrder();
const uint bufsize{mDevice->frameSizeFromFmt() * mDevice->UpdateSize};
mBuffer.resize(bufsize);
return true;
}
void WaveBackend::start()
{
if(mDataStart > 0 && fseek(mFile, 0, SEEK_END) != 0)
WARN("Failed to seek on output file\n");
try {
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&WaveBackend::mixerProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void WaveBackend::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
if(mDataStart > 0)
{
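/* Patch the RIFF and data chunk length placeholders written by reset(), now that the total file size is known. */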
long size{ftell(mFile)};
if(size > 0)
{
long dataLen{size - mDataStart};
if(fseek(mFile, 4, SEEK_SET) == 0)
fwrite32le(static_cast<uint>(size-8), mFile); // 'WAVE' header len
if(fseek(mFile, mDataStart-4, SEEK_SET) == 0)
fwrite32le(static_cast<uint>(dataLen), mFile); // 'data' header len
}
}
}
} // namespace
bool WaveBackendFactory::init()
{ return true; }
bool WaveBackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback; }
std::string WaveBackendFactory::probe(BackendType type)
{
std::string outnames;
switch(type)
{
case BackendType::Playback:
/* Includes null char. */
outnames.append(waveDevice, sizeof(waveDevice));
break;
case BackendType::Capture:
break;
}
return outnames;
}
BackendPtr WaveBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new WaveBackend{device}};
return nullptr;
}
BackendFactory &WaveBackendFactory::getFactory()
{
static WaveBackendFactory factory{};
return factory;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_WAVE_H
#define BACKENDS_WAVE_H
#include "base.h"
struct WaveBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_WAVE_H */


@ -0,0 +1,628 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 1999-2007 by authors.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include "winmm.h"
#include <stdlib.h>
#include <stdio.h>
#include <memory.h>
#include <windows.h>
#include <mmsystem.h>
#include <mmreg.h>
#include <array>
#include <atomic>
#include <thread>
#include <vector>
#include <string>
#include <algorithm>
#include <functional>
#include "alnumeric.h"
#include "core/device.h"
#include "core/helpers.h"
#include "core/logging.h"
#include "ringbuffer.h"
#include "strutils.h"
#include "threads.h"
#ifndef WAVE_FORMAT_IEEE_FLOAT
#define WAVE_FORMAT_IEEE_FLOAT 0x0003
#endif
namespace {
#define DEVNAME_HEAD "OpenAL Soft on "
al::vector<std::string> PlaybackDevices;
al::vector<std::string> CaptureDevices;
bool checkName(const al::vector<std::string> &list, const std::string &name)
{ return std::find(list.cbegin(), list.cend(), name) != list.cend(); }
void ProbePlaybackDevices(void)
{
PlaybackDevices.clear();
UINT numdevs{waveOutGetNumDevs()};
PlaybackDevices.reserve(numdevs);
for(UINT i{0};i < numdevs;++i)
{
std::string dname;
WAVEOUTCAPSW WaveCaps{};
if(waveOutGetDevCapsW(i, &WaveCaps, sizeof(WaveCaps)) == MMSYSERR_NOERROR)
{
const std::string basename{DEVNAME_HEAD + wstr_to_utf8(WaveCaps.szPname)};
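/* Disambiguate duplicate device names by appending " #2", " #3", etc (e.g. "OpenAL Soft on Speakers #2"). */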
int count{1};
std::string newname{basename};
while(checkName(PlaybackDevices, newname))
{
newname = basename;
newname += " #";
newname += std::to_string(++count);
}
dname = std::move(newname);
TRACE("Got device \"%s\", ID %u\n", dname.c_str(), i);
}
PlaybackDevices.emplace_back(std::move(dname));
}
}
void ProbeCaptureDevices(void)
{
CaptureDevices.clear();
UINT numdevs{waveInGetNumDevs()};
CaptureDevices.reserve(numdevs);
for(UINT i{0};i < numdevs;++i)
{
std::string dname;
WAVEINCAPSW WaveCaps{};
if(waveInGetDevCapsW(i, &WaveCaps, sizeof(WaveCaps)) == MMSYSERR_NOERROR)
{
const std::string basename{DEVNAME_HEAD + wstr_to_utf8(WaveCaps.szPname)};
int count{1};
std::string newname{basename};
while(checkName(CaptureDevices, newname))
{
newname = basename;
newname += " #";
newname += std::to_string(++count);
}
dname = std::move(newname);
TRACE("Got device \"%s\", ID %u\n", dname.c_str(), i);
}
CaptureDevices.emplace_back(std::move(dname));
}
}
struct WinMMPlayback final : public BackendBase {
WinMMPlayback(DeviceBase *device) noexcept : BackendBase{device} { }
~WinMMPlayback() override;
void CALLBACK waveOutProc(HWAVEOUT device, UINT msg, DWORD_PTR param1, DWORD_PTR param2) noexcept;
static void CALLBACK waveOutProcC(HWAVEOUT device, UINT msg, DWORD_PTR instance, DWORD_PTR param1, DWORD_PTR param2) noexcept
{ reinterpret_cast<WinMMPlayback*>(instance)->waveOutProc(device, msg, param1, param2); }
int mixerProc();
void open(const char *name) override;
bool reset() override;
void start() override;
void stop() override;
std::atomic<uint> mWritable{0u};
al::semaphore mSem;
uint mIdx{0u};
std::array<WAVEHDR,4> mWaveBuffer{};
HWAVEOUT mOutHdl{nullptr};
WAVEFORMATEX mFormat{};
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(WinMMPlayback)
};
WinMMPlayback::~WinMMPlayback()
{
if(mOutHdl)
waveOutClose(mOutHdl);
mOutHdl = nullptr;
al_free(mWaveBuffer[0].lpData);
std::fill(mWaveBuffer.begin(), mWaveBuffer.end(), WAVEHDR{});
}
/* WinMMPlayback::waveOutProc
*
* Posts a message to 'WinMMPlayback::mixerProc' every time a WaveOut buffer is
* completed and returned to the application (for more data).
*/
void CALLBACK WinMMPlayback::waveOutProc(HWAVEOUT, UINT msg, DWORD_PTR, DWORD_PTR) noexcept
{
if(msg != WOM_DONE) return;
mWritable.fetch_add(1, std::memory_order_acq_rel);
mSem.post();
}
FORCE_ALIGN int WinMMPlayback::mixerProc()
{
SetRTPriority();
althrd_setname(MIXER_THREAD_NAME);
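/* mWritable counts buffers the driver has returned via waveOutProc; render into each returned buffer and queue it for playback again. */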
while(!mKillNow.load(std::memory_order_acquire)
&& mDevice->Connected.load(std::memory_order_acquire))
{
uint todo{mWritable.load(std::memory_order_acquire)};
if(todo < 1)
{
mSem.wait();
continue;
}
size_t widx{mIdx};
do {
WAVEHDR &waveHdr = mWaveBuffer[widx];
if(++widx == mWaveBuffer.size()) widx = 0;
mDevice->renderSamples(waveHdr.lpData, mDevice->UpdateSize, mFormat.nChannels);
mWritable.fetch_sub(1, std::memory_order_acq_rel);
waveOutWrite(mOutHdl, &waveHdr, sizeof(WAVEHDR));
} while(--todo);
mIdx = static_cast<uint>(widx);
}
return 0;
}
void WinMMPlayback::open(const char *name)
{
if(PlaybackDevices.empty())
ProbePlaybackDevices();
// Find the Device ID matching the deviceName if valid
auto iter = name ?
std::find(PlaybackDevices.cbegin(), PlaybackDevices.cend(), name) :
PlaybackDevices.cbegin();
if(iter == PlaybackDevices.cend())
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
auto DeviceID = static_cast<UINT>(std::distance(PlaybackDevices.cbegin(), iter));
DevFmtType fmttype{mDevice->FmtType};
retry_open:
WAVEFORMATEX format{};
if(fmttype == DevFmtFloat)
{
format.wFormatTag = WAVE_FORMAT_IEEE_FLOAT;
format.wBitsPerSample = 32;
}
else
{
format.wFormatTag = WAVE_FORMAT_PCM;
if(fmttype == DevFmtUByte || fmttype == DevFmtByte)
format.wBitsPerSample = 8;
else
format.wBitsPerSample = 16;
}
format.nChannels = ((mDevice->FmtChans == DevFmtMono) ? 1 : 2);
format.nBlockAlign = static_cast<WORD>(format.wBitsPerSample * format.nChannels / 8);
format.nSamplesPerSec = mDevice->Frequency;
format.nAvgBytesPerSec = format.nSamplesPerSec * format.nBlockAlign;
format.cbSize = 0;
HWAVEOUT outHandle{};
MMRESULT res{waveOutOpen(&outHandle, DeviceID, &format,
reinterpret_cast<DWORD_PTR>(&WinMMPlayback::waveOutProcC),
reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION)};
if(res != MMSYSERR_NOERROR)
{
if(fmttype == DevFmtFloat)
{
fmttype = DevFmtShort;
goto retry_open;
}
throw al::backend_exception{al::backend_error::DeviceError, "waveOutOpen failed: %u", res};
}
if(mOutHdl)
waveOutClose(mOutHdl);
mOutHdl = outHandle;
mFormat = format;
mDevice->DeviceName = PlaybackDevices[DeviceID];
}
bool WinMMPlayback::reset()
{
mDevice->BufferSize = static_cast<uint>(uint64_t{mDevice->BufferSize} *
mFormat.nSamplesPerSec / mDevice->Frequency);
mDevice->BufferSize = (mDevice->BufferSize+3) & ~0x3u;
mDevice->UpdateSize = mDevice->BufferSize / 4;
mDevice->Frequency = mFormat.nSamplesPerSec;
if(mFormat.wFormatTag == WAVE_FORMAT_IEEE_FLOAT)
{
if(mFormat.wBitsPerSample == 32)
mDevice->FmtType = DevFmtFloat;
else
{
ERR("Unhandled IEEE float sample depth: %d\n", mFormat.wBitsPerSample);
return false;
}
}
else if(mFormat.wFormatTag == WAVE_FORMAT_PCM)
{
if(mFormat.wBitsPerSample == 16)
mDevice->FmtType = DevFmtShort;
else if(mFormat.wBitsPerSample == 8)
mDevice->FmtType = DevFmtUByte;
else
{
ERR("Unhandled PCM sample depth: %d\n", mFormat.wBitsPerSample);
return false;
}
}
else
{
ERR("Unhandled format tag: 0x%04x\n", mFormat.wFormatTag);
return false;
}
if(mFormat.nChannels >= 2)
mDevice->FmtChans = DevFmtStereo;
else if(mFormat.nChannels == 1)
mDevice->FmtChans = DevFmtMono;
else
{
ERR("Unhandled channel count: %d\n", mFormat.nChannels);
return false;
}
setDefaultWFXChannelOrder();
uint BufferSize{mDevice->UpdateSize * mFormat.nChannels * mDevice->bytesFromFmt()};
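/* All WAVEHDR buffers share a single allocation; each one's lpData points BufferSize bytes past the previous buffer's. */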
al_free(mWaveBuffer[0].lpData);
mWaveBuffer[0] = WAVEHDR{};
mWaveBuffer[0].lpData = static_cast<char*>(al_calloc(16, BufferSize * mWaveBuffer.size()));
mWaveBuffer[0].dwBufferLength = BufferSize;
for(size_t i{1};i < mWaveBuffer.size();i++)
{
mWaveBuffer[i] = WAVEHDR{};
mWaveBuffer[i].lpData = mWaveBuffer[i-1].lpData + mWaveBuffer[i-1].dwBufferLength;
mWaveBuffer[i].dwBufferLength = BufferSize;
}
mIdx = 0;
return true;
}
void WinMMPlayback::start()
{
try {
for(auto &waveHdr : mWaveBuffer)
waveOutPrepareHeader(mOutHdl, &waveHdr, sizeof(WAVEHDR));
mWritable.store(static_cast<uint>(mWaveBuffer.size()), std::memory_order_release);
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&WinMMPlayback::mixerProc), this};
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start mixing thread: %s", e.what()};
}
}
void WinMMPlayback::stop()
{
if(mKillNow.exchange(true, std::memory_order_acq_rel) || !mThread.joinable())
return;
mThread.join();
while(mWritable.load(std::memory_order_acquire) < mWaveBuffer.size())
mSem.wait();
for(auto &waveHdr : mWaveBuffer)
waveOutUnprepareHeader(mOutHdl, &waveHdr, sizeof(WAVEHDR));
mWritable.store(0, std::memory_order_release);
}
struct WinMMCapture final : public BackendBase {
WinMMCapture(DeviceBase *device) noexcept : BackendBase{device} { }
~WinMMCapture() override;
void CALLBACK waveInProc(HWAVEIN device, UINT msg, DWORD_PTR param1, DWORD_PTR param2) noexcept;
static void CALLBACK waveInProcC(HWAVEIN device, UINT msg, DWORD_PTR instance, DWORD_PTR param1, DWORD_PTR param2) noexcept
{ reinterpret_cast<WinMMCapture*>(instance)->waveInProc(device, msg, param1, param2); }
int captureProc();
void open(const char *name) override;
void start() override;
void stop() override;
void captureSamples(al::byte *buffer, uint samples) override;
uint availableSamples() override;
std::atomic<uint> mReadable{0u};
al::semaphore mSem;
uint mIdx{0};
std::array<WAVEHDR,4> mWaveBuffer{};
HWAVEIN mInHdl{nullptr};
RingBufferPtr mRing{nullptr};
WAVEFORMATEX mFormat{};
std::atomic<bool> mKillNow{true};
std::thread mThread;
DEF_NEWDEL(WinMMCapture)
};
WinMMCapture::~WinMMCapture()
{
// Close the Wave device
if(mInHdl)
waveInClose(mInHdl);
mInHdl = nullptr;
al_free(mWaveBuffer[0].lpData);
std::fill(mWaveBuffer.begin(), mWaveBuffer.end(), WAVEHDR{});
}
/* WinMMCapture::waveInProc
*
* Posts a message to 'WinMMCapture::captureProc' every time a WaveIn buffer is
* completed and returned to the application (with more data).
*/
void CALLBACK WinMMCapture::waveInProc(HWAVEIN, UINT msg, DWORD_PTR, DWORD_PTR) noexcept
{
if(msg != WIM_DATA) return;
mReadable.fetch_add(1, std::memory_order_acq_rel);
mSem.post();
}
int WinMMCapture::captureProc()
{
althrd_setname(RECORD_THREAD_NAME);
while(!mKillNow.load(std::memory_order_acquire) &&
mDevice->Connected.load(std::memory_order_acquire))
{
uint todo{mReadable.load(std::memory_order_acquire)};
if(todo < 1)
{
mSem.wait();
continue;
}
size_t widx{mIdx};
do {
WAVEHDR &waveHdr = mWaveBuffer[widx];
widx = (widx+1) % mWaveBuffer.size();
mRing->write(waveHdr.lpData, waveHdr.dwBytesRecorded / mFormat.nBlockAlign);
mReadable.fetch_sub(1, std::memory_order_acq_rel);
waveInAddBuffer(mInHdl, &waveHdr, sizeof(WAVEHDR));
} while(--todo);
mIdx = static_cast<uint>(widx);
}
return 0;
}
void WinMMCapture::open(const char *name)
{
if(CaptureDevices.empty())
ProbeCaptureDevices();
// Find the Device ID matching the deviceName if valid
auto iter = name ?
std::find(CaptureDevices.cbegin(), CaptureDevices.cend(), name) :
CaptureDevices.cbegin();
if(iter == CaptureDevices.cend())
throw al::backend_exception{al::backend_error::NoDevice, "Device name \"%s\" not found",
name};
auto DeviceID = static_cast<UINT>(std::distance(CaptureDevices.cbegin(), iter));
switch(mDevice->FmtChans)
{
case DevFmtMono:
case DevFmtStereo:
break;
case DevFmtQuad:
case DevFmtX51:
case DevFmtX61:
case DevFmtX71:
case DevFmtX714:
case DevFmtX3D71:
case DevFmtAmbi3D:
throw al::backend_exception{al::backend_error::DeviceError, "%s capture not supported",
DevFmtChannelsString(mDevice->FmtChans)};
}
switch(mDevice->FmtType)
{
case DevFmtUByte:
case DevFmtShort:
case DevFmtInt:
case DevFmtFloat:
break;
case DevFmtByte:
case DevFmtUShort:
case DevFmtUInt:
throw al::backend_exception{al::backend_error::DeviceError, "%s samples not supported",
DevFmtTypeString(mDevice->FmtType)};
}
mFormat = WAVEFORMATEX{};
mFormat.wFormatTag = (mDevice->FmtType == DevFmtFloat) ?
WAVE_FORMAT_IEEE_FLOAT : WAVE_FORMAT_PCM;
mFormat.nChannels = static_cast<WORD>(mDevice->channelsFromFmt());
mFormat.wBitsPerSample = static_cast<WORD>(mDevice->bytesFromFmt() * 8);
mFormat.nBlockAlign = static_cast<WORD>(mFormat.wBitsPerSample * mFormat.nChannels / 8);
mFormat.nSamplesPerSec = mDevice->Frequency;
mFormat.nAvgBytesPerSec = mFormat.nSamplesPerSec * mFormat.nBlockAlign;
mFormat.cbSize = 0;
MMRESULT res{waveInOpen(&mInHdl, DeviceID, &mFormat,
reinterpret_cast<DWORD_PTR>(&WinMMCapture::waveInProcC),
reinterpret_cast<DWORD_PTR>(this), CALLBACK_FUNCTION)};
if(res != MMSYSERR_NOERROR)
throw al::backend_exception{al::backend_error::DeviceError, "waveInOpen failed: %u", res};
// Ensure each buffer is 50ms
DWORD BufferSize{mFormat.nAvgBytesPerSec / 20u};
BufferSize -= (BufferSize % mFormat.nBlockAlign);
// Allocate circular memory buffer for the captured audio
// Make sure circular buffer is at least 100ms in size
uint CapturedDataSize{mDevice->BufferSize};
CapturedDataSize = static_cast<uint>(maxz(CapturedDataSize, BufferSize*mWaveBuffer.size()));
mRing = RingBuffer::Create(CapturedDataSize, mFormat.nBlockAlign, false);
al_free(mWaveBuffer[0].lpData);
mWaveBuffer[0] = WAVEHDR{};
mWaveBuffer[0].lpData = static_cast<char*>(al_calloc(16, BufferSize * mWaveBuffer.size()));
mWaveBuffer[0].dwBufferLength = BufferSize;
for(size_t i{1};i < mWaveBuffer.size();++i)
{
mWaveBuffer[i] = WAVEHDR{};
mWaveBuffer[i].lpData = mWaveBuffer[i-1].lpData + mWaveBuffer[i-1].dwBufferLength;
mWaveBuffer[i].dwBufferLength = mWaveBuffer[i-1].dwBufferLength;
}
mDevice->DeviceName = CaptureDevices[DeviceID];
}
void WinMMCapture::start()
{
try {
for(size_t i{0};i < mWaveBuffer.size();++i)
{
waveInPrepareHeader(mInHdl, &mWaveBuffer[i], sizeof(WAVEHDR));
waveInAddBuffer(mInHdl, &mWaveBuffer[i], sizeof(WAVEHDR));
}
mKillNow.store(false, std::memory_order_release);
mThread = std::thread{std::mem_fn(&WinMMCapture::captureProc), this};
waveInStart(mInHdl);
}
catch(std::exception& e) {
throw al::backend_exception{al::backend_error::DeviceError,
"Failed to start recording thread: %s", e.what()};
}
}
void WinMMCapture::stop()
{
waveInStop(mInHdl);
mKillNow.store(true, std::memory_order_release);
if(mThread.joinable())
{
mSem.post();
mThread.join();
}
waveInReset(mInHdl);
for(size_t i{0};i < mWaveBuffer.size();++i)
waveInUnprepareHeader(mInHdl, &mWaveBuffer[i], sizeof(WAVEHDR));
mReadable.store(0, std::memory_order_release);
mIdx = 0;
}
void WinMMCapture::captureSamples(al::byte *buffer, uint samples)
{ mRing->read(buffer, samples); }
uint WinMMCapture::availableSamples()
{ return static_cast<uint>(mRing->readSpace()); }
} // namespace
bool WinMMBackendFactory::init()
{ return true; }
bool WinMMBackendFactory::querySupport(BackendType type)
{ return type == BackendType::Playback || type == BackendType::Capture; }
std::string WinMMBackendFactory::probe(BackendType type)
{
std::string outnames;
auto add_device = [&outnames](const std::string &dname) -> void
{
/* +1 to also append the null char (to ensure a null-separated list and
* double-null terminated list).
*/
if(!dname.empty())
outnames.append(dname.c_str(), dname.length()+1);
};
switch(type)
{
case BackendType::Playback:
ProbePlaybackDevices();
std::for_each(PlaybackDevices.cbegin(), PlaybackDevices.cend(), add_device);
break;
case BackendType::Capture:
ProbeCaptureDevices();
std::for_each(CaptureDevices.cbegin(), CaptureDevices.cend(), add_device);
break;
}
return outnames;
}
BackendPtr WinMMBackendFactory::createBackend(DeviceBase *device, BackendType type)
{
if(type == BackendType::Playback)
return BackendPtr{new WinMMPlayback{device}};
if(type == BackendType::Capture)
return BackendPtr{new WinMMCapture{device}};
return nullptr;
}
BackendFactory &WinMMBackendFactory::getFactory()
{
static WinMMBackendFactory factory{};
return factory;
}


@ -0,0 +1,19 @@
#ifndef BACKENDS_WINMM_H
#define BACKENDS_WINMM_H
#include "base.h"
struct WinMMBackendFactory final : public BackendFactory {
public:
bool init() override;
bool querySupport(BackendType type) override;
std::string probe(BackendType type) override;
BackendPtr createBackend(DeviceBase *device, BackendType type) override;
static BackendFactory &getFactory();
};
#endif /* BACKENDS_WINMM_H */

1105
externals/openal-soft/alc/context.cpp vendored Normal file

File diff suppressed because it is too large

540
externals/openal-soft/alc/context.h vendored Normal file

@ -0,0 +1,540 @@
#ifndef ALC_CONTEXT_H
#define ALC_CONTEXT_H
#include <atomic>
#include <memory>
#include <mutex>
#include <stdint.h>
#include <utility>
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"
#include "al/listener.h"
#include "almalloc.h"
#include "alnumeric.h"
#include "atomic.h"
#include "core/context.h"
#include "intrusive_ptr.h"
#include "vector.h"
#ifdef ALSOFT_EAX
#include "al/eax/call.h"
#include "al/eax/exception.h"
#include "al/eax/fx_slot_index.h"
#include "al/eax/fx_slots.h"
#include "al/eax/utils.h"
#endif // ALSOFT_EAX
struct ALeffect;
struct ALeffectslot;
struct ALsource;
using uint = unsigned int;
struct SourceSubList {
uint64_t FreeMask{~0_u64};
ALsource *Sources{nullptr}; /* 64 */
SourceSubList() noexcept = default;
SourceSubList(const SourceSubList&) = delete;
SourceSubList(SourceSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Sources{rhs.Sources}
{ rhs.FreeMask = ~0_u64; rhs.Sources = nullptr; }
~SourceSubList();
SourceSubList& operator=(const SourceSubList&) = delete;
SourceSubList& operator=(SourceSubList&& rhs) noexcept
{ std::swap(FreeMask, rhs.FreeMask); std::swap(Sources, rhs.Sources); return *this; }
};
struct EffectSlotSubList {
uint64_t FreeMask{~0_u64};
ALeffectslot *EffectSlots{nullptr}; /* 64 */
EffectSlotSubList() noexcept = default;
EffectSlotSubList(const EffectSlotSubList&) = delete;
EffectSlotSubList(EffectSlotSubList&& rhs) noexcept
: FreeMask{rhs.FreeMask}, EffectSlots{rhs.EffectSlots}
{ rhs.FreeMask = ~0_u64; rhs.EffectSlots = nullptr; }
~EffectSlotSubList();
EffectSlotSubList& operator=(const EffectSlotSubList&) = delete;
EffectSlotSubList& operator=(EffectSlotSubList&& rhs) noexcept
{ std::swap(FreeMask, rhs.FreeMask); std::swap(EffectSlots, rhs.EffectSlots); return *this; }
};
struct ALCcontext : public al::intrusive_ref<ALCcontext>, ContextBase {
const al::intrusive_ptr<ALCdevice> mALDevice;
bool mPropsDirty{true};
bool mDeferUpdates{false};
std::mutex mPropLock;
std::atomic<ALenum> mLastError{AL_NO_ERROR};
DistanceModel mDistanceModel{DistanceModel::Default};
bool mSourceDistanceModel{false};
float mDopplerFactor{1.0f};
float mDopplerVelocity{1.0f};
float mSpeedOfSound{SpeedOfSoundMetersPerSec};
float mAirAbsorptionGainHF{AirAbsorbGainHF};
std::mutex mEventCbLock;
ALEVENTPROCSOFT mEventCb{};
void *mEventParam{nullptr};
ALlistener mListener{};
al::vector<SourceSubList> mSourceList;
ALuint mNumSources{0};
std::mutex mSourceLock;
al::vector<EffectSlotSubList> mEffectSlotList;
ALuint mNumEffectSlots{0u};
std::mutex mEffectSlotLock;
/* Default effect slot */
std::unique_ptr<ALeffectslot> mDefaultSlot;
const char *mExtensionList{nullptr};
std::string mExtensionListOverride{};
ALCcontext(al::intrusive_ptr<ALCdevice> device);
ALCcontext(const ALCcontext&) = delete;
ALCcontext& operator=(const ALCcontext&) = delete;
~ALCcontext();
void init();
/**
* Removes the context from its device and removes it from being current on
* the running thread or globally. Returns true if other contexts still
* exist on the device.
*/
bool deinit();
/**
* Defers/suspends updates for the given context's listener and sources.
* This does *NOT* stop mixing, but rather prevents certain property
* changes from taking effect. mPropLock must be held when called.
*/
void deferUpdates() noexcept { mDeferUpdates = true; }
/**
* Resumes update processing after being deferred. mPropLock must be held
* when called.
*/
void processUpdates()
{
if(std::exchange(mDeferUpdates, false))
applyAllUpdates();
}
/**
* Applies all pending updates for the context, listener, effect slots, and
* sources.
*/
void applyAllUpdates();
#ifdef __USE_MINGW_ANSI_STDIO
[[gnu::format(gnu_printf, 3, 4)]]
#else
[[gnu::format(printf, 3, 4)]]
#endif
void setError(ALenum errorCode, const char *msg, ...);
/* Process-wide current context */
static std::atomic<bool> sGlobalContextLock;
static std::atomic<ALCcontext*> sGlobalContext;
private:
/* Thread-local current context. */
static thread_local ALCcontext *sLocalContext;
/* Thread-local context handling. This handles attempting to release the
* context which may have been left current when the thread is destroyed.
*/
class ThreadCtx {
public:
~ThreadCtx();
void set(ALCcontext *ctx) const noexcept { sLocalContext = ctx; }
};
static thread_local ThreadCtx sThreadContext;
public:
/* HACK: MinGW generates bad code when accessing an extern thread_local
* object. Add a wrapper function for it that only accesses it where it's
* defined.
*/
#ifdef __MINGW32__
static ALCcontext *getThreadContext() noexcept;
static void setThreadContext(ALCcontext *context) noexcept;
#else
static ALCcontext *getThreadContext() noexcept { return sLocalContext; }
static void setThreadContext(ALCcontext *context) noexcept { sThreadContext.set(context); }
#endif
/* Default effect that applies to sources that don't have an effect on send 0. */
static ALeffect sDefaultEffect;
DEF_NEWDEL(ALCcontext)
#ifdef ALSOFT_EAX
public:
bool hasEax() const noexcept { return mEaxIsInitialized; }
bool eaxIsCapable() const noexcept;
void eaxUninitialize() noexcept;
ALenum eax_eax_set(
const GUID* property_set_id,
ALuint property_id,
ALuint property_source_id,
ALvoid* property_value,
ALuint property_value_size);
ALenum eax_eax_get(
const GUID* property_set_id,
ALuint property_id,
ALuint property_source_id,
ALvoid* property_value,
ALuint property_value_size);
void eaxSetLastError() noexcept;
EaxFxSlotIndex eaxGetPrimaryFxSlotIndex() const noexcept
{ return mEaxPrimaryFxSlotIndex; }
const ALeffectslot& eaxGetFxSlot(EaxFxSlotIndexValue fx_slot_index) const
{ return mEaxFxSlots.get(fx_slot_index); }
ALeffectslot& eaxGetFxSlot(EaxFxSlotIndexValue fx_slot_index)
{ return mEaxFxSlots.get(fx_slot_index); }
bool eaxNeedsCommit() const noexcept { return mEaxNeedsCommit; }
void eaxCommit();
void eaxCommitFxSlots()
{ mEaxFxSlots.commit(); }
private:
static constexpr auto eax_primary_fx_slot_id_dirty_bit = EaxDirtyFlags{1} << 0;
static constexpr auto eax_distance_factor_dirty_bit = EaxDirtyFlags{1} << 1;
static constexpr auto eax_air_absorption_hf_dirty_bit = EaxDirtyFlags{1} << 2;
static constexpr auto eax_hf_reference_dirty_bit = EaxDirtyFlags{1} << 3;
static constexpr auto eax_macro_fx_factor_dirty_bit = EaxDirtyFlags{1} << 4;
using Eax4Props = EAX40CONTEXTPROPERTIES;
struct Eax4State {
Eax4Props i; // Immediate.
Eax4Props d; // Deferred.
};
using Eax5Props = EAX50CONTEXTPROPERTIES;
struct Eax5State {
Eax5Props i; // Immediate.
Eax5Props d; // Deferred.
};
class ContextException : public EaxException
{
public:
explicit ContextException(const char* message)
: EaxException{"EAX_CONTEXT", message}
{}
};
struct Eax4PrimaryFxSlotIdValidator {
void operator()(const GUID& guidPrimaryFXSlotID) const
{
if(guidPrimaryFXSlotID != EAX_NULL_GUID &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX40_FXSlot0 &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX40_FXSlot1 &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX40_FXSlot2 &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX40_FXSlot3)
{
eax_fail_unknown_primary_fx_slot_id();
}
}
};
struct Eax4DistanceFactorValidator {
void operator()(float flDistanceFactor) const
{
eax_validate_range<ContextException>(
"Distance Factor",
flDistanceFactor,
EAXCONTEXT_MINDISTANCEFACTOR,
EAXCONTEXT_MAXDISTANCEFACTOR);
}
};
struct Eax4AirAbsorptionHfValidator {
void operator()(float flAirAbsorptionHF) const
{
eax_validate_range<ContextException>(
"Air Absorption HF",
flAirAbsorptionHF,
EAXCONTEXT_MINAIRABSORPTIONHF,
EAXCONTEXT_MAXAIRABSORPTIONHF);
}
};
struct Eax4HfReferenceValidator {
void operator()(float flHFReference) const
{
eax_validate_range<ContextException>(
"HF Reference",
flHFReference,
EAXCONTEXT_MINHFREFERENCE,
EAXCONTEXT_MAXHFREFERENCE);
}
};
struct Eax4AllValidator {
void operator()(const EAX40CONTEXTPROPERTIES& all) const
{
Eax4PrimaryFxSlotIdValidator{}(all.guidPrimaryFXSlotID);
Eax4DistanceFactorValidator{}(all.flDistanceFactor);
Eax4AirAbsorptionHfValidator{}(all.flAirAbsorptionHF);
Eax4HfReferenceValidator{}(all.flHFReference);
}
};
struct Eax5PrimaryFxSlotIdValidator {
void operator()(const GUID& guidPrimaryFXSlotID) const
{
if(guidPrimaryFXSlotID != EAX_NULL_GUID &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX50_FXSlot0 &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX50_FXSlot1 &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX50_FXSlot2 &&
guidPrimaryFXSlotID != EAXPROPERTYID_EAX50_FXSlot3)
{
eax_fail_unknown_primary_fx_slot_id();
}
}
};
struct Eax5MacroFxFactorValidator {
void operator()(float flMacroFXFactor) const
{
eax_validate_range<ContextException>(
"Macro FX Factor",
flMacroFXFactor,
EAXCONTEXT_MINMACROFXFACTOR,
EAXCONTEXT_MAXMACROFXFACTOR);
}
};
struct Eax5AllValidator {
void operator()(const EAX50CONTEXTPROPERTIES& all) const
{
Eax5PrimaryFxSlotIdValidator{}(all.guidPrimaryFXSlotID);
Eax4DistanceFactorValidator{}(all.flDistanceFactor);
Eax4AirAbsorptionHfValidator{}(all.flAirAbsorptionHF);
Eax4HfReferenceValidator{}(all.flHFReference);
Eax5MacroFxFactorValidator{}(all.flMacroFXFactor);
}
};
struct Eax5EaxVersionValidator {
void operator()(unsigned long ulEAXVersion) const
{
eax_validate_range<ContextException>(
"EAX version",
ulEAXVersion,
EAXCONTEXT_MINEAXSESSION,
EAXCONTEXT_MAXEAXSESSION);
}
};
struct Eax5MaxActiveSendsValidator {
void operator()(unsigned long ulMaxActiveSends) const
{
eax_validate_range<ContextException>(
"Max Active Sends",
ulMaxActiveSends,
EAXCONTEXT_MINMAXACTIVESENDS,
EAXCONTEXT_MAXMAXACTIVESENDS);
}
};
struct Eax5SessionAllValidator {
void operator()(const EAXSESSIONPROPERTIES& all) const
{
Eax5EaxVersionValidator{}(all.ulEAXVersion);
Eax5MaxActiveSendsValidator{}(all.ulMaxActiveSends);
}
};
struct Eax5SpeakerConfigValidator {
void operator()(unsigned long ulSpeakerConfig) const
{
eax_validate_range<ContextException>(
"Speaker Config",
ulSpeakerConfig,
EAXCONTEXT_MINSPEAKERCONFIG,
EAXCONTEXT_MAXSPEAKERCONFIG);
}
};
bool mEaxIsInitialized{};
bool mEaxIsTried{};
long mEaxLastError{};
unsigned long mEaxSpeakerConfig{};
EaxFxSlotIndex mEaxPrimaryFxSlotIndex{};
EaxFxSlots mEaxFxSlots{};
int mEaxVersion{}; // Current EAX version.
bool mEaxNeedsCommit{};
EaxDirtyFlags mEaxDf{}; // Dirty flags for the current EAX version.
Eax5State mEax123{}; // EAX1/EAX2/EAX3 state.
Eax4State mEax4{}; // EAX4 state.
Eax5State mEax5{}; // EAX5 state.
Eax5Props mEax{}; // Current EAX state.
EAXSESSIONPROPERTIES mEaxSession{};
[[noreturn]] static void eax_fail(const char* message);
[[noreturn]] static void eax_fail_unknown_property_set_id();
[[noreturn]] static void eax_fail_unknown_primary_fx_slot_id();
[[noreturn]] static void eax_fail_unknown_property_id();
[[noreturn]] static void eax_fail_unknown_version();
// Gets a value from the EAX call,
// validates it,
// and updates the current value.
template<typename TValidator, typename TProperty>
static void eax_set(const EaxCall& call, TProperty& property)
{
const auto& value = call.get_value<ContextException, const TProperty>();
TValidator{}(value);
property = value;
}
// Gets a new value from the EAX call,
// validates it,
// updates the deferred value,
// and updates the dirty flag.
template<
typename TValidator,
EaxDirtyFlags TDirtyBit,
typename TMemberResult,
typename TProps,
typename TState>
void eax_defer(const EaxCall& call, TState& state, TMemberResult TProps::*member) noexcept
{
const auto& src = call.get_value<ContextException, const TMemberResult>();
TValidator{}(src);
const auto& dst_i = state.i.*member;
auto& dst_d = state.d.*member;
dst_d = src;
if(dst_i != dst_d)
mEaxDf |= TDirtyBit;
}
template<
EaxDirtyFlags TDirtyBit,
typename TMemberResult,
typename TProps,
typename TState>
void eax_context_commit_property(TState& state, EaxDirtyFlags& dst_df,
TMemberResult TProps::*member) noexcept
{
if((mEaxDf & TDirtyBit) != EaxDirtyFlags{})
{
dst_df |= TDirtyBit;
const auto& src_d = state.d.*member;
state.i.*member = src_d;
mEax.*member = src_d;
}
}
void eax_initialize_extensions();
void eax_initialize();
bool eax_has_no_default_effect_slot() const noexcept;
void eax_ensure_no_default_effect_slot() const;
bool eax_has_enough_aux_sends() const noexcept;
void eax_ensure_enough_aux_sends() const;
void eax_ensure_compatibility();
unsigned long eax_detect_speaker_configuration() const;
void eax_update_speaker_configuration();
void eax_set_last_error_defaults() noexcept;
void eax_session_set_defaults() noexcept;
static void eax4_context_set_defaults(Eax4Props& props) noexcept;
static void eax4_context_set_defaults(Eax4State& state) noexcept;
static void eax5_context_set_defaults(Eax5Props& props) noexcept;
static void eax5_context_set_defaults(Eax5State& state) noexcept;
void eax_context_set_defaults();
void eax_set_defaults();
void eax_dispatch_fx_slot(const EaxCall& call);
void eax_dispatch_source(const EaxCall& call);
void eax_get_misc(const EaxCall& call);
void eax4_get(const EaxCall& call, const Eax4Props& props);
void eax5_get(const EaxCall& call, const Eax5Props& props);
void eax_get(const EaxCall& call);
void eax_context_commit_primary_fx_slot_id();
void eax_context_commit_distance_factor();
void eax_context_commit_air_absorbtion_hf();
void eax_context_commit_hf_reference();
void eax_context_commit_macro_fx_factor();
void eax_initialize_fx_slots();
void eax_update_sources();
void eax_set_misc(const EaxCall& call);
void eax4_defer_all(const EaxCall& call, Eax4State& state);
void eax4_defer(const EaxCall& call, Eax4State& state);
void eax5_defer_all(const EaxCall& call, Eax5State& state);
void eax5_defer(const EaxCall& call, Eax5State& state);
void eax_set(const EaxCall& call);
void eax4_context_commit(Eax4State& state, EaxDirtyFlags& dst_df);
void eax5_context_commit(Eax5State& state, EaxDirtyFlags& dst_df);
void eax_context_commit();
#endif // ALSOFT_EAX
};
using ContextRef = al::intrusive_ptr<ALCcontext>;
ContextRef GetContextRef(void);
void UpdateContextProps(ALCcontext *context);
extern bool TrapALError;
#ifdef ALSOFT_EAX
ALenum AL_APIENTRY EAXSet(
const GUID* property_set_id,
ALuint property_id,
ALuint property_source_id,
ALvoid* property_value,
ALuint property_value_size) noexcept;
ALenum AL_APIENTRY EAXGet(
const GUID* property_set_id,
ALuint property_id,
ALuint property_source_id,
ALvoid* property_value,
ALuint property_value_size) noexcept;
#endif // ALSOFT_EAX
#endif /* ALC_CONTEXT_H */

93
externals/openal-soft/alc/device.cpp vendored Normal file

@ -0,0 +1,93 @@
#include "config.h"
#include "device.h"
#include <numeric>
#include <stddef.h>
#include "albit.h"
#include "alconfig.h"
#include "backends/base.h"
#include "core/bformatdec.h"
#include "core/bs2b.h"
#include "core/front_stablizer.h"
#include "core/hrtf.h"
#include "core/logging.h"
#include "core/mastering.h"
#include "core/uhjfilter.h"
namespace {
using voidp = void*;
} // namespace
ALCdevice::ALCdevice(DeviceType type) : DeviceBase{type}
{ }
ALCdevice::~ALCdevice()
{
TRACE("Freeing device %p\n", voidp{this});
Backend = nullptr;
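/* Count objects the application failed to delete by popcounting the in-use bits (~FreeMask) of each sublist. */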
size_t count{std::accumulate(BufferList.cbegin(), BufferList.cend(), size_t{0u},
[](size_t cur, const BufferSubList &sublist) noexcept -> size_t
{ return cur + static_cast<uint>(al::popcount(~sublist.FreeMask)); })};
if(count > 0)
WARN("%zu Buffer%s not deleted\n", count, (count==1)?"":"s");
count = std::accumulate(EffectList.cbegin(), EffectList.cend(), size_t{0u},
[](size_t cur, const EffectSubList &sublist) noexcept -> size_t
{ return cur + static_cast<uint>(al::popcount(~sublist.FreeMask)); });
if(count > 0)
WARN("%zu Effect%s not deleted\n", count, (count==1)?"":"s");
count = std::accumulate(FilterList.cbegin(), FilterList.cend(), size_t{0u},
[](size_t cur, const FilterSubList &sublist) noexcept -> size_t
{ return cur + static_cast<uint>(al::popcount(~sublist.FreeMask)); });
if(count > 0)
WARN("%zu Filter%s not deleted\n", count, (count==1)?"":"s");
}
void ALCdevice::enumerateHrtfs()
{
mHrtfList = EnumerateHrtf(configValue<std::string>(nullptr, "hrtf-paths"));
if(auto defhrtfopt = configValue<std::string>(nullptr, "default-hrtf"))
{
auto iter = std::find(mHrtfList.begin(), mHrtfList.end(), *defhrtfopt);
if(iter == mHrtfList.end())
WARN("Failed to find default HRTF \"%s\"\n", defhrtfopt->c_str());
else if(iter != mHrtfList.begin())
std::rotate(mHrtfList.begin(), iter, iter+1);
}
}
auto ALCdevice::getOutputMode1() const noexcept -> OutputMode1
{
if(mContexts.load(std::memory_order_relaxed)->empty())
return OutputMode1::Any;
switch(FmtChans)
{
case DevFmtMono: return OutputMode1::Mono;
case DevFmtStereo:
if(mHrtf)
return OutputMode1::Hrtf;
else if(mUhjEncoder)
return OutputMode1::Uhj2;
return OutputMode1::StereoBasic;
case DevFmtQuad: return OutputMode1::Quad;
case DevFmtX51: return OutputMode1::X51;
case DevFmtX61: return OutputMode1::X61;
case DevFmtX71: return OutputMode1::X71;
case DevFmtX714:
case DevFmtX3D71:
case DevFmtAmbi3D:
break;
}
return OutputMode1::Any;
}

165
externals/openal-soft/alc/device.h vendored Normal file

@ -0,0 +1,165 @@
#ifndef ALC_DEVICE_H
#define ALC_DEVICE_H
#include <atomic>
#include <memory>
#include <mutex>
#include <stdint.h>
#include <string>
#include <utility>
#include "AL/alc.h"
#include "AL/alext.h"
#include "alconfig.h"
#include "almalloc.h"
#include "alnumeric.h"
#include "core/device.h"
#include "inprogext.h"
#include "intrusive_ptr.h"
#include "vector.h"
#ifdef ALSOFT_EAX
#include "al/eax/x_ram.h"
#endif // ALSOFT_EAX
struct ALbuffer;
struct ALeffect;
struct ALfilter;
struct BackendBase;
using uint = unsigned int;
struct BufferSubList {
uint64_t FreeMask{~0_u64};
ALbuffer *Buffers{nullptr}; /* 64 */
BufferSubList() noexcept = default;
BufferSubList(const BufferSubList&) = delete;
BufferSubList(BufferSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Buffers{rhs.Buffers}
{ rhs.FreeMask = ~0_u64; rhs.Buffers = nullptr; }
~BufferSubList();
BufferSubList& operator=(const BufferSubList&) = delete;
BufferSubList& operator=(BufferSubList&& rhs) noexcept
{ std::swap(FreeMask, rhs.FreeMask); std::swap(Buffers, rhs.Buffers); return *this; }
};
struct EffectSubList {
uint64_t FreeMask{~0_u64};
ALeffect *Effects{nullptr}; /* 64 */
EffectSubList() noexcept = default;
EffectSubList(const EffectSubList&) = delete;
EffectSubList(EffectSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Effects{rhs.Effects}
{ rhs.FreeMask = ~0_u64; rhs.Effects = nullptr; }
~EffectSubList();
EffectSubList& operator=(const EffectSubList&) = delete;
EffectSubList& operator=(EffectSubList&& rhs) noexcept
{ std::swap(FreeMask, rhs.FreeMask); std::swap(Effects, rhs.Effects); return *this; }
};
struct FilterSubList {
uint64_t FreeMask{~0_u64};
ALfilter *Filters{nullptr}; /* 64 */
FilterSubList() noexcept = default;
FilterSubList(const FilterSubList&) = delete;
FilterSubList(FilterSubList&& rhs) noexcept : FreeMask{rhs.FreeMask}, Filters{rhs.Filters}
{ rhs.FreeMask = ~0_u64; rhs.Filters = nullptr; }
~FilterSubList();
FilterSubList& operator=(const FilterSubList&) = delete;
FilterSubList& operator=(FilterSubList&& rhs) noexcept
{ std::swap(FreeMask, rhs.FreeMask); std::swap(Filters, rhs.Filters); return *this; }
};
struct ALCdevice : public al::intrusive_ref<ALCdevice>, DeviceBase {
/* This lock protects the device state (format, update size, etc) from
* being changed in multiple threads, or being accessed while
* being changed. It's also used to serialize calls to the backend.
*/
std::mutex StateLock;
std::unique_ptr<BackendBase> Backend;
ALCuint NumMonoSources{};
ALCuint NumStereoSources{};
// Maximum number of sources that can be created
uint SourcesMax{};
// Maximum number of slots that can be created
uint AuxiliaryEffectSlotMax{};
std::string mHrtfName;
al::vector<std::string> mHrtfList;
ALCenum mHrtfStatus{ALC_FALSE};
enum class OutputMode1 : ALCenum {
Any = ALC_ANY_SOFT,
Mono = ALC_MONO_SOFT,
Stereo = ALC_STEREO_SOFT,
StereoBasic = ALC_STEREO_BASIC_SOFT,
Uhj2 = ALC_STEREO_UHJ_SOFT,
Hrtf = ALC_STEREO_HRTF_SOFT,
Quad = ALC_QUAD_SOFT,
X51 = ALC_SURROUND_5_1_SOFT,
X61 = ALC_SURROUND_6_1_SOFT,
X71 = ALC_SURROUND_7_1_SOFT
};
OutputMode1 getOutputMode1() const noexcept;
using OutputMode = OutputMode1;
std::atomic<ALCenum> LastError{ALC_NO_ERROR};
// Map of Buffers for this device
std::mutex BufferLock;
al::vector<BufferSubList> BufferList;
// Map of Effects for this device
std::mutex EffectLock;
al::vector<EffectSubList> EffectList;
// Map of Filters for this device
std::mutex FilterLock;
al::vector<FilterSubList> FilterList;
#ifdef ALSOFT_EAX
ALuint eax_x_ram_free_size{eax_x_ram_max_size};
#endif // ALSOFT_EAX
ALCdevice(DeviceType type);
~ALCdevice();
void enumerateHrtfs();
bool getConfigValueBool(const char *block, const char *key, bool def)
{ return GetConfigValueBool(DeviceName.c_str(), block, key, def); }
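/* configValue<T> is only usable for the types specialized after this struct (std::string, int, uint, float, bool); other types are deleted. */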
template<typename T>
inline al::optional<T> configValue(const char *block, const char *key) = delete;
DEF_NEWDEL(ALCdevice)
};
template<>
inline al::optional<std::string> ALCdevice::configValue(const char *block, const char *key)
{ return ConfigValueStr(DeviceName.c_str(), block, key); }
template<>
inline al::optional<int> ALCdevice::configValue(const char *block, const char *key)
{ return ConfigValueInt(DeviceName.c_str(), block, key); }
template<>
inline al::optional<uint> ALCdevice::configValue(const char *block, const char *key)
{ return ConfigValueUInt(DeviceName.c_str(), block, key); }
template<>
inline al::optional<float> ALCdevice::configValue(const char *block, const char *key)
{ return ConfigValueFloat(DeviceName.c_str(), block, key); }
template<>
inline al::optional<bool> ALCdevice::configValue(const char *block, const char *key)
{ return ConfigValueBool(DeviceName.c_str(), block, key); }
#endif


@ -0,0 +1,235 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2018 by Raul Herraiz.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <iterator>
#include <utility>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
namespace {
constexpr float GainScale{31621.0f};
constexpr float MinFreq{20.0f};
constexpr float MaxFreq{2500.0f};
constexpr float QFactor{5.0f};
struct AutowahState final : public EffectState {
/* Effect parameters */
float mAttackRate;
float mReleaseRate;
float mResonanceGain;
float mPeakGain;
float mFreqMinNorm;
float mBandwidthNorm;
float mEnvDelay;
/* Filter components derived from the envelope. */
struct {
float cos_w0;
float alpha;
} mEnv[BufferLineSize];
struct {
uint mTargetChannel{InvalidChannelIndex};
/* Effect filters' history. */
struct {
float z1, z2;
} mFilter;
/* Effect gains for each output channel */
float mCurrentGain;
float mTargetGain;
} mChans[MaxAmbiChannels];
/* Effects buffers */
alignas(16) float mBufferOut[BufferLineSize];
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(AutowahState)
};
void AutowahState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
/* (Re-)initialize parameters and clear the buffers. */
mAttackRate = 1.0f;
mReleaseRate = 1.0f;
mResonanceGain = 10.0f;
mPeakGain = 4.5f;
mFreqMinNorm = 4.5e-4f;
mBandwidthNorm = 0.05f;
mEnvDelay = 0.0f;
for(auto &e : mEnv)
{
e.cos_w0 = 0.0f;
e.alpha = 0.0f;
}
for(auto &chan : mChans)
{
chan.mTargetChannel = InvalidChannelIndex;
chan.mFilter.z1 = 0.0f;
chan.mFilter.z2 = 0.0f;
chan.mCurrentGain = 0.0f;
}
}
void AutowahState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
const auto frequency = static_cast<float>(device->Frequency);
const float ReleaseTime{clampf(props->Autowah.ReleaseTime, 0.001f, 1.0f)};
mAttackRate = std::exp(-1.0f / (props->Autowah.AttackTime*frequency));
mReleaseRate = std::exp(-1.0f / (ReleaseTime*frequency));
/* 0-20dB Resonance Peak gain */
mResonanceGain = std::sqrt(std::log10(props->Autowah.Resonance)*10.0f / 3.0f);
mPeakGain = 1.0f - std::log10(props->Autowah.PeakGain / GainScale);
mFreqMinNorm = MinFreq / frequency;
mBandwidthNorm = (MaxFreq-MinFreq) / frequency;
mOutTarget = target.Main->Buffer;
auto set_channel = [this](size_t idx, uint outchan, float outgain)
{
mChans[idx].mTargetChannel = outchan;
mChans[idx].mTargetGain = outgain;
};
target.Main->setAmbiMixParams(slot->Wet, slot->Gain, set_channel);
}
void AutowahState::process(const size_t samplesToDo,
const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
const float attack_rate{mAttackRate};
const float release_rate{mReleaseRate};
const float res_gain{mResonanceGain};
const float peak_gain{mPeakGain};
const float freq_min{mFreqMinNorm};
const float bandwidth{mBandwidthNorm};
float env_delay{mEnvDelay};
for(size_t i{0u};i < samplesToDo;i++)
{
float w0, sample, a;
/* Envelope follower described in the book: Audio Effects, Theory,
* Implementation and Application.
*/
sample = peak_gain * std::fabs(samplesIn[0][i]);
a = (sample > env_delay) ? attack_rate : release_rate;
env_delay = lerpf(sample, env_delay, a);
/* Calculate the cos and alpha components for this sample's filter. */
w0 = minf((bandwidth*env_delay + freq_min), 0.46f) * (al::numbers::pi_v<float>*2.0f);
mEnv[i].cos_w0 = std::cos(w0);
mEnv[i].alpha = std::sin(w0)/(2.0f * QFactor);
}
mEnvDelay = env_delay;
auto chandata = std::begin(mChans);
for(const auto &insamples : samplesIn)
{
const size_t outidx{chandata->mTargetChannel};
if(outidx == InvalidChannelIndex)
{
++chandata;
continue;
}
/* This effectively inlines BiquadFilter_setParams for a peaking
* filter and BiquadFilter_processC. The alpha and cosine components
* for the filter coefficients were previously calculated with the
* envelope. Because the filter changes for each sample, the
* coefficients are transient and don't need to be held.
*/
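/* The coefficients below form a peaking biquad normalized by a[0]; the difference equation is evaluated in transposed direct-form II. */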
float z1{chandata->mFilter.z1};
float z2{chandata->mFilter.z2};
for(size_t i{0u};i < samplesToDo;i++)
{
const float alpha{mEnv[i].alpha};
const float cos_w0{mEnv[i].cos_w0};
float input, output;
float a[3], b[3];
b[0] = 1.0f + alpha*res_gain;
b[1] = -2.0f * cos_w0;
b[2] = 1.0f - alpha*res_gain;
a[0] = 1.0f + alpha/res_gain;
a[1] = -2.0f * cos_w0;
a[2] = 1.0f - alpha/res_gain;
input = insamples[i];
output = input*(b[0]/a[0]) + z1;
z1 = input*(b[1]/a[0]) - output*(a[1]/a[0]) + z2;
z2 = input*(b[2]/a[0]) - output*(a[2]/a[0]);
mBufferOut[i] = output;
}
chandata->mFilter.z1 = z1;
chandata->mFilter.z2 = z2;
/* Now, mix the processed sound data to the output. */
MixSamples({mBufferOut, samplesToDo}, samplesOut[outidx].data(), chandata->mCurrentGain,
chandata->mTargetGain, samplesToDo);
++chandata;
}
}
struct AutowahStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new AutowahState{}}; }
};
} // namespace
EffectStateFactory *AutowahStateFactory_getFactory()
{
static AutowahStateFactory AutowahFactory{};
return &AutowahFactory;
}

View File

@ -0,0 +1,26 @@
#ifndef EFFECTS_BASE_H
#define EFFECTS_BASE_H
#include "core/effects/base.h"
EffectStateFactory *NullStateFactory_getFactory(void);
EffectStateFactory *ReverbStateFactory_getFactory(void);
EffectStateFactory *StdReverbStateFactory_getFactory(void);
EffectStateFactory *AutowahStateFactory_getFactory(void);
EffectStateFactory *ChorusStateFactory_getFactory(void);
EffectStateFactory *CompressorStateFactory_getFactory(void);
EffectStateFactory *DistortionStateFactory_getFactory(void);
EffectStateFactory *EchoStateFactory_getFactory(void);
EffectStateFactory *EqualizerStateFactory_getFactory(void);
EffectStateFactory *FlangerStateFactory_getFactory(void);
EffectStateFactory *FshifterStateFactory_getFactory(void);
EffectStateFactory *ModulatorStateFactory_getFactory(void);
EffectStateFactory *PshifterStateFactory_getFactory(void);
EffectStateFactory *VmorpherStateFactory_getFactory(void);
EffectStateFactory *DedicatedStateFactory_getFactory(void);
EffectStateFactory *ConvolutionStateFactory_getFactory(void);
#endif /* EFFECTS_BASE_H */

View File

@ -0,0 +1,330 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2013 by Mike Gorchak
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <climits>
#include <cstdlib>
#include <iterator>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "core/mixer/defs.h"
#include "core/resampler_limits.h"
#include "intrusive_ptr.h"
#include "opthelpers.h"
#include "vector.h"
namespace {
using uint = unsigned int;
struct ChorusState final : public EffectState {
al::vector<float,16> mDelayBuffer;
uint mOffset{0};
uint mLfoOffset{0};
uint mLfoRange{1};
float mLfoScale{0.0f};
uint mLfoDisp{0};
/* Calculated delays to apply to the left and right outputs. */
uint mModDelays[2][BufferLineSize];
/* Temp storage for the modulated left and right outputs. */
alignas(16) float mBuffer[2][BufferLineSize];
/* Gains for left and right outputs. */
struct {
float Current[MaxAmbiChannels]{};
float Target[MaxAmbiChannels]{};
} mGains[2];
/* effect parameters */
ChorusWaveform mWaveform{};
int mDelay{0};
float mDepth{0.0f};
float mFeedback{0.0f};
void calcTriangleDelays(const size_t todo);
void calcSinusoidDelays(const size_t todo);
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(ChorusState)
};
void ChorusState::deviceUpdate(const DeviceBase *Device, const BufferStorage*)
{
constexpr float max_delay{maxf(ChorusMaxDelay, FlangerMaxDelay)};
const auto frequency = static_cast<float>(Device->Frequency);
const size_t maxlen{NextPowerOf2(float2uint(max_delay*2.0f*frequency) + 1u)};
if(maxlen != mDelayBuffer.size())
decltype(mDelayBuffer)(maxlen).swap(mDelayBuffer);
std::fill(mDelayBuffer.begin(), mDelayBuffer.end(), 0.0f);
for(auto &e : mGains)
{
std::fill(std::begin(e.Current), std::end(e.Current), 0.0f);
std::fill(std::begin(e.Target), std::end(e.Target), 0.0f);
}
}
void ChorusState::update(const ContextBase *Context, const EffectSlot *Slot,
const EffectProps *props, const EffectTarget target)
{
constexpr int mindelay{(MaxResamplerPadding>>1) << MixerFracBits};
/* The LFO depth is scaled to be relative to the sample delay. Clamp the
* delay and depth to allow enough padding for resampling.
*/
const DeviceBase *device{Context->mDevice};
const auto frequency = static_cast<float>(device->Frequency);
mWaveform = props->Chorus.Waveform;
mDelay = maxi(float2int(props->Chorus.Delay*frequency*MixerFracOne + 0.5f), mindelay);
mDepth = minf(props->Chorus.Depth * static_cast<float>(mDelay),
static_cast<float>(mDelay - mindelay));
mFeedback = props->Chorus.Feedback;
/* Gains for left and right sides */
static constexpr auto inv_sqrt2 = static_cast<float>(1.0 / al::numbers::sqrt2);
static constexpr auto lcoeffs_pw = CalcDirectionCoeffs({-1.0f, 0.0f, 0.0f});
static constexpr auto rcoeffs_pw = CalcDirectionCoeffs({ 1.0f, 0.0f, 0.0f});
static constexpr auto lcoeffs_nrml = CalcDirectionCoeffs({-inv_sqrt2, 0.0f, inv_sqrt2});
static constexpr auto rcoeffs_nrml = CalcDirectionCoeffs({ inv_sqrt2, 0.0f, inv_sqrt2});
auto &lcoeffs = (device->mRenderMode != RenderMode::Pairwise) ? lcoeffs_nrml : lcoeffs_pw;
auto &rcoeffs = (device->mRenderMode != RenderMode::Pairwise) ? rcoeffs_nrml : rcoeffs_pw;
mOutTarget = target.Main->Buffer;
ComputePanGains(target.Main, lcoeffs.data(), Slot->Gain, mGains[0].Target);
ComputePanGains(target.Main, rcoeffs.data(), Slot->Gain, mGains[1].Target);
float rate{props->Chorus.Rate};
if(!(rate > 0.0f))
{
mLfoOffset = 0;
mLfoRange = 1;
mLfoScale = 0.0f;
mLfoDisp = 0;
}
else
{
/* Calculate LFO coefficient (number of samples per cycle). Limit the
* max range to avoid overflow when calculating the displacement.
*/
uint lfo_range{float2uint(minf(frequency/rate + 0.5f, float{INT_MAX/360 - 180}))};
mLfoOffset = mLfoOffset * lfo_range / mLfoRange;
mLfoRange = lfo_range;
switch(mWaveform)
{
case ChorusWaveform::Triangle:
mLfoScale = 4.0f / static_cast<float>(mLfoRange);
break;
case ChorusWaveform::Sinusoid:
mLfoScale = al::numbers::pi_v<float>*2.0f / static_cast<float>(mLfoRange);
break;
}
/* Calculate lfo phase displacement */
int phase{props->Chorus.Phase};
if(phase < 0) phase = 360 + phase;
mLfoDisp = (mLfoRange*static_cast<uint>(phase) + 180) / 360;
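        /* For example (a hypothetical setting, not a default): a 1.1Hz rate
         * on a 44.1kHz device gives an LFO range of ~40091 samples, so a
         * phase of 90 degrees displaces the second channel's LFO by
         * (40091*90 + 180)/360 = 10023 samples, a quarter cycle.
         */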
}
}
void ChorusState::calcTriangleDelays(const size_t todo)
{
const uint lfo_range{mLfoRange};
const float lfo_scale{mLfoScale};
const float depth{mDepth};
const int delay{mDelay};
ASSUME(lfo_range > 0);
ASSUME(todo > 0);
auto gen_lfo = [lfo_scale,depth,delay](const uint offset) -> uint
{
const float offset_norm{static_cast<float>(offset) * lfo_scale};
return static_cast<uint>(fastf2i((1.0f-std::abs(2.0f-offset_norm)) * depth) + delay);
};
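    /* A quick sanity check of the triangle shape: offset_norm sweeps 0..4
     * over one LFO cycle, so 1-|2-offset_norm| runs from -1 up to +1 and back
     * down, and the generated delay swings between (delay - depth) and
     * (delay + depth) around the base delay.
     */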
uint offset{mLfoOffset};
for(size_t i{0};i < todo;)
{
size_t rem{minz(todo-i, lfo_range-offset)};
do {
mModDelays[0][i++] = gen_lfo(offset++);
} while(--rem);
if(offset == lfo_range)
offset = 0;
}
offset = (mLfoOffset+mLfoDisp) % lfo_range;
for(size_t i{0};i < todo;)
{
size_t rem{minz(todo-i, lfo_range-offset)};
do {
mModDelays[1][i++] = gen_lfo(offset++);
} while(--rem);
if(offset == lfo_range)
offset = 0;
}
mLfoOffset = static_cast<uint>(mLfoOffset+todo) % lfo_range;
}
void ChorusState::calcSinusoidDelays(const size_t todo)
{
const uint lfo_range{mLfoRange};
const float lfo_scale{mLfoScale};
const float depth{mDepth};
const int delay{mDelay};
ASSUME(lfo_range > 0);
ASSUME(todo > 0);
auto gen_lfo = [lfo_scale,depth,delay](const uint offset) -> uint
{
const float offset_norm{static_cast<float>(offset) * lfo_scale};
return static_cast<uint>(fastf2i(std::sin(offset_norm)*depth) + delay);
};
uint offset{mLfoOffset};
for(size_t i{0};i < todo;)
{
size_t rem{minz(todo-i, lfo_range-offset)};
do {
mModDelays[0][i++] = gen_lfo(offset++);
} while(--rem);
if(offset == lfo_range)
offset = 0;
}
offset = (mLfoOffset+mLfoDisp) % lfo_range;
for(size_t i{0};i < todo;)
{
size_t rem{minz(todo-i, lfo_range-offset)};
do {
mModDelays[1][i++] = gen_lfo(offset++);
} while(--rem);
if(offset == lfo_range)
offset = 0;
}
mLfoOffset = static_cast<uint>(mLfoOffset+todo) % lfo_range;
}
void ChorusState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
const size_t bufmask{mDelayBuffer.size()-1};
const float feedback{mFeedback};
const uint avgdelay{(static_cast<uint>(mDelay) + MixerFracHalf) >> MixerFracBits};
float *RESTRICT delaybuf{mDelayBuffer.data()};
uint offset{mOffset};
if(mWaveform == ChorusWaveform::Sinusoid)
calcSinusoidDelays(samplesToDo);
else /*if(mWaveform == ChorusWaveform::Triangle)*/
calcTriangleDelays(samplesToDo);
const uint *RESTRICT ldelays{mModDelays[0]};
const uint *RESTRICT rdelays{mModDelays[1]};
float *RESTRICT lbuffer{al::assume_aligned<16>(mBuffer[0])};
float *RESTRICT rbuffer{al::assume_aligned<16>(mBuffer[1])};
for(size_t i{0u};i < samplesToDo;++i)
{
// Feed the buffer's input first (necessary for delays < 1).
delaybuf[offset&bufmask] = samplesIn[0][i];
// Tap for the left output.
uint delay{offset - (ldelays[i]>>MixerFracBits)};
float mu{static_cast<float>(ldelays[i]&MixerFracMask) * (1.0f/MixerFracOne)};
lbuffer[i] = cubic(delaybuf[(delay+1) & bufmask], delaybuf[(delay ) & bufmask],
delaybuf[(delay-1) & bufmask], delaybuf[(delay-2) & bufmask], mu);
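        /* For example, a modulated delay of 5.25 samples gives an integer
         * delay of 5 and mu = 0.25; the four taps read the samples 4, 5, 6
         * and 7 positions behind the newest write, and the cubic places the
         * result between the 5- and 6-sample taps.
         */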
// Tap for the right output.
delay = offset - (rdelays[i]>>MixerFracBits);
mu = static_cast<float>(rdelays[i]&MixerFracMask) * (1.0f/MixerFracOne);
rbuffer[i] = cubic(delaybuf[(delay+1) & bufmask], delaybuf[(delay ) & bufmask],
delaybuf[(delay-1) & bufmask], delaybuf[(delay-2) & bufmask], mu);
// Accumulate feedback from the average delay of the taps.
delaybuf[offset&bufmask] += delaybuf[(offset-avgdelay) & bufmask] * feedback;
++offset;
}
MixSamples({lbuffer, samplesToDo}, samplesOut, mGains[0].Current, mGains[0].Target,
samplesToDo, 0);
MixSamples({rbuffer, samplesToDo}, samplesOut, mGains[1].Current, mGains[1].Target,
samplesToDo, 0);
mOffset = offset;
}
struct ChorusStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new ChorusState{}}; }
};
/* Flanger is basically a chorus with a really short delay. They can both use
* the same processing functions, so piggyback flanger on the chorus functions.
*/
struct FlangerStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new ChorusState{}}; }
};
} // namespace
EffectStateFactory *ChorusStateFactory_getFactory()
{
static ChorusStateFactory ChorusFactory{};
return &ChorusFactory;
}
EffectStateFactory *FlangerStateFactory_getFactory()
{
static FlangerStateFactory FlangerFactory{};
return &FlangerFactory;
}

View File

@ -0,0 +1,201 @@
/**
* This file is part of the OpenAL Soft cross platform audio library
*
* Copyright (C) 2013 by Anis A. Hireche
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Spherical-Harmonic-Transform nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <array>
#include <cstdlib>
#include <iterator>
#include <utility>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "core/mixer/defs.h"
#include "intrusive_ptr.h"
struct ContextBase;
namespace {
#define AMP_ENVELOPE_MIN 0.5f
#define AMP_ENVELOPE_MAX 2.0f
#define ATTACK_TIME 0.1f /* 100ms to rise from min to max */
#define RELEASE_TIME 0.2f /* 200ms to drop from max to min */
struct CompressorState final : public EffectState {
/* Effect gains for each channel */
struct {
uint mTarget{InvalidChannelIndex};
float mGain{1.0f};
} mChans[MaxAmbiChannels];
/* Effect parameters */
bool mEnabled{true};
float mAttackMult{1.0f};
float mReleaseMult{1.0f};
float mEnvFollower{1.0f};
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(CompressorState)
};
void CompressorState::deviceUpdate(const DeviceBase *device, const BufferStorage*)
{
/* Number of samples to do a full attack and release (non-integer sample
* counts are okay).
*/
const float attackCount{static_cast<float>(device->Frequency) * ATTACK_TIME};
const float releaseCount{static_cast<float>(device->Frequency) * RELEASE_TIME};
/* Calculate per-sample multipliers to attack and release at the desired
* rates.
*/
mAttackMult = std::pow(AMP_ENVELOPE_MAX/AMP_ENVELOPE_MIN, 1.0f/attackCount);
mReleaseMult = std::pow(AMP_ENVELOPE_MIN/AMP_ENVELOPE_MAX, 1.0f/releaseCount);
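    /* As a worked example (assuming a 44.1kHz device): ATTACK_TIME gives
     * attackCount = 4410 samples and mAttackMult = (2.0/0.5)^(1/4410)
     * ~= 1.00031, so multiplying the envelope by it 4410 times in a row
     * raises it by exactly a factor of 4 (min to max). mReleaseMult works the
     * same way in the other direction over 8820 samples.
     */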
}
void CompressorState::update(const ContextBase*, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
mEnabled = props->Compressor.OnOff;
mOutTarget = target.Main->Buffer;
auto set_channel = [this](size_t idx, uint outchan, float outgain)
{
mChans[idx].mTarget = outchan;
mChans[idx].mGain = outgain;
};
target.Main->setAmbiMixParams(slot->Wet, slot->Gain, set_channel);
}
void CompressorState::process(const size_t samplesToDo,
const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
for(size_t base{0u};base < samplesToDo;)
{
float gains[256];
const size_t td{minz(256, samplesToDo-base)};
/* Generate the per-sample gains from the signal envelope. */
float env{mEnvFollower};
if(mEnabled)
{
for(size_t i{0u};i < td;++i)
{
/* Clamp the absolute amplitude to the defined envelope limits,
* then attack or release the envelope to reach it.
*/
const float amplitude{clampf(std::fabs(samplesIn[0][base+i]), AMP_ENVELOPE_MIN,
AMP_ENVELOPE_MAX)};
if(amplitude > env)
env = minf(env*mAttackMult, amplitude);
else if(amplitude < env)
env = maxf(env*mReleaseMult, amplitude);
/* Apply the reciprocal of the envelope to normalize the volume
* (compress the dynamic range).
*/
gains[i] = 1.0f / env;
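                /* For instance, a steady input peaking near 2.0 drives the
                 * envelope toward 2.0 and the gain toward 0.5, while a quiet
                 * passage clamped at 0.5 drives the gain toward 2.0, keeping
                 * the mixed output close to unity amplitude.
                 */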
}
}
else
{
/* Same as above, except the amplitude is forced to 1. This helps
* ensure smooth gain changes when the compressor is turned on and
* off.
*/
for(size_t i{0u};i < td;++i)
{
const float amplitude{1.0f};
if(amplitude > env)
env = minf(env*mAttackMult, amplitude);
else if(amplitude < env)
env = maxf(env*mReleaseMult, amplitude);
gains[i] = 1.0f / env;
}
}
mEnvFollower = env;
/* Now compress the signal amplitude to output. */
auto chan = std::cbegin(mChans);
for(const auto &input : samplesIn)
{
const size_t outidx{chan->mTarget};
if(outidx != InvalidChannelIndex)
{
const float *RESTRICT src{input.data() + base};
float *RESTRICT dst{samplesOut[outidx].data() + base};
const float gain{chan->mGain};
                /* Only mix channels whose gain is audible. */
                if(std::fabs(gain) > GainSilenceThreshold)
{
for(size_t i{0u};i < td;i++)
dst[i] += src[i] * gains[i] * gain;
}
}
++chan;
}
base += td;
}
}
struct CompressorStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new CompressorState{}}; }
};
} // namespace
EffectStateFactory *CompressorStateFactory_getFactory()
{
static CompressorStateFactory CompressorFactory{};
return &CompressorFactory;
}

View File

@ -0,0 +1,636 @@
#include "config.h"
#include <algorithm>
#include <array>
#include <complex>
#include <cstddef>
#include <functional>
#include <iterator>
#include <memory>
#include <stdint.h>
#include <utility>
#ifdef HAVE_SSE_INTRINSICS
#include <xmmintrin.h>
#elif defined(HAVE_NEON)
#include <arm_neon.h>
#endif
#include "albyte.h"
#include "alcomplex.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "base.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/buffer_storage.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/filters/splitter.h"
#include "core/fmt_traits.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
#include "polyphase_resampler.h"
#include "vector.h"
namespace {
/* Convolution reverb is implemented using a segmented overlap-add method. The
* impulse response is broken up into multiple segments of 128 samples, and
* each segment has an FFT applied with a 256-sample buffer (the latter half
* left silent) to get its frequency-domain response. The resulting response
* has its positive/non-mirrored frequencies saved (129 bins) in each segment.
*
* Input samples are similarly broken up into 128-sample segments, with an FFT
* applied to each new incoming segment to get its 129 bins. A history of FFT'd
* input segments is maintained, equal to the length of the impulse response.
*
* To apply the reverberation, each impulse response segment is convolved with
* its paired input segment (using complex multiplies, far cheaper than FIRs),
* accumulating into a 256-bin FFT buffer. The input history is then shifted to
* align with later impulse response segments for next time.
*
* An inverse FFT is then applied to the accumulated FFT buffer to get a 256-
* sample time-domain response for output, which is split in two halves. The
* first half is the 128-sample output, and the second half is a 128-sample
* (really, 127) delayed extension, which gets added to the output next time.
* Convolving two time-domain responses of lengths N and M results in a time-
* domain signal of length N+M-1, and this holds true regardless of the
* convolution being applied in the frequency domain, so these "overflow"
* samples need to be accounted for.
*
* To avoid a delay with gathering enough input samples to apply an FFT with,
* the first segment is applied directly in the time-domain as the samples come
* in. Once enough have been retrieved, the FFT is applied on the input and
* it's paired with the remaining (FFT'd) filter segments for processing.
*/
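/* As a concrete example (not tied to any particular device): a one-second
 * impulse response at 48kHz is 48000 samples, or ceil(48000/128) = 375
 * segments. The first 128 samples are applied directly as a time-domain FIR,
 * leaving 374 frequency-domain segments of 129 complex bins each, per
 * channel, to be convolved and accumulated every 128-sample update.
 */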
void LoadSamples(float *RESTRICT dst, const al::byte *src, const size_t srcstep, FmtType srctype,
const size_t samples) noexcept
{
#define HANDLE_FMT(T) case T: al::LoadSampleArray<T>(dst, src, srcstep, samples); break
switch(srctype)
{
HANDLE_FMT(FmtUByte);
HANDLE_FMT(FmtShort);
HANDLE_FMT(FmtFloat);
HANDLE_FMT(FmtDouble);
HANDLE_FMT(FmtMulaw);
HANDLE_FMT(FmtAlaw);
/* FIXME: Handle ADPCM decoding here. */
case FmtIMA4:
case FmtMSADPCM:
std::fill_n(dst, samples, 0.0f);
break;
}
#undef HANDLE_FMT
}
inline auto& GetAmbiScales(AmbiScaling scaletype) noexcept
{
switch(scaletype)
{
case AmbiScaling::FuMa: return AmbiScale::FromFuMa();
case AmbiScaling::SN3D: return AmbiScale::FromSN3D();
case AmbiScaling::UHJ: return AmbiScale::FromUHJ();
case AmbiScaling::N3D: break;
}
return AmbiScale::FromN3D();
}
inline auto& GetAmbiLayout(AmbiLayout layouttype) noexcept
{
if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa();
return AmbiIndex::FromACN();
}
inline auto& GetAmbi2DLayout(AmbiLayout layouttype) noexcept
{
if(layouttype == AmbiLayout::FuMa) return AmbiIndex::FromFuMa2D();
return AmbiIndex::FromACN2D();
}
struct ChanMap {
Channel channel;
float angle;
float elevation;
};
constexpr float Deg2Rad(float x) noexcept
{ return static_cast<float>(al::numbers::pi / 180.0 * x); }
using complex_f = std::complex<float>;
constexpr size_t ConvolveUpdateSize{256};
constexpr size_t ConvolveUpdateSamples{ConvolveUpdateSize / 2};
void apply_fir(al::span<float> dst, const float *RESTRICT src, const float *RESTRICT filter)
{
#ifdef HAVE_SSE_INTRINSICS
for(float &output : dst)
{
__m128 r4{_mm_setzero_ps()};
for(size_t j{0};j < ConvolveUpdateSamples;j+=4)
{
const __m128 coeffs{_mm_load_ps(&filter[j])};
const __m128 s{_mm_loadu_ps(&src[j])};
r4 = _mm_add_ps(r4, _mm_mul_ps(s, coeffs));
}
r4 = _mm_add_ps(r4, _mm_shuffle_ps(r4, r4, _MM_SHUFFLE(0, 1, 2, 3)));
r4 = _mm_add_ps(r4, _mm_movehl_ps(r4, r4));
output = _mm_cvtss_f32(r4);
++src;
}
#elif defined(HAVE_NEON)
for(float &output : dst)
{
float32x4_t r4{vdupq_n_f32(0.0f)};
for(size_t j{0};j < ConvolveUpdateSamples;j+=4)
r4 = vmlaq_f32(r4, vld1q_f32(&src[j]), vld1q_f32(&filter[j]));
r4 = vaddq_f32(r4, vrev64q_f32(r4));
output = vget_lane_f32(vadd_f32(vget_low_f32(r4), vget_high_f32(r4)), 0);
++src;
}
#else
for(float &output : dst)
{
float ret{0.0f};
for(size_t j{0};j < ConvolveUpdateSamples;++j)
ret += src[j] * filter[j];
output = ret;
++src;
}
#endif
}
struct ConvolutionState final : public EffectState {
FmtChannels mChannels{};
AmbiLayout mAmbiLayout{};
AmbiScaling mAmbiScaling{};
uint mAmbiOrder{};
size_t mFifoPos{0};
std::array<float,ConvolveUpdateSamples*2> mInput{};
al::vector<std::array<float,ConvolveUpdateSamples>,16> mFilter;
al::vector<std::array<float,ConvolveUpdateSamples*2>,16> mOutput;
alignas(16) std::array<complex_f,ConvolveUpdateSize> mFftBuffer{};
size_t mCurrentSegment{0};
size_t mNumConvolveSegs{0};
struct ChannelData {
alignas(16) FloatBufferLine mBuffer{};
float mHfScale{}, mLfScale{};
BandSplitter mFilter{};
float Current[MAX_OUTPUT_CHANNELS]{};
float Target[MAX_OUTPUT_CHANNELS]{};
};
using ChannelDataArray = al::FlexArray<ChannelData>;
std::unique_ptr<ChannelDataArray> mChans;
std::unique_ptr<complex_f[]> mComplexData;
ConvolutionState() = default;
~ConvolutionState() override = default;
void NormalMix(const al::span<FloatBufferLine> samplesOut, const size_t samplesToDo);
void UpsampleMix(const al::span<FloatBufferLine> samplesOut, const size_t samplesToDo);
void (ConvolutionState::*mMix)(const al::span<FloatBufferLine>,const size_t)
{&ConvolutionState::NormalMix};
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(ConvolutionState)
};
void ConvolutionState::NormalMix(const al::span<FloatBufferLine> samplesOut,
const size_t samplesToDo)
{
for(auto &chan : *mChans)
MixSamples({chan.mBuffer.data(), samplesToDo}, samplesOut, chan.Current, chan.Target,
samplesToDo, 0);
}
void ConvolutionState::UpsampleMix(const al::span<FloatBufferLine> samplesOut,
const size_t samplesToDo)
{
for(auto &chan : *mChans)
{
const al::span<float> src{chan.mBuffer.data(), samplesToDo};
chan.mFilter.processScale(src, chan.mHfScale, chan.mLfScale);
MixSamples(src, samplesOut, chan.Current, chan.Target, samplesToDo, 0);
}
}
void ConvolutionState::deviceUpdate(const DeviceBase *device, const BufferStorage *buffer)
{
using UhjDecoderType = UhjDecoder<512>;
static constexpr auto DecoderPadding = UhjDecoderType::sInputPadding;
constexpr uint MaxConvolveAmbiOrder{1u};
mFifoPos = 0;
mInput.fill(0.0f);
decltype(mFilter){}.swap(mFilter);
decltype(mOutput){}.swap(mOutput);
mFftBuffer.fill(complex_f{});
mCurrentSegment = 0;
mNumConvolveSegs = 0;
mChans = nullptr;
mComplexData = nullptr;
/* An empty buffer doesn't need a convolution filter. */
if(!buffer || buffer->mSampleLen < 1) return;
mChannels = buffer->mChannels;
mAmbiLayout = IsUHJ(mChannels) ? AmbiLayout::FuMa : buffer->mAmbiLayout;
mAmbiScaling = IsUHJ(mChannels) ? AmbiScaling::UHJ : buffer->mAmbiScaling;
mAmbiOrder = minu(buffer->mAmbiOrder, MaxConvolveAmbiOrder);
constexpr size_t m{ConvolveUpdateSize/2 + 1};
const auto bytesPerSample = BytesFromFmt(buffer->mType);
const auto realChannels = buffer->channelsFromFmt();
const auto numChannels = (mChannels == FmtUHJ2) ? 3u : ChannelsFromFmt(mChannels, mAmbiOrder);
mChans = ChannelDataArray::Create(numChannels);
/* The impulse response needs to have the same sample rate as the input and
* output. The bsinc24 resampler is decent, but there is high-frequency
* attenuation that some people may be able to pick up on. Since this is
* called very infrequently, go ahead and use the polyphase resampler.
*/
PPhaseResampler resampler;
if(device->Frequency != buffer->mSampleRate)
resampler.init(buffer->mSampleRate, device->Frequency);
const auto resampledCount = static_cast<uint>(
(uint64_t{buffer->mSampleLen}*device->Frequency+(buffer->mSampleRate-1)) /
buffer->mSampleRate);
const BandSplitter splitter{device->mXOverFreq / static_cast<float>(device->Frequency)};
for(auto &e : *mChans)
e.mFilter = splitter;
mFilter.resize(numChannels, {});
mOutput.resize(numChannels, {});
/* Calculate the number of segments needed to hold the impulse response and
* the input history (rounded up), and allocate them. Exclude one segment
* which gets applied as a time-domain FIR filter. Make sure at least one
* segment is allocated to simplify handling.
*/
mNumConvolveSegs = (resampledCount+(ConvolveUpdateSamples-1)) / ConvolveUpdateSamples;
mNumConvolveSegs = maxz(mNumConvolveSegs, 2) - 1;
const size_t complex_length{mNumConvolveSegs * m * (numChannels+1)};
mComplexData = std::make_unique<complex_f[]>(complex_length);
std::fill_n(mComplexData.get(), complex_length, complex_f{});
/* Load the samples from the buffer. */
const size_t srclinelength{RoundUp(buffer->mSampleLen+DecoderPadding, 16)};
auto srcsamples = std::make_unique<float[]>(srclinelength * numChannels);
std::fill_n(srcsamples.get(), srclinelength * numChannels, 0.0f);
for(size_t c{0};c < numChannels && c < realChannels;++c)
LoadSamples(srcsamples.get() + srclinelength*c, buffer->mData.data() + bytesPerSample*c,
realChannels, buffer->mType, buffer->mSampleLen);
if(IsUHJ(mChannels))
{
auto decoder = std::make_unique<UhjDecoderType>();
std::array<float*,4> samples{};
for(size_t c{0};c < numChannels;++c)
samples[c] = srcsamples.get() + srclinelength*c;
decoder->decode({samples.data(), numChannels}, buffer->mSampleLen, buffer->mSampleLen);
}
auto ressamples = std::make_unique<double[]>(buffer->mSampleLen +
(resampler ? resampledCount : 0));
complex_f *filteriter = mComplexData.get() + mNumConvolveSegs*m;
for(size_t c{0};c < numChannels;++c)
{
/* Resample to match the device. */
if(resampler)
{
std::copy_n(srcsamples.get() + srclinelength*c, buffer->mSampleLen,
ressamples.get() + resampledCount);
resampler.process(buffer->mSampleLen, ressamples.get()+resampledCount,
resampledCount, ressamples.get());
}
else
std::copy_n(srcsamples.get() + srclinelength*c, buffer->mSampleLen, ressamples.get());
/* Store the first segment's samples in reverse in the time-domain, to
* apply as a FIR filter.
*/
const size_t first_size{minz(resampledCount, ConvolveUpdateSamples)};
std::transform(ressamples.get(), ressamples.get()+first_size, mFilter[c].rbegin(),
[](const double d) noexcept -> float { return static_cast<float>(d); });
auto fftbuffer = std::vector<std::complex<double>>(ConvolveUpdateSize);
size_t done{first_size};
for(size_t s{0};s < mNumConvolveSegs;++s)
{
const size_t todo{minz(resampledCount-done, ConvolveUpdateSamples)};
auto iter = std::copy_n(&ressamples[done], todo, fftbuffer.begin());
done += todo;
std::fill(iter, fftbuffer.end(), std::complex<double>{});
forward_fft(al::as_span(fftbuffer));
filteriter = std::copy_n(fftbuffer.cbegin(), m, filteriter);
}
}
}
void ConvolutionState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps* /*props*/, const EffectTarget target)
{
/* NOTE: Stereo and Rear are slightly different from normal mixing (as
* defined in alu.cpp). These are 45 degrees from center, rather than the
* 30 degrees used there.
*
* TODO: LFE is not mixed to output. This will require each buffer channel
* to have its own output target since the main mixing buffer won't have an
* LFE channel (due to being B-Format).
*/
static constexpr ChanMap MonoMap[1]{
{ FrontCenter, 0.0f, 0.0f }
}, StereoMap[2]{
{ FrontLeft, Deg2Rad(-45.0f), Deg2Rad(0.0f) },
{ FrontRight, Deg2Rad( 45.0f), Deg2Rad(0.0f) }
}, RearMap[2]{
{ BackLeft, Deg2Rad(-135.0f), Deg2Rad(0.0f) },
{ BackRight, Deg2Rad( 135.0f), Deg2Rad(0.0f) }
}, QuadMap[4]{
{ FrontLeft, Deg2Rad( -45.0f), Deg2Rad(0.0f) },
{ FrontRight, Deg2Rad( 45.0f), Deg2Rad(0.0f) },
{ BackLeft, Deg2Rad(-135.0f), Deg2Rad(0.0f) },
{ BackRight, Deg2Rad( 135.0f), Deg2Rad(0.0f) }
}, X51Map[6]{
{ FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
{ FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
{ FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
{ LFE, 0.0f, 0.0f },
{ SideLeft, Deg2Rad(-110.0f), Deg2Rad(0.0f) },
{ SideRight, Deg2Rad( 110.0f), Deg2Rad(0.0f) }
}, X61Map[7]{
{ FrontLeft, Deg2Rad(-30.0f), Deg2Rad(0.0f) },
{ FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
{ FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
{ LFE, 0.0f, 0.0f },
{ BackCenter, Deg2Rad(180.0f), Deg2Rad(0.0f) },
{ SideLeft, Deg2Rad(-90.0f), Deg2Rad(0.0f) },
{ SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
}, X71Map[8]{
{ FrontLeft, Deg2Rad( -30.0f), Deg2Rad(0.0f) },
{ FrontRight, Deg2Rad( 30.0f), Deg2Rad(0.0f) },
{ FrontCenter, Deg2Rad( 0.0f), Deg2Rad(0.0f) },
{ LFE, 0.0f, 0.0f },
{ BackLeft, Deg2Rad(-150.0f), Deg2Rad(0.0f) },
{ BackRight, Deg2Rad( 150.0f), Deg2Rad(0.0f) },
{ SideLeft, Deg2Rad( -90.0f), Deg2Rad(0.0f) },
{ SideRight, Deg2Rad( 90.0f), Deg2Rad(0.0f) }
};
if(mNumConvolveSegs < 1) UNLIKELY
return;
mMix = &ConvolutionState::NormalMix;
for(auto &chan : *mChans)
std::fill(std::begin(chan.Target), std::end(chan.Target), 0.0f);
const float gain{slot->Gain};
if(IsAmbisonic(mChannels))
{
DeviceBase *device{context->mDevice};
if(mChannels == FmtUHJ2 && !device->mUhjEncoder)
{
mMix = &ConvolutionState::UpsampleMix;
(*mChans)[0].mHfScale = 1.0f;
(*mChans)[0].mLfScale = DecoderBase::sWLFScale;
(*mChans)[1].mHfScale = 1.0f;
(*mChans)[1].mLfScale = DecoderBase::sXYLFScale;
(*mChans)[2].mHfScale = 1.0f;
(*mChans)[2].mLfScale = DecoderBase::sXYLFScale;
}
else if(device->mAmbiOrder > mAmbiOrder)
{
mMix = &ConvolutionState::UpsampleMix;
const auto scales = AmbiScale::GetHFOrderScales(mAmbiOrder, device->mAmbiOrder,
device->m2DMixing);
(*mChans)[0].mHfScale = scales[0];
(*mChans)[0].mLfScale = 1.0f;
for(size_t i{1};i < mChans->size();++i)
{
(*mChans)[i].mHfScale = scales[1];
(*mChans)[i].mLfScale = 1.0f;
}
}
mOutTarget = target.Main->Buffer;
auto&& scales = GetAmbiScales(mAmbiScaling);
const uint8_t *index_map{Is2DAmbisonic(mChannels) ?
GetAmbi2DLayout(mAmbiLayout).data() :
GetAmbiLayout(mAmbiLayout).data()};
std::array<float,MaxAmbiChannels> coeffs{};
for(size_t c{0u};c < mChans->size();++c)
{
const size_t acn{index_map[c]};
coeffs[acn] = scales[acn];
ComputePanGains(target.Main, coeffs.data(), gain, (*mChans)[c].Target);
coeffs[acn] = 0.0f;
}
}
else
{
DeviceBase *device{context->mDevice};
al::span<const ChanMap> chanmap{};
switch(mChannels)
{
case FmtMono: chanmap = MonoMap; break;
case FmtSuperStereo:
case FmtStereo: chanmap = StereoMap; break;
case FmtRear: chanmap = RearMap; break;
case FmtQuad: chanmap = QuadMap; break;
case FmtX51: chanmap = X51Map; break;
case FmtX61: chanmap = X61Map; break;
case FmtX71: chanmap = X71Map; break;
case FmtBFormat2D:
case FmtBFormat3D:
case FmtUHJ2:
case FmtUHJ3:
case FmtUHJ4:
break;
}
mOutTarget = target.Main->Buffer;
if(device->mRenderMode == RenderMode::Pairwise)
{
auto ScaleAzimuthFront = [](float azimuth, float scale) -> float
{
constexpr float half_pi{al::numbers::pi_v<float>*0.5f};
const float abs_azi{std::fabs(azimuth)};
if(!(abs_azi >= half_pi))
return std::copysign(minf(abs_azi*scale, half_pi), azimuth);
return azimuth;
};
for(size_t i{0};i < chanmap.size();++i)
{
if(chanmap[i].channel == LFE) continue;
const auto coeffs = CalcAngleCoeffs(ScaleAzimuthFront(chanmap[i].angle, 2.0f),
chanmap[i].elevation, 0.0f);
ComputePanGains(target.Main, coeffs.data(), gain, (*mChans)[i].Target);
}
}
else for(size_t i{0};i < chanmap.size();++i)
{
if(chanmap[i].channel == LFE) continue;
const auto coeffs = CalcAngleCoeffs(chanmap[i].angle, chanmap[i].elevation, 0.0f);
ComputePanGains(target.Main, coeffs.data(), gain, (*mChans)[i].Target);
}
}
}
void ConvolutionState::process(const size_t samplesToDo,
const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
if(mNumConvolveSegs < 1) UNLIKELY
return;
constexpr size_t m{ConvolveUpdateSize/2 + 1};
size_t curseg{mCurrentSegment};
auto &chans = *mChans;
for(size_t base{0u};base < samplesToDo;)
{
const size_t todo{minz(ConvolveUpdateSamples-mFifoPos, samplesToDo-base)};
std::copy_n(samplesIn[0].begin() + base, todo,
mInput.begin()+ConvolveUpdateSamples+mFifoPos);
/* Apply the FIR for the newly retrieved input samples, and combine it
* with the inverse FFT'd output samples.
*/
for(size_t c{0};c < chans.size();++c)
{
auto buf_iter = chans[c].mBuffer.begin() + base;
apply_fir({buf_iter, todo}, mInput.data()+1 + mFifoPos, mFilter[c].data());
auto fifo_iter = mOutput[c].begin() + mFifoPos;
std::transform(fifo_iter, fifo_iter+todo, buf_iter, buf_iter, std::plus<>{});
}
mFifoPos += todo;
base += todo;
/* Check whether the input buffer is filled with new samples. */
if(mFifoPos < ConvolveUpdateSamples) break;
mFifoPos = 0;
/* Move the newest input to the front for the next iteration's history. */
std::copy(mInput.cbegin()+ConvolveUpdateSamples, mInput.cend(), mInput.begin());
/* Calculate the frequency domain response and add the relevant
* frequency bins to the FFT history.
*/
auto fftiter = std::copy_n(mInput.cbegin(), ConvolveUpdateSamples, mFftBuffer.begin());
std::fill(fftiter, mFftBuffer.end(), complex_f{});
forward_fft(al::as_span(mFftBuffer));
std::copy_n(mFftBuffer.cbegin(), m, &mComplexData[curseg*m]);
const complex_f *RESTRICT filter{mComplexData.get() + mNumConvolveSegs*m};
for(size_t c{0};c < chans.size();++c)
{
std::fill_n(mFftBuffer.begin(), m, complex_f{});
/* Convolve each input segment with its IR filter counterpart
* (aligned in time).
*/
const complex_f *RESTRICT input{&mComplexData[curseg*m]};
for(size_t s{curseg};s < mNumConvolveSegs;++s)
{
for(size_t i{0};i < m;++i,++input,++filter)
mFftBuffer[i] += *input * *filter;
}
input = mComplexData.get();
for(size_t s{0};s < curseg;++s)
{
for(size_t i{0};i < m;++i,++input,++filter)
mFftBuffer[i] += *input * *filter;
}
/* Reconstruct the mirrored/negative frequencies to do a proper
* inverse FFT.
*/
for(size_t i{m};i < ConvolveUpdateSize;++i)
mFftBuffer[i] = std::conj(mFftBuffer[ConvolveUpdateSize-i]);
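            /* (Since the time-domain result must be real, bin 256-i is the
             * complex conjugate of bin i; e.g. bin 200 mirrors bin 56, so
             * only the first m=129 bins ever need to be stored.)
             */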
/* Apply iFFT to get the 256 (really 255) samples for output. The
* 128 output samples are combined with the last output's 127
* second-half samples (and this output's second half is
* subsequently saved for next time).
*/
inverse_fft(al::as_span(mFftBuffer));
/* The iFFT'd response is scaled up by the number of bins, so apply
* the inverse to normalize the output.
*/
for(size_t i{0};i < ConvolveUpdateSamples;++i)
mOutput[c][i] =
(mFftBuffer[i].real()+mOutput[c][ConvolveUpdateSamples+i]) *
(1.0f/float{ConvolveUpdateSize});
for(size_t i{0};i < ConvolveUpdateSamples;++i)
mOutput[c][ConvolveUpdateSamples+i] = mFftBuffer[ConvolveUpdateSamples+i].real();
}
/* Shift the input history. */
curseg = curseg ? (curseg-1) : (mNumConvolveSegs-1);
}
mCurrentSegment = curseg;
/* Finally, mix to the output. */
(this->*mMix)(samplesOut, samplesToDo);
}
struct ConvolutionStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new ConvolutionState{}}; }
};
} // namespace
EffectStateFactory *ConvolutionStateFactory_getFactory()
{
static ConvolutionStateFactory ConvolutionFactory{};
return &ConvolutionFactory;
}

View File

@ -0,0 +1,123 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2011 by Chris Robinson.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <iterator>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alspan.h"
#include "core/bufferline.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
struct ContextBase;
namespace {
using uint = unsigned int;
struct DedicatedState final : public EffectState {
/* The "dedicated" effect can output to the real output, so should have
* gains for all possible output channels and not just the main ambisonic
* buffer.
*/
float mCurrentGains[MAX_OUTPUT_CHANNELS];
float mTargetGains[MAX_OUTPUT_CHANNELS];
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(DedicatedState)
};
void DedicatedState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
std::fill(std::begin(mCurrentGains), std::end(mCurrentGains), 0.0f);
}
void DedicatedState::update(const ContextBase*, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
std::fill(std::begin(mTargetGains), std::end(mTargetGains), 0.0f);
const float Gain{slot->Gain * props->Dedicated.Gain};
if(slot->EffectType == EffectSlotType::DedicatedLFE)
{
const uint idx{target.RealOut ? target.RealOut->ChannelIndex[LFE] : InvalidChannelIndex};
if(idx != InvalidChannelIndex)
{
mOutTarget = target.RealOut->Buffer;
mTargetGains[idx] = Gain;
}
}
else if(slot->EffectType == EffectSlotType::DedicatedDialog)
{
/* Dialog goes to the front-center speaker if it exists, otherwise it
* plays from the front-center location. */
const uint idx{target.RealOut ? target.RealOut->ChannelIndex[FrontCenter]
: InvalidChannelIndex};
if(idx != InvalidChannelIndex)
{
mOutTarget = target.RealOut->Buffer;
mTargetGains[idx] = Gain;
}
else
{
static constexpr auto coeffs = CalcDirectionCoeffs({0.0f, 0.0f, -1.0f});
mOutTarget = target.Main->Buffer;
ComputePanGains(target.Main, coeffs.data(), Gain, mTargetGains);
}
}
}
void DedicatedState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
MixSamples({samplesIn[0].data(), samplesToDo}, samplesOut, mCurrentGains, mTargetGains,
samplesToDo, 0);
}
struct DedicatedStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new DedicatedState{}}; }
};
} // namespace
EffectStateFactory *DedicatedStateFactory_getFactory()
{
static DedicatedStateFactory DedicatedFactory{};
return &DedicatedFactory;
}

View File

@ -0,0 +1,178 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2013 by Mike Gorchak
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <iterator>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/filters/biquad.h"
#include "core/mixer.h"
#include "core/mixer/defs.h"
#include "intrusive_ptr.h"
namespace {
struct DistortionState final : public EffectState {
/* Effect gains for each channel */
float mGain[MaxAmbiChannels]{};
/* Effect parameters */
BiquadFilter mLowpass;
BiquadFilter mBandpass;
float mAttenuation{};
float mEdgeCoeff{};
alignas(16) float mBuffer[2][BufferLineSize]{};
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(DistortionState)
};
void DistortionState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
mLowpass.clear();
mBandpass.clear();
}
void DistortionState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
/* Store waveshaper edge settings. */
const float edge{minf(std::sin(al::numbers::pi_v<float>*0.5f * props->Distortion.Edge),
0.99f)};
mEdgeCoeff = 2.0f * edge / (1.0f-edge);
float cutoff{props->Distortion.LowpassCutoff};
/* Bandwidth value is constant in octaves. */
float bandwidth{(cutoff / 2.0f) / (cutoff * 0.67f)};
/* Divide normalized frequency by the amount of oversampling done during
* processing.
*/
auto frequency = static_cast<float>(device->Frequency);
mLowpass.setParamsFromBandwidth(BiquadType::LowPass, cutoff/frequency/4.0f, 1.0f, bandwidth);
cutoff = props->Distortion.EQCenter;
/* Convert bandwidth in Hz to octaves. */
bandwidth = props->Distortion.EQBandwidth / (cutoff * 0.67f);
mBandpass.setParamsFromBandwidth(BiquadType::BandPass, cutoff/frequency/4.0f, 1.0f, bandwidth);
static constexpr auto coeffs = CalcDirectionCoeffs({0.0f, 0.0f, -1.0f});
mOutTarget = target.Main->Buffer;
ComputePanGains(target.Main, coeffs.data(), slot->Gain*props->Distortion.Gain, mGain);
}
void DistortionState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
const float fc{mEdgeCoeff};
for(size_t base{0u};base < samplesToDo;)
{
        /* Perform 4x oversampling to avoid aliasing. Oversampling greatly
         * improves distortion quality, and allows the lowpass and bandpass
         * filters to be implemented at high frequencies where classic IIR
         * filters would become unstable.
         */
size_t todo{minz(BufferLineSize, (samplesToDo-base) * 4)};
/* Fill oversample buffer using zero stuffing. Multiply the sample by
* the amount of oversampling to maintain the signal's power.
*/
for(size_t i{0u};i < todo;i++)
mBuffer[0][i] = !(i&3) ? samplesIn[0][(i>>2)+base] * 4.0f : 0.0f;
        /* First step: lowpass filter the original signal. This also handles
         * the buffer interpolation and the lowpass cutoff needed for
         * oversampling (which is conveniently the first step of the
         * distortion), combining three operations into one.
         */
mLowpass.process({mBuffer[0], todo}, mBuffer[1]);
        /* Second step: distort the signal with a waveshaper function to
         * emulate the signal processing of an overdriven tube. Three
         * waveshaping passes reshape the waveform without a separate
         * boost/clip/attenuation stage.
         */
*/
auto proc_sample = [fc](float smp) -> float
{
smp = (1.0f + fc) * smp/(1.0f + fc*std::abs(smp));
smp = (1.0f + fc) * smp/(1.0f + fc*std::abs(smp)) * -1.0f;
smp = (1.0f + fc) * smp/(1.0f + fc*std::abs(smp));
return smp;
};
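        /* A rough worked example (for an assumed Edge setting of 0.2):
         * edge = sin(pi/2 * 0.2) ~= 0.309, so fc ~= 0.894. One waveshaping
         * pass then maps a full-scale sample of 1.0 to 1.0 but pushes a
         * mid-level 0.5 up to ~0.65, which is the soft-saturation shape the
         * three passes build on.
         */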
std::transform(std::begin(mBuffer[1]), std::begin(mBuffer[1])+todo, std::begin(mBuffer[0]),
proc_sample);
        /* Third step: bandpass filter the distorted signal. */
mBandpass.process({mBuffer[0], todo}, mBuffer[1]);
todo >>= 2;
const float *outgains{mGain};
for(FloatBufferLine &output : samplesOut)
{
            /* Fourth and final step: attenuate and decimate, storing only one
             * sample out of every four.
             */
const float gain{*(outgains++)};
if(!(std::fabs(gain) > GainSilenceThreshold))
continue;
for(size_t i{0u};i < todo;i++)
output[base+i] += gain * mBuffer[1][i*4];
}
base += todo;
}
}
struct DistortionStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new DistortionState{}}; }
};
} // namespace
EffectStateFactory *DistortionStateFactory_getFactory()
{
static DistortionStateFactory DistortionFactory{};
return &DistortionFactory;
}

View File

@ -0,0 +1,180 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2009 by Chris Robinson.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <iterator>
#include <tuple>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/filters/biquad.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
#include "opthelpers.h"
#include "vector.h"
namespace {
using uint = unsigned int;
constexpr float LowpassFreqRef{5000.0f};
struct EchoState final : public EffectState {
al::vector<float,16> mSampleBuffer;
    // The echo has two taps. Each tap's delay is the number of samples
    // behind the current write offset.
struct {
size_t delay{0u};
} mTap[2];
size_t mOffset{0u};
/* The panning gains for the two taps */
struct {
float Current[MaxAmbiChannels]{};
float Target[MaxAmbiChannels]{};
} mGains[2];
BiquadFilter mFilter;
float mFeedGain{0.0f};
alignas(16) float mTempBuffer[2][BufferLineSize];
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(EchoState)
};
void EchoState::deviceUpdate(const DeviceBase *Device, const BufferStorage*)
{
const auto frequency = static_cast<float>(Device->Frequency);
// Use the next power of 2 for the buffer length, so the tap offsets can be
// wrapped using a mask instead of a modulo
const uint maxlen{NextPowerOf2(float2uint(EchoMaxDelay*frequency + 0.5f) +
float2uint(EchoMaxLRDelay*frequency + 0.5f))};
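    /* For instance (assuming the standard EFX maximums of 0.207s delay and
     * 0.404s LR delay), a 48kHz device needs 9936 + 19392 = 29328 samples,
     * which rounds up to a 32768-sample buffer so offsets can wrap with
     * "& (maxlen-1)".
     */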
if(maxlen != mSampleBuffer.size())
al::vector<float,16>(maxlen).swap(mSampleBuffer);
std::fill(mSampleBuffer.begin(), mSampleBuffer.end(), 0.0f);
for(auto &e : mGains)
{
std::fill(std::begin(e.Current), std::end(e.Current), 0.0f);
std::fill(std::begin(e.Target), std::end(e.Target), 0.0f);
}
}
void EchoState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
const auto frequency = static_cast<float>(device->Frequency);
mTap[0].delay = maxu(float2uint(props->Echo.Delay*frequency + 0.5f), 1);
mTap[1].delay = float2uint(props->Echo.LRDelay*frequency + 0.5f) + mTap[0].delay;
const float gainhf{maxf(1.0f - props->Echo.Damping, 0.0625f)}; /* Limit -24dB */
mFilter.setParamsFromSlope(BiquadType::HighShelf, LowpassFreqRef/frequency, gainhf, 1.0f);
mFeedGain = props->Echo.Feedback;
/* Convert echo spread (where 0 = center, +/-1 = sides) to angle. */
const float angle{std::asin(props->Echo.Spread)};
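    /* E.g. a spread of 1.0 gives asin(1) = pi/2, panning the two taps hard
     * left and hard right, while a spread of 0 collapses both taps to the
     * front center.
     */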
const auto coeffs0 = CalcAngleCoeffs(-angle, 0.0f, 0.0f);
const auto coeffs1 = CalcAngleCoeffs( angle, 0.0f, 0.0f);
mOutTarget = target.Main->Buffer;
ComputePanGains(target.Main, coeffs0.data(), slot->Gain, mGains[0].Target);
ComputePanGains(target.Main, coeffs1.data(), slot->Gain, mGains[1].Target);
}
void EchoState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
const size_t mask{mSampleBuffer.size()-1};
float *RESTRICT delaybuf{mSampleBuffer.data()};
size_t offset{mOffset};
size_t tap1{offset - mTap[0].delay};
size_t tap2{offset - mTap[1].delay};
float z1, z2;
ASSUME(samplesToDo > 0);
const BiquadFilter filter{mFilter};
std::tie(z1, z2) = mFilter.getComponents();
for(size_t i{0u};i < samplesToDo;)
{
offset &= mask;
tap1 &= mask;
tap2 &= mask;
size_t td{minz(mask+1 - maxz(offset, maxz(tap1, tap2)), samplesToDo-i)};
do {
/* Feed the delay buffer's input first. */
delaybuf[offset] = samplesIn[0][i];
/* Get delayed output from the first and second taps. Use the
* second tap for feedback.
*/
mTempBuffer[0][i] = delaybuf[tap1++];
mTempBuffer[1][i] = delaybuf[tap2++];
const float feedb{mTempBuffer[1][i++]};
/* Add feedback to the delay buffer with damping and attenuation. */
delaybuf[offset++] += filter.processOne(feedb, z1, z2) * mFeedGain;
} while(--td);
}
mFilter.setComponents(z1, z2);
mOffset = offset;
for(size_t c{0};c < 2;c++)
MixSamples({mTempBuffer[c], samplesToDo}, samplesOut, mGains[c].Current, mGains[c].Target,
samplesToDo, 0);
}
struct EchoStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new EchoState{}}; }
};
} // namespace
EffectStateFactory *EchoStateFactory_getFactory()
{
static EchoStateFactory EchoFactory{};
return &EchoFactory;
}

View File

@ -0,0 +1,204 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2013 by Mike Gorchak
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <functional>
#include <iterator>
#include <utility>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alspan.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/filters/biquad.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
namespace {
/* The document "Effects Extension Guide.pdf" says that low and high *
* frequencies are cutoff frequencies. This is not fully correct, they *
* are corner frequencies for low and high shelf filters. If they were *
* just cutoff frequencies, there would be no need in cutoff frequency *
* gains, which are present. Documentation for "Creative Proteus X2" *
* software describes 4-band equalizer functionality in a much better *
* way. This equalizer seems to be a predecessor of OpenAL 4-band *
* equalizer. With low and high shelf filters we are able to cutoff *
* frequencies below and/or above corner frequencies using attenuation *
* gains (below 1.0) and amplify all low and/or high frequencies using *
* gains above 1.0. *
 *
 *   Band       Filter type   Frequency parameter   Range
 *   --------   -----------   -------------------   ----------------
 *   Low        low-shelf     corner frequency      50Hz..800Hz
 *   Low Mid    peaking       center frequency      200Hz..3000Hz
 *   High Mid   peaking       center frequency      1000Hz..8000Hz
 *   High       high-shelf    corner frequency      4000Hz..16000Hz
 *
 *   Each band can either boost or cut the signal around its corner/center
 *   frequency.
 *
 * Gains vary from 0.126 up to 7.943, i.e. from -18dB attenuation up to +18dB
 * amplification. Bandwidth varies from 0.01 up to 1.0 octaves for the two
 * mid bands.
 *
 * The implementation is based on the "Cookbook formulae for audio EQ biquad
 * filter coefficients" by Robert Bristow-Johnson:
 * http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt */
struct EqualizerState final : public EffectState {
struct {
uint mTargetChannel{InvalidChannelIndex};
/* Effect parameters */
BiquadFilter mFilter[4];
/* Effect gains for each channel */
float mCurrentGain{};
float mTargetGain{};
} mChans[MaxAmbiChannels];
alignas(16) FloatBufferLine mSampleBuffer{};
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(EqualizerState)
};
void EqualizerState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
for(auto &e : mChans)
{
e.mTargetChannel = InvalidChannelIndex;
std::for_each(std::begin(e.mFilter), std::end(e.mFilter),
std::mem_fn(&BiquadFilter::clear));
e.mCurrentGain = 0.0f;
}
}
void EqualizerState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
auto frequency = static_cast<float>(device->Frequency);
float gain, f0norm;
    /* Calculate coefficients for each type of filter. Note that the shelf
* and peaking filters' gain is for the centerpoint of the transition band,
* while the effect property gains are for the shelf/peak itself. So the
* property gains need their dB halved (sqrt of linear gain) for the
* shelf/peak to reach the provided gain.
*/
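    /* For example, a LowGain of 4.0 (+12dB) becomes sqrt(4.0) = 2.0 (+6dB)
     * at the shelf's corner, which is the halfway point of the transition
     * band, so the shelf itself still settles at the requested +12dB.
     */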
gain = std::sqrt(props->Equalizer.LowGain);
f0norm = props->Equalizer.LowCutoff / frequency;
mChans[0].mFilter[0].setParamsFromSlope(BiquadType::LowShelf, f0norm, gain, 0.75f);
gain = std::sqrt(props->Equalizer.Mid1Gain);
f0norm = props->Equalizer.Mid1Center / frequency;
mChans[0].mFilter[1].setParamsFromBandwidth(BiquadType::Peaking, f0norm, gain,
props->Equalizer.Mid1Width);
gain = std::sqrt(props->Equalizer.Mid2Gain);
f0norm = props->Equalizer.Mid2Center / frequency;
mChans[0].mFilter[2].setParamsFromBandwidth(BiquadType::Peaking, f0norm, gain,
props->Equalizer.Mid2Width);
gain = std::sqrt(props->Equalizer.HighGain);
f0norm = props->Equalizer.HighCutoff / frequency;
mChans[0].mFilter[3].setParamsFromSlope(BiquadType::HighShelf, f0norm, gain, 0.75f);
/* Copy the filter coefficients for the other input channels. */
for(size_t i{1u};i < slot->Wet.Buffer.size();++i)
{
mChans[i].mFilter[0].copyParamsFrom(mChans[0].mFilter[0]);
mChans[i].mFilter[1].copyParamsFrom(mChans[0].mFilter[1]);
mChans[i].mFilter[2].copyParamsFrom(mChans[0].mFilter[2]);
mChans[i].mFilter[3].copyParamsFrom(mChans[0].mFilter[3]);
}
mOutTarget = target.Main->Buffer;
auto set_channel = [this](size_t idx, uint outchan, float outgain)
{
mChans[idx].mTargetChannel = outchan;
mChans[idx].mTargetGain = outgain;
};
target.Main->setAmbiMixParams(slot->Wet, slot->Gain, set_channel);
}
void EqualizerState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
const al::span<float> buffer{mSampleBuffer.data(), samplesToDo};
auto chan = std::begin(mChans);
for(const auto &input : samplesIn)
{
const size_t outidx{chan->mTargetChannel};
if(outidx != InvalidChannelIndex)
{
const al::span<const float> inbuf{input.data(), samplesToDo};
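            /* The four bands are applied as two cascaded biquad pairs: the
             * low-shelf with the first peaking filter, then the second
             * peaking filter with the high-shelf.
             */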
DualBiquad{chan->mFilter[0], chan->mFilter[1]}.process(inbuf, buffer.begin());
DualBiquad{chan->mFilter[2], chan->mFilter[3]}.process(buffer, buffer.begin());
MixSamples(buffer, samplesOut[outidx].data(), chan->mCurrentGain, chan->mTargetGain,
samplesToDo);
}
++chan;
}
}
struct EqualizerStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new EqualizerState{}}; }
};
} // namespace
EffectStateFactory *EqualizerStateFactory_getFactory()
{
static EqualizerStateFactory EqualizerFactory{};
return &EqualizerFactory;
}


@ -0,0 +1,255 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2018 by Raul Herraiz.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <complex>
#include <cstdlib>
#include <iterator>
#include "alc/effects/base.h"
#include "alcomplex.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "core/mixer/defs.h"
#include "intrusive_ptr.h"
namespace {
using uint = unsigned int;
using complex_d = std::complex<double>;
constexpr size_t HilSize{1024};
constexpr size_t HilHalfSize{HilSize >> 1};
constexpr size_t OversampleFactor{4};
static_assert(HilSize%OversampleFactor == 0, "Factor must be a clean divisor of the size");
constexpr size_t HilStep{HilSize / OversampleFactor};
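/* With HilSize=1024 and OversampleFactor=4, each update consumes HilStep=256
 * new samples and successive analysis windows overlap by 75%.
 */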
/* Define a Hann window, used to filter the HIL input and output. */
struct Windower {
alignas(16) std::array<double,HilSize> mData;
Windower()
{
/* Create lookup table of the Hann window for the desired size. */
for(size_t i{0};i < HilHalfSize;i++)
{
constexpr double scale{al::numbers::pi / double{HilSize}};
const double val{std::sin((static_cast<double>(i)+0.5) * scale)};
mData[i] = mData[HilSize-1-i] = val * val;
}
}
};
const Windower gWindow{};
struct FshifterState final : public EffectState {
/* Effect parameters */
size_t mCount{};
size_t mPos{};
std::array<uint,2> mPhaseStep{};
std::array<uint,2> mPhase{};
std::array<double,2> mSign{};
/* Effects buffers */
std::array<double,HilSize> mInFIFO{};
std::array<complex_d,HilStep> mOutFIFO{};
std::array<complex_d,HilSize> mOutputAccum{};
std::array<complex_d,HilSize> mAnalytic{};
std::array<complex_d,BufferLineSize> mOutdata{};
alignas(16) FloatBufferLine mBufferOut{};
/* Effect gains for each output channel */
struct {
float Current[MaxAmbiChannels]{};
float Target[MaxAmbiChannels]{};
} mGains[2];
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(FshifterState)
};
void FshifterState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
    /* (Re-)initialize the parameters and clear the buffers. */
mCount = 0;
mPos = HilSize - HilStep;
mPhaseStep.fill(0u);
mPhase.fill(0u);
mSign.fill(1.0);
mInFIFO.fill(0.0);
mOutFIFO.fill(complex_d{});
mOutputAccum.fill(complex_d{});
mAnalytic.fill(complex_d{});
for(auto &gain : mGains)
{
std::fill(std::begin(gain.Current), std::end(gain.Current), 0.0f);
std::fill(std::begin(gain.Target), std::end(gain.Target), 0.0f);
}
}
void FshifterState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
const float step{props->Fshifter.Frequency / static_cast<float>(device->Frequency)};
mPhaseStep[0] = mPhaseStep[1] = fastf2u(minf(step, 1.0f) * MixerFracOne);
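    /* For example, a 440Hz shift on a 44100Hz device gives
     * step = 440/44100 ~= 0.00998 of a cycle per sample, stored as a
     * fixed-point fraction of MixerFracOne.
     */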
switch(props->Fshifter.LeftDirection)
{
case FShifterDirection::Down:
mSign[0] = -1.0;
break;
case FShifterDirection::Up:
mSign[0] = 1.0;
break;
case FShifterDirection::Off:
mPhase[0] = 0;
mPhaseStep[0] = 0;
break;
}
switch(props->Fshifter.RightDirection)
{
case FShifterDirection::Down:
mSign[1] = -1.0;
break;
case FShifterDirection::Up:
mSign[1] = 1.0;
break;
case FShifterDirection::Off:
mPhase[1] = 0;
mPhaseStep[1] = 0;
break;
}
static constexpr auto inv_sqrt2 = static_cast<float>(1.0 / al::numbers::sqrt2);
static constexpr auto lcoeffs_pw = CalcDirectionCoeffs({-1.0f, 0.0f, 0.0f});
static constexpr auto rcoeffs_pw = CalcDirectionCoeffs({ 1.0f, 0.0f, 0.0f});
static constexpr auto lcoeffs_nrml = CalcDirectionCoeffs({-inv_sqrt2, 0.0f, inv_sqrt2});
static constexpr auto rcoeffs_nrml = CalcDirectionCoeffs({ inv_sqrt2, 0.0f, inv_sqrt2});
auto &lcoeffs = (device->mRenderMode != RenderMode::Pairwise) ? lcoeffs_nrml : lcoeffs_pw;
auto &rcoeffs = (device->mRenderMode != RenderMode::Pairwise) ? rcoeffs_nrml : rcoeffs_pw;
mOutTarget = target.Main->Buffer;
ComputePanGains(target.Main, lcoeffs.data(), slot->Gain, mGains[0].Target);
ComputePanGains(target.Main, rcoeffs.data(), slot->Gain, mGains[1].Target);
}
void FshifterState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
for(size_t base{0u};base < samplesToDo;)
{
size_t todo{minz(HilStep-mCount, samplesToDo-base)};
        /* Fill the FIFO buffer with new sample data. */
const size_t pos{mPos};
size_t count{mCount};
do {
mInFIFO[pos+count] = samplesIn[0][base];
mOutdata[base] = mOutFIFO[count];
++base; ++count;
} while(--todo);
mCount = count;
/* Check whether FIFO buffer is filled */
if(mCount < HilStep) break;
mCount = 0;
mPos = (mPos+HilStep) & (HilSize-1);
        /* Window the real input signal and store it in the analytic signal buffer. */
for(size_t src{mPos}, k{0u};src < HilSize;++src,++k)
mAnalytic[k] = mInFIFO[src]*gWindow.mData[k];
for(size_t src{0u}, k{HilSize-mPos};src < mPos;++src,++k)
mAnalytic[k] = mInFIFO[src]*gWindow.mData[k];
        /* Process the signal with a discrete Hilbert transform to obtain the analytic signal. */
complex_hilbert(mAnalytic);
        /* Window the result and add it to the output accumulator. */
for(size_t dst{mPos}, k{0u};dst < HilSize;++dst,++k)
mOutputAccum[dst] += 2.0/OversampleFactor*gWindow.mData[k]*mAnalytic[k];
for(size_t dst{0u}, k{HilSize-mPos};dst < mPos;++dst,++k)
mOutputAccum[dst] += 2.0/OversampleFactor*gWindow.mData[k]*mAnalytic[k];
/* Copy out the accumulated result, then clear for the next iteration. */
std::copy_n(mOutputAccum.cbegin() + mPos, HilStep, mOutFIFO.begin());
std::fill_n(mOutputAccum.begin() + mPos, HilStep, complex_d{});
}
/* Process frequency shifter using the analytic signal obtained. */
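    /* Mixing the analytic signal with a quadrature oscillator (cosine and
     * sine at the shift frequency) and keeping the real part shifts the whole
     * spectrum by that amount; mSign selects an upward or downward shift.
     */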
float *RESTRICT BufferOut{al::assume_aligned<16>(mBufferOut.data())};
for(size_t c{0};c < 2;++c)
{
const uint phase_step{mPhaseStep[c]};
uint phase_idx{mPhase[c]};
for(size_t k{0};k < samplesToDo;++k)
{
const double phase{phase_idx * (al::numbers::pi*2.0 / MixerFracOne)};
BufferOut[k] = static_cast<float>(mOutdata[k].real()*std::cos(phase) +
mOutdata[k].imag()*std::sin(phase)*mSign[c]);
phase_idx += phase_step;
phase_idx &= MixerFracMask;
}
mPhase[c] = phase_idx;
/* Now, mix the processed sound data to the output. */
MixSamples({BufferOut, samplesToDo}, samplesOut, mGains[c].Current, mGains[c].Target,
maxz(samplesToDo, 512), 0);
}
}
struct FshifterStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new FshifterState{}}; }
};
} // namespace
EffectStateFactory *FshifterStateFactory_getFactory()
{
static FshifterStateFactory FshifterFactory{};
return &FshifterFactory;
}


@ -0,0 +1,193 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2009 by Chris Robinson.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <iterator>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/filters/biquad.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
namespace {
using uint = unsigned int;
#define MAX_UPDATE_SAMPLES 128
#define WAVEFORM_FRACBITS 24
#define WAVEFORM_FRACONE (1<<WAVEFORM_FRACBITS)
#define WAVEFORM_FRACMASK (WAVEFORM_FRACONE-1)
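/* The waveform phase is kept as a 24-bit fixed-point fraction of one cycle:
 * WAVEFORM_FRACONE (2^24) represents a full period, so the phase can be
 * wrapped with a simple mask of WAVEFORM_FRACMASK.
 */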
inline float Sin(uint index)
{
constexpr float scale{al::numbers::pi_v<float>*2.0f / WAVEFORM_FRACONE};
return std::sin(static_cast<float>(index) * scale);
}
inline float Saw(uint index)
{ return static_cast<float>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f; }
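/* Square maps the top bit of the phase to -1 for the first half of the cycle
 * and +1 for the second half.
 */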
inline float Square(uint index)
{ return static_cast<float>(static_cast<int>((index>>(WAVEFORM_FRACBITS-2))&2) - 1); }
inline float One(uint) { return 1.0f; }
template<float (&func)(uint)>
void Modulate(float *RESTRICT dst, uint index, const uint step, size_t todo)
{
for(size_t i{0u};i < todo;i++)
{
index += step;
index &= WAVEFORM_FRACMASK;
dst[i] = func(index);
}
}
struct ModulatorState final : public EffectState {
void (*mGetSamples)(float*RESTRICT, uint, const uint, size_t){};
uint mIndex{0};
uint mStep{1};
struct {
uint mTargetChannel{InvalidChannelIndex};
BiquadFilter mFilter;
float mCurrentGain{};
float mTargetGain{};
} mChans[MaxAmbiChannels];
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(ModulatorState)
};
void ModulatorState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
for(auto &e : mChans)
{
e.mTargetChannel = InvalidChannelIndex;
e.mFilter.clear();
e.mCurrentGain = 0.0f;
}
}
void ModulatorState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
const float step{props->Modulator.Frequency / static_cast<float>(device->Frequency)};
mStep = fastf2u(clampf(step*WAVEFORM_FRACONE, 0.0f, float{WAVEFORM_FRACONE-1}));
if(mStep == 0)
mGetSamples = Modulate<One>;
else if(props->Modulator.Waveform == ModulatorWaveform::Sinusoid)
mGetSamples = Modulate<Sin>;
else if(props->Modulator.Waveform == ModulatorWaveform::Sawtooth)
mGetSamples = Modulate<Saw>;
else /*if(props->Modulator.Waveform == ModulatorWaveform::Square)*/
mGetSamples = Modulate<Square>;
float f0norm{props->Modulator.HighPassCutoff / static_cast<float>(device->Frequency)};
f0norm = clampf(f0norm, 1.0f/512.0f, 0.49f);
/* Bandwidth value is constant in octaves. */
mChans[0].mFilter.setParamsFromBandwidth(BiquadType::HighPass, f0norm, 1.0f, 0.75f);
for(size_t i{1u};i < slot->Wet.Buffer.size();++i)
mChans[i].mFilter.copyParamsFrom(mChans[0].mFilter);
mOutTarget = target.Main->Buffer;
auto set_channel = [this](size_t idx, uint outchan, float outgain)
{
mChans[idx].mTargetChannel = outchan;
mChans[idx].mTargetGain = outgain;
};
target.Main->setAmbiMixParams(slot->Wet, slot->Gain, set_channel);
}
void ModulatorState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
for(size_t base{0u};base < samplesToDo;)
{
alignas(16) float modsamples[MAX_UPDATE_SAMPLES];
const size_t td{minz(MAX_UPDATE_SAMPLES, samplesToDo-base)};
mGetSamples(modsamples, mIndex, mStep, td);
mIndex += static_cast<uint>(mStep * td);
mIndex &= WAVEFORM_FRACMASK;
auto chandata = std::begin(mChans);
for(const auto &input : samplesIn)
{
const size_t outidx{chandata->mTargetChannel};
if(outidx != InvalidChannelIndex)
{
alignas(16) float temps[MAX_UPDATE_SAMPLES];
chandata->mFilter.process({&input[base], td}, temps);
for(size_t i{0u};i < td;i++)
temps[i] *= modsamples[i];
MixSamples({temps, td}, samplesOut[outidx].data()+base, chandata->mCurrentGain,
chandata->mTargetGain, samplesToDo-base);
}
++chandata;
}
base += td;
}
}
struct ModulatorStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new ModulatorState{}}; }
};
} // namespace
EffectStateFactory *ModulatorStateFactory_getFactory()
{
static ModulatorStateFactory ModulatorFactory{};
return &ModulatorFactory;
}


@ -0,0 +1,84 @@
#include "config.h"
#include <stddef.h>
#include "almalloc.h"
#include "alspan.h"
#include "base.h"
#include "core/bufferline.h"
#include "intrusive_ptr.h"
struct ContextBase;
struct DeviceBase;
struct EffectSlot;
namespace {
struct NullState final : public EffectState {
NullState();
~NullState() override;
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(NullState)
};
/* This constructs the effect state. It's called when the object is first
* created.
*/
NullState::NullState() = default;
/* This destructs the effect state. It's called only when the effect instance
* is no longer used.
*/
NullState::~NullState() = default;
/* This updates the device-dependent effect state. This is called on state
* initialization and any time the device parameters (e.g. playback frequency,
* format) have been changed. Will always be followed by a call to the update
* method, if successful.
*/
void NullState::deviceUpdate(const DeviceBase* /*device*/, const BufferStorage* /*buffer*/)
{
}
/* This updates the effect state with new properties. This is called any time
* the effect is (re)loaded into a slot.
*/
void NullState::update(const ContextBase* /*context*/, const EffectSlot* /*slot*/,
const EffectProps* /*props*/, const EffectTarget /*target*/)
{
}
/* This processes the effect state for the given number of samples from the
 * input to the output buffer. The result should be added to the output
 * buffer rather than replacing it.
*/
void NullState::process(const size_t/*samplesToDo*/,
const al::span<const FloatBufferLine> /*samplesIn*/,
const al::span<FloatBufferLine> /*samplesOut*/)
{
}
struct NullStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override;
};
/* Creates EffectState objects of the appropriate type. */
al::intrusive_ptr<EffectState> NullStateFactory::create()
{ return al::intrusive_ptr<EffectState>{new NullState{}}; }
} // namespace
EffectStateFactory *NullStateFactory_getFactory()
{
static NullStateFactory NullFactory{};
return &NullFactory;
}


@ -0,0 +1,307 @@
/**
* OpenAL cross platform audio library
* Copyright (C) 2018 by Raul Herraiz.
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Library General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Library General Public License for more details.
*
* You should have received a copy of the GNU Library General Public
* License along with this library; if not, write to the
* Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
* Or go to http://www.gnu.org/copyleft/lgpl.html
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cmath>
#include <complex>
#include <cstdlib>
#include <iterator>
#include "alc/effects/base.h"
#include "alcomplex.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/bufferline.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "core/mixer/defs.h"
#include "intrusive_ptr.h"
struct ContextBase;
namespace {
using uint = unsigned int;
using complex_f = std::complex<float>;
constexpr size_t StftSize{1024};
constexpr size_t StftHalfSize{StftSize >> 1};
constexpr size_t OversampleFactor{8};
static_assert(StftSize%OversampleFactor == 0, "Factor must be a clean divisor of the size");
constexpr size_t StftStep{StftSize / OversampleFactor};
/* Define a Hann window, used to filter the STFT input and output. */
struct Windower {
alignas(16) std::array<float,StftSize> mData;
Windower()
{
/* Create lookup table of the Hann window for the desired size. */
for(size_t i{0};i < StftHalfSize;i++)
{
constexpr double scale{al::numbers::pi / double{StftSize}};
const double val{std::sin((static_cast<double>(i)+0.5) * scale)};
mData[i] = mData[StftSize-1-i] = static_cast<float>(val * val);
}
}
};
const Windower gWindow{};
struct FrequencyBin {
float Magnitude;
float FreqBin;
};
struct PshifterState final : public EffectState {
/* Effect parameters */
size_t mCount;
size_t mPos;
uint mPitchShiftI;
float mPitchShift;
/* Effects buffers */
std::array<float,StftSize> mFIFO;
std::array<float,StftHalfSize+1> mLastPhase;
std::array<float,StftHalfSize+1> mSumPhase;
std::array<float,StftSize> mOutputAccum;
std::array<complex_f,StftSize> mFftBuffer;
std::array<FrequencyBin,StftHalfSize+1> mAnalysisBuffer;
std::array<FrequencyBin,StftHalfSize+1> mSynthesisBuffer;
alignas(16) FloatBufferLine mBufferOut;
/* Effect gains for each output channel */
float mCurrentGains[MaxAmbiChannels];
float mTargetGains[MaxAmbiChannels];
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
DEF_NEWDEL(PshifterState)
};
void PshifterState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
    /* (Re-)initialize the parameters and clear the buffers. */
mCount = 0;
mPos = StftSize - StftStep;
mPitchShiftI = MixerFracOne;
mPitchShift = 1.0f;
mFIFO.fill(0.0f);
mLastPhase.fill(0.0f);
mSumPhase.fill(0.0f);
mOutputAccum.fill(0.0f);
mFftBuffer.fill(complex_f{});
mAnalysisBuffer.fill(FrequencyBin{});
mSynthesisBuffer.fill(FrequencyBin{});
std::fill(std::begin(mCurrentGains), std::end(mCurrentGains), 0.0f);
std::fill(std::begin(mTargetGains), std::end(mTargetGains), 0.0f);
}
void PshifterState::update(const ContextBase*, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const int tune{props->Pshifter.CoarseTune*100 + props->Pshifter.FineTune};
const float pitch{std::pow(2.0f, static_cast<float>(tune) / 1200.0f)};
mPitchShiftI = clampu(fastf2u(pitch*MixerFracOne), MixerFracHalf, MixerFracOne*2);
mPitchShift = static_cast<float>(mPitchShiftI) * float{1.0f/MixerFracOne};
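    /* For example, CoarseTune=+12 with FineTune=0 gives 1200 cents, so
     * pitch = 2^(1200/1200) = 2.0; the clamp keeps the shift within one
     * octave up or down.
     */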
static constexpr auto coeffs = CalcDirectionCoeffs({0.0f, 0.0f, -1.0f});
mOutTarget = target.Main->Buffer;
ComputePanGains(target.Main, coeffs.data(), slot->Gain, mTargetGains);
}
void PshifterState::process(const size_t samplesToDo,
const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
/* Pitch shifter engine based on the work of Stephan Bernsee.
* http://blogs.zynaptiq.com/bernsee/pitch-shifting-using-the-ft/
*/
/* Cycle offset per update expected of each frequency bin (bin 0 is none,
* bin 1 is x1, bin 2 is x2, etc).
*/
constexpr float expected_cycles{al::numbers::pi_v<float>*2.0f / OversampleFactor};
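    /* With OversampleFactor=8 this is 2*pi/8 = pi/4 radians of expected phase
     * advance per update for each successive bin offset.
     */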
for(size_t base{0u};base < samplesToDo;)
{
const size_t todo{minz(StftStep-mCount, samplesToDo-base)};
/* Retrieve the output samples from the FIFO and fill in the new input
* samples.
*/
auto fifo_iter = mFIFO.begin()+mPos + mCount;
std::copy_n(fifo_iter, todo, mBufferOut.begin()+base);
std::copy_n(samplesIn[0].begin()+base, todo, fifo_iter);
mCount += todo;
base += todo;
/* Check whether FIFO buffer is filled with new samples. */
if(mCount < StftStep) break;
mCount = 0;
mPos = (mPos+StftStep) & (mFIFO.size()-1);
        /* Window the time-domain signal, store it in mFftBuffer, and apply a
         * forward FFT to get the frequency-domain signal.
*/
for(size_t src{mPos}, k{0u};src < StftSize;++src,++k)
mFftBuffer[k] = mFIFO[src] * gWindow.mData[k];
for(size_t src{0u}, k{StftSize-mPos};src < mPos;++src,++k)
mFftBuffer[k] = mFIFO[src] * gWindow.mData[k];
forward_fft(al::as_span(mFftBuffer));
/* Analyze the obtained data. Since the real FFT is symmetric, only
* StftHalfSize+1 samples are needed.
*/
for(size_t k{0u};k < StftHalfSize+1;k++)
{
const float magnitude{std::abs(mFftBuffer[k])};
const float phase{std::arg(mFftBuffer[k])};
/* Compute the phase difference from the last update and subtract
* the expected phase difference for this bin.
*
* When oversampling, the expected per-update offset increments by
* 1/OversampleFactor for every frequency bin. So, the offset wraps
* every 'OversampleFactor' bin.
*/
const auto bin_offset = static_cast<float>(k % OversampleFactor);
float tmp{(phase - mLastPhase[k]) - bin_offset*expected_cycles};
/* Store the actual phase for the next update. */
mLastPhase[k] = phase;
/* Normalize from pi, and wrap the delta between -1 and +1. */
tmp *= al::numbers::inv_pi_v<float>;
int qpd{float2int(tmp)};
tmp -= static_cast<float>(qpd + (qpd%2));
/* Get deviation from bin frequency (-0.5 to +0.5), and account for
* oversampling.
*/
tmp *= 0.5f * OversampleFactor;
/* Compute the k-th partials' frequency bin target and store the
* magnitude and frequency bin in the analysis buffer. We don't
* need the "true frequency" since it's a linear relationship with
* the bin.
*/
mAnalysisBuffer[k].Magnitude = magnitude;
mAnalysisBuffer[k].FreqBin = static_cast<float>(k) + tmp;
}
/* Shift the frequency bins according to the pitch adjustment,
* accumulating the magnitudes of overlapping frequency bins.
*/
std::fill(mSynthesisBuffer.begin(), mSynthesisBuffer.end(), FrequencyBin{});
constexpr size_t bin_limit{((StftHalfSize+1)<<MixerFracBits) - MixerFracHalf - 1};
const size_t bin_count{minz(StftHalfSize+1, bin_limit/mPitchShiftI + 1)};
for(size_t k{0u};k < bin_count;k++)
{
const size_t j{(k*mPitchShiftI + MixerFracHalf) >> MixerFracBits};
/* If more than two bins end up together, use the target frequency
* bin for the one with the dominant magnitude. There might be a
* better way to handle this, but it's better than last-index-wins.
*/
if(mAnalysisBuffer[k].Magnitude > mSynthesisBuffer[j].Magnitude)
mSynthesisBuffer[j].FreqBin = mAnalysisBuffer[k].FreqBin * mPitchShift;
mSynthesisBuffer[j].Magnitude += mAnalysisBuffer[k].Magnitude;
}
/* Reconstruct the frequency-domain signal from the adjusted frequency
* bins.
*/
for(size_t k{0u};k < StftHalfSize+1;k++)
{
/* Calculate the actual delta phase for this bin's target frequency
* bin, and accumulate it to get the actual bin phase.
*/
float tmp{mSumPhase[k] + mSynthesisBuffer[k].FreqBin*expected_cycles};
/* Wrap between -pi and +pi for the sum. If mSumPhase is left to
* grow indefinitely, it will lose precision and produce less exact
* phase over time.
*/
tmp *= al::numbers::inv_pi_v<float>;
int qpd{float2int(tmp)};
tmp -= static_cast<float>(qpd + (qpd%2));
mSumPhase[k] = tmp * al::numbers::pi_v<float>;
mFftBuffer[k] = std::polar(mSynthesisBuffer[k].Magnitude, mSumPhase[k]);
}
for(size_t k{StftHalfSize+1};k < StftSize;++k)
mFftBuffer[k] = std::conj(mFftBuffer[StftSize-k]);
/* Apply an inverse FFT to get the time-domain signal, and accumulate
* for the output with windowing.
*/
inverse_fft(al::as_span(mFftBuffer));
static constexpr float scale{3.0f / OversampleFactor / StftSize};
for(size_t dst{mPos}, k{0u};dst < StftSize;++dst,++k)
mOutputAccum[dst] += gWindow.mData[k]*mFftBuffer[k].real() * scale;
for(size_t dst{0u}, k{StftSize-mPos};dst < mPos;++dst,++k)
mOutputAccum[dst] += gWindow.mData[k]*mFftBuffer[k].real() * scale;
/* Copy out the accumulated result, then clear for the next iteration. */
std::copy_n(mOutputAccum.begin() + mPos, StftStep, mFIFO.begin() + mPos);
std::fill_n(mOutputAccum.begin() + mPos, StftStep, 0.0f);
}
/* Now, mix the processed sound data to the output. */
MixSamples({mBufferOut.data(), samplesToDo}, samplesOut, mCurrentGains, mTargetGains,
maxz(samplesToDo, 512), 0);
}
struct PshifterStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new PshifterState{}}; }
};
} // namespace
EffectStateFactory *PshifterStateFactory_getFactory()
{
static PshifterStateFactory PshifterFactory{};
return &PshifterFactory;
}

File diff suppressed because it is too large


@ -0,0 +1,350 @@
/**
* This file is part of the OpenAL Soft cross platform audio library
*
* Copyright (C) 2019 by Anis A. Hireche
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Spherical-Harmonic-Transform nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
#include <algorithm>
#include <array>
#include <cstdlib>
#include <functional>
#include <iterator>
#include "alc/effects/base.h"
#include "almalloc.h"
#include "alnumbers.h"
#include "alnumeric.h"
#include "alspan.h"
#include "core/ambidefs.h"
#include "core/bufferline.h"
#include "core/context.h"
#include "core/devformat.h"
#include "core/device.h"
#include "core/effectslot.h"
#include "core/mixer.h"
#include "intrusive_ptr.h"
namespace {
using uint = unsigned int;
#define MAX_UPDATE_SAMPLES 256
#define NUM_FORMANTS 4
#define NUM_FILTERS 2
#define Q_FACTOR 5.0f
#define VOWEL_A_INDEX 0
#define VOWEL_B_INDEX 1
#define WAVEFORM_FRACBITS 24
#define WAVEFORM_FRACONE (1<<WAVEFORM_FRACBITS)
#define WAVEFORM_FRACMASK (WAVEFORM_FRACONE-1)
inline float Sin(uint index)
{
constexpr float scale{al::numbers::pi_v<float>*2.0f / WAVEFORM_FRACONE};
return std::sin(static_cast<float>(index) * scale)*0.5f + 0.5f;
}
inline float Saw(uint index)
{ return static_cast<float>(index) / float{WAVEFORM_FRACONE}; }
inline float Triangle(uint index)
{ return std::fabs(static_cast<float>(index)*(2.0f/WAVEFORM_FRACONE) - 1.0f); }
inline float Half(uint) { return 0.5f; }
template<float (&func)(uint)>
void Oscillate(float *RESTRICT dst, uint index, const uint step, size_t todo)
{
for(size_t i{0u};i < todo;i++)
{
index += step;
index &= WAVEFORM_FRACMASK;
dst[i] = func(index);
}
}
struct FormantFilter
{
float mCoeff{0.0f};
float mGain{1.0f};
float mS1{0.0f};
float mS2{0.0f};
FormantFilter() = default;
FormantFilter(float f0norm, float gain)
: mCoeff{std::tan(al::numbers::pi_v<float> * f0norm)}, mGain{gain}
{ }
inline void process(const float *samplesIn, float *samplesOut, const size_t numInput)
{
/* A state variable filter from a topology-preserving transform.
* Based on a talk given by Ivan Cohen: https://www.youtube.com/watch?v=esjHXGPyrhg
*/
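        /* The filter produces high-pass (H), band-pass (B) and low-pass (L)
         * outputs; only the band-pass output, scaled by the formant gain, is
         * accumulated into the output.
         */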
const float g{mCoeff};
const float gain{mGain};
const float h{1.0f / (1.0f + (g/Q_FACTOR) + (g*g))};
float s1{mS1};
float s2{mS2};
for(size_t i{0u};i < numInput;i++)
{
const float H{(samplesIn[i] - (1.0f/Q_FACTOR + g)*s1 - s2)*h};
const float B{g*H + s1};
const float L{g*B + s2};
s1 = g*H + B;
s2 = g*B + L;
// Apply peak and accumulate samples.
samplesOut[i] += B * gain;
}
mS1 = s1;
mS2 = s2;
}
inline void clear()
{
mS1 = 0.0f;
mS2 = 0.0f;
}
};
struct VmorpherState final : public EffectState {
struct {
uint mTargetChannel{InvalidChannelIndex};
/* Effect parameters */
FormantFilter mFormants[NUM_FILTERS][NUM_FORMANTS];
/* Effect gains for each channel */
float mCurrentGain{};
float mTargetGain{};
} mChans[MaxAmbiChannels];
void (*mGetSamples)(float*RESTRICT, uint, const uint, size_t){};
uint mIndex{0};
uint mStep{1};
/* Effects buffers */
alignas(16) float mSampleBufferA[MAX_UPDATE_SAMPLES]{};
alignas(16) float mSampleBufferB[MAX_UPDATE_SAMPLES]{};
alignas(16) float mLfo[MAX_UPDATE_SAMPLES]{};
void deviceUpdate(const DeviceBase *device, const BufferStorage *buffer) override;
void update(const ContextBase *context, const EffectSlot *slot, const EffectProps *props,
const EffectTarget target) override;
void process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn,
const al::span<FloatBufferLine> samplesOut) override;
static std::array<FormantFilter,4> getFiltersByPhoneme(VMorpherPhenome phoneme,
float frequency, float pitch);
DEF_NEWDEL(VmorpherState)
};
std::array<FormantFilter,4> VmorpherState::getFiltersByPhoneme(VMorpherPhenome phoneme,
float frequency, float pitch)
{
    /* Use the soprano formant set of values to better match the mid-range
     * frequency space.
*
* See: https://www.classes.cs.uchicago.edu/archive/1999/spring/CS295/Computing_Resources/Csound/CsManual3.48b1.HTML/Appendices/table3.html
*/
switch(phoneme)
{
case VMorpherPhenome::A:
return {{
{( 800 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
{(1150 * pitch) / frequency, 0.501187f}, /* std::pow(10.0f, -6 / 20.0f); */
{(2900 * pitch) / frequency, 0.025118f}, /* std::pow(10.0f, -32 / 20.0f); */
{(3900 * pitch) / frequency, 0.100000f} /* std::pow(10.0f, -20 / 20.0f); */
}};
case VMorpherPhenome::E:
return {{
{( 350 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
{(2000 * pitch) / frequency, 0.100000f}, /* std::pow(10.0f, -20 / 20.0f); */
{(2800 * pitch) / frequency, 0.177827f}, /* std::pow(10.0f, -15 / 20.0f); */
{(3600 * pitch) / frequency, 0.009999f} /* std::pow(10.0f, -40 / 20.0f); */
}};
case VMorpherPhenome::I:
return {{
{( 270 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
{(2140 * pitch) / frequency, 0.251188f}, /* std::pow(10.0f, -12 / 20.0f); */
{(2950 * pitch) / frequency, 0.050118f}, /* std::pow(10.0f, -26 / 20.0f); */
{(3900 * pitch) / frequency, 0.050118f} /* std::pow(10.0f, -26 / 20.0f); */
}};
case VMorpherPhenome::O:
return {{
{( 450 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
{( 800 * pitch) / frequency, 0.281838f}, /* std::pow(10.0f, -11 / 20.0f); */
{(2830 * pitch) / frequency, 0.079432f}, /* std::pow(10.0f, -22 / 20.0f); */
{(3800 * pitch) / frequency, 0.079432f} /* std::pow(10.0f, -22 / 20.0f); */
}};
case VMorpherPhenome::U:
return {{
{( 325 * pitch) / frequency, 1.000000f}, /* std::pow(10.0f, 0 / 20.0f); */
{( 700 * pitch) / frequency, 0.158489f}, /* std::pow(10.0f, -16 / 20.0f); */
{(2700 * pitch) / frequency, 0.017782f}, /* std::pow(10.0f, -35 / 20.0f); */
{(3800 * pitch) / frequency, 0.009999f} /* std::pow(10.0f, -40 / 20.0f); */
}};
default:
break;
}
return {};
}
void VmorpherState::deviceUpdate(const DeviceBase*, const BufferStorage*)
{
for(auto &e : mChans)
{
e.mTargetChannel = InvalidChannelIndex;
std::for_each(std::begin(e.mFormants[VOWEL_A_INDEX]), std::end(e.mFormants[VOWEL_A_INDEX]),
std::mem_fn(&FormantFilter::clear));
std::for_each(std::begin(e.mFormants[VOWEL_B_INDEX]), std::end(e.mFormants[VOWEL_B_INDEX]),
std::mem_fn(&FormantFilter::clear));
e.mCurrentGain = 0.0f;
}
}
void VmorpherState::update(const ContextBase *context, const EffectSlot *slot,
const EffectProps *props, const EffectTarget target)
{
const DeviceBase *device{context->mDevice};
const float frequency{static_cast<float>(device->Frequency)};
const float step{props->Vmorpher.Rate / frequency};
mStep = fastf2u(clampf(step*WAVEFORM_FRACONE, 0.0f, float{WAVEFORM_FRACONE-1}));
if(mStep == 0)
mGetSamples = Oscillate<Half>;
else if(props->Vmorpher.Waveform == VMorpherWaveform::Sinusoid)
mGetSamples = Oscillate<Sin>;
else if(props->Vmorpher.Waveform == VMorpherWaveform::Triangle)
mGetSamples = Oscillate<Triangle>;
else /*if(props->Vmorpher.Waveform == VMorpherWaveform::Sawtooth)*/
mGetSamples = Oscillate<Saw>;
const float pitchA{std::pow(2.0f,
static_cast<float>(props->Vmorpher.PhonemeACoarseTuning) / 12.0f)};
const float pitchB{std::pow(2.0f,
static_cast<float>(props->Vmorpher.PhonemeBCoarseTuning) / 12.0f)};
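    /* E.g. a coarse tuning of +12 semitones gives 2^(12/12) = 2.0, doubling
     * the formant frequencies for that phoneme.
     */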
auto vowelA = getFiltersByPhoneme(props->Vmorpher.PhonemeA, frequency, pitchA);
auto vowelB = getFiltersByPhoneme(props->Vmorpher.PhonemeB, frequency, pitchB);
/* Copy the filter coefficients to the input channels. */
for(size_t i{0u};i < slot->Wet.Buffer.size();++i)
{
std::copy(vowelA.begin(), vowelA.end(), std::begin(mChans[i].mFormants[VOWEL_A_INDEX]));
std::copy(vowelB.begin(), vowelB.end(), std::begin(mChans[i].mFormants[VOWEL_B_INDEX]));
}
mOutTarget = target.Main->Buffer;
auto set_channel = [this](size_t idx, uint outchan, float outgain)
{
mChans[idx].mTargetChannel = outchan;
mChans[idx].mTargetGain = outgain;
};
target.Main->setAmbiMixParams(slot->Wet, slot->Gain, set_channel);
}
void VmorpherState::process(const size_t samplesToDo, const al::span<const FloatBufferLine> samplesIn, const al::span<FloatBufferLine> samplesOut)
{
    /* Following the EFX specification for a conformant implementation, which
     * describes the effect as a pair of 4-band formant filters blended
     * together using an LFO.
*/
for(size_t base{0u};base < samplesToDo;)
{
const size_t td{minz(MAX_UPDATE_SAMPLES, samplesToDo-base)};
mGetSamples(mLfo, mIndex, mStep, td);
mIndex += static_cast<uint>(mStep * td);
mIndex &= WAVEFORM_FRACMASK;
auto chandata = std::begin(mChans);
for(const auto &input : samplesIn)
{
const size_t outidx{chandata->mTargetChannel};
if(outidx == InvalidChannelIndex)
{
++chandata;
continue;
}
auto& vowelA = chandata->mFormants[VOWEL_A_INDEX];
auto& vowelB = chandata->mFormants[VOWEL_B_INDEX];
/* Process first vowel. */
std::fill_n(std::begin(mSampleBufferA), td, 0.0f);
vowelA[0].process(&input[base], mSampleBufferA, td);
vowelA[1].process(&input[base], mSampleBufferA, td);
vowelA[2].process(&input[base], mSampleBufferA, td);
vowelA[3].process(&input[base], mSampleBufferA, td);
/* Process second vowel. */
std::fill_n(std::begin(mSampleBufferB), td, 0.0f);
vowelB[0].process(&input[base], mSampleBufferB, td);
vowelB[1].process(&input[base], mSampleBufferB, td);
vowelB[2].process(&input[base], mSampleBufferB, td);
vowelB[3].process(&input[base], mSampleBufferB, td);
alignas(16) float blended[MAX_UPDATE_SAMPLES];
for(size_t i{0u};i < td;i++)
blended[i] = lerpf(mSampleBufferA[i], mSampleBufferB[i], mLfo[i]);
/* Now, mix the processed sound data to the output. */
MixSamples({blended, td}, samplesOut[outidx].data()+base, chandata->mCurrentGain,
chandata->mTargetGain, samplesToDo-base);
++chandata;
}
base += td;
}
}
struct VmorpherStateFactory final : public EffectStateFactory {
al::intrusive_ptr<EffectState> create() override
{ return al::intrusive_ptr<EffectState>{new VmorpherState{}}; }
};
} // namespace
EffectStateFactory *VmorpherStateFactory_getFactory()
{
static VmorpherStateFactory VmorpherFactory{};
return &VmorpherFactory;
}

73
externals/openal-soft/alc/inprogext.h vendored Normal file

@ -0,0 +1,73 @@
#ifndef INPROGEXT_H
#define INPROGEXT_H
#include "AL/al.h"
#include "AL/alc.h"
#include "AL/alext.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifndef AL_SOFT_map_buffer
#define AL_SOFT_map_buffer 1
typedef unsigned int ALbitfieldSOFT;
#define AL_MAP_READ_BIT_SOFT 0x00000001
#define AL_MAP_WRITE_BIT_SOFT 0x00000002
#define AL_MAP_PERSISTENT_BIT_SOFT 0x00000004
#define AL_PRESERVE_DATA_BIT_SOFT 0x00000008
typedef void (AL_APIENTRY*LPALBUFFERSTORAGESOFT)(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
typedef void* (AL_APIENTRY*LPALMAPBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
typedef void (AL_APIENTRY*LPALUNMAPBUFFERSOFT)(ALuint buffer);
typedef void (AL_APIENTRY*LPALFLUSHMAPPEDBUFFERSOFT)(ALuint buffer, ALsizei offset, ALsizei length);
#ifdef AL_ALEXT_PROTOTYPES
AL_API void AL_APIENTRY alBufferStorageSOFT(ALuint buffer, ALenum format, const ALvoid *data, ALsizei size, ALsizei freq, ALbitfieldSOFT flags);
AL_API void* AL_APIENTRY alMapBufferSOFT(ALuint buffer, ALsizei offset, ALsizei length, ALbitfieldSOFT access);
AL_API void AL_APIENTRY alUnmapBufferSOFT(ALuint buffer);
AL_API void AL_APIENTRY alFlushMappedBufferSOFT(ALuint buffer, ALsizei offset, ALsizei length);
#endif
#endif
#ifndef AL_SOFT_bformat_hoa
#define AL_SOFT_bformat_hoa
#define AL_UNPACK_AMBISONIC_ORDER_SOFT 0x199D
#endif
#ifndef AL_SOFT_convolution_reverb
#define AL_SOFT_convolution_reverb
#define AL_EFFECT_CONVOLUTION_REVERB_SOFT 0xA000
#define AL_EFFECTSLOT_STATE_SOFT 0x199D
typedef void (AL_APIENTRY*LPALAUXILIARYEFFECTSLOTPLAYSOFT)(ALuint slotid);
typedef void (AL_APIENTRY*LPALAUXILIARYEFFECTSLOTPLAYVSOFT)(ALsizei n, const ALuint *slotids);
typedef void (AL_APIENTRY*LPALAUXILIARYEFFECTSLOTSTOPSOFT)(ALuint slotid);
typedef void (AL_APIENTRY*LPALAUXILIARYEFFECTSLOTSTOPVSOFT)(ALsizei n, const ALuint *slotids);
#ifdef AL_ALEXT_PROTOTYPES
AL_API void AL_APIENTRY alAuxiliaryEffectSlotPlaySOFT(ALuint slotid);
AL_API void AL_APIENTRY alAuxiliaryEffectSlotPlayvSOFT(ALsizei n, const ALuint *slotids);
AL_API void AL_APIENTRY alAuxiliaryEffectSlotStopSOFT(ALuint slotid);
AL_API void AL_APIENTRY alAuxiliaryEffectSlotStopvSOFT(ALsizei n, const ALuint *slotids);
#endif
#endif
#ifndef AL_SOFT_hold_on_disconnect
#define AL_SOFT_hold_on_disconnect
#define AL_STOP_SOURCES_ON_DISCONNECT_SOFT 0x19AB
#endif
/* Non-standard export. Not part of any extension. */
AL_API const ALchar* AL_APIENTRY alsoft_get_version(void);
/* Functions from abandoned extensions. Only here for binary compatibility. */
AL_API void AL_APIENTRY alSourceQueueBufferLayersSOFT(ALuint src, ALsizei nb,
const ALuint *buffers);
AL_API ALint64SOFT AL_APIENTRY alGetInteger64SOFT(ALenum pname);
AL_API void AL_APIENTRY alGetInteger64vSOFT(ALenum pname, ALint64SOFT *values);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* INPROGEXT_H */

1152
externals/openal-soft/alc/panning.cpp vendored Normal file

File diff suppressed because it is too large