holopy3/Assets/Normal/Realtime/Audio/AudioOutput.cs

using UnityEngine;
using Normal.Realtime.Native;
using Normal.Utility;
namespace Normal.Realtime {
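    // Plays a native AudioOutputStream through a Unity AudioSource. A looping placeholder
    // clip keeps the AudioSource running, and OnAudioFilterRead swaps in decoded samples.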
    public class AudioOutput : MonoBehaviour {
        private volatile bool _mute;
        public bool mute { get { return _mute; } set { _mute = value; } }
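
        // Written on the audio thread, read on the main thread; volatile keeps the cross-thread read fresh.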
        private volatile float _dbLevel = -42.0f;
        public float dbLevel { get { return _dbLevel; } }

        private int _systemSampleRate;
        private AudioOutputStream _audioOutputStream;
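
        // Cache the output sample rate on the main thread; it's consumed later in
        // OnAudioFilterRead, which runs on Unity's audio thread.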
        void Awake() {
            _systemSampleRate = AudioSettings.outputSampleRate;
        }

        void OnDestroy() {
            Stop();
        }
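
        // If the component is toggled off and back on while a stream is live, restart playback.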
        void OnEnable() {
            if (_audioOutputStream != null) {
                AudioSource audioSource = GetComponent<AudioSource>();
                if (audioSource != null) {
                    if (!audioSource.isPlaying)
                        audioSource.Play();
                } else {
                    Debug.LogError("Realtime: AudioOutput has an audio output stream, but no AudioSource. Was it destroyed by accident?");
                }
            }
        }
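
        // Sketch of how this component is typically driven (the exact Normcore call site
        // is an assumption; the names below are illustrative only):
        //
        //     var output = speakerGameObject.AddComponent<AudioOutput>();
        //     output.StartWithAudioOutputStream(stream); // stream: AudioOutputStream from the native plugin
        //     ...
        //     output.Stop();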
        public void StartWithAudioOutputStream(AudioOutputStream audioOutputStream) {
            _audioOutputStream = audioOutputStream;

            // Speaker
            AudioSource audioSource = GetComponent<AudioSource>();

            // Create an audio source if needed.
            if (audioSource == null) {
                audioSource = gameObject.AddComponent<AudioSource>();
                audioSource.spatialize = true;
                audioSource.spatialBlend = 1.0f;
            }

            // TODO: Do we want the AudioClip's sample rate to match Opus? That would leave Unity doing any resampling. We might be able to do the resampling better ourselves.
            // TODO: We can probably specify a shorter clip length here since it's autogenerated now.
            audioSource.enabled = true;
            audioSource.loop = true;
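
            // The looping clip outputs a constant 1.0 signal. OnAudioFilterRead replaces it
            // with decoded stream audio, and since spatializePostEffects is set below, the
            // spatializer runs after that replacement, attenuating the real audio.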
            audioSource.clip = AudioClip.Create("Normcore Audio Stream", 48000, 1, 48000, true, (float[] data) => { for (int i = 0; i < data.Length; i++) data[i] = 1.0f; });
            audioSource.pitch = 1.0f;
            audioSource.spatializePostEffects = true;
            audioSource.Play();
        }
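
        // Stop playback and drop the stream reference; with the reference cleared,
        // OnAudioFilterRead outputs silence until a new stream is provided.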
        public void Stop() {
            AudioSource audioSource = GetComponent<AudioSource>();
            if (audioSource != null) {
                audioSource.Stop();
            }
            _audioOutputStream = null;
        }
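
        // Called by Unity on the audio thread. `data` is an interleaved buffer holding
        // data.Length / channels frames that we fill with decoded stream audio.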
        void OnAudioFilterRead(float[] data, int channels) {
            if (_audioOutputStream == null || _audioOutputStream.nativePointerIsNull) {
                // Zero the data back out.
                for (int i = 0; i < data.Length; i++)
                    data[i] = 0.0f;

                // Zero dB level
                _dbLevel = -42.0f;

                // Bail
                return;
            }

            // Configure the AudioOutputStream to resample to our desired sample rate.
            _audioOutputStream.SetSampleRate(_systemSampleRate);

            int incomingNumberOfChannels = _audioOutputStream.Channels();
            int numberOfFramesNeeded = data.Length / channels;
            int numberOfIncomingSamplesNeeded = numberOfFramesNeeded * incomingNumberOfChannels;
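
            // Note: this allocates on the audio thread every callback; reusing a cached
            // buffer would avoid the GC pressure.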
            float[] audioData = new float[numberOfIncomingSamplesNeeded];

            if (_audioOutputStream.GetAudioData(audioData)) {
                // Mix the incoming audio data into the output buffer.
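                // Map input channels to output channels. If the stream has fewer channels
                // than the output, the last input channel is duplicated into the extras
                // (e.g. a mono voice stream fills both stereo channels).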
                for (int f = 0; f < numberOfFramesNeeded; f++) {
                    for (int c = 0; c < channels; c++) {
                        int cIn = c;
                        if (cIn >= incomingNumberOfChannels)
                            cIn = incomingNumberOfChannels - 1;

                        int sIn  = f * incomingNumberOfChannels + cIn;
                        int sOut = f * channels + c;

                        // TODO: If there's no spatializer, we need to do this, but if there is a spatializer, we can just copy the value.
                        // TODO: Why is the input signal we're getting not 1.0 when spatialization is turned off??
                        data[sOut] = !_mute ? audioData[sIn] : 0.0f;
                    }
                }

                // Calculate the dB level using the last 256 frames.
                int firstFrame = numberOfFramesNeeded - 256;
                if (firstFrame < 0)
                    firstFrame = 0;
                int firstSample = firstFrame * incomingNumberOfChannels;
                _dbLevel = StaticFunctions.CalculateAverageDbForAudioBuffer(audioData, firstSample);
            } else {
                // Failed to retrieve audio samples. Zero the data back out.
                // TODO: Maybe we should fade in/out here? Maybe the native interface can do that for us?
                for (int i = 0; i < data.Length; i++)
                    data[i] = 0.0f;

                // Zero dB level
                _dbLevel = -42.0f;
            }
        }
    }
}