non-vr lobby, version fix

This commit is contained in:
joonasp
2022-06-29 14:45:17 +03:00
parent 5774be9822
commit 04baadfad1
1774 changed files with 573069 additions and 1533 deletions

View File

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 94b6b805fed3dd44a81ab18662551aff
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,193 @@
using System;
#if NETFX_CORE
using Windows.UI.Xaml;
using TimeObject = System.Object;
#else
using TimeObject = System.Timers.ElapsedEventArgs;
#endif
namespace Photon.Voice
{
public static partial class AudioUtil
{
/// <summary>IAudioReader that provides a constant tone signal.</summary>
Because of the current resampling algorithm, the tone is distorted if SamplingRate does not equal the encoder sampling rate.
public class ToneAudioReader<T> : IAudioReader<T>
{
/// <summary>Create a new ToneAudioReader instance</summary>
/// <param name="clockSec">Function to get current time in seconds. In Unity, pass in '() => AudioSettings.dspTime' for better results.</param>
/// <param name="frequency">Frequency of the generated tone (in Hz).</param>
/// <param name="samplingRate">Sampling rate of the audio signal (in Hz).</param>
/// <param name="channels">Number of channels in the audio signal.</param>
public ToneAudioReader(Func<double> clockSec = null, double frequency = 440, int samplingRate = 48000, int channels = 2)
{
this.clockSec = clockSec == null ? () => DateTime.Now.Ticks / 10000000.0 : clockSec;
this.samplingRate = samplingRate;
this.channels = channels;
k = 2 * Math.PI * frequency / SamplingRate;
}
/// <summary>Number of channels in the audio signal.</summary>
public int Channels { get { return channels; } }
/// <summary>Sampling rate of the audio signal (in Hz).</summary>
public int SamplingRate { get { return samplingRate; } }
/// <summary>If not null, audio object is in invalid state.</summary>
public string Error { get; private set; }
public void Dispose()
{
}
double k;
long timeSamples;
Func<double> clockSec;
int samplingRate;
int channels;
public bool Read(T[] buf)
{
var bufSamples = buf.Length / Channels;
var t = (long)(clockSec() * SamplingRate);
var deltaTimeSamples = t - timeSamples;
if (Math.Abs(deltaTimeSamples) > SamplingRate / 4) // when started or Read has not been called for a while
{
deltaTimeSamples = bufSamples;
timeSamples = t - bufSamples;
}
if (deltaTimeSamples < bufSamples)
{
return false;
}
else
{
int x = 0;
if (buf is float[])
{
var b = buf as float[]; // cast once, outside the per-sample loop (matches the short[] branch)
for (int i = 0; i < bufSamples; i++)
{
var v = (float)(System.Math.Sin(timeSamples++ * k) * 0.2f);
for (int j = 0; j < Channels; j++)
b[x++] = v;
}
}
else if (buf is short[])
{
var b = buf as short[];
for (int i = 0; i < bufSamples; i++)
{
var v = (short)(System.Math.Sin(timeSamples++ * k) * (0.2f * short.MaxValue));
for (int j = 0; j < Channels; j++)
b[x++] = v;
}
}
return true;
}
}
}
/// <summary>IAudioPusher that provides a constant tone signal.</summary>
// Helpful for debugging. System.Timers.Timer does not compile for UWP, so the NETFX_CORE branch uses DispatcherTimer instead.
public class ToneAudioPusher<T> : IAudioPusher<T>
{
/// <summary>Create a new ToneAudioPusher instance</summary>
/// <param name="frequency">Frequency of the generated tone (in Hz).</param>
/// <param name="bufSizeMs">Size of buffers to push (in milliseconds).</param>
/// <param name="samplingRate">Sampling rate of the audio signal (in Hz).</param>
/// <param name="channels">Number of channels in the audio signal.</param>
public ToneAudioPusher(int frequency = 440, int bufSizeMs = 100, int samplingRate = 48000, int channels = 2)
{
this.samplingRate = samplingRate;
this.channels = channels;
this.bufSizeSamples = bufSizeMs * SamplingRate / 1000;
k = 2 * Math.PI * frequency / SamplingRate;
}
double k;
#if NETFX_CORE
DispatcherTimer timer;
#else
System.Timers.Timer timer;
#endif
Action<T[]> callback;
ObjectFactory<T[], int> bufferFactory;
/// <summary>Set the callback function used for pushing data</summary>
/// <param name="callback">Callback function to use</param>
/// <param name="bufferFactory">Buffer factory used to create the buffer that is pushed to the callback</param>
public void SetCallback(Action<T[]> callback, ObjectFactory<T[], int> bufferFactory)
{
if (timer != null)
{
Dispose();
}
this.callback = callback;
this.bufferFactory = bufferFactory;
// Hook up the Elapsed event for the timer.
#if NETFX_CORE
timer = new DispatcherTimer();
timer.Tick += OnTimedEvent;
timer.Interval = new TimeSpan(10000000 * bufSizeSamples / SamplingRate); // ticks (10,000,000 per second) in a single buffer
timer.Start(); // DispatcherTimer must be started explicitly
#else
timer = new System.Timers.Timer(1000.0 * bufSizeSamples / SamplingRate);
timer.Elapsed += new System.Timers.ElapsedEventHandler(OnTimedEvent);
timer.Enabled = true;
#endif
}
private void OnTimedEvent(object source, TimeObject e)
{
var buf = bufferFactory.New(bufSizeSamples * Channels);
int x = 0;
if (buf is float[])
{
var b = buf as float[];
for (int i = 0; i < bufSizeSamples; i++)
{
var v = (float)(System.Math.Sin((posSamples + i) * k) / 2);
for (int j = 0; j < Channels; j++)
b[x++] = v;
}
}
else if (buf is short[])
{
var b = buf as short[];
for (int i = 0; i < bufSizeSamples; i++)
{
var v = (short)(System.Math.Sin((posSamples + i) * k) * short.MaxValue / 2);
for (int j = 0; j < Channels; j++)
b[x++] = v;
}
}
cntFrame++;
posSamples += bufSizeSamples;
this.callback(buf);
}
int cntFrame;
int posSamples;
int bufSizeSamples;
int samplingRate;
int channels;
public int Channels { get { return channels; } }
public int SamplingRate { get { return samplingRate; } }
public string Error { get; private set; }
public void Dispose()
{
if (timer != null)
{
#if NETFX_CORE
timer.Stop();
#else
timer.Close();
#endif
}
}
}
}
}
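For reference, a minimal usage sketch of the pull-based ToneAudioReader above; the 20 ms buffer size and the polling loop are illustrative assumptions, not part of this file:

// Hypothetical consumer polling a ToneAudioReader<float> for 20 ms buffers.
var reader = new Photon.Voice.AudioUtil.ToneAudioReader<float>(
clockSec: null, // falls back to the DateTime.Now-based clock
frequency: 440, samplingRate: 48000, channels: 2);
int samplesPerBuf = 48000 / 50; // 960 samples per channel = 20 ms
var buf = new float[samplesPerBuf * reader.Channels];
while (reader.Read(buf)) // returns false once no full buffer is due by the clock
{
// hand 'buf' to an encoder or audio output here
}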

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 452a47d43fb9a2541ac591325fbf50a6
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1 @@
// placeholder for deleted file

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: eced827a23d11be4684625f493c862aa
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,506 @@
// To enable SoundTouch library tempo change for audio frame shrinking when catching up:
// Define PHOTON_VOICE_SOUND_TOUCH_ENABLE
// Set PlayDelayConfig.TempoChangeHQ to true
// Add SoundTouch library https://gitlab.com/soundtouch/soundtouch
// Android: edit /Android-lib/jni/Android.mk:
// add ../../SoundTouchDLL/SoundTouchDLL.cpp to sources list
// add LOCAL_CFLAGS += -DDLL_EXPORTS
// Windows: http://soundtouch.surina.net/download.html
// Add SoundTouch library C# wrapper https://gitlab.com/soundtouch/soundtouch/-/blob/master/source/csharp-example/SoundTouch.cs
// Replace "SoundTouch.dll" with "soundtouch" in SoundTouch.cs
using System.Collections.Generic;
using System;
using System.Runtime.InteropServices;
namespace Photon.Voice
{
public interface IAudioOut<T>
{
bool IsPlaying { get; }
void Start(int frequency, int channels, int frameSamplesPerChannel);
void Flush();
void Stop();
void Push(T[] frame);
void Service();
int Lag { get; } // ms
}
public class AudioOutDelayControl
{
public class PlayDelayConfig
{
public PlayDelayConfig()
{
Low = 200;
High = 400;
Max = 1000;
SpeedUpPerc = 5;
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
TempoChangeHQ = false;
#endif
}
public int Low { get; set; } // ms: (Target) Audio player initializes the delay with this value on Start and after flush, and moves to it during corrections
public int High { get; set; } // ms: Audio player tries to keep the delay below this value.
public int Max { get; set; } // ms: Audio player guarantees that the delay never exceeds this value.
public int SpeedUpPerc { get; set; } // playback speed-up (in percent) used to catch up with the stream
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
public bool TempoChangeHQ { get; set; }
#endif
public PlayDelayConfig Clone()
{
return new PlayDelayConfig
{
Low = Low,
High = High,
Max = Max,
SpeedUpPerc = SpeedUpPerc,
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
TempoChangeHQ = TempoChangeHQ,
#endif
};
}
}
}
// Consumes audio frames via Push(), optionally resizes them, and writes them (OutWrite) to the output to keep a constant delay
// between output playback position (OutPos) and input stream position (advanced with each write).
// Assumes output is always playing.
public abstract class AudioOutDelayControl<T> : AudioOutDelayControl, IAudioOut<T>
{
readonly int sizeofT = Marshal.SizeOf(default(T));
abstract public int OutPos { get; }
abstract public void OutCreate(int frequency, int channels, int bufferSamples);
abstract public void OutStart();
abstract public void OutWrite(T[] data, int offsetSamples);
const int TEMPO_UP_SKIP_GROUP = 6;
private int frameSamples;
private int frameSize;
protected int bufferSamples;
protected int frequency;
private int clipWriteSamplePos;
private int playSamplePosPrev;
private int sourceTimeSamplesPrev;
private int playLoopCount;
PlayDelayConfig playDelayConfig;
protected int channels;
private bool started;
private bool flushed = true;
private int targetDelaySamples;
private int upperTargetDelaySamples; // correct if higher: gradually move to target via input frames resampling
private int maxDelaySamples; // set delay to this value if delay is higher
private const int NO_PUSH_TIMEOUT_MS = 100; // should be greater than Push() call interval
int lastPushTime = Environment.TickCount - NO_PUSH_TIMEOUT_MS;
protected readonly ILogger logger;
protected readonly string logPrefix;
private readonly bool debugInfo;
readonly bool processInService = false; // enqueue frames in Push() and process them in Service(); otherwise, process directly in Push()
T[] zeroFrame;
T[] resampledFrame;
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
soundtouch.SoundTouch st;
#endif
AudioUtil.TempoUp<T> tempoUp;
bool tempoChangeHQ; // true if library is available
public AudioOutDelayControl(bool processInService, PlayDelayConfig playDelayConfig, ILogger logger, string logPrefix, bool debugInfo)
{
this.processInService = processInService;
// clone the config so that later changes by the caller do not affect this instance
this.playDelayConfig = playDelayConfig.Clone();
this.logger = logger;
this.logPrefix = logPrefix;
this.debugInfo = debugInfo;
}
public int Lag { get { return (int)((this.clipWriteSamplePos - (this.started ? (float)this.playLoopCount * this.bufferSamples + this.OutPos : 0.0f)) * 1000 / frequency); } }
public bool IsFlushed
{
get { return !started || this.flushed; }
}
public bool IsPlaying
{
get { return !IsFlushed && (Environment.TickCount - lastPushTime < NO_PUSH_TIMEOUT_MS); }
}
public void Start(int frequency, int channels, int frameSamples)
{
//frequency = (int)(frequency * 1.2); // underrun test
//frequency = (int)(frequency / 1.2); // overrun test
this.frequency = frequency;
this.channels = channels;
// add 1 frame's worth of samples to make sure that we have something to play when the delay is set to 0
this.targetDelaySamples = playDelayConfig.Low * frequency / 1000 + frameSamples;
this.upperTargetDelaySamples = playDelayConfig.High * frequency / 1000 + frameSamples;
if (this.upperTargetDelaySamples < targetDelaySamples + 2 * frameSamples)
{
this.upperTargetDelaySamples = targetDelaySamples + 2 * frameSamples;
}
int resampleRampEndMs = playDelayConfig.Max;
this.maxDelaySamples = playDelayConfig.Max * frequency / 1000;
if (this.maxDelaySamples < this.upperTargetDelaySamples)
{
this.maxDelaySamples = this.upperTargetDelaySamples;
}
this.bufferSamples = 3 * this.maxDelaySamples; // make sure we have enough space
this.frameSamples = frameSamples;
this.frameSize = frameSamples * channels;
this.clipWriteSamplePos = this.targetDelaySamples;
if (this.framePool.Info != this.frameSize)
{
this.framePool.Init(this.frameSize);
}
this.zeroFrame = new T[this.frameSize];
this.resampledFrame = new T[this.frameSize];
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
if (this.playDelayConfig.TempoChangeHQ)
{
try
{
st = new soundtouch.SoundTouch();
st.Channels = (uint)channels;
st.SampleRate = (uint)frequency;
tempoChangeHQ = true;
}
catch (DllNotFoundException e)
{
logger.LogError("{0} SoundTouch library not found, disabling HQ tempo mode: {1}", this.logPrefix, e);
tempoChangeHQ = false;
}
}
#else
tempoChangeHQ = false;
#endif
if (!tempoChangeHQ)
{
tempoUp = new AudioUtil.TempoUp<T>();
}
OutCreate(frequency, channels, bufferSamples);
OutStart();
this.started = true;
this.logger.LogInfo("{0} Start: {1} bs={2} ch={3} f={4} tds={5} utds={6} mds={7} speed={8} tempo={9}", this.logPrefix, sizeofT == 2 ? "short" : "float", bufferSamples, channels, frequency, targetDelaySamples, upperTargetDelaySamples, maxDelaySamples, playDelayConfig.SpeedUpPerc, tempoChangeHQ ? "HQ" : "LQ");
}
Queue<T[]> frameQueue = new Queue<T[]>();
public const int FRAME_POOL_CAPACITY = 50;
PrimitiveArrayPool<T> framePool = new PrimitiveArrayPool<T>(FRAME_POOL_CAPACITY, "AudioOutDelayControl");
bool catchingUp = false;
bool processFrame(T[] frame, int playSamplePos)
{
var lagSamples = this.clipWriteSamplePos - playSamplePos;
if (!this.flushed)
{
if (lagSamples > maxDelaySamples)
{
if (this.debugInfo)
{
this.logger.LogDebug("{0} overrun {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
this.clipWriteSamplePos = playSamplePos + maxDelaySamples;
lagSamples = maxDelaySamples;
}
else if (lagSamples < 0)
{
if (this.debugInfo)
{
this.logger.LogDebug("{0} underrun {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
this.clipWriteSamplePos = playSamplePos + targetDelaySamples;
lagSamples = targetDelaySamples;
}
}
if (frame == null) // flush signalled
{
this.flushed = true;
if (this.debugInfo)
{
this.logger.LogDebug("{0} stream flush pause {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
if (catchingUp)
{
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
if (tempoChangeHQ)
{
st.Flush();
writeTempoHQ();
}
#endif
catchingUp = false;
if (this.debugInfo)
{
this.logger.LogDebug("{0} stream sync reset {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
}
return true;
}
else
{
if (this.flushed)
{
this.clipWriteSamplePos = playSamplePos + targetDelaySamples;
lagSamples = targetDelaySamples;
this.flushed = false;
if (this.debugInfo)
{
this.logger.LogDebug("{0} stream unpause {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
}
}
// starting catching up
if (lagSamples > upperTargetDelaySamples && !catchingUp)
{
if (!tempoChangeHQ)
{
tempoUp.Begin(channels, playDelayConfig.SpeedUpPerc, TEMPO_UP_SKIP_GROUP);
}
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
else
{
st.Clear();
var tempo = (float)(100 + playDelayConfig.SpeedUpPerc) / 100;
st.Tempo = tempo;
}
#endif
catchingUp = true;
if (this.debugInfo)
{
this.logger.LogDebug("{0} stream sync started {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
}
// finishing catching up
bool frameIsWritten = false; // the first frame after catching up ends requires special processing to flush TempoUp (the tail of the skipped wave is removed if required)
if (lagSamples <= targetDelaySamples && catchingUp)
{
if (!tempoChangeHQ)
{
int skipSamples = tempoUp.End(frame);
int resampledLenSamples = frame.Length / channels - skipSamples;
Buffer.BlockCopy(frame, skipSamples * channels * sizeofT, resampledFrame, 0, resampledLenSamples * channels * sizeofT);
writeResampled(resampledFrame, resampledLenSamples);
frameIsWritten = true;
}
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
else
{
st.Flush();
writeTempoHQ();
st.Clear();
}
#endif
catchingUp = false;
if (this.debugInfo)
{
this.logger.LogDebug("{0} stream sync finished {1} {2} {3} {4} {5}", this.logPrefix, upperTargetDelaySamples, lagSamples, playSamplePos, this.clipWriteSamplePos, playSamplePos + targetDelaySamples);
}
}
if (frameIsWritten)
{
return false;
}
if (catchingUp)
{
if (!tempoChangeHQ)
{
int resampledLenSamples = tempoUp.Process(frame, resampledFrame);
writeResampled(resampledFrame, resampledLenSamples);
}
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
else
{
if (sizeofT == 2)
{
st.PutSamplesI16(frame as short[], (uint)(frame.Length / channels));
}
else
{
st.PutSamples(frame as float[], (uint)(frame.Length / channels));
}
lagSamples -= writeTempoHQ();
}
#endif
}
else
{
OutWrite(frame, this.clipWriteSamplePos % this.bufferSamples);
this.clipWriteSamplePos += frame.Length / this.channels;
}
return false;
}
// should be called in Update thread
public void Service()
{
if (this.started)
{
// cache OutPos (the output playback position)
int sourceTimeSamples = OutPos;
// loop detection (pcmsetpositioncallback not called when clip loops)
if (sourceTimeSamples < sourceTimeSamplesPrev)
{
playLoopCount++;
}
sourceTimeSamplesPrev = sourceTimeSamples;
var playSamplePos = this.playLoopCount * this.bufferSamples + sourceTimeSamples;
if (processInService)
{
lock (this.frameQueue)
{
while (frameQueue.Count > 0)
{
var frame = frameQueue.Dequeue();
if (processFrame(frame, playSamplePos))
{
return; // flush signalled
}
framePool.Release(frame, frame.Length);
}
}
}
// clear played back buffer segment
var clearStart = this.playSamplePosPrev;
var clearMin = playSamplePos - this.bufferSamples;
if (clearStart < clearMin)
{
clearStart = clearMin;
}
// round up
var framesToClear = (playSamplePos - clearStart - 1) / this.frameSamples + 1;
for (var offset = playSamplePos - framesToClear * this.frameSamples; offset < playSamplePos; offset += this.frameSamples)
{
var o = offset % this.bufferSamples;
if (o < 0) o += this.bufferSamples;
OutWrite(this.zeroFrame, o);
}
this.playSamplePosPrev = playSamplePos;
}
}
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
int writeTempoHQ()
{
int resampledLenSamples;
if (sizeofT == 2)
{
resampledLenSamples = (int)st.ReceiveSamplesI16(resampledFrame as short[], (uint)(resampledFrame.Length / channels));
}
else
{
resampledLenSamples = (int)st.ReceiveSamples(resampledFrame as float[], (uint)(resampledFrame.Length / channels));
}
return writeResampled(resampledFrame, resampledLenSamples);
}
#endif
int writeResampled(T[] f, int resampledLenSamples)
{
// zero the unused part of the buffer because SetData applies the entire frame;
// if this frame is the last one, garbage may be played back otherwise
var tailSize = (f.Length - resampledLenSamples * channels) * sizeofT;
if (tailSize > 0) // it may be 0, which BlockCopy does not like
{
Buffer.BlockCopy(this.zeroFrame, 0, f, resampledLenSamples * channels * sizeofT, tailSize);
}
OutWrite(f, this.clipWriteSamplePos % this.bufferSamples);
this.clipWriteSamplePos += resampledLenSamples;
return resampledLenSamples;
}
// may be called on any thread
public void Push(T[] frame)
{
if (!this.started)
{
return;
}
if (frame.Length == 0)
{
return;
}
if (frame.Length != this.frameSize)
{
logger.LogError("{0} audio frames are not of size: {1} != {2}", this.logPrefix, frame.Length, this.frameSize);
return;
}
if (processInService)
{
T[] b = framePool.AcquireOrCreate();
Buffer.BlockCopy(frame, 0, b, 0, frame.Length * sizeofT);
lock (this.frameQueue)
{
this.frameQueue.Enqueue(b);
}
}
else
{
processFrame(frame, this.playLoopCount * this.bufferSamples + OutPos);
}
lastPushTime = Environment.TickCount;
}
public void Flush()
{
if (processInService)
{
lock (this.frameQueue)
{
this.frameQueue.Enqueue(null);
}
}
else
{
processFrame(null, this.playLoopCount * this.bufferSamples + OutPos);
}
}
virtual public void Stop()
{
#if PHOTON_VOICE_SOUND_TOUCH_ENABLE
if (st != null)
{
st.Dispose();
st = null;
}
#endif
this.started = false;
}
}
}
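To make the thresholds concrete: with the default PlayDelayConfig (Low=200, High=400, Max=1000 ms) at 48 kHz and 960-sample (20 ms) frames, Start() computes the window below; this snippet only restates that arithmetic:

// Delay window from Start(), default config, 48 kHz, 960-sample frames.
int frequency = 48000, frameSamples = 960;
int targetDelaySamples = 200 * frequency / 1000 + frameSamples; // 10560 (~220 ms)
int upperTargetDelaySamples = 400 * frequency / 1000 + frameSamples; // 20160 (~420 ms)
int maxDelaySamples = 1000 * frequency / 1000; // 48000 (1000 ms)
int bufferSamples = 3 * maxDelaySamples; // 144000: the ring buffer covers the max delay
// Catch-up (5% speed-up by default) starts when lag exceeds upperTargetDelaySamples
// and stops once lag falls back to targetDelaySamples.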

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: a06c54d78ac649a4b9a3e1ad59bff688
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,266 @@
using System.Collections.Generic;
namespace Photon.Voice
{
// Keeps the buffer size within given bounds (discarding or repeating samples) even if the number of samples pushed per second differs from the number read
public class AudioSyncBuffer<T> : IAudioOut<T>
{
private int curPlayingFrameSamplePos;
private int sampleRate;
private int channels;
private int frameSamples;
private int frameSize;
private bool started;
private int maxDevPlayDelaySamples;
private int targetPlayDelaySamples;
int playDelayMs;
private readonly ILogger logger;
private readonly string logPrefix;
private readonly bool debugInfo;
private readonly int elementSize = System.Runtime.InteropServices.Marshal.SizeOf(typeof(T));
private T[] emptyFrame;
public AudioSyncBuffer(int playDelayMs, ILogger logger, string logPrefix, bool debugInfo)
{
this.playDelayMs = playDelayMs;
this.logger = logger;
this.logPrefix = logPrefix;
this.debugInfo = debugInfo;
}
public int Lag
{
get
{
lock (this)
{
return (int)((float)this.frameQueue.Count * this.frameSamples * 1000 / sampleRate);
}
}
}
public bool IsPlaying
{
get
{
lock (this)
{
return this.started;
}
}
}
// Can be called on a running AudioSyncBuffer to reuse it with other parameters
public void Start(int sampleRate, int channels, int frameSamples)
{
lock (this)
{
this.started = false;
this.sampleRate = sampleRate;
// this.sampleRate = (int)(sampleRate * 1.2); // underrun test
// this.sampleRate = (int)(sampleRate / 1.2); // overrun test
this.channels = channels;
this.frameSamples = frameSamples;
this.frameSize = frameSamples * channels;
int playDelaySamples = playDelayMs * sampleRate / 1000 + frameSamples;
this.maxDevPlayDelaySamples = playDelaySamples / 2;
this.targetPlayDelaySamples = playDelaySamples + maxDevPlayDelaySamples;
if (this.framePool.Info != this.frameSize)
{
this.framePool.Init(this.frameSize);
}
//frameQueue = new Queue<T[]>();
while (this.frameQueue.Count > 0)
{
dequeueFrameQueue();
}
// it's important to change the 'emptyFrame' value only after the frameQueue is cleaned up, otherwise the '!= this.emptyFrame' check in dequeueFrameQueue() will not work
this.emptyFrame = new T[this.frameSize];
// initial sync
int framesCnt = targetPlayDelaySamples / this.frameSamples;
this.curPlayingFrameSamplePos = targetPlayDelaySamples % this.frameSamples;
while (this.frameQueue.Count < framesCnt)
{
this.frameQueue.Enqueue(emptyFrame);
}
this.started = true;
}
}
Queue<T[]> frameQueue = new Queue<T[]>();
public const int FRAME_POOL_CAPACITY = 50;
PrimitiveArrayPool<T> framePool = new PrimitiveArrayPool<T>(FRAME_POOL_CAPACITY, "AudioSyncBuffer");
public void Service()
{
}
public void Read(T[] outBuf, int outChannels, int outSampleRate)
{
lock (this)
{
if (this.started)
{
int outPos = 0;
// enough data in remaining frames to fill entire out buffer
// framesElemRem / this.sampleRate >= outElemRem / outSampleRate
while ((this.frameQueue.Count * this.frameSamples - this.curPlayingFrameSamplePos) * this.channels * outSampleRate >= (outBuf.Length - outPos) * this.sampleRate)
{
int playingFramePos = this.curPlayingFrameSamplePos * this.channels;
var frame = frameQueue.Peek();
int outElemRem = outBuf.Length - outPos;
int frameElemRem = frame.Length - playingFramePos;
// enough data in the current frame to fill entire out buffer and some will remain for the next call: keeping this frame
// frameElemRem / (frCh * frRate) > outElemRem / (outCh * outRate)
if (frameElemRem * outChannels * outSampleRate > outElemRem * this.channels * this.sampleRate)
{
// frame remainder is large enough to fill outBuf remainder, keep this frame and return
//int framePosDelta = this.channels * outChannels * this.sampleRate / (outElemRem * outSampleRate);
int framePosDelta = outElemRem * this.channels * this.sampleRate / (outChannels * outSampleRate);
if (this.sampleRate == outSampleRate && this.channels == outChannels)
{
System.Buffer.BlockCopy(frame, playingFramePos * elementSize, outBuf, outPos * elementSize, outElemRem * elementSize);
}
else
{
AudioUtil.Resample(frame, playingFramePos, framePosDelta, this.channels, outBuf, outPos, outElemRem, outChannels);
}
this.curPlayingFrameSamplePos += framePosDelta / this.channels;
return;
}
// discard the current frame because it exactly fills the out buffer, or the next frame is required to do so
else
{
int outPosDelta = frameElemRem * outChannels * outSampleRate / (this.channels * this.sampleRate);
if (this.sampleRate == outSampleRate && this.channels == outChannels)
{
System.Buffer.BlockCopy(frame, playingFramePos * elementSize, outBuf, outPos * elementSize, frameElemRem * elementSize);
}
else
{
AudioUtil.Resample(frame, playingFramePos, frameElemRem, this.channels, outBuf, outPos, outPosDelta, outChannels);
}
outPos += outPosDelta;
this.curPlayingFrameSamplePos = 0;
dequeueFrameQueue();
if (outPosDelta == outElemRem)
{
return;
}
}
}
}
}
}
// may be called on any thread
public void Push(T[] frame)
{
lock (this)
{
if (this.started)
{
if (frame.Length == 0)
{
return;
}
if (frame.Length != this.frameSize)
{
logger.LogError("{0} AudioSyncBuffer audio frames are not of size: {1} != {2}", this.logPrefix, frame.Length, frameSize);
return;
}
//TODO: call framePool.AcquireOrCreate(frame.Length) and test
if (framePool.Info != frame.Length)
{
framePool.Init(frame.Length);
}
T[] b = framePool.AcquireOrCreate();
System.Buffer.BlockCopy(frame, 0, b, 0, System.Buffer.ByteLength(frame));
// already under lock (this); enqueue directly
frameQueue.Enqueue(b);
syncFrameQueue();
}
}
}
public void Flush()
{
}
public void Stop()
{
lock (this)
{
this.started = false;
}
}
// call inside lock (this) { ... }
private void dequeueFrameQueue()
{
var f = this.frameQueue.Dequeue();
if (f != this.emptyFrame)
{
this.framePool.Release(f, f.Length);
}
}
// call inside lock (this) { ... }
private void syncFrameQueue()
{
var lagSamples = this.frameQueue.Count * this.frameSamples - this.curPlayingFrameSamplePos;
if (lagSamples > targetPlayDelaySamples + maxDevPlayDelaySamples)
{
int framesCnt = targetPlayDelaySamples / this.frameSamples;
this.curPlayingFrameSamplePos = targetPlayDelaySamples % this.frameSamples;
while (frameQueue.Count > framesCnt)
{
dequeueFrameQueue();
}
if (this.debugInfo)
{
this.logger.LogWarning("{0} AudioSynctBuffer overrun {1} {2} {3} {4}", this.logPrefix, targetPlayDelaySamples - maxDevPlayDelaySamples, targetPlayDelaySamples + maxDevPlayDelaySamples, lagSamples, framesCnt, this.curPlayingFrameSamplePos);
}
}
else if (lagSamples < targetPlayDelaySamples - maxDevPlayDelaySamples)
{
int framesCnt = targetPlayDelaySamples / this.frameSamples;
this.curPlayingFrameSamplePos = targetPlayDelaySamples % this.frameSamples;
while (frameQueue.Count < framesCnt)
{
frameQueue.Enqueue(emptyFrame);
}
if (this.debugInfo)
{
this.logger.LogWarning("{0} AudioSyncBuffer underrun {1} {2} {3} {4}", this.logPrefix, targetPlayDelaySamples - maxDevPlayDelaySamples, targetPlayDelaySamples + maxDevPlayDelaySamples, lagSamples, framesCnt, this.curPlayingFrameSamplePos);
}
}
}
}
}
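The resync window follows directly from the constructor's playDelayMs. For example (values assumed: 200 ms delay, 48 kHz, 960-sample frames), Start() and syncFrameQueue() yield:

// Worked example of AudioSyncBuffer's window.
int playDelaySamples = 200 * 48000 / 1000 + 960; // 10560
int maxDevPlayDelaySamples = playDelaySamples / 2; // 5280
int targetPlayDelaySamples = playDelaySamples + maxDevPlayDelaySamples; // 15840
// overrun: lag > 21120 samples -> excess frames are dropped
// underrun: lag < 10560 samples -> the queue is padded with empty frames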

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: e144988938e55264f8209b1abf6f3ad9
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

File diff suppressed because it is too large

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: d4ec039454ee9b646ade0abcee9623b2
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,185 @@
#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
#define DLL_IMPORT_INTERNAL
#endif
using System;
using System.Collections;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice
{
public struct DeviceInfo
{
// used internally for Default property creation
private DeviceInfo(bool isDefault, int idInt, string idString, string name)
{
IsDefault = isDefault;
IDInt = idInt;
IDString = idString;
Name = name;
useStringID = false;
}
// numeric id
public DeviceInfo(int id, string name)
{
IsDefault = false;
IDInt = id;
IDString = "";
Name = name;
useStringID = false;
}
// string id
public DeviceInfo(string id, string name)
{
IsDefault = false;
IDInt = 0;
IDString = id;
Name = name;
useStringID = true;
}
// name is id (Unity Microphone and WebCamTexture APIs)
public DeviceInfo(string name)
{
IsDefault = false;
IDInt = 0;
IDString = name;
Name = name;
useStringID = true;
}
public bool IsDefault { get; private set; }
public int IDInt { get; private set; }
public string IDString { get; private set; }
public string Name { get; private set; }
private bool useStringID;
public static bool operator ==(DeviceInfo d1, DeviceInfo d2)
{
return d1.Equals(d2);
}
public static bool operator !=(DeviceInfo d1, DeviceInfo d2)
{
return !d1.Equals(d2);
}
// trivial implementation to avoid warnings CS0660 and CS0661 about missing overrides when == and != are defined
public override bool Equals(object obj)
{
return base.Equals(obj);
}
public override int GetHashCode()
{
return base.GetHashCode();
}
public override string ToString()
{
if (useStringID)
{
return (Name == null ? "" : Name) + (IDString == null || IDString == Name ? "" : " (" + IDString.Substring(0, Math.Min(10, IDString.Length)) + ")");
}
else
{
return string.Format("{0} ({1})", Name, IDInt);
}
}
// the default device id may differ per platform; platform code should use its own value instead of Default.IDInt
public static readonly DeviceInfo Default = new DeviceInfo(true, -128, "", "[Default]");
}
public interface IDeviceEnumerator : IDisposable, IEnumerable<DeviceInfo>
{
bool IsSupported { get; }
void Refresh();
string Error { get; }
}
public abstract class DeviceEnumeratorBase : IDeviceEnumerator
{
protected List<DeviceInfo> devices = new List<DeviceInfo>();
protected ILogger logger;
public DeviceEnumeratorBase(ILogger logger)
{
this.logger = logger;
}
public virtual bool IsSupported => true;
public virtual string Error { get; protected set; }
public IEnumerator<DeviceInfo> GetEnumerator()
{
return devices.GetEnumerator();
}
public abstract void Refresh();
IEnumerator IEnumerable.GetEnumerator()
{
return this.GetEnumerator();
}
public abstract void Dispose();
}
internal class DeviceEnumeratorNotSupported : DeviceEnumeratorBase
{
public override bool IsSupported => false;
string message;
public DeviceEnumeratorNotSupported(ILogger logger, string message) : base(logger)
{
this.message = message;
}
public override void Refresh()
{
}
public override string Error { get { return message; } }
public override void Dispose()
{
}
}
internal class AudioInEnumeratorNotSupported : DeviceEnumeratorNotSupported
{
public AudioInEnumeratorNotSupported(ILogger logger)
: base(logger, "Current platform is not supported by audio input DeviceEnumerator.")
{
}
}
internal class VideoInEnumeratorNotSupported : DeviceEnumeratorNotSupported
{
public VideoInEnumeratorNotSupported(ILogger logger)
: base(logger, "Current platform is not supported by video capture DeviceEnumerator.")
{
}
}
public interface IAudioInChangeNotifier : IDisposable
{
bool IsSupported { get; }
string Error { get; }
}
public class AudioInChangeNotifierNotSupported : IAudioInChangeNotifier
{
public bool IsSupported => false;
public AudioInChangeNotifierNotSupported(Action callback, ILogger logger)
{
}
public string Error { get { return "Current platform " + "is not supported by AudioInChangeNotifier."; } }
public void Dispose()
{
}
}
}
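A sketch of consuming the IDeviceEnumerator interface; the enumerator instance is assumed to come from elsewhere, since this file only defines the interface and the not-supported fallbacks:

// Hypothetical device listing over any IDeviceEnumerator implementation.
static void LogDevices(Photon.Voice.IDeviceEnumerator audioIn, Photon.Voice.ILogger logger)
{
if (!audioIn.IsSupported)
{
logger.LogError(audioIn.Error); // e.g. the DeviceEnumeratorNotSupported message
return;
}
audioIn.Refresh(); // re-scan before iterating
foreach (var d in audioIn) // IEnumerable<DeviceInfo>
logger.LogInfo(d.ToString()); // "Name (id)" per DeviceInfo.ToString()
}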

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: f75ba560b98da6b45ba6ac0a930cd58f
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,146 @@
#define STATS
using System;
using System.Runtime.InteropServices;
using System.Threading;
namespace Photon.Voice
{
// Encapsulates byte array slice, FrameFlags and dispose Action
public struct FrameBuffer
{
readonly byte[] array;
readonly int offset;
readonly int count;
readonly IDisposable disposer;
bool disposed;
int refCnt; // See Retain()
GCHandle gcHandle;
IntPtr ptr;
bool pinned;
#if STATS
static internal int statDisposerCreated;
static internal int statDisposerDisposed;
static internal int statPinned;
static internal int statUnpinned;
#else
static internal int statDisposerCreated = Int32.MaxValue;
static internal int statDisposerDisposed = Int32.MaxValue;
static internal int statPinned = Int32.MaxValue;
static internal int statUnpinned = Int32.MaxValue;
#endif
public FrameBuffer(byte[] array, int offset, int count, FrameFlags flags, IDisposable disposer)
{
this.array = array;
this.offset = offset;
this.count = count;
this.Flags = flags;
this.disposer = disposer;
this.disposed = false;
this.refCnt = 1;
this.gcHandle = new GCHandle();
this.ptr = IntPtr.Zero;
this.pinned = false;
#if STATS
if (disposer != null)
{
Interlocked.Increment(ref statDisposerCreated);
}
#endif
}
public FrameBuffer(byte[] array, FrameFlags flags)
{
this.array = array;
this.offset = 0;
this.count = array == null ? 0 : array.Length;
this.Flags = flags;
this.disposer = null;
this.disposed = false;
this.refCnt = 1;
this.gcHandle = new GCHandle();
this.ptr = IntPtr.Zero;
this.pinned = false;
#if STATS
if (disposer != null) // false
{
Interlocked.Increment(ref statDisposerCreated);
}
#endif
}
// Pins underlying buffer and returns the pointer to it with offset.
// Unpins in Dispose().
public IntPtr Ptr
{
get
{
if (!pinned)
{
gcHandle = GCHandle.Alloc(array, GCHandleType.Pinned);
ptr = IntPtr.Add(gcHandle.AddrOfPinnedObject(), offset);
pinned = true;
#if STATS
Interlocked.Increment(ref statPinned);
#endif
}
return ptr;
}
}
// Use Retain() to prevent the owner from disposing the buffer when it calls Release(). Since FrameBuffer is a struct, the ref counter
// is shared only between parameters on the stack passed as ref, so the ref counter is rather a flag than a counter.
// To preserve the buffer for future use:
// void foo(ref FrameBuffer f1) {
// this.f2 = f1; // don't call Retain() on the copy!
// f1.Retain();
// }
// In another thread, in a subsequent call, or when disposing the owner:
// this.f2.Release();
public void Retain()
{
refCnt++;
}
// Call on each owned FrameBuffer after processing:
// var f1 = new FrameBuffer()
// foo(f1)
// f1.Release()
public void Release()
{
refCnt--;
if (refCnt <= 0)
{
Dispose();
}
}
private void Dispose()
{
// not allocated if the buffer was created with the parameterless FrameBuffer()
if (pinned)
{
gcHandle.Free();
pinned = false;
#if STATS
Interlocked.Increment(ref statUnpinned);
#endif
}
if (disposer != null && !disposed)
{
disposer.Dispose();
disposed = true;
#if STATS
Interlocked.Increment(ref statDisposerDisposed);
#endif
}
}
public byte[] Array { get { return array; } }
public int Length { get { return count; } }
public int Offset { get { return offset; } }
public FrameFlags Flags { get; }
}
}
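A compilable version of the Retain()/Release() pattern described in the comments above; the keeper class and its names are illustrative:

// Illustrative owner that keeps a FrameBuffer beyond the producer's Release().
// Copying the struct copies refCnt, so Retain() must be called on the caller's
// instance (via ref) to stop the producer's Release() from disposing the data.
class FrameKeeper
{
Photon.Voice.FrameBuffer kept;
public void OnFrame(ref Photon.Voice.FrameBuffer f)
{
kept = f; // struct copy: kept.refCnt stays 1
f.Retain(); // producer's copy goes to 2, so its Release() won't dispose
}
public void Done()
{
kept.Release(); // drops the kept copy to 0 and disposes the underlying buffer
}
}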

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 4efa49ae83b47f346bb58cdca5ea6281
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,106 @@
using System;
namespace Photon.Voice
{
/// <summary>
/// Uniform interface to <see cref="ObjectPool{TType, TInfo}"/> and single reusable object.
/// </summary>
/// <typeparam name="TType">Object type.</typeparam>
/// <typeparam name="TInfo">Type of property used to check 2 objects identity (like integral length of array).</typeparam>
public interface ObjectFactory<TType, TInfo> : IDisposable
{
TInfo Info { get; }
TType New();
TType New(TInfo info);
void Free(TType obj);
void Free(TType obj, TInfo info);
}
// Object factory implementation skipped, we use only arrays for now
/// <summary>
/// Array factory returning the same array instance as long as it is requested with the same array length. If the length changes, a new array instance is created.
/// </summary>
/// <typeparam name="T">Array element type.</typeparam>
public class FactoryReusableArray<T> : ObjectFactory<T[], int>
{
T[] arr;
public FactoryReusableArray(int size)
{
this.arr = new T[size];
}
public int Info { get { return arr.Length; } }
public T[] New()
{
return arr;
}
public T[] New(int size)
{
if (arr.Length != size)
{
arr = new T[size];
}
return arr;
}
public void Free(T[] obj)
{
}
public void Free(T[] obj, int info)
{
}
public void Dispose()
{
}
}
/// <summary>
/// <see cref="PrimitiveArrayPool{T}"/> as wrapped in object factory interface.
/// </summary>
/// <typeparam name="T">Array element type.</typeparam>
public class FactoryPrimitiveArrayPool<T> : ObjectFactory<T[], int>
{
PrimitiveArrayPool<T> pool;
public FactoryPrimitiveArrayPool(int capacity, string name)
{
pool = new PrimitiveArrayPool<T>(capacity, name);
}
public FactoryPrimitiveArrayPool(int capacity, string name, int info)
{
pool = new PrimitiveArrayPool<T>(capacity, name, info);
}
public int Info { get { return pool.Info; } }
public T[] New()
{
return pool.AcquireOrCreate();
}
public T[] New(int size)
{
return pool.AcquireOrCreate(size);
}
public void Free(T[] obj)
{
pool.Release(obj);
}
public void Free(T[] obj, int info)
{
pool.Release(obj, info);
}
public void Dispose()
{
pool.Dispose();
}
}
}
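A short sketch of the factory contract under assumed buffer sizes (960 and 1920 elements):

// Hypothetical use of FactoryPrimitiveArrayPool as an ObjectFactory<short[], int>.
var factory = new Photon.Voice.FactoryPrimitiveArrayPool<short>(10, "FrameBuffers", 960);
short[] a = factory.New(); // pooled array of the current Info length (960)
short[] b = factory.New(1920); // length change re-inits the underlying pool
factory.Free(b, b.Length); // pooled: 1920 matches the current Info
factory.Free(a, a.Length); // 960 no longer matches -> destroyed, not pooled
factory.Dispose();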

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: b8fa5acbebe1ba540b8ba97675ff15be
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,240 @@
using System;
namespace Photon.Voice
{
/// <summary>
/// Generic Pool to re-use objects of a certain type (TType) that optionally match a certain property or set of properties (TInfo).
/// </summary>
/// <typeparam name="TType">Object type.</typeparam>
/// <typeparam name="TInfo">Type of parameter used to check 2 objects identity (like integral length of array).</typeparam>
public abstract class ObjectPool<TType, TInfo> : IDisposable
{
protected int capacity;
protected TInfo info;
private TType[] freeObj = new TType[0];
protected int pos;
protected string name;
private bool inited;
abstract protected TType createObject(TInfo info);
abstract protected void destroyObject(TType obj);
abstract protected bool infosMatch(TInfo i0, TInfo i1);
internal string LogPrefix { get { return "[ObjectPool] [" + name + "]"; } }
/// <summary>Create a new ObjectPool instance. Does not call Init().</summary>
/// <param name="capacity">Capacity (size) of the object pool.</param>
/// <param name="name">Name of the object pool.</param>
public ObjectPool(int capacity, string name)
{
this.capacity = capacity;
this.name = name;
}
/// <summary>Create a new ObjectPool instance with the given info structure. Calls Init().</summary>
/// <param name="capacity">Capacity (size) of the object pool.</param>
/// <param name="name">Name of the object pool.</param>
/// <param name="info">Info about this Pool's objects.</param>
public ObjectPool(int capacity, string name, TInfo info)
{
this.capacity = capacity;
this.name = name;
Init(info);
}
/// <summary>(Re-)Initializes this ObjectPool.</summary>
/// If there are objects available in this Pool, they will be destroyed.
/// Allocates a new free-object array of (Capacity) slots.
/// <param name="info">Info about this Pool's objects.</param>
public void Init(TInfo info)
{
lock (this)
{
while (pos > 0)
{
destroyObject(freeObj[--pos]);
}
this.info = info;
this.freeObj = new TType[capacity];
inited = true;
}
}
/// <summary>The property (info) that objects in this Pool must match.</summary>
public TInfo Info
{
get { return info; }
}
/// <summary>Acquire an existing object, or create a new one if none are available.</summary>
/// <remarks>If it fails to get one from the pool, this will create from the info given in this pool's constructor.</remarks>
public TType AcquireOrCreate()
{
lock (this)
{
if (pos > 0)
{
return freeObj[--pos];
}
if (!inited)
{
throw new Exception(LogPrefix + " not initialized");
}
}
return createObject(this.info);
}
/// <summary>Acquire an existing object (if info matches), or create a new one from the passed info.</summary>
/// <param name="info">Info structure to match, or create a new object with.</param>
public TType AcquireOrCreate(TInfo info)
{
// TODO: this.info thread safety
if (!infosMatch(this.info, info))
{
Init(info);
}
return AcquireOrCreate();
}
/// <summary>Returns object to pool.</summary>
/// <param name="obj">The object to return to the pool.</param>
/// <param name="objInfo">The info structure about obj.</param>
/// <remarks>obj is returned to the pool only if objInfo matches this pool's info. Else, it is destroyed.</remarks>
virtual public bool Release(TType obj, TInfo objInfo)
{
// TODO: this.info thread safety
if (infosMatch(this.info, objInfo))
{
lock (this)
{
if (pos < freeObj.Length)
{
freeObj[pos++] = obj;
return true;
}
}
}
// destroy if can't reuse
//UnityEngine.Debug.Log(LogPrefix + " Release(Info) destroy");
destroyObject(obj);
// TODO: log warning
return false;
}
/// <summary>Returns object to pool, or destroys it if the pool is full.</summary>
/// <param name="obj">The object to return to the pool.</param>
virtual public bool Release(TType obj)
{
lock (this)
{
if (pos < freeObj.Length)
{
freeObj[pos++] = obj;
return true;
}
}
// destroy if can't reuse
//UnityEngine.Debug.Log(LogPrefix + " Release destroy " + pos);
destroyObject(obj);
// TODO: log warning
return false;
}
/// <summary>Free resources associated with this ObjectPool.</summary>
public void Dispose()
{
lock (this)
{
while (pos > 0)
{
destroyObject(freeObj[--pos]);
}
freeObj = new TType[0];
}
}
}
/// <summary>
/// Pool of Arrays with components of type T, with ObjectPool info being the array's size.
/// </summary>
/// <typeparam name="T">Array element type.</typeparam>
public class PrimitiveArrayPool<T> : ObjectPool<T[], int>
{
public PrimitiveArrayPool(int capacity, string name) : base(capacity, name) { }
public PrimitiveArrayPool(int capacity, string name, int info) : base(capacity, name, info) { }
protected override T[] createObject(int info)
{
//UnityEngine.Debug.Log(LogPrefix + " Create " + pos);
return new T[info];
}
protected override void destroyObject(T[] obj)
{
//UnityEngine.Debug.Log(LogPrefix + " Dispose " + pos + " " + obj.GetHashCode());
}
protected override bool infosMatch(int i0, int i1)
{
return i0 == i1;
}
}
public class ImageBufferNativePool<T> : ObjectPool<T, ImageBufferInfo> where T : ImageBufferNative
{
public delegate T Factory(ImageBufferNativePool<T> pool, ImageBufferInfo info);
Factory factory;
public ImageBufferNativePool(int capacity, Factory factory, string name) : base(capacity, name)
{
this.factory = factory;
}
public ImageBufferNativePool(int capacity, Factory factory, string name, ImageBufferInfo info) : base(capacity, name, info)
{
this.factory = factory;
}
protected override T createObject(ImageBufferInfo info)
{
//UnityEngine.Debug.Log(LogPrefix + " Create " + pos);
return factory(this, info);
}
protected override void destroyObject(T obj)
{
//UnityEngine.Debug.Log(LogPrefix + " Dispose " + pos + " " + obj.GetHashCode());
obj.Dispose();
}
// only height and stride are compared; other parameters do not affect native buffers and can simply be overwritten
protected override bool infosMatch(ImageBufferInfo i0, ImageBufferInfo i1)
{
if (i0.Height != i1.Height)
{
return false;
}
var s0 = i0.Stride;
var s1 = i1.Stride;
if (s0.Length != s1.Length)
{
return false;
}
switch (i0.Stride.Length)
{
// the most common cases are 1 and 3 planes
case 1:
return s0[0] == s1[0];
case 2:
return s0[0] == s1[0] && s0[1] == s1[1];
case 3:
return s0[0] == s1[0] && s0[1] == s1[1] && s0[2] == s1[2];
default:
for (int i = 0; i < s0.Length; i++)
{
if (s0[i] != s1[i])
{
return false;
}
}
return true;
}
}
}
}
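The Release() contract (pool the object if the info matches and capacity allows, destroy it otherwise) in a direct sketch; capacity and lengths are arbitrary:

// Direct PrimitiveArrayPool usage.
var pool = new Photon.Voice.PrimitiveArrayPool<float>(2, "Demo", 480);
float[] x = pool.AcquireOrCreate(); // created with the pre-set Info (480)
float[] y = pool.AcquireOrCreate(480); // matching info: no re-init
pool.Release(x, 480); // true: back into the free list
pool.Release(y, 512); // false: info mismatch, destroyObject() is called
pool.Dispose(); // destroys whatever is still pooled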

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 84a99b47c1b2fba4a8a268215e5adef1
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,269 @@
using POpusCodec.Enums;
using POpusCodec;
using System;
namespace Photon.Voice
{
public class OpusCodec
{
static public string Version
{
get
{
return OpusLib.Version;
}
}
public enum FrameDuration
{
Frame2dot5ms = 2500,
Frame5ms = 5000,
Frame10ms = 10000,
Frame20ms = 20000,
Frame40ms = 40000,
Frame60ms = 60000
}
public static class Factory
{
static public IEncoder CreateEncoder<B>(VoiceInfo i, ILogger logger)
{
if (typeof(B) == typeof(float[]))
return new EncoderFloat(i, logger);
else if (typeof(B) == typeof(short[]))
return new EncoderShort(i, logger);
else
throw new UnsupportedCodecException("Factory.CreateEncoder<" + typeof(B) + ">", i.Codec);
}
}
public static class EncoderFactory
{
public static IEncoder Create<T>(VoiceInfo i, ILogger logger)
{
if (typeof(T) == typeof(float))
return new EncoderFloat(i, logger);
else if (typeof(T) == typeof(short))
return new EncoderShort(i, logger);
else
throw new UnsupportedCodecException("EncoderFactory.Create<" + typeof(T) + ">", i.Codec);
}
}
abstract public class Encoder<T> : IEncoderDirect<T[]>
{
protected OpusEncoder encoder;
protected bool disposed;
protected Encoder(VoiceInfo i, ILogger logger)
{
try
{
encoder = new OpusEncoder((SamplingRate)i.SamplingRate, (Channels)i.Channels, i.Bitrate, OpusApplicationType.Voip, (Delay)(i.FrameDurationUs * 2 / 1000));
logger.LogInfo("[PV] OpusCodec.Encoder created. Opus version " + Version + ". Bitrate " + encoder.Bitrate + ". EncoderDelay " + encoder.EncoderDelay);
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as a validity flag, make sure that it's not null
{
Error = "Exception in OpusCodec.Encoder constructor";
}
logger.LogError("[PV] OpusCodec.Encoder: " + Error);
}
}
public string Error { get; private set; }
public Action<ArraySegment<byte>, FrameFlags> Output { set; get; }
public void Input(T[] buf)
{
if (Error != null)
{
return;
}
if (Output == null)
{
Error = "OpusCodec.Encoder: Output action is not set";
return;
}
lock (this)
{
if (disposed || Error != null) { }
else
{
var res = encodeTyped(buf);
if (res.Count != 0)
{
Output(res, 0);
}
}
}
}
public void EndOfStream()
{
lock (this)
{
if (disposed || Error != null) { }
else
{
Output(EmptyBuffer, FrameFlags.EndOfStream);
}
}
return;
}
private static readonly ArraySegment<byte> EmptyBuffer = new ArraySegment<byte>(new byte[] { });
public ArraySegment<byte> DequeueOutput(out FrameFlags flags) { flags = 0; return EmptyBuffer; }
protected abstract ArraySegment<byte> encodeTyped(T[] buf);
public I GetPlatformAPI<I>() where I : class
{
return null;
}
public void Dispose()
{
lock (this)
{
if (encoder != null)
{
encoder.Dispose();
}
disposed = true;
}
}
}
public class EncoderFloat : Encoder<float>
{
internal EncoderFloat(VoiceInfo i, ILogger logger) : base(i, logger) { }
override protected ArraySegment<byte> encodeTyped(float[] buf)
{
return encoder.Encode(buf);
}
}
public class EncoderShort : Encoder<short>
{
internal EncoderShort(VoiceInfo i, ILogger logger) : base(i, logger) { }
override protected ArraySegment<byte> encodeTyped(short[] buf)
{
return encoder.Encode(buf);
}
}
public class Decoder<T> : IDecoder
{
protected OpusDecoder<T> decoder;
ILogger logger;
public Decoder(Action<FrameOut<T>> output, ILogger logger)
{
this.output = output;
this.logger = logger;
}
public void Open(VoiceInfo i)
{
try
{
decoder = new OpusDecoder<T>((SamplingRate)i.SamplingRate, (Channels)i.Channels);
logger.LogInfo("[PV] OpusCodec.Decoder created. Opus version " + Version);
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as a validity flag, make sure that it's not null
{
Error = "Exception in OpusCodec.Decoder constructor";
}
logger.LogError("[PV] OpusCodec.Decoder: " + Error);
}
}
public string Error { get; private set; }
private Action<FrameOut<T>> output;
public void Dispose()
{
if (decoder != null)
{
decoder.Dispose();
}
}
FrameOut<T> frameOut = new FrameOut<T>(null, false);
public void Input(ref FrameBuffer buf)
{
if (Error == null)
{
bool endOfStream = (buf.Flags & FrameFlags.EndOfStream) != 0;
if (endOfStream)
{
T[] res1 = null;
T[] res2;
// an EndOfStream packet may carry data
// normally we do not send null with the EndOfStream flag, but null is still valid here
if (buf.Array != null && buf.Length > 0)
{
res1 = decoder.DecodePacket(ref buf);
}
// flush decoder
res2 = decoder.DecodeEndOfStream();
// if res1 is empty, res2 holds the correct (possibly empty) buffer for the EndOfStream frame
if (res1 != null && res1.Length != 0)
{
// one output call per result is required
if (res2 != null && res2.Length != 0)
{
output(frameOut.Set(res1, false));
}
else
{
// swap results to reuse the code below
res2 = res1;
}
}
output(frameOut.Set(res2, true));
}
else
{
T[] res;
res = decoder.DecodePacket(ref buf);
if (res.Length != 0)
{
output(frameOut.Set(res, false));
}
}
}
}
}
public class Util
{
internal static int bestEncoderSampleRate(int f)
{
int diff = int.MaxValue;
int res = (int)SamplingRate.Sampling48000;
foreach (var x in Enum.GetValues(typeof(SamplingRate)))
{
var d = Math.Abs((int)x - f);
if (d < diff)
{
diff = d;
res = (int)x;
}
}
return res;
}
}
}
}
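Two details worth spelling out: the encoder maps frame duration to the Delay enum via FrameDurationUs * 2 / 1000 (so 20000 us becomes (Delay)40, i.e. Delay20ms), and Util.bestEncoderSampleRate() snaps an arbitrary device rate to the nearest Opus rate. A worked example of the latter (input rates assumed; the helper is internal to this assembly):

// bestEncoderSampleRate picks the SamplingRate enum value closest to the input:
// 44100 -> 48000 (|48000 - 44100| = 3900 is the smallest difference)
// 22050 -> 24000, 11025 -> 12000, 8000 -> 8000 (exact match)
int rate = OpusCodec.Util.bestEncoderSampleRate(44100); // 48000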

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: be5011148df058f45957478c38d4a847
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: f35d80937ebbd374ba47c378ae1919d3
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: df603a30659a4c048b9c3fb7aa38b85d
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,31 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum Bandwidth : int
{
/// <summary>
/// Up to 4Khz
/// </summary>
Narrowband = 1101,
/// <summary>
/// Up to 6Khz
/// </summary>
Mediumband = 1102,
/// <summary>
/// Up to 8Khz
/// </summary>
Wideband = 1103,
/// <summary>
/// Up to 12Khz
/// </summary>
SuperWideband = 1104,
/// <summary>
/// Up to 20Khz (High Definition)
/// </summary>
Fullband = 1105
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 2c527233af8a7c54fab631659436832c
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,19 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum Channels : int
{
/// <summary>
/// 1 Channel
/// </summary>
Mono = 1,
/// <summary>
/// 2 Channels
/// </summary>
Stereo = 2
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 4866c479545570d41abaff5f6a604676
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,22 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum Complexity : int
{
Complexity0 = 0,
Complexity1 = 1,
Complexity2 = 2,
Complexity3 = 3,
Complexity4 = 4,
Complexity5 = 5,
Complexity6 = 6,
Complexity7 = 7,
Complexity8 = 8,
Complexity9 = 9,
Complexity10 = 10
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 5eb43bf1b9e449b438cbea885607ad3e
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,38 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
/// <summary>
/// Using a duration of less than 10 ms will prevent the encoder from using the LPC or hybrid modes.
/// </summary>
public enum Delay
{
/// <summary>
/// 2.5ms
/// </summary>
Delay2dot5ms = 5,
/// <summary>
/// 5ms
/// </summary>
Delay5ms = 10,
/// <summary>
/// 10ms
/// </summary>
Delay10ms = 20,
/// <summary>
/// 20ms
/// </summary>
Delay20ms = 40,
/// <summary>
/// 40ms
/// </summary>
Delay40ms = 80,
/// <summary>
/// 60ms
/// </summary>
Delay60ms = 120
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: f7fa617b20b39d3488d2d9fbc1abae43
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,14 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum ForceChannels : int
{
NoForce = -1000,
Mono = 1,
Stereo = 2
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 41001579e394e2d4ea08c136c82de9f7
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,28 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum OpusApplicationType : int
{
/// <summary>
/// Gives best quality at a given bitrate for voice signals.
/// It enhances the input signal by high-pass filtering and emphasizing formants and harmonics.
/// Optionally it includes in-band forward error correction to protect against packet loss.
/// Use this mode for typical VoIP applications.
/// Because of the enhancement, even at high bitrates the output may sound different from the input.
/// </summary>
Voip = 2048,
/// <summary>
/// Gives best quality at a given bitrate for most non-voice signals like music.
/// Use this mode for music and mixed (music/voice) content, broadcast, and applications requiring less than 15 ms of coding delay.
/// </summary>
Audio = 2049,
/// <summary>
/// Configures low-delay mode that disables the speech-optimized mode in exchange for slightly reduced delay.
/// </summary>
RestrictedLowDelay = 2051
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 0875bbb79a90bc1448ebc2142d1699bd
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,30 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
internal enum OpusCtlGetRequest : int
{
Application = 4001,
Bitrate = 4003,
MaxBandwidth = 4005,
VBR = 4007,
Bandwidth = 4009,
Complexity = 4011,
InbandFec = 4013,
PacketLossPercentage = 4015,
Dtx = 4017,
VBRConstraint = 4021,
ForceChannels = 4023,
Signal = 4025,
LookAhead = 4027,
SampleRate = 4029,
FinalRange = 4031,
Pitch = 4033,
Gain = 4035,
LsbDepth = 4037,
LastPacketDurationRequest = 4039
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: fb806501c083df349acf30e98483b1b6
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,25 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
internal enum OpusCtlSetRequest : int
{
Application = 4000,
Bitrate = 4002,
MaxBandwidth = 4004,
VBR = 4006,
Bandwidth = 4008,
Complexity = 4010,
InbandFec = 4012,
PacketLossPercentage = 4014,
Dtx = 4016,
VBRConstraint = 4020,
ForceChannels = 4022,
Signal = 4024,
Gain = 4034,
LsbDepth = 4036
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 2685c657fd2eeb04caf2917098b69215
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,19 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum OpusStatusCode : int
{
OK = 0,
BadArguments = -1,
BufferTooSmall = -2,
InternalError = -3,
InvalidPacket = -4,
Unimplemented = -5,
InvalidState = -6,
AllocFail = -7
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: baf95840af423aa4d8afbbf5ccb17f1f
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,16 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum SamplingRate : int
{
Sampling08000 = 8000,
Sampling12000 = 12000,
Sampling16000 = 16000,
Sampling24000 = 24000,
Sampling48000 = 48000
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: ecca42cdd5ba70a44b9eada7e82d172a
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,23 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace POpusCodec.Enums
{
public enum SignalHint : int
{
/// <summary>
/// (default)
/// </summary>
Auto = -1000,
/// <summary>
/// Bias thresholds towards choosing LPC or Hybrid modes
/// </summary>
Voice = 3001,
/// <summary>
/// Bias thresholds towards choosing MDCT modes.
/// </summary>
Music = 3002
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 740a25c5418bd1b449fc254e53be34b7
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,220 @@
using System;
using POpusCodec.Enums;
using System.Runtime.InteropServices;
using Photon.Voice;
namespace POpusCodec
{
public class OpusDecoder<T> : IDisposable
{
private const bool UseInbandFEC = true;
private bool TisFloat;
private int sizeofT;
private IntPtr _handle = IntPtr.Zero;
private const int MaxFrameSize = 5760;
private int _channelCount;
private static readonly T[] EmptyBuffer = new T[] { };
private Bandwidth? _previousPacketBandwidth = null;
public Bandwidth? PreviousPacketBandwidth
{
get
{
return _previousPacketBandwidth;
}
}
public OpusDecoder(SamplingRate outputSamplingRateHz, Channels numChannels)
{
TisFloat = default(T) is float;
sizeofT = Marshal.SizeOf(default(T));
if ((outputSamplingRateHz != SamplingRate.Sampling08000)
&& (outputSamplingRateHz != SamplingRate.Sampling12000)
&& (outputSamplingRateHz != SamplingRate.Sampling16000)
&& (outputSamplingRateHz != SamplingRate.Sampling24000)
&& (outputSamplingRateHz != SamplingRate.Sampling48000))
{
throw new ArgumentOutOfRangeException("outputSamplingRateHz", "Must use one of the pre-defined sampling rates (" + outputSamplingRateHz + ")");
}
if ((numChannels != Channels.Mono)
&& (numChannels != Channels.Stereo))
{
throw new ArgumentOutOfRangeException("numChannels", "Must be Mono or Stereo");
}
_channelCount = (int)numChannels;
_handle = Wrapper.opus_decoder_create(outputSamplingRateHz, numChannels);
if (_handle == IntPtr.Zero)
{
throw new OpusException(OpusStatusCode.AllocFail, "Memory was not allocated for the decoder");
}
}
private T[] buffer; // allocated for exactly one frame once the first valid frame is received
private FrameBuffer prevPacketData;
bool prevPacketInvalid; // may be false even if prevPacketData is null
// pass null to indicate packet loss
public T[] DecodePacket(ref FrameBuffer packetData)
{
if (this.buffer == null && packetData.Array == null)
{
return EmptyBuffer;
}
int numSamplesDecoded = 0;
if (this.buffer == null)
{
// on the first call we don't know the frame size, use a temporary buffer of maximal length
this.buffer = new T[MaxFrameSize * _channelCount];
}
bool packetInvalid;
if (packetData.Array == null)
{
packetInvalid = true;
}
else
{
int bandwidth = Wrapper.opus_packet_get_bandwidth(packetData.Ptr);
packetInvalid = bandwidth == (int)OpusStatusCode.InvalidPacket;
}
bool regularDecode = false;
if (UseInbandFEC)
{
if (prevPacketInvalid)
{
if (packetInvalid)
{
// no fec data, conceal previous frame
numSamplesDecoded = TisFloat ?
Wrapper.opus_decode(_handle, new FrameBuffer(), this.buffer as float[], 0, _channelCount) :
Wrapper.opus_decode(_handle, new FrameBuffer(), this.buffer as short[], 0, _channelCount);
//UnityEngine.Debug.Log("======================= Conceal");
}
else
{
// error correct previous frame with the help of the current
numSamplesDecoded = TisFloat ?
Wrapper.opus_decode(_handle, packetData, this.buffer as float[], 1, _channelCount) :
Wrapper.opus_decode(_handle, packetData, this.buffer as short[], 1, _channelCount);
//UnityEngine.Debug.Log("======================= FEC");
}
}
else
{
// decode previous frame
if (prevPacketData.Array != null) // is null on 1st call
{
numSamplesDecoded = TisFloat ?
Wrapper.opus_decode(_handle, prevPacketData, this.buffer as float[], 0, _channelCount) :
Wrapper.opus_decode(_handle, prevPacketData, this.buffer as short[], 0, _channelCount);
// prevPacketData is disposed below before copying packetData to it
regularDecode = true;
}
}
prevPacketData.Release();
prevPacketData = packetData;
packetData.Retain();
prevPacketInvalid = packetInvalid;
}
else
{
#pragma warning disable 162
// decode or conceal current frame
numSamplesDecoded = TisFloat ?
Wrapper.opus_decode(_handle, packetData, this.buffer as float[], 0, _channelCount) :
Wrapper.opus_decode(_handle, packetData, this.buffer as short[], 0, _channelCount);
regularDecode = true;
#pragma warning restore 162
}
if (numSamplesDecoded == 0)
return EmptyBuffer;
if (this.buffer.Length != numSamplesDecoded * _channelCount)
{
if (!regularDecode)
{
// wait for a regular valid frame to initialize the size
return EmptyBuffer;
}
// now that we know the frame size, allocate the buffer and copy data from the temporary buffer
var tmp = this.buffer;
this.buffer = new T[numSamplesDecoded * _channelCount];
Buffer.BlockCopy(tmp, 0, this.buffer, 0, numSamplesDecoded * _channelCount * sizeofT); // byte count must cover all channels
}
return this.buffer;
}
public T[] DecodeEndOfStream()
{
int numSamplesDecoded = 0;
if (UseInbandFEC && !prevPacketInvalid)
{
// follow the same buffer initialization pattern as in DecodePacket(), though the buffer is most likely already initialized
if (this.buffer == null)
{
// on the first call we don't know the frame size, use a temporary buffer of maximal length
this.buffer = new T[MaxFrameSize * _channelCount];
}
// decode previous frame
if (prevPacketData.Array != null) // is null on 1st call
{
numSamplesDecoded = TisFloat ?
Wrapper.opus_decode(_handle, prevPacketData, this.buffer as float[], 1, _channelCount) :
Wrapper.opus_decode(_handle, prevPacketData, this.buffer as short[], 1, _channelCount);
}
prevPacketData.Release();
prevPacketData = new FrameBuffer();
prevPacketInvalid = false;
if (numSamplesDecoded == 0)
{
return EmptyBuffer;
}
else
{
// follow the same buffer initialization pattern as in DecodePacket()
if (this.buffer.Length != numSamplesDecoded * _channelCount)
{
// now that we know the frame size, allocate the buffer and copy data from the temporary buffer
var tmp = this.buffer;
this.buffer = new T[numSamplesDecoded * _channelCount];
Buffer.BlockCopy(tmp, 0, this.buffer, 0, numSamplesDecoded * _channelCount * sizeofT); // byte count must cover all channels
}
return this.buffer;
}
}
else
{
prevPacketData.Release();
prevPacketData = new FrameBuffer();
prevPacketInvalid = false;
return EmptyBuffer;
}
}
public void Dispose()
{
prevPacketData.Release();
if (_handle != IntPtr.Zero)
{
Wrapper.opus_decoder_destroy(_handle);
_handle = IntPtr.Zero;
}
}
}
}
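A hedged sketch of driving the decoder from a receive loop; how the FrameBuffer wrapping the received packet is produced depends on the transport and is assumed here:
using POpusCodec;
using Photon.Voice;
static class DecodeSketch
{
// 'packet' wraps the received Opus bytes; 'lost' marks a detected gap.
static float[] OnPacket(OpusDecoder<float> dec, ref FrameBuffer packet, bool lost)
{
if (lost)
{
// A default FrameBuffer (null Array) tells DecodePacket to conceal
// the missing frame or recover it from in-band FEC data.
var nullFrame = new FrameBuffer();
return dec.DecodePacket(ref nullFrame);
}
return dec.DecodePacket(ref packet);
}
}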

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 111ab6962d87d294394acc522d2570a3
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,283 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using POpusCodec.Enums;
using System.Runtime.InteropServices;
namespace POpusCodec
{
static public class OpusLib
{
static public string Version
{
get
{
return Marshal.PtrToStringAnsi(Wrapper.opus_get_version_string());
}
}
}
public class OpusEncoder : IDisposable
{
public const int BitrateMax = -1;
private IntPtr _handle = IntPtr.Zero;
private const int RecommendedMaxPacketSize = 4000;
private int _frameSizePerChannel = 960;
private SamplingRate _inputSamplingRate = SamplingRate.Sampling48000;
private Channels _inputChannels = Channels.Stereo;
public SamplingRate InputSamplingRate
{
get
{
return _inputSamplingRate;
}
}
public Channels InputChannels
{
get
{
return _inputChannels;
}
}
private readonly byte[] writePacket = new byte[RecommendedMaxPacketSize];
private static readonly ArraySegment<byte> EmptyBuffer = new ArraySegment<byte>(new byte[] { });
private Delay _encoderDelay = Delay.Delay20ms;
/// <summary>
/// Using a duration of less than 10 ms will prevent the encoder from using the LPC or hybrid modes.
/// </summary>
public Delay EncoderDelay
{
set
{
_encoderDelay = value;
_frameSizePerChannel = (int)((((int)_inputSamplingRate) / 1000) * ((decimal)_encoderDelay) / 2); // Delay enum values encode the frame duration in ms, doubled (so 2.5 ms fits in an integer)
}
get
{
return _encoderDelay;
}
}
public int FrameSizePerChannel
{
get
{
return _frameSizePerChannel;
}
}
public int Bitrate
{
get
{
return Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.Bitrate);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.Bitrate, value);
}
}
public Bandwidth MaxBandwidth
{
get
{
return (Bandwidth)Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.MaxBandwidth);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.MaxBandwidth, (int)value);
}
}
public Complexity Complexity
{
get
{
return (Complexity)Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.Complexity);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.Complexity, (int)value);
}
}
public int ExpectedPacketLossPercentage
{
get
{
return Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.PacketLossPercentage);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.PacketLossPercentage, value);
}
}
public SignalHint SignalHint
{
get
{
return (SignalHint)Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.Signal);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.Signal, (int)value);
}
}
public ForceChannels ForceChannels
{
get
{
return (ForceChannels)Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.ForceChannels);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.ForceChannels, (int)value);
}
}
public bool UseInbandFEC
{
get
{
return Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.InbandFec) == 1;
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.InbandFec, value ? 1 : 0);
}
}
public int PacketLossPercentage
{
get
{
return Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.PacketLossPercentage);
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.PacketLossPercentage, value);
}
}
public bool UseUnconstrainedVBR
{
get
{
return Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.VBRConstraint) == 0;
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.VBRConstraint, value ? 0 : 1);
}
}
public bool DtxEnabled
{
get
{
return Wrapper.get_opus_encoder_ctl(_handle, OpusCtlGetRequest.Dtx) == 1;
}
set
{
Wrapper.set_opus_encoder_ctl(_handle, OpusCtlSetRequest.Dtx, value ? 1 : 0);
}
}
//public OpusEncoder(SamplingRate inputSamplingRateHz, Channels numChannels)
// : this(inputSamplingRateHz, numChannels, 120000, OpusApplicationType.Audio, Delay.Delay20ms)
//{ }
//public OpusEncoder(SamplingRate inputSamplingRateHz, Channels numChannels, int bitrate)
// : this(inputSamplingRateHz, numChannels, bitrate, OpusApplicationType.Audio, Delay.Delay20ms)
//{ }
//public OpusEncoder(SamplingRate inputSamplingRateHz, Channels numChannels, int bitrate, OpusApplicationType applicationType)
// : this(inputSamplingRateHz, numChannels, bitrate, applicationType, Delay.Delay20ms)
//{ }
public OpusEncoder(SamplingRate inputSamplingRateHz, Channels numChannels, int bitrate, OpusApplicationType applicationType, Delay encoderDelay)
{
if ((inputSamplingRateHz != SamplingRate.Sampling08000)
&& (inputSamplingRateHz != SamplingRate.Sampling12000)
&& (inputSamplingRateHz != SamplingRate.Sampling16000)
&& (inputSamplingRateHz != SamplingRate.Sampling24000)
&& (inputSamplingRateHz != SamplingRate.Sampling48000))
{
throw new ArgumentOutOfRangeException("inputSamplingRateHz", "Must use one of the pre-defined sampling rates(" + inputSamplingRateHz + ")");
}
if ((numChannels != Channels.Mono)
&& (numChannels != Channels.Stereo))
{
throw new ArgumentOutOfRangeException("numChannels", "Must be Mono or Stereo");
}
if ((applicationType != OpusApplicationType.Audio)
&& (applicationType != OpusApplicationType.RestrictedLowDelay)
&& (applicationType != OpusApplicationType.Voip))
{
throw new ArgumentOutOfRangeException("applicationType", "Must use one of the pre-defined application types (" + applicationType + ")");
}
if ((encoderDelay != Delay.Delay10ms)
&& (encoderDelay != Delay.Delay20ms)
&& (encoderDelay != Delay.Delay2dot5ms)
&& (encoderDelay != Delay.Delay40ms)
&& (encoderDelay != Delay.Delay5ms)
&& (encoderDelay != Delay.Delay60ms))
{
throw new ArgumentOutOfRangeException("encoderDelay", "Must use one of the pre-defined delay values (" + encoderDelay + ")"); ;
}
_inputSamplingRate = inputSamplingRateHz;
_inputChannels = numChannels;
_handle = Wrapper.opus_encoder_create(inputSamplingRateHz, numChannels, applicationType);
if (_handle == IntPtr.Zero)
{
throw new OpusException(OpusStatusCode.AllocFail, "Memory was not allocated for the encoder");
}
EncoderDelay = encoderDelay;
Bitrate = bitrate;
UseInbandFEC = true;
PacketLossPercentage = 30;
}
public ArraySegment<byte> Encode(float[] pcmSamples)
{
int size = Wrapper.opus_encode(_handle, pcmSamples, _frameSizePerChannel, writePacket);
if (size <= 1) // DTX: nothing to send. Negative (error) results are already handled at this point
return EmptyBuffer;
else
return new ArraySegment<byte>(writePacket, 0, size);
}
public ArraySegment<byte> Encode(short[] pcmSamples)
{
int size = Wrapper.opus_encode(_handle, pcmSamples, _frameSizePerChannel, writePacket);
if (size <= 1) // DTX: nothing to send. Negative (error) results are already handled at this point
return EmptyBuffer;
else
return new ArraySegment<byte>(writePacket, 0, size);
}
public void Dispose()
{
if (_handle != IntPtr.Zero)
{
Wrapper.opus_encoder_destroy(_handle);
_handle = IntPtr.Zero;
}
}
}
}
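A usage sketch for the encoder above: the PCM buffer must contain exactly FrameSizePerChannel samples per channel, and an empty segment means the frame was suppressed by DTX. The silent input and loop count are illustrative:
using System;
using POpusCodec;
using POpusCodec.Enums;
static class EncodeSketch
{
static void Run()
{
using (var enc = new OpusEncoder(SamplingRate.Sampling48000, Channels.Mono,
30000, OpusApplicationType.Voip, Delay.Delay20ms))
{
enc.SignalHint = SignalHint.Voice; // optional bias, see SignalHint enum
var pcm = new short[enc.FrameSizePerChannel * (int)enc.InputChannels];
for (int i = 0; i < 10; i++)
{
ArraySegment<byte> packet = enc.Encode(pcm);
if (packet.Count == 0)
{
continue; // DTX: nothing to transmit for this frame
}
// hand 'packet' to the transport; the underlying buffer is reused by the next Encode call
}
}
}
}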

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 487631ffee9000c42b81291343b86446
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,27 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using POpusCodec.Enums;
namespace POpusCodec
{
public class OpusException : Exception
{
private OpusStatusCode _statusCode = OpusStatusCode.OK;
public OpusStatusCode StatusCode
{
get
{
return _statusCode;
}
}
public OpusException(OpusStatusCode statusCode, string message)
: base(message + " (" + statusCode + ")")
{
_statusCode = statusCode;
}
}
}
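Since the wrapper surfaces native errors as OpusException, creation and CTL calls are typically guarded like this (the console logging is an assumption):
using System;
using POpusCodec;
using POpusCodec.Enums;
static class ErrorHandlingSketch
{
static OpusEncoder TryCreateEncoder()
{
try
{
return new OpusEncoder(SamplingRate.Sampling48000, Channels.Mono,
30000, OpusApplicationType.Voip, Delay.Delay20ms);
}
catch (OpusException e)
{
// StatusCode carries the native error, e.g. OpusStatusCode.AllocFail.
Console.WriteLine("Opus error " + e.StatusCode + ": " + e.Message);
return null;
}
}
}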

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 6a76e6ebcab0b9344837c3cbd4124687
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,298 @@
#if ((UNITY_IOS || UNITY_SWITCH) && !UNITY_EDITOR) || __IOS__
#define DLL_IMPORT_INTERNAL
#endif
#if NONE //UNITY_EDITOR_WIN || UNITY_STANDALONE_WIN || UNITY_WSA
// opus.* lib built from original opus repo
#else
#define OPUS_EGPV // opus_egpv.* lib with interop helpers (we still may use such libs for the platforms where helpers are not required)
#endif
// Interop helpers required for iOS ARM64 IL2CPP (and maybe in other cases) because of variadic functions PInvoke calling issue:
// https://stackoverflow.com/questions/35536515/variable-argument-function-bad-access-with-va-arg-at-ios-arm64
// use statically linked interop helpers defined outside of opus.lib
#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
#define OPUS_EGPV_INTEROP_HELPER_EXTERNAL
#endif
// Interop helpers required also for Apple Silicon (ARM64)
#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
// use interop helpers built into opus_egpv.* lib (works for any platform but requires opus lib compiled from customized sources)
#define OPUS_EGPV_INTEROP_HELPER_BUILTIN
#define OPUS_EGPV
#endif
#if UNITY_WEBGL && !UNITY_EDITOR
#define DLL_IMPORT_INTERNAL
#define OPUS_EGPV_INTEROP_HELPER_BUILTIN
#endif
using System;
using System.Runtime.InteropServices;
using POpusCodec.Enums;
using Photon.Voice;
namespace POpusCodec
{
internal class Wrapper
{
#if DLL_IMPORT_INTERNAL
const string lib_name = "__Internal";
#else
#if OPUS_EGPV
const string lib_name = "opus_egpv";
#else
const string lib_name = "opus";
#endif
#endif
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern int opus_encoder_get_size(Channels channels);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern OpusStatusCode opus_encoder_init(IntPtr st, SamplingRate Fs, Channels channels, OpusApplicationType application);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern IntPtr opus_get_version_string();
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern int opus_encode(IntPtr st, short[] pcm, int frame_size, byte[] data, int max_data_bytes);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern int opus_encode_float(IntPtr st, float[] pcm, int frame_size, byte[] data, int max_data_bytes);
#if OPUS_EGPV_INTEROP_HELPER_BUILTIN
const string ctl_entry_point_set = "_set";
const string ctl_entry_point_get = "_get";
#elif OPUS_EGPV_INTEROP_HELPER_EXTERNAL
const string ctl_entry_point_set = "_set_ext";
const string ctl_entry_point_get = "_get_ext";
#else
const string ctl_entry_point_set = "";
const string ctl_entry_point_get = "";
#endif
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, EntryPoint = "opus_encoder_ctl" + ctl_entry_point_set)]
private static extern int opus_encoder_ctl_set(IntPtr st, OpusCtlSetRequest request, int value);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, EntryPoint = "opus_encoder_ctl" + ctl_entry_point_get)]
private static extern int opus_encoder_ctl_get(IntPtr st, OpusCtlGetRequest request, ref int value);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, EntryPoint = "opus_decoder_ctl" + ctl_entry_point_set)]
private static extern int opus_decoder_ctl_set(IntPtr st, OpusCtlSetRequest request, int value);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi, EntryPoint = "opus_decoder_ctl" + ctl_entry_point_get)]
private static extern int opus_decoder_ctl_get(IntPtr st, OpusCtlGetRequest request, ref int value);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern int opus_decoder_get_size(Channels channels);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern OpusStatusCode opus_decoder_init(IntPtr st, SamplingRate Fs, Channels channels);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern int opus_decode(IntPtr st, IntPtr data, int len, short[] pcm, int frame_size, int decode_fec);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern int opus_decode_float(IntPtr st, IntPtr data, int len, float[] pcm, int frame_size, int decode_fec);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern int opus_packet_get_bandwidth(IntPtr data);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern int opus_packet_get_nb_channels(byte[] data);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
private static extern IntPtr opus_strerror(OpusStatusCode error);
public static IntPtr opus_encoder_create(SamplingRate Fs, Channels channels, OpusApplicationType application)
{
int size = Wrapper.opus_encoder_get_size(channels);
IntPtr ptr = Marshal.AllocHGlobal(size);
OpusStatusCode statusCode = Wrapper.opus_encoder_init(ptr, Fs, channels, application);
try
{
HandleStatusCode(statusCode, "opus_encoder_create/opus_encoder_init", Fs, channels, application);
}
catch (Exception ex)
{
if (ptr != IntPtr.Zero)
{
Wrapper.opus_encoder_destroy(ptr);
ptr = IntPtr.Zero;
}
throw; // rethrow, preserving the original stack trace
}
return ptr;
}
public static int opus_encode(IntPtr st, short[] pcm, int frame_size, byte[] data)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusEncoder");
int payloadLength = opus_encode(st, pcm, frame_size, data, data.Length);
if (payloadLength <= 0)
{
HandleStatusCode((OpusStatusCode)payloadLength, "opus_encode/short", frame_size, data.Length);
}
return payloadLength;
}
public static int opus_encode(IntPtr st, float[] pcm, int frame_size, byte[] data)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusEncoder");
int payloadLength = opus_encode_float(st, pcm, frame_size, data, data.Length);
if (payloadLength <= 0)
{
HandleStatusCode((OpusStatusCode)payloadLength, "opus_encode/float", frame_size, data.Length);
}
return payloadLength;
}
public static void opus_encoder_destroy(IntPtr st)
{
Marshal.FreeHGlobal(st);
}
public static int get_opus_encoder_ctl(IntPtr st, OpusCtlGetRequest request)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusEncoder");
int value = 0;
OpusStatusCode statusCode = (OpusStatusCode)opus_encoder_ctl_get(st, request, ref value);
HandleStatusCode(statusCode, "opus_encoder_ctl_get", request);
return value;
}
public static void set_opus_encoder_ctl(IntPtr st, OpusCtlSetRequest request, int value)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusEncoder");
OpusStatusCode statusCode = (OpusStatusCode)opus_encoder_ctl_set(st, request, value);
HandleStatusCode(statusCode, "opus_encoder_ctl_set", request, value);
}
public static int get_opus_decoder_ctl(IntPtr st, OpusCtlGetRequest request)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusDcoder");
int value = 0;
OpusStatusCode statusCode = (OpusStatusCode)opus_decoder_ctl_get(st, request, ref value);
HandleStatusCode(statusCode, "get_opus_decoder_ctl", request, value);
return value;
}
public static void set_opus_decoder_ctl(IntPtr st, OpusCtlSetRequest request, int value)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusDecoder");
OpusStatusCode statusCode = (OpusStatusCode)opus_decoder_ctl_set(st, request, value);
HandleStatusCode(statusCode, "set_opus_decoder_ctl", request, value);
}
public static IntPtr opus_decoder_create(SamplingRate Fs, Channels channels)
{
int size = Wrapper.opus_decoder_get_size(channels);
IntPtr ptr = Marshal.AllocHGlobal(size);
OpusStatusCode statusCode = Wrapper.opus_decoder_init(ptr, Fs, channels);
try
{
HandleStatusCode(statusCode, "opus_decoder_create", Fs, channels);
}
catch (Exception ex)
{
if (ptr != IntPtr.Zero)
{
Wrapper.opus_decoder_destroy(ptr);
ptr = IntPtr.Zero;
}
throw; // rethrow, preserving the original stack trace
}
return ptr;
}
public static void opus_decoder_destroy(IntPtr st)
{
Marshal.FreeHGlobal(st);
}
public static int opus_decode(IntPtr st, FrameBuffer data, short[] pcm, int decode_fec, int channels)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusDecoder");
int numSamplesDecoded = opus_decode(st, data.Ptr, data.Length, pcm, pcm.Length / channels, decode_fec);
if (numSamplesDecoded == (int)OpusStatusCode.InvalidPacket)
return 0;
if (numSamplesDecoded <= 0)
{
HandleStatusCode((OpusStatusCode)numSamplesDecoded, "opus_decode/short", data.Length, pcm.Length, decode_fec, channels);
}
return numSamplesDecoded;
}
public static int opus_decode(IntPtr st, FrameBuffer data, float[] pcm, int decode_fec, int channels)
{
if (st == IntPtr.Zero)
throw new ObjectDisposedException("OpusDecoder");
int numSamplesDecoded = opus_decode_float(st, data.Ptr, data.Length, pcm, pcm.Length / channels, decode_fec);
if (numSamplesDecoded == (int)OpusStatusCode.InvalidPacket)
return 0;
if (numSamplesDecoded <= 0)
{
HandleStatusCode((OpusStatusCode)numSamplesDecoded, "opus_decode/float", data.Length, pcm.Length, decode_fec, channels);
}
return numSamplesDecoded;
}
private static void HandleStatusCode(OpusStatusCode statusCode, params object[] info)
{
if (statusCode != OpusStatusCode.OK)
{
var infoMsg = "";
foreach (var i in info) infoMsg += i.ToString() + ":";
throw new OpusException(statusCode, infoMsg + Marshal.PtrToStringAnsi(opus_strerror(statusCode)));
}
}
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: f928aa994e3035c4089dd9f8e839a0a1
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,275 @@
using System;
namespace Photon.Voice
{
public static class Platform
{
static public IDeviceEnumerator CreateAudioInEnumerator(ILogger logger)
{
#if WINDOWS_UWP || ENABLE_WINMD_SUPPORT
return new UWP.AudioInEnumerator(logger);
#elif PHOTON_VOICE_WINDOWS || UNITY_STANDALONE_WIN || UNITY_EDITOR_WIN
return new Windows.AudioInEnumerator(logger);
#elif UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
return new MacOS.AudioInEnumerator(logger);
#else
return new AudioInEnumeratorNotSupported(logger);
#endif
}
static public IAudioInChangeNotifier CreateAudioInChangeNotifier(Action callback, ILogger logger)
{
#if (UNITY_IOS && !UNITY_EDITOR)
return new IOS.AudioInChangeNotifier(callback, logger);
#elif UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
return new MacOS.AudioInChangeNotifier(callback, logger);
#elif UNITY_SWITCH && !UNITY_EDITOR
return new Switch.AudioInChangeNotifier(callback, logger);
#else
return new AudioInChangeNotifierNotSupported(callback, logger);
#endif
}
static public IEncoder CreateDefaultAudioEncoder<T>(ILogger logger, VoiceInfo info)
{
switch (info.Codec)
{
case Codec.AudioOpus:
return OpusCodec.Factory.CreateEncoder<T[]>(info, logger);
case Codec.Raw: // Debug only. Assumes that original data is short[].
return new RawCodec.Encoder<T>();
default:
throw new UnsupportedCodecException("Platform.CreateDefaultAudioEncoder", info.Codec);
}
}
static public IAudioDesc CreateDefaultAudioSource(ILogger logger, DeviceInfo dev, int samplingRate, int channels, object otherParams = null)
{
#if PHOTON_VOICE_WINDOWS || UNITY_STANDALONE_WIN || UNITY_EDITOR_WIN
return new Windows.WindowsAudioInPusher(dev.IsDefault ? -1 : dev.IDInt, logger);
#elif UNITY_WEBGL && UNITY_2021_2_OR_NEWER && !UNITY_EDITOR // requires ES6
return new Unity.WebAudioMicIn(samplingRate, channels, logger);
#elif UNITY_IOS && !UNITY_EDITOR
if (otherParams == null)
{
return new IOS.AudioInPusher(IOS.AudioSessionParametersPresets.VoIP, logger);
}
else
{
return new IOS.AudioInPusher((IOS.AudioSessionParameters)otherParams, logger);
}
#elif UNITY_STANDALONE_OSX || UNITY_EDITOR_OSX
return new MacOS.AudioInPusher(dev.IsDefault ? -1 : dev.IDInt, logger);
#elif UNITY_ANDROID && !UNITY_EDITOR
if (otherParams == null)
{
return new Unity.AndroidAudioInAEC(logger, true, true, true);
}
else
{
var p = (Unity.AndroidAudioInParameters)otherParams;
return new Unity.AndroidAudioInAEC(logger, p.EnableAEC, p.EnableAGC, p.EnableNS);
}
#elif UNITY_WSA && !UNITY_EDITOR
return new UWP.AudioInPusher(logger, samplingRate, channels, dev.IsDefault ? "" : dev.IDString);
#elif UNITY_SWITCH && !UNITY_EDITOR
return new Switch.AudioInPusher(logger);
#elif UNITY_5_3_OR_NEWER // #if UNITY
return new Unity.MicWrapper(dev.IDString, samplingRate, logger);
#else
throw new UnsupportedPlatformException("Platform.CreateDefaultAudioSource");
#endif
}
#if PHOTON_VOICE_VIDEO_ENABLE
static public IDeviceEnumerator CreateVideoInEnumerator(ILogger logger)
{
#if WINDOWS_UWP || ENABLE_WINMD_SUPPORT
return new UWP.VideoInEnumerator(logger);
#elif UNITY_STANDALONE_WIN || UNITY_EDITOR_WIN || UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
return new Unity.VideoInEnumerator(logger);
#else
return new VideoInEnumeratorNotSupported(logger);
#endif
}
static public IEncoderDirectImage CreateDefaultVideoEncoder(ILogger logger, VoiceInfo info)
{
switch (info.Codec)
{
case Codec.VideoVP8:
case Codec.VideoVP9:
//return new FFmpegCodec.Encoder(logger, info);
return new VPxCodec.Encoder(logger, info);
#if PHOTON_VOICE_WINDOWS || UNITY_EDITOR_WIN || UNITY_STANDALONE_WIN
case Codec.VideoH264:
//return new FFmpegCodec.Encoder(logger, info);
return new Windows.MFTCodec.VideoEncoder(logger, info);
#elif UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
case Codec.VideoH264:
//return new FFmpegCodec.Encoder(logger, info);
return new MacOS.VideoEncoder(logger, info);
#endif
default:
throw new UnsupportedCodecException("Platform.CreateDefaultVideoEncoder", info.Codec);
}
}
static public IDecoderDirect<ImageBufferNative> CreateDefaultVideoDecoder(ILogger logger, VoiceInfo info)
{
switch (info.Codec)
{
case Codec.VideoVP8:
case Codec.VideoVP9:
//return new FFmpegCodec.Decoder(logger);
return new VPxCodec.Decoder(logger);
#if PHOTON_VOICE_WINDOWS || UNITY_EDITOR_WIN || UNITY_STANDALONE_WIN
case Codec.VideoH264:
//return new FFmpegCodec.Decoder(logger);
return new Windows.MFTCodec.VideoDecoder(logger, info);
#elif UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
case Codec.VideoH264:
//return new FFmpegCodec.Decoder(logger);
return new MacOS.VideoDecoder(logger, info);
break;
#endif
default:
throw new UnsupportedCodecException("Platform.CreateDefaultVideoDecoder", info.Codec);
}
}
static public IVideoRecorder CreateDefaultVideoRecorder(ILogger logger, VoiceInfo info, DeviceInfo camDevice, Action<IVideoRecorder> onReady)
{
// native platform-specific recorders
#if UNITY_ANDROID && !UNITY_EDITOR
return new Unity.AndroidVideoRecorderSurfaceView(logger, info, onReady);
#elif UNITY_IOS && !UNITY_EDITOR
if (info.Codec == Codec.VideoH264)
{
return new IOS.VideoRecorderLayer(logger, info, onReady);
}
throw new UnsupportedCodecException("Platform.CreateDefaultVideoRecorder", info.Codec);
#elif WINDOWS_UWP || (UNITY_WSA && !UNITY_EDITOR)
if (info.Codec == Codec.VideoH264)
{
return new UWP.VideoRecorderMediaPlayerElement(logger, info, camDevice.IDString, onReady);
}
throw new UnsupportedCodecException("Platform.CreateDefaultVideoRecorder", info.Codec);
#else // multi-platform VideoRecorderUnity
var ve = CreateDefaultVideoEncoder(logger, info);
#if UNITY_5_3_OR_NEWER // #if UNITY
return new Unity.VideoRecorderUnity(ve, null, camDevice.IDString, info.Width, info.Height, info.FPS, onReady);
#else
throw new NotImplementedException("Platform.CreateDefaultVideoRecorder: default Video Recorder for the platform is not implemented.");
#endif
#endif
}
static public IVideoPlayer CreateDefaultVideoPlayer(ILogger logger, VoiceInfo info, Action<IVideoPlayer> onReady)
{
// native platform-specific players
#if UNITY_ANDROID && !UNITY_EDITOR
var vd = new Unity.AndroidVideoDecoderSurfaceView(logger, info);
return new VideoPlayer(vd, vd.Preview, info.Width, info.Height, onReady);
#elif UNITY_IOS && !UNITY_EDITOR
if (info.Codec == Codec.VideoH264)
{
var vd = new IOS.VideoDecoderLayer(logger);
return new VideoPlayer(vd, vd.PreviewLayer, info.Width, info.Height, onReady);
}
throw new UnsupportedCodecException("Platform.CreateDefaultVideoPlayer", info.Codec);
#elif WINDOWS_UWP || (UNITY_WSA && !UNITY_EDITOR)
if (info.Codec == Codec.VideoH264)
{
var vd = new UWP.VideoDecoderMediaPlayerElement(logger, info);
return new VideoPlayer(vd, vd.PreviewMediaPlayerElement, info.Width, info.Height, onReady);
}
throw new UnsupportedCodecException("Platform.CreateDefaultVideoPlayer", info.Codec);
#else // multi-platform VideoPlayerUnity or generic VideoPlayer
var vd = CreateDefaultVideoDecoder(logger, info);
#if UNITY_5_3_OR_NEWER // #if UNITY
var vp = new Unity.VideoPlayerUnity(vd, onReady);
// assign Draw method copying Image to Unity texture as software decoder Output
vd.Output = vp.Draw;
return vp;
#else
throw new NotImplementedException("Platform.CreateDefaultVideoPlayer: default Video Player for the platform is not implemented.");
#endif
#endif
}
public static IPreviewManager CreateDefaultPreviewManager(ILogger logger)
{
#if UNITY_ANDROID && !UNITY_EDITOR
return new Unity.AndroidPreviewManagerSurfaceView(logger);
#elif UNITY_IOS && !UNITY_EDITOR
return new IOS.PreviewManagerLayer(logger);
#elif WINDOWS_UWP || (UNITY_WSA && !UNITY_EDITOR)
return new UWP.PreviewManagerMediaPlayerElement(logger);
#elif UNITY_5_3_OR_NEWER // #if UNITY
return new Unity.PreviewManagerScreenQuadTexture(logger); // uses custom shader
// return new Unity.PreviewManagerUnityGUI(); // uses GUI.DrawTexture
#else
return null;
#endif
}
// Unity Texture Previews
#if UNITY_5_3_OR_NEWER // #if UNITY
static public IVideoRecorder CreateVideoRecorderUnityTexture(ILogger logger, VoiceInfo info, DeviceInfo camDevice, Action<IVideoRecorder> onReady)
{
#if UNITY_ANDROID && !UNITY_EDITOR
return new Unity.AndroidVideoRecorderUnityTexture(logger, info, onReady);
#elif UNITY_IOS && !UNITY_EDITOR
if (info.Codec == Codec.VideoH264)
{
return new IOS.VideoRecorderUnityTexture(logger, info, onReady);
}
throw new UnsupportedCodecException("Platform.CreateVideoRecorderUnityTexture", info.Codec);
#elif WINDOWS_UWP || (UNITY_WSA && !UNITY_EDITOR)
if (info.Codec == Codec.VideoH264)
{
return new UWP.VideoRecorderUnityTexture(logger, info, camDevice.IDString, onReady);
}
throw new UnsupportedCodecException("Platform.CreateVideoRecorderUnityTexture", info.Codec);
#else // multi-platform VideoRecorderUnity
var ve = CreateDefaultVideoEncoder(logger, info);
return new Unity.VideoRecorderUnity(ve, null, camDevice.IDString, info.Width, info.Height, info.FPS, onReady);
#endif
}
static public IVideoPlayer CreateVideoPlayerUnityTexture(ILogger logger, VoiceInfo info, Action<IVideoPlayer> onReady)
{
#if UNITY_ANDROID && !UNITY_EDITOR
return new Unity.AndroidVideoPlayerUnityTexture(logger, info, onReady);
#elif UNITY_IOS && !UNITY_EDITOR
if (info.Codec == Codec.VideoH264)
{
return new IOS.VideoPlayerUnityTexture(logger, info, onReady);
}
throw new UnsupportedCodecException("Platform.CreateVideoPlayerUnityTexture", info.Codec);
#elif WINDOWS_UWP || (UNITY_WSA && !UNITY_EDITOR)
if (info.Codec == Codec.VideoH264)
{
return new UWP.VideoPlayerUnityTexture(logger, info, onReady);
}
throw new UnsupportedCodecException("Platform.CreateVideoPlayerUnityTexture", info.Codec);
#else // multi-platform VideoPlayerUnity
var vd = CreateDefaultVideoDecoder(logger, info);
var vp = new Unity.VideoPlayerUnity(vd, onReady);
// assign Draw method copying Image to Unity texture as software decoder Output
vd.Output = vp.Draw;
return vp;
#endif
}
static public IPreviewManager CreatePreviewManagerUnityTexture(ILogger logger)
{
return new Unity.PreviewManagerScreenQuadTexture(logger);
}
#endif // UNITY_5_3_OR_NEWER
#endif // PHOTON_VOICE_VIDEO_ENABLE
}
}
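A minimal sketch of using the audio encoder factory above; the logger and the populated VoiceInfo are assumed to come from the application, and their setup is not part of this commit:
using Photon.Voice;
static class PlatformSketch
{
static IEncoder CreateAudioEncoder(ILogger logger, VoiceInfo info)
{
// Returns an Opus encoder for Codec.AudioOpus and the pass-through
// RawCodec encoder for Codec.Raw (debug only); other codecs throw
// UnsupportedCodecException.
return Platform.CreateDefaultAudioEncoder<short>(logger, info);
}
}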

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: dfd8e6805c1ef9a46803919a55533466
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,131 @@
using System;
namespace Photon.Voice
{
public class RawCodec
{
public class Encoder<T> : IEncoderDirect<T[]>
{
public string Error { get; private set; }
public Action<ArraySegment<byte>, FrameFlags> Output { set; get; }
int sizeofT = System.Runtime.InteropServices.Marshal.SizeOf(default(T));
byte[] byteBuf = new byte[0];
private static readonly ArraySegment<byte> EmptyBuffer = new ArraySegment<byte>(new byte[] { });
public ArraySegment<byte> DequeueOutput(out FrameFlags flags)
{
flags = 0;
return EmptyBuffer;
}
public void EndOfStream()
{
}
public I GetPlatformAPI<I>() where I : class
{
return null;
}
public void Dispose()
{
}
public void Input(T[] buf)
{
if (Error != null)
{
return;
}
if (Output == null)
{
Error = "RawCodec.Encoder: Output action is not set";
return;
}
if (buf == null)
{
return;
}
if (buf.Length == 0)
{
return;
}
var s = buf.Length * sizeofT;
if (byteBuf.Length < s)
{
byteBuf = new byte[s];
}
Buffer.BlockCopy(buf, 0, byteBuf, 0, s);
Output(new ArraySegment<byte>(byteBuf, 0, s), 0);
}
}
public class Decoder<T> : IDecoder
{
public string Error { get; private set; }
public Decoder(Action<FrameOut<T>> output)
{
this.output = output;
}
public void Open(VoiceInfo info)
{
}
T[] buf = new T[0];
int sizeofT = System.Runtime.InteropServices.Marshal.SizeOf(default(T));
public void Input(ref FrameBuffer byteBuf)
{
if (byteBuf.Array == null)
{
return;
}
if (byteBuf.Length == 0)
{
return;
}
var s = byteBuf.Length / sizeofT;
if (buf.Length < s)
{
buf = new T[s];
}
Buffer.BlockCopy(byteBuf.Array, byteBuf.Offset, buf, 0, byteBuf.Length);
output(new FrameOut<T>((T[])(object)buf, false));
}
public void Dispose()
{
}
private Action<FrameOut<T>> output;
}
// Adapts FrameOut<float> output to FrameOut<short> decoder
// new RawCodec.Decoder<short>(new RawCodec.ShortToFloat(output as Action<FrameOut<float>>).Output);
public class ShortToFloat
{
public ShortToFloat(Action<FrameOut<float>> output)
{
this.output = output;
}
public void Output(FrameOut<short> shortBuf)
{
if (buf.Length < shortBuf.Buf.Length)
{
buf = new float[shortBuf.Buf.Length];
}
AudioUtil.Convert(shortBuf.Buf, buf, shortBuf.Buf.Length);
output(new FrameOut<float>((float[])(object)buf, false));
}
Action<FrameOut<float>> output;
float[] buf = new float[0];
}
}
}
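A self-contained sketch of the encoder's pass-through behavior: Input block-copies the samples into a byte buffer and invokes Output synchronously. The console output is illustrative:
using System;
using Photon.Voice;
static class RawCodecSketch
{
static void Run()
{
var enc = new RawCodec.Encoder<short>();
// Output must be set before Input, otherwise Error is set instead.
enc.Output = (data, flags) =>
{
// 'data' is the raw byte image of the input samples (2 bytes each)
Console.WriteLine("frame: " + data.Count + " bytes");
};
enc.Input(new short[] { 1, 2, 3, 4 }); // prints "frame: 8 bytes"
enc.Dispose();
}
}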

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 9516187db373c5147a69fc975b62bfc0
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,88 @@
using System;
using System.Linq;
using System.Threading;
namespace Photon.Voice
{
// Does not work until Start() gets called
internal class SpacingProfile
{
short[] buf;
bool[] info;
int capacity;
int ptr = 0;
System.Diagnostics.Stopwatch watch;
long watchLast;
bool flushed;
public SpacingProfile(int capacity)
{
this.capacity = capacity;
}
public void Start()
{
if (watch == null)
{
buf = new short[capacity];
info = new bool[capacity];
watch = System.Diagnostics.Stopwatch.StartNew();
}
}
public void Update(bool lost, bool flush)
{
if (watch == null)
{
return;
}
if (flushed)
{
watchLast = watch.ElapsedMilliseconds;
}
var t = watch.ElapsedMilliseconds;
buf[ptr] = (short)(t - watchLast);
info[ptr] = lost;
watchLast = t;
ptr++;
if (ptr == buf.Length)
{
ptr = 0;
}
flushed = flush;
}
public string Dump
{
get
{
if (watch == null)
{
return "Error: Profiler not started.";
}
else
{
var buf2 = buf.Select((v, i) => (info[i] ? "-" : "") + v.ToString()).ToArray();
return "max=" + Max + " " + string.Join(",", buf2, ptr, buf.Length - ptr) + ", " + string.Join(",", buf2, 0, ptr);
}
}
}
// do not call frequently
public int Max { get { return buf == null ? 0 : buf.Select(v => Math.Abs(v)).Max(); } } // 0 until Start() is called
}
internal static class Util
{
static public void SetThreadName(Thread t, string name)
{
const int MAX = 25;
if (name.Length > MAX)
{
name = name.Substring(0, MAX);
}
t.Name = name;
}
}
}
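A usage sketch, assuming it compiles inside the Photon.Voice assembly (SpacingProfile is internal); the capacity and call pattern are illustrative:
namespace Photon.Voice
{
internal static class SpacingProfileSketch
{
internal static void Run()
{
var profile = new SpacingProfile(1000);
profile.Start();              // Update is a no-op until Start
profile.Update(false, false); // one frame received
profile.Update(true, false);  // one frame lost ("-" prefix in Dump)
System.Console.WriteLine(profile.Dump); // "max=..." plus spacings in ms
}
}
}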

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: b9318da328735db4c94813dcec17ada1
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,662 @@
// -----------------------------------------------------------------------
// <copyright file="Voice.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2017 Exit Games GmbH
// </copyright>
// <summary>
// Photon data streaming support.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Threading;
namespace Photon.Voice
{
/// <summary>
/// Interface for pulling data, in case this is more appropriate than pushing it.
/// </summary>
public interface IDataReader<T> : IDisposable
{
/// <summary>Fills the given frame buffer completely with uncompressed source data, or returns false if not enough data is available.</summary>
/// <param name="buffer">Buffer to fill.</param>
/// <returns>True if buffer was filled successfully, false otherwise.</returns>
bool Read(T[] buffer);
}
/// <summary>
/// Interface for classes that want their Service() function to be called regularly in the context of a LocalVoice.
/// </summary>
public interface IServiceable
{
/// <summary>Service function that should be called regularly.</summary>
void Service(LocalVoice localVoice);
}
public class FrameOut<T>
{
public FrameOut(T[] buf, bool endOfStream)
{
Set(buf, endOfStream);
}
public FrameOut<T> Set(T[] buf, bool endOfStream)
{
Buf = buf;
EndOfStream = endOfStream;
return this;
}
public T[] Buf { get; private set; }
public bool EndOfStream { get; private set; } // stream interrupted but may be resumed, flush the output
}
/// <summary>
/// Represents outgoing data stream.
/// </summary>
public class LocalVoice : IDisposable
{
public const int DATA_POOL_CAPACITY = 50; // TODO: may depend on data type and properties, set for average audio stream
[Obsolete("Use InterestGroup.")]
public byte Group { get { return InterestGroup; } set { InterestGroup = value; } }
/// <summary>If InterestGroup != 0, voice's data is sent only to clients listening to this group (if supported by transport).</summary>
public byte InterestGroup { get; set; }
/// <summary>Returns the Info structure assigned on local voice creation.</summary>
public VoiceInfo Info { get { return info; } }
/// <summary>If true, stream data is broadcast.</summary>
public bool TransmitEnabled
{
get
{
return transmitEnabled;
}
set
{
if (transmitEnabled != value)
{
if (transmitEnabled)
{
if (encoder != null && this.voiceClient.transport.IsChannelJoined(this.channelId))
{
encoder.EndOfStream();
}
}
transmitEnabled = value;
}
}
}
private bool transmitEnabled = true;
/// <summary>Returns true if the stream is currently transmitting.</summary>
public bool IsCurrentlyTransmitting
{
get { return Environment.TickCount - lastTransmitTime < NO_TRANSMIT_TIMEOUT_MS; }
}
/// <summary>Sent frames counter.</summary>
public int FramesSent { get; private set; }
/// <summary>Sent frames bytes counter.</summary>
public int FramesSentBytes { get; private set; }
/// <summary>Send data reliable.</summary>
public bool Reliable { get; set; }
/// <summary>Send data encrypted.</summary>
public bool Encrypt { get; set; }
/// <summary>Optional user object attached to LocalVoice. Its Service() will be called at each VoiceClient.Service() call.</summary>
public IServiceable LocalUserServiceable { get; set; }
/// <summary>
/// If true, the outgoing stream is routed back to this client via the server, the same way as remote clients' streams.
/// Can be switched at any time. OnRemoteVoiceInfoAction and OnRemoteVoiceRemoveAction are triggered if required.
/// Availability of this functionality depends on the transport.
/// </summary>
public bool DebugEchoMode
{
get { return debugEchoMode; }
set
{
if (debugEchoMode != value)
{
debugEchoMode = value;
if (voiceClient != null && voiceClient.transport != null)
{
if (voiceClient.transport.IsChannelJoined(this.channelId))
{
if (debugEchoMode)
{
voiceClient.sendVoicesInfoAndConfigFrame(new List<LocalVoice>() { this }, channelId, -1);
}
else
{
voiceClient.transport.SendVoiceRemove(this, channelId, -1);
}
}
}
}
}
}
bool debugEchoMode;
public void SendSpacingProfileStart()
{
sendSpacingProfile.Start();
}
public string SendSpacingProfileDump { get { return sendSpacingProfile.Dump; } }
/// <summary>
/// Logs input frames time spacing profiling results. Do not call frequently.
/// </summary>
public int SendSpacingProfileMax { get { return sendSpacingProfile.Max; } }
public byte ID { get { return id; } }
public byte EvNumber { get { return evNumber; } }
#region nonpublic
protected VoiceInfo info;
protected IEncoder encoder;
internal byte id;
internal int channelId;
internal byte evNumber = 0; // sequence number used by receivers to detect loss; wraps around on overflow
protected VoiceClient voiceClient;
protected ArraySegment<byte> configFrame;
volatile protected bool disposed;
protected object disposeLock = new object();
internal LocalVoice() // for dummy voices
{
}
internal LocalVoice(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId)
{
this.info = voiceInfo;
this.channelId = channelId;
this.voiceClient = voiceClient;
this.id = id;
if (encoder == null)
{
var m = LogPrefix + ": encoder is null";
voiceClient.logger.LogError(m);
throw new ArgumentNullException("encoder");
}
this.encoder = encoder;
this.encoder.Output = sendFrame;
}
protected string shortName { get { return "v#" + id + "ch#" + voiceClient.channelStr(channelId); } }
public string Name { get { return "Local " + info.Codec + " v#" + id + " ch#" + voiceClient.channelStr(channelId); } }
public string LogPrefix { get { return "[PV] " + Name; } }
private const int NO_TRANSMIT_TIMEOUT_MS = 100; // should be greater than SendFrame() call interval
private int lastTransmitTime = Environment.TickCount - NO_TRANSMIT_TIMEOUT_MS;
internal virtual void service()
{
while (true)
{
FrameFlags f;
var x = encoder.DequeueOutput(out f);
if (x.Count == 0)
{
break;
}
else
{
sendFrame(x, f);
}
}
if (LocalUserServiceable != null)
{
LocalUserServiceable.Service(this);
}
}
internal void sendConfigFrame(int targetPlayerId)
{
if (configFrame.Count != 0)
{
this.voiceClient.logger.LogInfo(LogPrefix + " Sending config frame to pl " + targetPlayerId);
sendFrame0(configFrame, FrameFlags.Config, targetPlayerId, true);
}
}
internal void sendFrame(ArraySegment<byte> compressed, FrameFlags flags)
{
if ((flags & FrameFlags.Config) != 0)
{
byte[] a = configFrame.Array != null && configFrame.Array.Length >= compressed.Count ? configFrame.Array : new byte[compressed.Count];
Buffer.BlockCopy(compressed.Array, compressed.Offset, a, 0, compressed.Count);
configFrame = new ArraySegment<byte>(a, 0, compressed.Count);
this.voiceClient.logger.LogInfo(LogPrefix + " Got config frame " + configFrame.Count + " bytes");
}
if (this.voiceClient.transport.IsChannelJoined(this.channelId) && this.TransmitEnabled)
{
sendFrame0(compressed, flags, 0, Reliable);
}
}
internal void sendFrame0(ArraySegment<byte> compressed, FrameFlags flags, int targetPlayerId, bool reliable)
{
if ((flags & FrameFlags.Config) != 0)
{
reliable = true;
}
if ((flags & FrameFlags.KeyFrame) != 0)
{
reliable = true;
}
// sending reliably breaks timing
// consider sending multiple EndOfStream packets for reliability
if ((flags & FrameFlags.EndOfStream) != 0)
{
// reliable = true;
}
this.FramesSent++;
this.FramesSentBytes += compressed.Count;
this.voiceClient.transport.SendFrame(compressed, flags, evNumber, id, this.channelId, targetPlayerId, reliable, this);
this.sendSpacingProfile.Update(false, false);
if (this.DebugEchoMode)
{
this.eventTimestamps[evNumber] = Environment.TickCount;
}
evNumber++;
if (compressed.Count > 0 && (flags & FrameFlags.Config) == 0) // otherwise the frame is config or control (EOS)
{
lastTransmitTime = Environment.TickCount;
}
}
internal Dictionary<byte, int> eventTimestamps = new Dictionary<byte, int>();
SpacingProfile sendSpacingProfile = new SpacingProfile(1000);
#endregion
/// <summary>Remove this voice from its VoiceClient (using VoiceClient.RemoveLocalVoice).</summary>
public void RemoveSelf()
{
if (this.voiceClient != null) // dummy voice can try to remove self
{
this.voiceClient.RemoveLocalVoice(this);
}
}
public virtual void Dispose()
{
if (!disposed)
{
if (this.encoder != null)
{
this.encoder.Dispose();
}
disposed = true;
}
}
}
/// <summary>Event Actions and other options for a remote voice (incoming stream).</summary>
public struct RemoteVoiceOptions
{
public RemoteVoiceOptions(ILogger logger, string logPrefix, VoiceInfo voiceInfo)
{
this.logger = logger;
this.logPrefix = logPrefix;
this.voiceInfo = voiceInfo;
this.Decoder = null;
this.OnRemoteVoiceRemoveAction = null;
}
/// <summary>
/// Create default audio decoder and register a method to be called when a data frame is decoded.
/// </summary>
public void SetOutput(Action<FrameOut<float>> output)
{
if (voiceInfo.Codec == Codec.Raw) // Debug only. Assumes that original data is short[].
{
this.Decoder = new RawCodec.Decoder<short>(new RawCodec.ShortToFloat(output as Action<FrameOut<float>>).Output);
return;
}
setOutput<float>(output);
}
/// <summary>
/// Create default audio decoder and register a method to be called when a data frame is decoded.
/// </summary>
public void SetOutput(Action<FrameOut<short>> output)
{
if (voiceInfo.Codec == Codec.Raw) // Debug only. Assumes that original data is short[].
{
this.Decoder = new RawCodec.Decoder<short>(output);
return;
}
setOutput<short>(output);
}
private void setOutput<T>(Action<FrameOut<T>> output)
{
logger.LogInfo(logPrefix + ": Creating default decoder " + voiceInfo.Codec + " for output FrameOut<" + typeof(T) + ">");
if (voiceInfo.Codec == Codec.AudioOpus)
{
this.Decoder = new OpusCodec.Decoder<T>(output, logger);
}
else
{
logger.LogError(logPrefix + ": FrameOut<" + typeof(T) + "> output set for non-audio decoder " + voiceInfo.Codec);
}
}
/// <summary>
/// Register a method to be called when the remote voice is removed.
/// </summary>
public Action OnRemoteVoiceRemoveAction { get; set; }
/// <summary>Remote voice data decoder. Use to set decoder options or override it with user decoder.</summary>
public IDecoder Decoder { get; set; }
private readonly ILogger logger;
private readonly VoiceInfo voiceInfo;
internal string logPrefix { get; }
}
internal class RemoteVoice : IDisposable
{
// Client.RemoteVoiceInfos support
internal VoiceInfo Info { get; private set; }
internal RemoteVoiceOptions options;
internal int channelId;
internal int DelayFrames { get; set; }
private int playerId;
private byte voiceId;
volatile private bool disposed;
object disposeLock = new object();
internal RemoteVoice(VoiceClient client, RemoteVoiceOptions options, int channelId, int playerId, byte voiceId, VoiceInfo info, byte lastEventNumber)
{
this.options = options;
this.LogPrefix = options.logPrefix;
this.voiceClient = client;
this.channelId = channelId;
this.playerId = playerId;
this.voiceId = voiceId;
this.Info = info;
this.lastEvNumber = lastEventNumber;
if (this.options.Decoder == null)
{
var m = LogPrefix + ": decoder is null (set it with options Decoder property or SetOutput method in OnRemoteVoiceInfoAction)";
voiceClient.logger.LogError(m);
disposed = true;
return;
}
#if PHOTON_VOICE_THREADING_DISABLE
voiceClient.logger.LogInfo(LogPrefix + ": Starting decode singlethreaded");
options.Decoder.Open(Info);
#else
#if NETFX_CORE
Windows.System.Threading.ThreadPool.RunAsync((x) =>
{
decodeThread();
});
#else
var t = new Thread(() => decodeThread());
Util.SetThreadName(t, "[PV] Dec" + shortName);
t.Start();
#endif
#endif
}
private string shortName { get { return "v#" + voiceId + "ch#" + voiceClient.channelStr(channelId) + "p#" + playerId; } }
public string LogPrefix { get; private set; }
SpacingProfile receiveSpacingProfile = new SpacingProfile(1000);
/// <summary>
/// Starts input frames time spacing profiling. Once started, it can't be stopped.
/// </summary>
public void ReceiveSpacingProfileStart()
{
receiveSpacingProfile.Start();
}
public string ReceiveSpacingProfileDump { get { return receiveSpacingProfile.Dump; } }
/// <summary>
/// Logs input frames time spacing profiling results. Do not call frequently.
/// </summary>
public int ReceiveSpacingProfileMax { get { return receiveSpacingProfile.Max; } }
internal byte lastEvNumber = 0;
private VoiceClient voiceClient;
private static byte byteDiff(byte latest, byte last)
{
return (byte)(latest - (last + 1));
}
internal void receiveBytes(ref FrameBuffer receivedBytes, byte evNumber)
{
// receive-gap detection and compensation
if (evNumber != this.lastEvNumber) // skip check for 1st event
{
int missing = byteDiff(evNumber, this.lastEvNumber);
if (missing == 0)
{
this.lastEvNumber = evNumber;
}
else if (missing < 127)
{
this.voiceClient.logger.LogWarning(LogPrefix + " evNumer: " + evNumber + " playerVoice.lastEvNumber: " + this.lastEvNumber + " missing: " + missing + " r/b " + receivedBytes.Length);
this.voiceClient.FramesLost += missing;
this.lastEvNumber = evNumber;
// restoring missing frames
receiveNullFrames(missing);
}
else
{
// late (out of order) frames, just ignore them
// these frames were already counted in FramesLost
this.voiceClient.logger.LogWarning(LogPrefix + " evNumber: " + evNumber + " playerVoice.lastEvNumber: " + this.lastEvNumber + " late: " + (255 - missing) + " r/b " + receivedBytes.Length);
}
}
this.receiveFrame(ref receivedBytes);
}
Queue<FrameBuffer> frameQueue = new Queue<FrameBuffer>();
AutoResetEvent frameQueueReady = new AutoResetEvent(false);
int flushingFramePosInQueue = -1; // if >= 0, we are flushing since the frame at this (dynamic) position got into the queue: process the queue w/o delays until this frame encountered
FrameBuffer nullFrame = new FrameBuffer();
void receiveFrame(ref FrameBuffer frame)
{
#if PHOTON_VOICE_THREADING_DISABLE
if (disposed) return;
options.Decoder.Input(ref frame);
frame.Release();
#else
lock (disposeLock) // sync with Dispose and decodeThread 'finally'
{
if (disposed) return;
receiveSpacingProfile.Update(false, (frame.Flags & FrameFlags.EndOfStream) != 0);
lock (frameQueue)
{
frameQueue.Enqueue(frame);
frame.Retain();
if ((frame.Flags & FrameFlags.EndOfStream) != 0)
{
flushingFramePosInQueue = frameQueue.Count - 1;
}
}
frameQueueReady.Set();
}
#endif
}
void receiveNullFrames(int count)
{
lock (disposeLock) // sync with Dispose and decodeThread 'finally'
{
if (disposed) return;
for (int i = 0; i < count; i++)
{
receiveSpacingProfile.Update(true, false);
lock (frameQueue)
{
frameQueue.Enqueue(nullFrame);
}
}
frameQueueReady.Set();
}
}
void decodeThread()
{
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.BeginThreadProfiling("PhotonVoice", LogPrefix);
//#endif
voiceClient.logger.LogInfo(LogPrefix + ": Starting decode thread");
var decoder = this.options.Decoder;
try
{
#if UNITY_ANDROID
UnityEngine.AndroidJNI.AttachCurrentThread();
#endif
decoder.Open(Info);
while (!disposed)
{
frameQueueReady.WaitOne(); // Wait until data is pushed to the queue or Dispose signals.
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.BeginSample("Decoder");
//#endif
while (true) // Dequeue and process while the queue is not empty
{
if (disposed) break; // early exit to save a few resources
FrameBuffer f;
bool haveFrame = false;
lock (frameQueue)
{
var df = 0;
// if flushing, process all frames in the queue
// otherwise keep the queue length equal DelayFrames, also check DelayFrames for validity
if (flushingFramePosInQueue < 0 && DelayFrames > 0 && DelayFrames < 300) // 10 sec. of video or max 3 sec. audio
{
df = DelayFrames;
}
if (frameQueue.Count > df)
{
f = frameQueue.Dequeue();
flushingFramePosInQueue--; // -1 if f is flushing frame (f.Flags == FrameFlags.EndOfStream), the next frame will be processed with delay
// leave it decrementing to have an idea when the last flush was triggered
// but avoid overflow which will happen in 248.5 days for 100 input frames per sec
if (flushingFramePosInQueue == Int32.MinValue)
{
flushingFramePosInQueue = -1;
}
haveFrame = true;
}
else
{
break;
}
}
if (haveFrame)
{
decoder.Input(ref f);
f.Release();
}
}
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.EndSample();
//#endif
}
}
catch (Exception e)
{
voiceClient.logger.LogError(LogPrefix + ": Exception in decode thread: " + e);
throw; // rethrow, preserving the original stack trace
}
finally
{
lock (disposeLock) // sync with receiveFrame/receiveNullFrames
{
disposed = true; // set to disposing state if exiting due to exception
}
// cleaning up being sure that fields are not updated anymore
#if NETFX_CORE
frameQueueReady.Dispose();
#else
frameQueueReady.Close();
#endif
lock (frameQueue)
{
while (frameQueue.Count > 0)
{
frameQueue.Dequeue().Release();
}
}
decoder.Dispose();
#if UNITY_ANDROID
UnityEngine.AndroidJNI.DetachCurrentThread();
#endif
voiceClient.logger.LogInfo(LogPrefix + ": Exiting decode thread");
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.EndThreadProfiling();
//#endif
}
}
internal void removeAndDispose()
{
if (options.OnRemoteVoiceRemoveAction != null)
{
options.OnRemoteVoiceRemoveAction();
}
Dispose();
}
public void Dispose()
{
#if PHOTON_VOICE_THREADING_DISABLE
if (options.Decoder != null)
{
disposed = true;
options.Decoder.Dispose();
}
#else
lock (disposeLock) // sync with receiveFrame/receiveNullFrames
{
if (!disposed)
{
disposed = true;
frameQueueReady.Set(); // let decodeThread dispose resources and exit
}
}
#endif
}
}
}
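A hedged sketch of wiring playback to a remote voice; the RemoteVoiceOptions instance is assumed to be handed to the application by the remote-voice-info callback mentioned above:
using Photon.Voice;
static class RemoteVoiceOutputSketch
{
static void Configure(ref RemoteVoiceOptions options)
{
// Creates the default decoder for the voice's codec and registers the
// frame callback; the callback is typically invoked on the decode thread.
options.SetOutput((FrameOut<float> frame) =>
{
// frame.Buf holds decoded samples; frame.EndOfStream marks an
// interruption after which the output should be flushed.
});
options.OnRemoteVoiceRemoveAction = () =>
{
// release playback resources here
};
}
}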

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: b871d5079e00c7441bb1e52d42d0b60c
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,203 @@
using System;
namespace Photon.Voice
{
public interface IResettable
{
void Reset();
}
/// <summary>Audio Source interface.</summary>
public interface IAudioDesc : IDisposable
{
/// <summary>Sampling rate of the audio signal (in Hz).</summary>
int SamplingRate { get; }
/// <summary>Number of channels in the audio signal.</summary>
int Channels { get; }
/// <summary>If not null, audio object is in invalid state.</summary>
string Error { get; }
}
// Trivial implementation. Used to build an erroneous source.
public class AudioDesc : IAudioDesc
{
public AudioDesc(int samplingRate, int channels, string error)
{
SamplingRate = samplingRate;
Channels = channels;
Error = error;
}
public int SamplingRate { get; private set; }
public int Channels { get; private set; }
public string Error { get; private set; }
public void Dispose() { }
}
/// <summary>Audio Reader interface.</summary>
/// As opposed to an IAudioPusher (which pushes its audio data whenever it is ready),
/// an IAudioReader delivers audio data when it is "pulled" (its Read function is called).
public interface IAudioReader<T> : IDataReader<T>, IAudioDesc
{
}
/// <summary>Audio Pusher interface.</summary>
/// As opposed to an IAudioReader (which delivers audio data when it is "pulled"),
/// an IAudioPusher pushes its audio data whenever it is ready.
public interface IAudioPusher<T> : IAudioDesc
{
/// <summary>Set the callback function used for pushing data.</summary>
/// <param name="callback">Callback function to use.</param>
/// <param name="bufferFactory">Buffer factory used to create the buffer that is pushed to the callback</param>
void SetCallback(Action<T[]> callback, ObjectFactory<T[], int> bufferFactory);
}
/// <summary>Interface for an outgoing audio stream.</summary>
/// A LocalVoice always brings a LevelMeter and a VoiceDetector, which you can access using this interface.
public interface ILocalVoiceAudio
{
/// <summary>The VoiceDetector in use.</summary>
/// Use it to enable or disable voice detector and set its parameters.
AudioUtil.IVoiceDetector VoiceDetector { get; }
/// <summary>The LevelMeter utility in use.</summary>
AudioUtil.ILevelMeter LevelMeter { get; }
/// <summary>If true, voice detector calibration is in progress.</summary>
bool VoiceDetectorCalibrating { get; }
/// <summary>
/// Trigger voice detector calibration process.
/// </summary>
/// While calibrating, keep silence. Voice detector sets threshold based on measured background noise level.
/// <param name="durationMs">Duration of calibration (in milliseconds).</param>
/// <param name="onCalibrated">Called when calibration is complete. Parameter is new threshold value.</param>
void VoiceDetectorCalibrate(int durationMs, Action<float> onCalibrated = null);
}
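// A minimal calibration sketch (illustrative only; assumes 'voice' is an existing
// ILocalVoiceAudio, e.g. obtained from a LocalVoiceAudio<T> instance):
//
//   if (!voice.VoiceDetectorCalibrating)
//   {
//       // Keep silence for the duration; the threshold is derived from the
//       // measured background noise level.
//       voice.VoiceDetectorCalibrate(2000, threshold =>
//           Console.WriteLine("VAD threshold calibrated to " + threshold));
//   }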
/// <summary>The type of samples used for audio processing.</summary>
public enum AudioSampleType
{
Source,
Short,
Float,
}
/// <summary>Outgoing audio stream.</summary>
abstract public class LocalVoiceAudio<T> : LocalVoiceFramed<T>, ILocalVoiceAudio
{
/// <summary>Create a new LocalVoiceAudio{T} instance.</summary>
/// <param name="voiceClient">The VoiceClient to use for this outgoing stream.</param>
/// <param name="voiceId">Numeric ID for this voice.</param>
/// <param name="encoder">Encoder to use for this voice.</param>
/// <param name="voiceInfo">Outgoing stream parameters.</param>
/// <param name="audioSourceDesc">Audio source parameters.</param>
/// <param name="channelId">Voice transport channel ID to use for this voice.</param>
/// <returns>The new LocalVoiceAudio{T} instance.</returns>
public static LocalVoiceAudio<T> Create(VoiceClient voiceClient, byte voiceId, IEncoder encoder, VoiceInfo voiceInfo, IAudioDesc audioSourceDesc, int channelId)
{
if (typeof(T) == typeof(float))
{
return new LocalVoiceAudioFloat(voiceClient, encoder, voiceId, voiceInfo, audioSourceDesc, channelId) as LocalVoiceAudio<T>;
}
else if (typeof(T) == typeof(short))
{
return new LocalVoiceAudioShort(voiceClient, encoder, voiceId, voiceInfo, audioSourceDesc, channelId) as LocalVoiceAudio<T>;
}
else
{
throw new UnsupportedSampleTypeException(typeof(T));
}
}
public virtual AudioUtil.IVoiceDetector VoiceDetector { get { return voiceDetector; } }
protected AudioUtil.VoiceDetector<T> voiceDetector;
protected AudioUtil.VoiceDetectorCalibration<T> voiceDetectorCalibration;
public virtual AudioUtil.ILevelMeter LevelMeter { get { return levelMeter; } }
protected AudioUtil.LevelMeter<T> levelMeter;
/// <summary>Trigger voice detector calibration process.</summary>
/// While calibrating, keep silence. Voice detector sets threshold based on measured background noise level.
/// <param name="durationMs">Duration of calibration in milliseconds.</param>
/// <param name="onCalibrated">Called when calibration is complete. Parameter is new threshold value.</param>
public void VoiceDetectorCalibrate(int durationMs, Action<float> onCalibrated = null)
{
voiceDetectorCalibration.Calibrate(durationMs, onCalibrated);
}
/// <summary>True if the VoiceDetector is currently calibrating.</summary>
public bool VoiceDetectorCalibrating { get { return voiceDetectorCalibration.IsCalibrating; } }
protected int channels;
protected bool resampleSource;
internal LocalVoiceAudio(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, IAudioDesc audioSourceDesc, int channelId)
: base(voiceClient, encoder, id, voiceInfo, channelId,
voiceInfo.SamplingRate != 0 ? voiceInfo.FrameSize * audioSourceDesc.SamplingRate / voiceInfo.SamplingRate : voiceInfo.FrameSize
)
{
this.channels = voiceInfo.Channels;
if (audioSourceDesc.SamplingRate != voiceInfo.SamplingRate)
{
this.resampleSource = true;
this.voiceClient.logger.LogWarning("[PV] Local voice #" + this.id + " audio source frequency " + audioSourceDesc.SamplingRate + " and encoder sampling rate " + voiceInfo.SamplingRate + " do not match. Resampling will occur before encoding.");
}
}
protected void initBuiltinProcessors()
{
if (this.resampleSource)
{
AddPostProcessor(new AudioUtil.Resampler<T>(this.info.FrameSize, channels));
}
this.voiceDetectorCalibration = new AudioUtil.VoiceDetectorCalibration<T>(voiceDetector, levelMeter, this.info.SamplingRate, (int)this.channels);
AddPostProcessor(levelMeter, voiceDetectorCalibration, voiceDetector); // level meter and calibration should be processed even if no signal detected
}
}
/// <summary>Dummy LocalVoiceAudio</summary>
/// For testing, this LocalVoiceAudio implementation features a <see cref="AudioUtil.VoiceDetectorDummy"></see> and a <see cref="AudioUtil.LevelMeterDummy"></see>
public class LocalVoiceAudioDummy : LocalVoice, ILocalVoiceAudio
{
private AudioUtil.VoiceDetectorDummy voiceDetector;
private AudioUtil.LevelMeterDummy levelMeter;
public AudioUtil.IVoiceDetector VoiceDetector { get { return voiceDetector; } }
public AudioUtil.ILevelMeter LevelMeter { get { return levelMeter; } }
public bool VoiceDetectorCalibrating { get { return false; } }
public void VoiceDetectorCalibrate(int durationMs, Action<float> onCalibrated = null) { }
public LocalVoiceAudioDummy()
{
voiceDetector = new AudioUtil.VoiceDetectorDummy();
levelMeter = new AudioUtil.LevelMeterDummy();
}
/// <summary>A Dummy LocalVoiceAudio instance.</summary>
public static LocalVoiceAudioDummy Dummy = new LocalVoiceAudioDummy();
}
/// <summary>Specialization of <see cref="LocalVoiceAudio{T}"></see> for float audio</summary>
public class LocalVoiceAudioFloat : LocalVoiceAudio<float>
{
internal LocalVoiceAudioFloat(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, IAudioDesc audioSourceDesc, int channelId)
: base(voiceClient, encoder, id, voiceInfo, audioSourceDesc, channelId)
{
// these 2 processors go after resampler
this.levelMeter = new AudioUtil.LevelMeterFloat(this.info.SamplingRate, this.info.Channels);
this.voiceDetector = new AudioUtil.VoiceDetectorFloat(this.info.SamplingRate, this.info.Channels);
initBuiltinProcessors();
}
}
/// <summary>Specialization of <see cref="LocalVoiceAudio{T}"></see> for short audio</summary>
public class LocalVoiceAudioShort : LocalVoiceAudio<short>
{
internal LocalVoiceAudioShort(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, IAudioDesc audioSourceDesc, int channelId)
: base(voiceClient, encoder, id, voiceInfo, audioSourceDesc, channelId)
{
// these 2 processors go after resampler
this.levelMeter = new AudioUtil.LevelMeterShort(this.info.SamplingRate, this.info.Channels); //1/2 sec
this.voiceDetector = new AudioUtil.VoiceDetectorShort(this.info.SamplingRate, this.info.Channels);
initBuiltinProcessors();
}
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 910c3d1cbb2b8af4aa76983c5c935878
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,756 @@
// -----------------------------------------------------------------------
// <copyright file="VoiceClient.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2017 Exit Games GmbH
// </copyright>
// <summary>
// Photon data streaming support.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System;
using System.Linq;
using System.Collections.Generic;
namespace Photon.Voice
{
public interface ILogger
{
void LogError(string fmt, params object[] args);
void LogWarning(string fmt, params object[] args);
void LogInfo(string fmt, params object[] args);
void LogDebug(string fmt, params object[] args);
}
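// A minimal ILogger sketch for plain .NET hosts (illustrative only; Unity
// integrations would typically forward to UnityEngine.Debug instead):
//
//   class ConsoleLogger : Photon.Voice.ILogger
//   {
//       void Write(string level, string fmt, object[] args) { Console.WriteLine(level + " " + (args.Length == 0 ? fmt : string.Format(fmt, args))); }
//       public void LogError(string fmt, params object[] args) { Write("[ERROR]", fmt, args); }
//       public void LogWarning(string fmt, params object[] args) { Write("[WARN] ", fmt, args); }
//       public void LogInfo(string fmt, params object[] args) { Write("[INFO] ", fmt, args); }
//       public void LogDebug(string fmt, params object[] args) { Write("[DEBUG]", fmt, args); }
//   }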
public interface IVoiceTransport
{
bool IsChannelJoined(int channelId);
// targetPlayerId: to all if 0, to myself if -1
void SendVoicesInfo(IEnumerable<LocalVoice> voices, int channelId, int targetPlayerId);
// targetPlayerId: to all if 0, to myself if -1
void SendVoiceRemove(LocalVoice voice, int channelId, int targetPlayerId);
// targetPlayerId: to all if 0, to myself if -1
void SendFrame(ArraySegment<byte> data, FrameFlags flags, byte evNumber, byte voiceId, int channelId, int targetPlayerId, bool reliable, LocalVoice localVoice);
string ChannelIdStr(int channelId);
string PlayerIdStr(int playerId);
}
/// <summary>
/// Voice client interacts with other clients on the network via IVoiceTransport.
/// </summary>
public class VoiceClient : IDisposable
{
internal IVoiceTransport transport;
internal ILogger logger;
/// <summary>Lost frames counter.</summary>
public int FramesLost { get; internal set; }
/// <summary>Received frames counter.</summary>
public int FramesReceived { get; private set; }
/// <summary>Sent frames counter.</summary>
public int FramesSent { get { int x = 0; foreach (var v in this.localVoices) { x += v.Value.FramesSent; } return x; } }
/// <summary>Sent frames bytes counter.</summary>
public int FramesSentBytes { get { int x = 0; foreach (var v in this.localVoices) { x += v.Value.FramesSentBytes; } return x; } }
/// <summary>Average time required for a voice packet to return to the sender.</summary>
public int RoundTripTime { get; private set; }
/// <summary>Average round trip time variation.</summary>
public int RoundTripTimeVariance { get; private set; }
/// <summary>Do not log warning when duplicate info received.</summary>
public bool SuppressInfoDuplicateWarning { get; set; }
/// <summary>Remote voice info event delegate.</summary>
public delegate void RemoteVoiceInfoDelegate(int channelId, int playerId, byte voiceId, VoiceInfo voiceInfo, ref RemoteVoiceOptions options);
/// <summary>
/// Register a method to be called when remote voice info arrives (after join or new remote voice creation).
/// Method parameters: (int channelId, int playerId, byte voiceId, VoiceInfo voiceInfo, ref RemoteVoiceOptions options);
/// </summary>
public RemoteVoiceInfoDelegate OnRemoteVoiceInfoAction { get; set; }
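// Subscription sketch (assumes 'voiceClient' is a VoiceClient instance; the handler
// below only logs the stream parameters and leaves 'options' unchanged):
//
//   voiceClient.OnRemoteVoiceInfoAction =
//       (int channelId, int playerId, byte voiceId, VoiceInfo voiceInfo, ref RemoteVoiceOptions options) =>
//       {
//           Console.WriteLine("Remote voice #" + voiceId + " of player " + playerId + ": " + voiceInfo);
//       };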
/// <summary>Lost frames simulation ratio.</summary>
public int DebugLostPercent { get; set; }
private int prevRtt = 0;
/// <summary>Returns a copy of the list of all local voices.</summary>
public IEnumerable<LocalVoice> LocalVoices
{
get
{
var res = new LocalVoice[this.localVoices.Count];
this.localVoices.Values.CopyTo(res, 0);
return res;
}
}
/// <summary>Returns a copy of the list of local voices in the given channel.</summary>
public IEnumerable<LocalVoice> LocalVoicesInChannel(int channelId)
{
List<LocalVoice> channelVoices;
if (this.localVoicesPerChannel.TryGetValue(channelId, out channelVoices))
{
var res = new LocalVoice[channelVoices.Count];
channelVoices.CopyTo(res, 0);
return res;
}
else
{
return new LocalVoice[0];
}
}
/// <summary>Iterates through all remote voice infos.</summary>
public IEnumerable<RemoteVoiceInfo> RemoteVoiceInfos
{
get
{
foreach (var playerVoices in this.remoteVoices)
{
foreach (var voice in playerVoices.Value)
{
yield return new RemoteVoiceInfo(voice.Value.channelId, playerVoices.Key, voice.Key, voice.Value.Info);
}
}
}
}
public void LogSpacingProfiles()
{
foreach (var voice in this.localVoices)
{
voice.Value.SendSpacingProfileStart(); // in case it's not started yet
this.logger.LogInfo(voice.Value.LogPrefix + " ev. prof.: " + voice.Value.SendSpacingProfileDump);
}
foreach (var playerVoices in this.remoteVoices)
{
foreach (var voice in playerVoices.Value)
{
voice.Value.ReceiveSpacingProfileStart(); // in case it's not started yet
this.logger.LogInfo(voice.Value.LogPrefix + " ev. prof.: " + voice.Value.ReceiveSpacingProfileDump);
}
}
}
public void LogStats()
{
int dc = FrameBuffer.statDisposerCreated;
int dd = FrameBuffer.statDisposerDisposed;
int pp = FrameBuffer.statPinned;
int pu = FrameBuffer.statUnpinned;
this.logger.LogInfo("[PV] FrameBuffer stats Disposer: " + dc + " - " + dd + " = " + (dc - dd));
this.logger.LogInfo("[PV] FrameBuffer stats Pinned: " + pp + " - " + pu + " = " + (pp - pu));
}
public void SetRemoteVoiceDelayFrames(Codec codec, int delayFrames)
{
remoteVoiceDelayFrames[codec] = delayFrames;
foreach (var playerVoices in this.remoteVoices)
{
foreach (var voice in playerVoices.Value)
{
if (codec == voice.Value.Info.Codec)
{
voice.Value.DelayFrames = delayFrames;
}
}
}
}
// store delay to apply on new remote voices
private Dictionary<Codec, int> remoteVoiceDelayFrames = new Dictionary<Codec, int>();
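// Usage sketch: keep two frames of jitter protection for all current and future
// Opus voices (the value 2 is illustrative, not a recommendation):
//
//   voiceClient.SetRemoteVoiceDelayFrames(Codec.AudioOpus, 2);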
public struct CreateOptions
{
public byte VoiceIDMin;
public byte VoiceIDMax;
static public CreateOptions Default = new CreateOptions()
{
VoiceIDMin = 1, // 0 means invalid id
VoiceIDMax = 15 // preserve ids for other clients creating voices for the same player (server plugin)
};
}
/// <summary>Creates VoiceClient instance</summary>
public VoiceClient(IVoiceTransport transport, ILogger logger, CreateOptions opt = default(CreateOptions))
{
this.transport = transport;
this.logger = logger;
if (opt.Equals(default(CreateOptions)))
{
opt = CreateOptions.Default;
}
this.voiceIDMin = opt.VoiceIDMin;
this.voiceIDMax = opt.VoiceIDMax;
this.voiceIdLast = this.voiceIDMax;
}
/// <summary>
/// This method services all local voices: it reads data from their audio sources and sends out the frames they produce.
/// Call this method regularly (2..20 times a second).
/// </summary>
public void Service()
{
foreach (var v in localVoices)
{
v.Value.service();
}
}
private LocalVoice createLocalVoice(int channelId, Func<byte, int, LocalVoice> voiceFactory)
{
var newId = getNewVoiceId();
if (newId != 0)
{
LocalVoice v = voiceFactory(newId, channelId);
if (v != null)
{
addVoice(newId, channelId, v);
this.logger.LogInfo(v.LogPrefix + " added enc: " + v.Info.ToString());
return v;
}
}
return null;
}
/// <summary>
/// Creates a basic outgoing stream without data processing support. The provided encoder should generate the output data stream.
/// </summary>
/// <param name="voiceInfo">Outgoing stream parameters.</param>
/// <param name="channelId">Transport channel specific to transport.</param>
/// <param name="encoder">Encoder producing the stream.</param>
/// <returns>Outgoing stream handler.</returns>
public LocalVoice CreateLocalVoice(VoiceInfo voiceInfo, int channelId = 0, IEncoder encoder = null)
{
return (LocalVoice)createLocalVoice(channelId, (vId, chId) => new LocalVoice(this, encoder, vId, voiceInfo, chId));
}
/// <summary>
/// Creates an outgoing stream consuming sequences of values passed in array buffers of arbitrary length, which are repacked into frames of constant length for further processing and encoding.
/// </summary>
/// <typeparam name="T">Type of data consumed by outgoing stream (element type of array buffers).</typeparam>
/// <param name="voiceInfo">Outgoing stream parameters.</param>
/// <param name="frameSize">Size of buffer LocalVoiceFramed repacks input data stream to.</param>
/// <param name="channelId">Transport channel specific to transport.</param>
/// <param name="encoder">Encoder compressing data stream in pipeline.</param>
/// <returns>Outgoing stream handler.</returns>
public LocalVoiceFramed<T> CreateLocalVoiceFramed<T>(VoiceInfo voiceInfo, int frameSize, int channelId = 0, IEncoder encoder = null)
{
return (LocalVoiceFramed<T>)createLocalVoice(channelId, (vId, chId) => new LocalVoiceFramed<T>(this, encoder, vId, voiceInfo, chId, frameSize));
}
public LocalVoiceAudio<T> CreateLocalVoiceAudio<T>(VoiceInfo voiceInfo, IAudioDesc audioSourceDesc, IEncoder encoder, int channelId)
{
return (LocalVoiceAudio<T>)createLocalVoice(channelId, (vId, chId) => LocalVoiceAudio<T>.Create(this, vId, encoder, voiceInfo, audioSourceDesc, chId));
}
/// <summary>
/// Creates an outgoing audio stream of automatically assigned sample type and adds procedures (callback or serviceable) for consuming the given audio source's data.
/// Adds audio-specific features (e.g. resampling, level meter) to the processing pipeline and to the returned stream handler.
/// </summary>
/// <param name="voiceInfo">Outgoing stream parameters.</param>
/// <param name="source">Streaming audio source.</param>
/// <param name="sampleType">Voice's audio sample type. If does not match source audio sample type, conversion will occur.</param>
/// <param name="channelId">Transport channel specific to transport.</param>
/// <param name="encoder">Audio encoder. Set to null to use default Opus encoder.</param>
/// <returns>Outgoing stream handler.</returns>
/// <remarks>
/// audioSourceDesc.SamplingRate and voiceInfo.SamplingRate may not match. Automatic resampling will occur in this case.
/// </remarks>
public LocalVoice CreateLocalVoiceAudioFromSource(VoiceInfo voiceInfo, IAudioDesc source, AudioSampleType sampleType, IEncoder encoder = null, int channelId = 0)
{
// resolve AudioSampleType.Source to concrete type for encoder creation
if (sampleType == AudioSampleType.Source)
{
if (source is IAudioPusher<float> || source is IAudioReader<float>)
{
sampleType = AudioSampleType.Float;
}
else if (source is IAudioPusher<short> || source is IAudioReader<short>)
{
sampleType = AudioSampleType.Short;
}
}
if (encoder == null)
{
switch (sampleType)
{
case AudioSampleType.Float:
encoder = Platform.CreateDefaultAudioEncoder<float>(logger, voiceInfo);
break;
case AudioSampleType.Short:
encoder = Platform.CreateDefaultAudioEncoder<short>(logger, voiceInfo);
break;
}
}
if (source is IAudioPusher<float>)
{
if (sampleType == AudioSampleType.Short)
{
logger.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioPusher float to short.");
var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
// we can safely reuse the same buffer in callbacks from native code
//
var bufferFactory = new FactoryReusableArray<float>(0);
((IAudioPusher<float>)source).SetCallback(buf => {
var shortBuf = localVoice.BufferFactory.New(buf.Length);
AudioUtil.Convert(buf, shortBuf, buf.Length);
localVoice.PushDataAsync(shortBuf);
}, bufferFactory);
return localVoice;
}
else
{
var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
((IAudioPusher<float>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
return localVoice;
}
}
else if (source is IAudioPusher<short>)
{
if (sampleType == AudioSampleType.Float)
{
logger.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioPusher short to float.");
var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
// we can safely reuse the same buffer in callbacks from native code
//
var bufferFactory = new FactoryReusableArray<short>(0);
((IAudioPusher<short>)source).SetCallback(buf =>
{
var floatBuf = localVoice.BufferFactory.New(buf.Length);
AudioUtil.Convert(buf, floatBuf, buf.Length);
localVoice.PushDataAsync(floatBuf);
}, bufferFactory);
return localVoice;
}
else
{
var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
((IAudioPusher<short>)source).SetCallback(buf => localVoice.PushDataAsync(buf), localVoice.BufferFactory);
return localVoice;
}
}
else if (source is IAudioReader<float>)
{
if (sampleType == AudioSampleType.Short)
{
logger.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioReader float to short.");
var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPoolFloatToShort(localVoice, source as IAudioReader<float>);
return localVoice;
}
else
{
var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPool<float>(localVoice, source as IAudioReader<float>);
return localVoice;
}
}
else if (source is IAudioReader<short>)
{
if (sampleType == AudioSampleType.Float)
{
logger.LogInfo("[PV] Creating local voice with source samples type conversion from IAudioReader short to float.");
var localVoice = CreateLocalVoiceAudio<float>(voiceInfo, source, encoder, channelId);
localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPoolShortToFloat(localVoice, source as IAudioReader<short>);
return localVoice;
}
else
{
var localVoice = CreateLocalVoiceAudio<short>(voiceInfo, source, encoder, channelId);
localVoice.LocalUserServiceable = new BufferReaderPushAdapterAsyncPool<short>(localVoice, source as IAudioReader<short>);
return localVoice;
}
}
else
{
logger.LogError("[PV] CreateLocalVoiceAudioFromSource does not support Voice.IAudioDesc of type {0}", source.GetType());
return LocalVoiceAudioDummy.Dummy;
}
}
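// Creation sketch (assumes 'mic' implements IAudioPusher<short> or IAudioReader<short>;
// 'encoder' is left null, so the default platform audio encoder is created):
//
//   var info = VoiceInfo.CreateAudioOpus(POpusCodec.Enums.SamplingRate.Sampling48000, 1,
//       OpusCodec.FrameDuration.Frame20ms, 30000);
//   var voice = voiceClient.CreateLocalVoiceAudioFromSource(info, mic, AudioSampleType.Source);
//   voice.TransmitEnabled = true;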
#if PHOTON_VOICE_VIDEO_ENABLE
/// <summary>
/// Creates outgoing video stream consuming sequence of image buffers.
/// </summary>
/// <param name="voiceInfo">Outgoing stream parameters.</param>
/// <param name="recorder">Video recorder.</param>
/// <param name="channelId">Transport channel specific to transport.</param>
/// <returns>Outgoing stream handler.</returns>
public LocalVoiceVideo CreateLocalVoiceVideo(VoiceInfo voiceInfo, IVideoRecorder recorder, int channelId = 0)
{
var lv = (LocalVoiceVideo)createLocalVoice(channelId, (vId, chId) => new LocalVoiceVideo(this, recorder.Encoder, vId, voiceInfo, chId));
if (recorder is IVideoRecorderPusher)
{
(recorder as IVideoRecorderPusher).VideoSink = lv;
}
return lv;
}
#endif
private byte voiceIDMin;
private byte voiceIDMax;
private byte voiceIdLast; // initialized with voiceIDMax so that the first assigned id is voiceIDMin
private byte idInc(byte id)
{
return id == voiceIDMax ? voiceIDMin : (byte)(id + 1);
}
private byte getNewVoiceId()
{
var used = new bool[256];
foreach (var v in localVoices)
{
used[v.Value.id] = true;
}
for (byte id = idInc(voiceIdLast); id != voiceIdLast; id = idInc(id))
{
if (!used[id])
{
voiceIdLast = id;
return id;
}
}
return 0;
}
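// With the default range (VoiceIDMin = 1, VoiceIDMax = 15), IDs are handed out
// round-robin starting at 1, skipping IDs still held by existing voices; 0 is
// returned only when all IDs in the range are taken.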
void addVoice(byte newId, int channelId, LocalVoice v)
{
localVoices[newId] = v;
List<LocalVoice> voiceList;
if (!localVoicesPerChannel.TryGetValue(channelId, out voiceList))
{
voiceList = new List<LocalVoice>();
localVoicesPerChannel[channelId] = voiceList;
}
voiceList.Add(v);
if (this.transport.IsChannelJoined(channelId))
{
sendVoicesInfoAndConfigFrame(new List<LocalVoice>() { v }, channelId, 0); // broadcast if joined
}
v.InterestGroup = this.GlobalInterestGroup;
}
/// <summary>
/// Removes local voice (outgoing data stream).
/// </summary>
/// <param name="voice">Handler of the outgoing stream to be removed.</param>
public void RemoveLocalVoice(LocalVoice voice)
{
this.localVoices.Remove(voice.id);
this.localVoicesPerChannel[voice.channelId].Remove(voice);
if (this.transport.IsChannelJoined(voice.channelId))
{
this.transport.SendVoiceRemove(voice, voice.channelId, 0);
}
voice.Dispose();
this.logger.LogInfo(voice.LogPrefix + " removed");
}
private void sendChannelVoicesInfo(int channelId, int targetPlayerId)
{
if (this.transport.IsChannelJoined(channelId))
{
List<LocalVoice> voiceList;
if (this.localVoicesPerChannel.TryGetValue(channelId, out voiceList))
{
sendVoicesInfoAndConfigFrame(voiceList, channelId, targetPlayerId);
}
}
}
internal void sendVoicesInfoAndConfigFrame(IEnumerable<LocalVoice> voiceList, int channelId, int targetPlayerId)
{
this.transport.SendVoicesInfo(voiceList, channelId, targetPlayerId);
foreach (var v in voiceList)
{
v.sendConfigFrame(targetPlayerId);
}
// send debug echo infos to myself if broadcast requested
if (targetPlayerId == 0)
{
var debugEchoVoices = localVoices.Values.Where(x => x.DebugEchoMode);
if (debugEchoVoices.Count() > 0)
{
this.transport.SendVoicesInfo(debugEchoVoices, channelId, -1);
}
}
}
internal byte GlobalInterestGroup
{
get { return this.globalInterestGroup; }
set
{
this.globalInterestGroup = value;
foreach (var v in this.localVoices)
{
v.Value.InterestGroup = this.globalInterestGroup;
}
}
}
#region nonpublic
private byte globalInterestGroup;
private Dictionary<byte, LocalVoice> localVoices = new Dictionary<byte, LocalVoice>();
private Dictionary<int, List<LocalVoice>> localVoicesPerChannel = new Dictionary<int, List<LocalVoice>>();
// player id -> voice id -> voice
private Dictionary<int, Dictionary<byte, RemoteVoice>> remoteVoices = new Dictionary<int, Dictionary<byte, RemoteVoice>>();
private void clearRemoteVoices()
{
foreach (var playerVoices in remoteVoices)
{
foreach (var voice in playerVoices.Value)
{
voice.Value.removeAndDispose();
}
}
remoteVoices.Clear();
this.logger.LogInfo("[PV] Remote voices cleared");
}
private void clearRemoteVoicesInChannel(int channelId)
{
foreach (var playerVoices in remoteVoices)
{
List<byte> toRemove = new List<byte>();
foreach (var voice in playerVoices.Value)
{
if (voice.Value.channelId == channelId)
{
voice.Value.removeAndDispose();
toRemove.Add(voice.Key);
}
}
foreach (var id in toRemove)
{
playerVoices.Value.Remove(id);
}
}
this.logger.LogInfo("[PV] Remote voices for channel " + this.channelStr(channelId) + " cleared");
}
private void clearRemoteVoicesInChannelForPlayer(int channelId, int playerId)
{
Dictionary<byte, RemoteVoice> playerVoices = null;
if (remoteVoices.TryGetValue(playerId, out playerVoices))
{
List<byte> toRemove = new List<byte>();
foreach (var v in playerVoices)
{
if (v.Value.channelId == channelId)
{
v.Value.removeAndDispose();
toRemove.Add(v.Key);
}
}
foreach (var id in toRemove)
{
playerVoices.Remove(id);
}
}
}
public void onJoinChannel(int channel)
{
sendChannelVoicesInfo(channel, 0); // this client joined: broadcast to all
}
public void onLeaveChannel(int channel)
{
clearRemoteVoicesInChannel(channel);
}
public void onLeaveAllChannels()
{
clearRemoteVoices();
}
public void onPlayerJoin(int channelId, int playerId)
{
sendChannelVoicesInfo(channelId, playerId); // send to the newly joined player only
}
public void onPlayerLeave(int channelId, int playerId)
{
clearRemoteVoicesInChannelForPlayer(channelId, playerId);
}
public void onVoiceInfo(int channelId, int playerId, byte voiceId, byte eventNumber, VoiceInfo info)
{
Dictionary<byte, RemoteVoice> playerVoices = null;
if (!remoteVoices.TryGetValue(playerId, out playerVoices))
{
playerVoices = new Dictionary<byte, RemoteVoice>();
remoteVoices[playerId] = playerVoices;
}
if (!playerVoices.ContainsKey(voiceId))
{
var voiceStr = " p#" + this.playerStr(playerId) + " v#" + voiceId + " ch#" + channelStr(channelId);
this.logger.LogInfo("[PV] " + voiceStr + " Info received: " + info.ToString() + " ev=" + eventNumber);
var logPrefix = "[PV] Remote " + info.Codec + voiceStr;
RemoteVoiceOptions options = new RemoteVoiceOptions(logger, logPrefix, info);
if (this.OnRemoteVoiceInfoAction != null)
{
this.OnRemoteVoiceInfoAction(channelId, playerId, voiceId, info, ref options);
}
var rv = new RemoteVoice(this, options, channelId, playerId, voiceId, info, eventNumber);
playerVoices[voiceId] = rv;
int delayFrames;
if (remoteVoiceDelayFrames.TryGetValue(info.Codec, out delayFrames))
{
rv.DelayFrames = delayFrames;
}
}
else
{
if (!this.SuppressInfoDuplicateWarning)
{
this.logger.LogWarning("[PV] Info duplicate for voice #" + voiceId + " of player " + this.playerStr(playerId) + " at channel " + this.channelStr(channelId));
}
}
}
public void onVoiceRemove(int channelId, int playerId, byte[] voiceIds)
{
Dictionary<byte, RemoteVoice> playerVoices = null;
if (remoteVoices.TryGetValue(playerId, out playerVoices))
{
foreach (var voiceId in voiceIds)
{
RemoteVoice voice;
if (playerVoices.TryGetValue(voiceId, out voice))
{
playerVoices.Remove(voiceId);
this.logger.LogInfo("[PV] Remote voice #" + voiceId + " of player " + this.playerStr(playerId) + " at channel " + this.channelStr(channelId) + " removed");
voice.removeAndDispose();
}
else
{
this.logger.LogWarning("[PV] Remote voice #" + voiceId + " of player " + this.playerStr(playerId) + " at channel " + this.channelStr(channelId) + " not found when trying to remove");
}
}
}
else
{
this.logger.LogWarning("[PV] Remote voice list of player " + this.playerStr(playerId) + " at channel " + this.channelStr(channelId) + " not found when trying to remove voice(s)");
}
}
Random rnd = new Random();
public void onFrame(int channelId, int playerId, byte voiceId, byte evNumber, ref FrameBuffer receivedBytes, bool isLocalPlayer)
{
if (isLocalPlayer)
{
// rtt measurement in debug echo mode
LocalVoice voice;
if (this.localVoices.TryGetValue(voiceId, out voice))
{
int sendTime;
if (voice.eventTimestamps.TryGetValue(evNumber, out sendTime))
{
int rtt = Environment.TickCount - sendTime;
int rttvar = rtt - prevRtt;
prevRtt = rtt;
if (rttvar < 0) rttvar = -rttvar;
this.RoundTripTimeVariance = (rttvar + RoundTripTimeVariance * 19) / 20;
this.RoundTripTime = (rtt + RoundTripTime * 19) / 20;
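// Both metrics are exponential moving averages with smoothing factor 1/20: a single
// 100 ms spike on a stable 40 ms link moves RoundTripTime to (100 + 40 * 19) / 20 = 43 ms.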
}
}
//internal Dictionary<byte, DateTime> localEventTimestamps = new Dictionary<byte, DateTime>();
}
if (this.DebugLostPercent > 0 && rnd.Next(100) < this.DebugLostPercent)
{
this.logger.LogWarning("[PV] Debug Lost Sim: 1 packet dropped");
return;
}
FramesReceived++;
Dictionary<byte, RemoteVoice> playerVoices = null;
if (remoteVoices.TryGetValue(playerId, out playerVoices))
{
RemoteVoice voice = null;
if (playerVoices.TryGetValue(voiceId, out voice))
{
voice.receiveBytes(ref receivedBytes, evNumber);
}
else
{
this.logger.LogWarning("[PV] Frame event for not inited voice #" + voiceId + " of player " + this.playerStr(playerId) + " at channel " + this.channelStr(channelId));
}
}
else
{
this.logger.LogWarning("[PV] Frame event for voice #" + voiceId + " of not inited player " + this.playerStr(playerId) + " at channel " + this.channelStr(channelId));
}
}
internal string channelStr(int channelId)
{
var str = this.transport.ChannelIdStr(channelId);
if (str != null)
{
return channelId + "(" + str + ")";
}
else
{
return channelId.ToString();
}
}
internal string playerStr(int playerId)
{
var str = this.transport.PlayerIdStr(playerId);
if (str != null)
{
return playerId + "(" + str + ")";
}
else
{
return playerId.ToString();
}
}
//public string ToStringFull()
//{
// return string.Format("Photon.Voice.Client, local: {0}, remote: {1}", localVoices.Count, remoteVoices.Count);
//}
#endregion
public void Dispose()
{
foreach (var v in this.localVoices)
{
v.Value.Dispose();
}
foreach (var playerVoices in remoteVoices)
{
foreach (var voice in playerVoices.Value)
{
voice.Value.Dispose();
}
}
}
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: b957d86707d108044af9c76171188f87
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,377 @@
// -----------------------------------------------------------------------
// <copyright file="VoiceCodec.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2017 Exit Games GmbH
// </copyright>
// <summary>
// Photon data streaming support.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice
{
public enum FrameFlags : byte
{
Config = 1,
KeyFrame = 2,
PartialFrame = 4,
EndOfStream = 8
}
/// <summary>Generic encoder interface.</summary>
/// <remarks>
/// Depending on implementation, the encoder should either call Output on each data frame or return the next data frame in a DequeueOutput() call.
/// </remarks>
public interface IEncoder : IDisposable
{
/// <summary>If not null, the object is in invalid state.</summary>
string Error { get; }
/// <summary>Set callback encoder calls on each encoded data frame (if such output supported).</summary>
Action<ArraySegment<byte>, FrameFlags> Output { set; }
/// <summary>Returns next encoded data frame (if such output supported).</summary>
ArraySegment<byte> DequeueOutput(out FrameFlags flags);
/// <summary>Forces the encoder to flush and produce a frame with the EndOfStream flag (in the output queue).</summary>
void EndOfStream();
/// <summary>Returns a platform-specific interface.</summary>
I GetPlatformAPI<I>() where I : class;
}
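// Polling sketch for the queue-based output model (illustrative; encoders that use
// the Output callback instead return an empty segment from DequeueOutput):
//
//   FrameFlags flags;
//   for (var data = encoder.DequeueOutput(out flags); data.Count > 0; data = encoder.DequeueOutput(out flags))
//   {
//       send(data, flags); // 'send' is a placeholder for transport-specific delivery
//   }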
/// <summary>Interface for an encoder which consumes input data via explicit call.</summary>
public interface IEncoderDirect<B> : IEncoder
{
/// <summary>Consumes the given raw data.</summary>
/// <param name="buf">Array containing raw data (e.g. audio samples).</param>
void Input(B buf);
}
/// <summary>Interface for an encoder which consumes images via explicit call.</summary>
public interface IEncoderDirectImage : IEncoderDirect<ImageBufferNative>
{
/// <summary>Recommended encoder input image format. Encoder may support other formats.</summary>
ImageFormat ImageFormat { get; }
}
/// <summary>Generic decoder interface.</summary>
public interface IDecoder : IDisposable
{
/// <summary>Open (initialize) the decoder.</summary>
/// <param name="info">Properties of the data stream to decode.</param>
void Open(VoiceInfo info);
/// <summary>If not null, the object is in invalid state.</summary>
string Error { get; }
/// <summary>Consumes the given encoded data.</summary>
/// <remarks>
/// The callee can call buf.Retain() to prevent the caller from disposing the buffer.
/// In this case, the callee should call buf.Release() when buffer is no longer needed.
/// </remarks>
void Input(ref FrameBuffer buf);
}
/// <summary>Interface for a decoder which outputs data via explicit call.</summary>
public interface IDecoderDirect<B> : IDecoder
{
/// <summary>Callback to call when a new decoded data buffer is available.</summary>
Action<B> Output { get; set; }
}
/// <summary>Exception thrown if an unsupported audio sample type is encountered.</summary>
/// <remarks>
/// PhotonVoice generally supports 32-bit floating point ("float") or 16-bit signed integer ("short") audio,
/// but it usually won't be converted automatically due to the high CPU overhead (and potential loss of precision) involved.
/// </remarks>
class UnsupportedSampleTypeException : Exception
{
/// <summary>Create a new UnsupportedSampleTypeException.</summary>
/// <param name="t">The sample type actually encountered.</param>
public UnsupportedSampleTypeException(Type t) : base("[PV] unsupported sample type: " + t) { }
}
/// <summary>Exception thrown if an unsupported codec is encountered.</summary>
class UnsupportedCodecException : Exception
{
/// <summary>Create a new UnsupportedCodecException.</summary>
/// <param name="info">The info prepending standard message.</param>
/// <param name="codec">The codec actually encountered.</param>
public UnsupportedCodecException(string info, Codec codec) : base("[PV] " + info + ": unsupported codec: " + codec) { }
}
/// <summary>Exception thrown if an unsupported platform is encountered.</summary>
class UnsupportedPlatformException : Exception
{
/// <summary>Create a new UnsupportedPlatformException.</summary>
/// <param name="subject">The info prepending standard message.</param>
/// /// <param name="platform">Optional platform name.</param>
public UnsupportedPlatformException(string subject, string platform = null) : base("[PV] " + subject + " does not support " + (platform == null ? "current" : platform) + " platform") { }
}
/// <summary>Enum for Media Codecs supported by PhotonVoice.</summary>
/// <remarks>Transmitted in <see cref="VoiceInfo"></see>. Do not change the values of this Enum!</remarks>
public enum Codec
{
Raw = 1,
/// <summary>OPUS audio</summary>
AudioOpus = 11,
#if PHOTON_VOICE_VIDEO_ENABLE
VideoVP8 = 21,
VideoVP9 = 22,
VideoH264 = 31,
#endif
}
public enum ImageFormat
{
Undefined,
I420, // native vpx (no format conversion before encoding)
YV12, // native vpx (no format conversion before encoding)
Android420,
ABGR,
BGRA,
ARGB,
NV12,
}
public enum Rotation
{
Undefined = -1,
Rotate0 = 0, // No rotation.
Rotate90 = 90, // Rotate 90 degrees clockwise.
Rotate180 = 180, // Rotate 180 degrees.
Rotate270 = 270, // Rotate 270 degrees clockwise.
}
public struct Flip
{
public bool IsVertical { get; private set; }
public bool IsHorizontal { get; private set; }
public static bool operator ==(Flip f1, Flip f2)
{
return f1.IsVertical == f2.IsVertical && f1.IsHorizontal == f2.IsHorizontal;
}
public static bool operator !=(Flip f1, Flip f2)
{
return f1.IsVertical != f2.IsVertical || f1.IsHorizontal != f2.IsHorizontal;
}
// trivial implementation to avoid warnings CS0660 and CS0661 about missing overrides when == and != defined
public override bool Equals(object obj) { return base.Equals(obj); }
public override int GetHashCode() { return base.GetHashCode(); }
public static Flip operator *(Flip f1, Flip f2)
{
return new Flip
{
IsVertical = f1.IsVertical != f2.IsVertical,
IsHorizontal = f1.IsHorizontal != f2.IsHorizontal,
};
}
public static Flip None;
public static Flip Vertical = new Flip() { IsVertical = true };
public static Flip Horizontal = new Flip() { IsHorizontal = true };
public static Flip Both = Vertical * Horizontal;
}
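// Flip composition via operator* is a per-axis XOR, e.g.
// Flip.Vertical * Flip.Horizontal == Flip.Both and Flip.Vertical * Flip.Vertical == Flip.None.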
// Image buffer pool support
public struct ImageBufferInfo
{
[StructLayout(LayoutKind.Sequential)] // the struct instance may be used where IntPtr[] expected by native method
public struct StrideSet
{
private int stride0;
private int stride1;
private int stride2;
private int stride3;
public StrideSet(int length, int s0 = 0, int s1 = 0, int s2 = 0, int s3 = 0)
{
Length = length;
stride0 = s0;
stride1 = s1;
stride2 = s2;
stride3 = s3;
}
public int this[int key]
{
get
{
switch (key)
{
case 0: return stride0;
case 1: return stride1;
case 2: return stride2;
case 3: return stride3;
default: return 0;
}
}
set
{
switch (key)
{
case 0: stride0 = value; break;
case 1: stride1 = value; break;
case 2: stride2 = value; break;
case 3: stride3 = value; break;
}
}
}
public int Length { get; private set; }
}
public int Width { get; }
public int Height { get; }
public StrideSet Stride { get; }
public ImageFormat Format { get; }
public Rotation Rotation { get; set; }
public Flip Flip { get; set; }
public ImageBufferInfo(int width, int height, StrideSet stride, ImageFormat format)
{
Width = width;
Height = height;
Stride = stride;
Format = format;
Rotation = Rotation.Rotate0;
Flip = Flip.None;
}
}
public class ImageBufferNative
{
[StructLayout(LayoutKind.Sequential)] // the struct instance may be used where IntPtr[] expected by native method (does not work on Mac, so we use intermediate IntPtr[] to pass planes)
public struct PlaneSet
{
private IntPtr plane0;
private IntPtr plane1;
private IntPtr plane2;
private IntPtr plane3;
public PlaneSet(int length, IntPtr p0 = default(IntPtr), IntPtr p1 = default(IntPtr), IntPtr p2 = default(IntPtr), IntPtr p3 = default(IntPtr))
{
Length = length;
plane0 = p0;
plane1 = p1;
plane2 = p2;
plane3 = p3;
}
public IntPtr this[int key]
{
get
{
switch (key)
{
case 0: return plane0;
case 1: return plane1;
case 2: return plane2;
case 3: return plane3;
default: return IntPtr.Zero;
}
}
set
{
switch (key)
{
case 0: plane0 = value; break;
case 1: plane1 = value; break;
case 2: plane2 = value; break;
case 3: plane3 = value; break;
}
}
}
public int Length { get; private set; }
}
public ImageBufferNative(ImageBufferInfo info)
{
Info = info;
Planes = new PlaneSet(info.Stride.Length);
}
public ImageBufferNative(IntPtr buf, int width, int height, int stride, ImageFormat imageFormat)
{
Info = new ImageBufferInfo(width, height, new ImageBufferInfo.StrideSet(1, stride), imageFormat);
Planes = new PlaneSet(1, buf);
}
public ImageBufferInfo Info;
public PlaneSet Planes; // operator[] setter does not compile if this member is a property (because [] applies to a copy of the property)
// Release resources for dispose or reuse.
public virtual void Release() { }
public virtual void Dispose() { }
}
// Allocates native buffers for planes
// Supports releasing to image pool with allocation reuse
public class ImageBufferNativeAlloc : ImageBufferNative, IDisposable
{
ImageBufferNativePool<ImageBufferNativeAlloc> pool;
public ImageBufferNativeAlloc(ImageBufferNativePool<ImageBufferNativeAlloc> pool, ImageBufferInfo info) : base(info)
{
this.pool = pool;
for (int i = 0; i < info.Stride.Length; i++)
{
Planes[i] = Marshal.AllocHGlobal(info.Stride[i] * info.Height);
}
}
public override void Release()
{
if (pool != null)
{
pool.Release(this);
}
}
public override void Dispose()
{
for (int i = 0; i < Info.Stride.Length; i++)
{
Marshal.FreeHGlobal(Planes[i]);
}
}
}
// Acquires byte[] plane via GHandle. Optimized for single plane images.
// Supports releasing to image pool after freeing GHandle (object itself reused only)
public class ImageBufferNativeGCHandleSinglePlane : ImageBufferNative, IDisposable
{
ImageBufferNativePool<ImageBufferNativeGCHandleSinglePlane> pool;
GCHandle planeHandle;
public ImageBufferNativeGCHandleSinglePlane(ImageBufferNativePool<ImageBufferNativeGCHandleSinglePlane> pool, ImageBufferInfo info) : base(info)
{
if (info.Stride.Length != 1)
{
throw new Exception("ImageBufferNativeGCHandleSinglePlane wrong plane count " + info.Stride.Length);
}
this.pool = pool;
}
public void PinPlane(byte[] plane)
{
planeHandle = GCHandle.Alloc(plane, GCHandleType.Pinned);
Planes[0] = planeHandle.AddrOfPinnedObject();
}
public override void Release()
{
planeHandle.Free();
if (pool != null)
{
pool.Release(this);
}
}
public override void Dispose()
{
}
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 7ebdb2c4bb62b814ab3d75f168ecf94e
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,416 @@
// -----------------------------------------------------------------------
// <copyright file="VoiceFramed.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2017 Exit Games GmbH
// </copyright>
// <summary>
// Photon data streaming support.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
#if DUMP_TO_FILE
using System.IO;
#endif
using System.Threading;
namespace Photon.Voice
{
/// <summary>Audio Processor interface.</summary>
public interface IProcessor<T> : IDisposable
{
/// <summary>Process a frame of audio data.</summary>
/// <param name="buf">Buffer containing input audio data</param>
/// <returns>Buffer containing output audio data or null if frame has been discarded (VAD)</returns>
T[] Process(T[] buf);
}
/// <summary>Utility class to re-frame audio packets.</summary>
public class Framer<T>
{
T[] frame;
/// <summary>Create new Framer instance.</summary>
public Framer(int frameSize)
{
this.frame = new T[frameSize];
var x = new T[1];
if (x[0] is byte)
this.sizeofT = sizeof(byte);
else if (x[0] is short)
this.sizeofT = sizeof(short);
else if (x[0] is float)
this.sizeofT = sizeof(float);
else
throw new Exception("Input data type is not supported: " + x[0].GetType());
}
int sizeofT;
int framePos = 0;
/// <summary>Get the number of frames available after adding bufLen samples.</summary>
/// <param name="bufLen">Number of samples that would be added.</param>
/// <returns>Number of full frames available when adding bufLen samples.</returns>
public int Count(int bufLen)
{
return (bufLen + framePos) / frame.Length;
}
/// <summary>Append arbitrary-sized buffer and return available full frames.</summary>
/// <param name="buf">Array of samples to add.</param>
/// <returns>Enumerator of full frames (might be none).</returns>
public IEnumerable<T[]> Frame(T[] buf)
{
// quick return in trivial case
if (frame.Length == buf.Length && framePos == 0)
{
yield return buf;
}
else
{
var bufPos = 0;
while (frame.Length - framePos <= buf.Length - bufPos)
{
var l = frame.Length - framePos;
Buffer.BlockCopy(buf, bufPos * sizeofT, frame, framePos * sizeofT, l * sizeofT);
//Console.WriteLine("=== Y {0} {1} -> {2} {3} ", bufPos, bufPos + l, sourceFramePos, sourceFramePos + l);
bufPos += l;
framePos = 0;
yield return this.frame;
}
if (bufPos != buf.Length)
{
var l = buf.Length - bufPos;
Buffer.BlockCopy(buf, bufPos * sizeofT, frame, framePos * sizeofT, l * sizeofT);
//Console.WriteLine("=== L {0} {1} -> {2} {3} ", bufPos, bufPos + l, sourceFramePos, sourceFramePos + l);
framePos += l;
}
}
}
}
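// Re-framing sketch: a Framer<short> with frame size 4 fed two 3-sample buffers
// yields one full frame and keeps the 2-sample remainder for the next call:
//
//   var framer = new Framer<short>(4);
//   foreach (var f in framer.Frame(new short[] { 1, 2, 3 })) { } // nothing yielded, Count(3) == 0
//   foreach (var f in framer.Frame(new short[] { 4, 5, 6 })) { } // yields { 1, 2, 3, 4 }; 5 and 6 are buffered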
/// <summary>
/// Typed re-framing LocalVoice
/// </summary>
/// <remarks>Base class for typed re-framing LocalVoice implementation (<see cref="LocalVoiceFramed{T}"></see>) </remarks>
public class LocalVoiceFramedBase : LocalVoice
{
/// <summary>Data flow will be repacked into frames of this size. May differ from the input voiceInfo.FrameSize. Processors should resample in this case.</summary>
public int FrameSize { get; private set; }
internal LocalVoiceFramedBase(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId, int frameSize)
: base(voiceClient, encoder, id, voiceInfo, channelId)
{
this.FrameSize = frameSize;
}
}
/// <summary>
/// Typed re-framing LocalVoice
/// </summary>
/// <remarks>
/// Consumes data in array buffers of arbitrary length. Repacks them in frames of constant length for further processing and encoding.
/// </remarks>
public class LocalVoiceFramed<T> : LocalVoiceFramedBase
{
Framer<T> framer;
#if DUMP_TO_FILE
FileStream file;
static int fileCnt = 0;
#endif
// Optionally processes input data.
// Should return arrays of exactly info.FrameSize length, or null to skip sending.
protected T[] processFrame(T[] buf)
{
lock (this.processors)
{
foreach (var p in processors)
{
buf = p.Process(buf);
if (buf == null)
{
break;
}
}
}
return buf;
}
/// <summary>
/// Adds processors after any built-in processors and everything added with AddPreProcessor.
/// </summary>
/// <param name="processors"></param>
public void AddPostProcessor(params IProcessor<T>[] processors)
{
lock (this.processors)
{
foreach (var p in processors)
{
this.processors.Add(p);
}
}
}
int preProcessorsCnt;
/// <summary>
/// Adds processors before built-in processors and everything added with AddPostProcessor.
/// </summary>
/// <param name="processors"></param>
public void AddPreProcessor(params IProcessor<T>[] processors)
{
lock (this.processors)
{
foreach (var p in processors)
{
this.processors.Insert(preProcessorsCnt++, p);
}
}
}
/// <summary>
/// Clears all processors in pipeline including built-in resampling.
/// The user should add at least a resampler processor after this call.
/// </summary>
public void ClearProcessors()
{
lock (this.processors)
{
this.processors.Clear();
preProcessorsCnt = 0;
}
}
List<IProcessor<T>> processors = new List<IProcessor<T>>();
internal LocalVoiceFramed(VoiceClient voiceClient, IEncoder encoder, byte id, VoiceInfo voiceInfo, int channelId, int frameSize)
: base(voiceClient, encoder, id, voiceInfo, channelId, frameSize)
{
#if DUMP_TO_FILE
file = File.Open("dump-" + fileCnt++ + ".raw", FileMode.Create);
#endif
if (frameSize == 0)
{
throw new Exception(LogPrefix + ": non-zero frame size required for framed stream");
}
this.framer = new Framer<T>(FrameSize);
this.bufferFactory = new FactoryPrimitiveArrayPool<T>(DATA_POOL_CAPACITY, Name + " Data", FrameSize);
}
bool dataEncodeThreadStarted;
Queue<T[]> pushDataQueue = new Queue<T[]>();
AutoResetEvent pushDataQueueReady = new AutoResetEvent(false);
public FactoryPrimitiveArrayPool<T> BufferFactory { get { return bufferFactory; } }
FactoryPrimitiveArrayPool<T> bufferFactory;
/// <summary>Whether this LocalVoiceFramed has capacity for more data buffers to be pushed asynchronously.</summary>
public bool PushDataAsyncReady { get { lock (pushDataQueue) return pushDataQueue.Count < DATA_POOL_CAPACITY - 1; } } // 1 slot for the buffer currently being processed, contained in neither the pool nor the queue
/// <summary>Asynchronously push data into this stream.</summary>
// Accepts array of arbitrary size. Automatically splits or aggregates input to buffers of length <see cref="FrameSize"></see>.
// Expects buf content to be preserved until PushData is called from a worker thread. Releases buffer to <see cref="BufferFactory"></see> then.
public void PushDataAsync(T[] buf)
{
if (disposed) return;
#if PHOTON_VOICE_THREADING_DISABLE
PushData(buf);
return;
#endif
if (!dataEncodeThreadStarted)
{
voiceClient.logger.LogInfo(LogPrefix + ": Starting data encode thread");
#if NETFX_CORE
Windows.System.Threading.ThreadPool.RunAsync((x) =>
{
PushDataAsyncThread();
});
#else
var t = new Thread(PushDataAsyncThread);
t.Start();
Util.SetThreadName(t, "[PV] EncData " + shortName);
#endif
dataEncodeThreadStarted = true;
}
// The caller should check this first if packet production is expensive.
// This is not the case for a lightweight audio stream, and overflow does not normally happen for audio.
// Still, make sure that the queue does not grow too large even if the caller missed the check.
if (this.PushDataAsyncReady)
{
lock (pushDataQueue)
{
pushDataQueue.Enqueue(buf);
}
pushDataQueueReady.Set();
}
else
{
this.bufferFactory.Free(buf, buf.Length);
if (framesSkipped == framesSkippedNextLog)
{
voiceClient.logger.LogWarning(LogPrefix + ": PushData queue overflow. Frames skipped: " + (framesSkipped + 1));
framesSkippedNextLog = framesSkipped + 10;
}
framesSkipped++;
}
}
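// Push sketch (illustrative): check capacity before producing a frame, since the
// buffer passed to PushDataAsync is handed over and later freed by the worker thread:
//
//   if (voice.PushDataAsyncReady)
//   {
//       var buf = voice.BufferFactory.New(voice.FrameSize);
//       FillSamples(buf); // placeholder for capturing or generating audio
//       voice.PushDataAsync(buf);
//   }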
int framesSkippedNextLog;
int framesSkipped;
bool exitThread = false;
private void PushDataAsyncThread()
{
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.BeginThreadProfiling("PhotonVoice", LogPrefix);
//#endif
try
{
while (!exitThread)
{
pushDataQueueReady.WaitOne(); // Wait until data is pushed to the queue or Dispose signals.
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.BeginSample("Encoder");
//#endif
while (true) // Dequeue and process while the queue is not empty
{
if (exitThread) break; // early exit to save a few resources
T[] b = null;
lock (pushDataQueue)
{
if (pushDataQueue.Count > 0)
{
b = pushDataQueue.Dequeue();
}
}
if (b != null)
{
PushData(b);
this.bufferFactory.Free(b, b.Length);
}
else
{
break;
}
}
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.EndSample();
//#endif
}
}
catch (Exception e)
{
voiceClient.logger.LogError(LogPrefix + ": Exception in encode thread: " + e);
throw; // rethrow, preserving the original stack trace
}
finally
{
Dispose();
this.bufferFactory.Dispose();
#if NETFX_CORE
pushDataQueueReady.Dispose();
#else
pushDataQueueReady.Close();
#endif
voiceClient.logger.LogInfo(LogPrefix + ": Exiting data encode thread");
//#if UNITY_5_3_OR_NEWER
// UnityEngine.Profiling.Profiler.EndThreadProfiling();
//#endif
}
}
// counter for detecting the first frame for which processFrame() returned null
int processNullFramesCnt = 0;
/// <summary>Synchronously push data into this stream.</summary>
// Accepts array of arbitrary size. Automatically splits or aggregates input to buffers of length <see cref="FrameSize"></see>.
public void PushData(T[] buf)
{
if (this.voiceClient.transport.IsChannelJoined(this.channelId))
{
if (this.TransmitEnabled)
{
if (this.encoder is IEncoderDirect<T[]>)
{
lock (disposeLock)
{
if (!disposed)
{
foreach (var framed in framer.Frame(buf))
{
var processed = processFrame(framed);
if (processed != null)
{
#if DUMP_TO_FILE
var b = new byte[processed.Length * sizeof(short)];
Buffer.BlockCopy(processed, 0, b, 0, b.Length);
file.Write(b, 0, b.Length);
#endif
processNullFramesCnt = 0;
((IEncoderDirect<T[]>)this.encoder).Input(processed);
}
else
{
processNullFramesCnt++;
if (processNullFramesCnt == 1)
{
this.encoder.EndOfStream();
}
}
}
}
}
}
else
{
throw new Exception(LogPrefix + ": PushData(T[]) called on encoder of unsupported type " + (this.encoder == null ? "null" : this.encoder.GetType().ToString()));
}
}
}
}
/// <summary>
/// Releases resources used by the <see cref="LocalVoiceFramed{T}"/> instance.
/// Buffers used for asynchronous push will be disposed in encoder thread's 'finally'.
/// </summary>
public override void Dispose()
{
#if DUMP_TO_FILE
file.Close();
#endif
exitThread = true;
lock (disposeLock)
{
if (!disposed)
{
lock (this.processors)
{
foreach (var p in processors)
{
p.Dispose();
}
}
base.Dispose();
pushDataQueueReady.Set(); // let worker exit
}
}
base.Dispose();
}
}
}

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: ef70ab0e29cc1544896039f593d5a667
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,137 @@
// -----------------------------------------------------------------------
// <copyright file="VoiceInfo.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2017 Exit Games GmbH
// </copyright>
// <summary>
// Photon data streaming support.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System.Collections.Generic;
namespace Photon.Voice
{
/// <summary>Describes stream properties.</summary>
public struct VoiceInfo
{
/// <summary>
/// Create stream info for an Opus audio stream.
/// </summary>
/// <param name="samplingRate">Audio sampling rate.</param>
/// <param name="channels">Number of channels.</param>
/// <param name="frameDurationUs">Uncompressed frame (audio packet) size in microseconds.</param>
/// <param name="bitrate">Stream bitrate (in bits/second).</param>
/// <param name="userdata">Optional user data. Should be serializable by Photon.</param>
/// <returns>VoiceInfo instance.</returns>
static public VoiceInfo CreateAudioOpus(POpusCodec.Enums.SamplingRate samplingRate, int channels, OpusCodec.FrameDuration frameDurationUs, int bitrate, object userdata = null)
{
return new VoiceInfo()
{
Codec = Codec.AudioOpus,
SamplingRate = (int)samplingRate,
Channels = channels,
FrameDurationUs = (int)frameDurationUs,
Bitrate = bitrate,
UserData = userdata
};
}
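// Example: stereo Opus at 48 kHz with 20 ms frames and 60 kbit/s (values are
// illustrative; enum member names as exposed by the bundled Opus wrapper):
//
//   var info = VoiceInfo.CreateAudioOpus(POpusCodec.Enums.SamplingRate.Sampling48000, 2,
//       OpusCodec.FrameDuration.Frame20ms, 60000);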
/// <summary>
/// Create stream info for an audio stream.
/// </summary>
/// <param name="codec">Audio codec.</param>
/// <param name="samplingRate">Audio sampling rate.</param>
/// <param name="channels">Number of channels.</param>
/// <param name="frameDurationUs">Uncompressed frame (audio packet) size in microseconds.</param>
/// <param name="userdata">Optional user data. Should be serializable by Photon.</param>
/// <returns>VoiceInfo instance.</returns>
static public VoiceInfo CreateAudio(Codec codec, int samplingRate, int channels, int frameDurationUs, object userdata = null)
{
return new VoiceInfo()
{
Codec = codec,
SamplingRate = (int)samplingRate,
Channels = channels,
FrameDurationUs = (int)frameDurationUs,
UserData = userdata
};
}
#if PHOTON_VOICE_VIDEO_ENABLE
/// <summary>
/// Create stream info for a video stream.
/// </summary>
/// <param name="codec">Video codec.</param>
/// <param name="bitrate">Stream bitrate.</param>
/// <param name="width">Streamed video width. If 0, width and height of video source used (no rescaling).</param>
/// <param name="heigth">Streamed video height. If -1, aspect ratio preserved during rescaling.</param>
/// <param name="fps">Streamed video frames per second.</param>
/// <param name="keyFrameInt">Keyframes interval in frames.</param>///
/// <param name="userdata">Optional user data. Should be serializable by Photon.</param>
/// <returns>VoiceInfo instance.</returns>
static public VoiceInfo CreateVideo(Codec codec, int bitrate, int width, int height, int fps, int keyFrameInt, object userdata = null)
{
return new VoiceInfo()
{
Codec = codec,
Bitrate = bitrate,
Width = width,
Height = height,
FPS = fps,
KeyFrameInt = keyFrameInt,
UserData = userdata,
};
}
#endif
public override string ToString()
{
return "c=" + Codec + " f=" + SamplingRate + " ch=" + Channels + " d=" + FrameDurationUs + " s=" + FrameSize + " b=" + Bitrate + " w=" + Width + " h=" + Height + " fps=" + FPS + " kfi=" + KeyFrameInt + " ud=" + UserData;
}
public Codec Codec { get; set; }
/// <summary>Audio sampling rate (frequency, in Hz).</summary>
public int SamplingRate { get; set; }
/// <summary>Number of channels.</summary>
public int Channels { get; set; }
/// <summary>Uncompressed frame (audio packet) size in microseconds.</summary>
public int FrameDurationUs { get; set; }
/// <summary>Target bitrate (in bits/second).</summary>
public int Bitrate { get; set; }
/// <summary>Video width.</summary>
public int Width { get; set; }
/// <summary>Video height.</summary>
public int Height { get; set; }
/// <summary>Video frames per second.</summary>
public int FPS { get; set; }
/// <summary>Video keyframe interval in frames.</summary>
public int KeyFrameInt { get; set; }
/// <summary>Optional user data. Should be serializable by Photon.</summary>
public object UserData { get; set; }
/// <summary>Uncompressed frame (data packet) size in samples.</summary>
public int FrameDurationSamples { get { return (int)(this.SamplingRate * (long)this.FrameDurationUs / 1000000); } }
/// <summary>Uncompressed frame (data packet) array size.</summary>
public int FrameSize { get { return this.FrameDurationSamples * this.Channels; } }
}
/// <summary>Information about a remote voice (incoming stream).</summary>
public class RemoteVoiceInfo
{
internal RemoteVoiceInfo(int channelId, int playerId, byte voiceId, VoiceInfo info)
{
this.ChannelId = channelId;
this.PlayerId = playerId;
this.VoiceId = voiceId;
this.Info = info;
}
/// <summary>Remote voice info.</summary>
public VoiceInfo Info { get; private set; }
/// <summary>ID of channel used for transmission.</summary>
public int ChannelId { get; private set; }
/// <summary>Player ID of voice owner.</summary>
public int PlayerId { get; private set; }
/// <summary>Voice ID (unique in the room).</summary>
public byte VoiceId { get; private set; }
}
}
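// --- Usage sketch (illustrative addition, not part of the original file) ---
// Shows a typical CreateAudioOpus call and how FrameDurationSamples/FrameSize
// derive from it. The enum member names Sampling48000 and Frame20ms are
// assumptions about the POpusCodec.Enums.SamplingRate and OpusCodec.FrameDuration
// enums referenced above; the Docs namespace is hypothetical.
namespace Photon.Voice.Docs
{
    static class VoiceInfoExample
    {
        static void Show()
        {
            var info = VoiceInfo.CreateAudioOpus(
                POpusCodec.Enums.SamplingRate.Sampling48000, // samplingRate: 48 kHz (assumed member name)
                1,                                           // channels: mono
                OpusCodec.FrameDuration.Frame20ms,           // frame duration: 20 ms (assumed member name)
                30000);                                      // bitrate: 30 kbit/s
            int perChannel = info.FrameDurationSamples; // 48000 * 20000 us / 1e6 = 960 samples
            int arrayLen = info.FrameSize;              // 960 samples * 1 channel = 960
        }
    }
}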

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 8466257066e89eb4d9fd374d4c77e405
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,207 @@
// -----------------------------------------------------------------------
// <copyright file="VoiceSourceAdapter.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2017 Exit Games GmbH
// </copyright>
// <summary>
// Photon data streaming support.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System;
namespace Photon.Voice
{
/// <summary>
/// Base adapter that reads data from <see cref="IDataReader{T}.Read"></see> and pushes it to a <see cref="LocalVoice"></see>.
/// </summary>
/// <remarks>
/// Use this with a LocalVoice of the same T type.
/// </remarks>
public abstract class BufferReaderPushAdapterBase<T> : IServiceable
{
protected IDataReader<T> reader;
/// <summary>Do the actual data read/push.</summary>
/// <param name="localVoice">LocalVoice instance to push data to.</param>
public abstract void Service(LocalVoice localVoice);
/// <summary>Create a new BufferReaderPushAdapterBase instance</summary>
/// <param name="reader">DataReader to read from.</param>
public BufferReaderPushAdapterBase(IDataReader<T> reader)
{
this.reader = reader;
}
/// <summary>Release resources associated with this instance.</summary>
public void Dispose()
{
this.reader.Dispose();
}
}
/// <summary>
/// Simple <see cref="BufferReaderPushAdapterBase{T}"></see> implementation using a single buffer and synchronous <see cref="LocalVoiceFramed{T}.PushData"></see>
/// </summary>
public class BufferReaderPushAdapter<T> : BufferReaderPushAdapterBase<T>
{
protected T[] buffer;
/// <summary>Create a new BufferReaderPushAdapter instance</summary>
/// <param name="localVoice">LocalVoice instance to push data to.</param>
/// <param name="reader">DataReader to read from.</param>
public BufferReaderPushAdapter(LocalVoice localVoice, IDataReader<T> reader) : base(reader)
{
// any buffer size works, but only localVoice.FrameSize avoids additional processing
buffer = new T[((LocalVoiceFramed<T>)localVoice).FrameSize];
}
public override void Service(LocalVoice localVoice)
{
while (this.reader.Read(this.buffer))
{
((LocalVoiceFramed<T>)localVoice).PushData(this.buffer);
}
}
}
/// <summary>
/// <see cref="BufferReaderPushAdapter{T}"></see> implementation using asynchronous <see cref="LocalVoiceFramed{T}.PushDataAsync"></see>.
/// </summary>
/// <remarks>
/// Acquires a buffer from pool before each Read, releases buffer after last Read (brings Acquire/Release overhead).
/// Expects localVoice to be a <see cref="LocalVoiceFramed{T}"></see> of same T.
/// </remarks>
public class BufferReaderPushAdapterAsyncPool<T> : BufferReaderPushAdapterBase<T>
{
/// <summary>Create a new BufferReaderPushAdapter instance</summary>
/// <param name="localVoice">LocalVoice instance to push data to.</param>
/// <param name="reader">DataReader to read from.</param>
public BufferReaderPushAdapterAsyncPool(LocalVoice localVoice, IDataReader<T> reader) : base(reader) { }
/// <summary>Do the actual data read/push.</summary>
/// <param name="localVoice">LocalVoice instance to push data to. Must be a <see cref="LocalVoiceFramed{T}"></see> of same T.</param>
public override void Service(LocalVoice localVoice)
{
var v = ((LocalVoiceFramed<T>)localVoice);
T[] buf = v.BufferFactory.New();
while (this.reader.Read(buf))
{
v.PushDataAsync(buf);
buf = v.BufferFactory.New();
}
// release unused buffer
v.BufferFactory.Free(buf, buf.Length);
}
}
/// <summary>
/// <see cref="BufferReaderPushAdapter{T}"></see> implementation using asynchronous <see cref="LocalVoiceFramed{T}.PushDataAsync(T[])"></see> and data copy.
/// </summary>
/// <remarks>
/// Reads data to preallocated buffer, copies it to buffer from pool before pushing.
/// Compared with <see cref="BufferReaderPushAdapterAsyncPool{T}"></see>, this avoids one pool Acquire/Release cycle at the cost
/// of a buffer copy.
/// Expects localVoice to be a <see cref="LocalVoiceFramed{T}"></see> of same T.
/// </remarks>
public class BufferReaderPushAdapterAsyncPoolCopy<T> : BufferReaderPushAdapterBase<T>
{
protected T[] buffer;
/// <summary>Create a new BufferReaderPushAdapter instance</summary>
/// <param name="localVoice">LocalVoice instance to push data to.</param>
/// <param name="reader">DataReader to read from.</param>
public BufferReaderPushAdapterAsyncPoolCopy(LocalVoice localVoice, IDataReader<T> reader) : base(reader)
{
buffer = new T[((LocalVoiceFramedBase)localVoice).FrameSize];
}
/// <summary>Do the actual data read/push.</summary>
/// <param name="localVoice">LocalVoice instance to push data to. Must be a <see cref="LocalVoiceFramed{T}"/> of same T.</param>
public override void Service(LocalVoice localVoice)
{
while (this.reader.Read(buffer))
{
var v = ((LocalVoiceFramed<T>)localVoice);
var buf = v.BufferFactory.New();
Array.Copy(buffer, buf, buffer.Length);
v.PushDataAsync(buf);
}
}
}
/// <summary>
/// <see cref="BufferReaderPushAdapter{T}"></see> implementation using asynchronous <see cref="LocalVoiceFramed{T}.PushDataAsync"></see>, converting float samples to short.
/// </summary>
/// <remarks>
/// This adapter works exactly like <see cref="BufferReaderPushAdapterAsyncPool{T}"></see>, but it converts float samples to short.
/// Acquires a buffer from pool before each Read, releases buffer after last Read.
///
/// Expects localVoice to be a <see cref="LocalVoiceFramed{T}"></see> of same T.
/// </remarks>
public class BufferReaderPushAdapterAsyncPoolFloatToShort : BufferReaderPushAdapterBase<float>
{
float[] buffer;
/// <summary>Create a new BufferReaderPushAdapter instance</summary>
/// <param name="localVoice">LocalVoice instance to push data to.</param>
/// <param name="reader">DataReader to read from.</param>
public BufferReaderPushAdapterAsyncPoolFloatToShort(LocalVoice localVoice, IDataReader<float> reader) : base(reader)
{
buffer = new float[((LocalVoiceFramed<short>)localVoice).FrameSize];
}
/// <summary>Do the actual data read/push.</summary>
/// <param name="localVoice">LocalVoice instance to push data to. Must be a <see cref="LocalVoiceFramed{T}"></see> of same T.</param>
public override void Service(LocalVoice localVoice)
{
var v = ((LocalVoiceFramed<short>)localVoice);
short[] buf = v.BufferFactory.New();
while (this.reader.Read(buffer))
{
AudioUtil.Convert(buffer, buf, buf.Length);
v.PushDataAsync(buf);
buf = v.BufferFactory.New();
}
// release unused buffer
v.BufferFactory.Free(buf, buf.Length);
}
}
/// <summary>
/// <see cref="BufferReaderPushAdapter{T}"></see> implementation using asynchronous <see cref="LocalVoiceFramed{T}.PushDataAsync"></see>, converting short samples to float.
/// </summary>
/// <remarks>
/// This adapter works exactly like <see cref="BufferReaderPushAdapterAsyncPool{T}"></see>, but it converts short samples to float.
/// Acquires a buffer from pool before each Read, releases buffer after last Read.
///
/// Expects localVoice to be a <see cref="LocalVoiceFramed{T}"></see> of same T.
/// </remarks>
public class BufferReaderPushAdapterAsyncPoolShortToFloat : BufferReaderPushAdapterBase<short>
{
short[] buffer;
/// <summary>Create a new BufferReaderPushAdapter instance</summary>
/// <param name="localVoice">LocalVoice instance to push data to.</param>
/// <param name="reader">DataReader to read from.</param>
public BufferReaderPushAdapterAsyncPoolShortToFloat(LocalVoice localVoice, IDataReader<short> reader) : base(reader)
{
buffer = new short[((LocalVoiceFramed<float>)localVoice).FrameSize];
}
/// <summary>Do the actual data read/push.</summary>
/// <param name="localVoice">LocalVoice instance to push data to. Must be a <see cref="LocalVoiceFramed{T}"></see> of same T.</param>
public override void Service(LocalVoice localVoice)
{
var v = ((LocalVoiceFramed<float>)localVoice);
float[] buf = v.BufferFactory.New();
while (this.reader.Read(buffer))
{
AudioUtil.Convert(buffer, buf, buf.Length);
v.PushDataAsync(buf);
buf = v.BufferFactory.New();
}
// release unused buffer
v.BufferFactory.Free(buf, buf.Length);
}
}
}
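// --- Usage sketch (illustrative addition, not part of the original file) ---
// Shows how an adapter is chosen from the source/voice sample types and pumped.
// Only types defined above are used; IServiceable is assumed to declare
// Service(LocalVoice) as implemented by the adapters, and the service loop
// around these helpers is assumed context.
namespace Photon.Voice.Docs
{
    static class AdapterExample
    {
        static IServiceable Wire(LocalVoiceFramed<short> voice, IDataReader<float> reader)
        {
            // float source feeding a short voice: use the converting async adapter.
            // For matching types, BufferReaderPushAdapter or the AsyncPool variant applies.
            return new BufferReaderPushAdapterAsyncPoolFloatToShort(voice, reader);
        }

        static void Tick(IServiceable adapter, LocalVoice voice)
        {
            adapter.Service(voice); // call regularly; drains the reader into the voice
        }
    }
}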

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 91cde8da49ff1cc4cabfa7be9d821414
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,442 @@
#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
#define DLL_IMPORT_INTERNAL
#endif
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
using System.Threading;
namespace Photon.Voice
{
public class WebRTCAudioProcessor : WebRTCAudioLib, IProcessor<short>
{
const int REVERSE_BUFFER_POOL_CAPACITY = 50;
int reverseStreamDelayMs;
bool aec = false;
bool aecHighPass = true;
bool aecm = false;
bool highPass = false;
bool ns = false;
bool agc = true;
int agcCompressionGain = 9;
int agcTargetLevel = 3;
bool agc2 = false;
bool vad;
bool reverseStreamThreadRunning = false;
Queue<short[]> reverseStreamQueue = new Queue<short[]>();
AutoResetEvent reverseStreamQueueReady = new AutoResetEvent(false);
FactoryPrimitiveArrayPool<short> reverseBufferFactory;
public int AECStreamDelayMs { set { if (reverseStreamDelayMs != value) { reverseStreamDelayMs = value; if (proc != IntPtr.Zero) setParam(Param.REVERSE_STREAM_DELAY_MS, value); } } }
public bool AEC
{
set
{
if (aec != value)
{
aec = value;
InitReverseStream();
if (proc != IntPtr.Zero) setParam(Param.AEC, aec ? 1 : 0);
aecm = aec ? false : aecm;
}
}
}
public bool AECHighPass { set { if (aecHighPass != value) { aecHighPass = value; if (proc != IntPtr.Zero) setParam(Param.AEC_HIGH_PASS_FILTER, value ? 1 : 0); } } }
public bool AECMobile
{
set
{
if (aecm != value)
{
aecm = value;
InitReverseStream();
if (proc != IntPtr.Zero) setParam(Param.AECM, aecm ? 1 : 0);
aec = aecm ? false : aec;
}
}
}
public bool HighPass { set { if (highPass != value) { highPass = value; if (proc != IntPtr.Zero) setParam(Param.HIGH_PASS_FILTER, value ? 1 : 0); } } }
public bool NoiseSuppression { set { if (ns != value) { ns = value; if (proc != IntPtr.Zero) setParam(Param.NS, value ? 1 : 0); } } }
public bool AGC { set { if (agc != value) { agc = value; if (proc != IntPtr.Zero) setParam(Param.AGC, value ? 1 : 0); } } }
public int AGCCompressionGain
{
set
{
if (agcCompressionGain != value)
{
if (value < 0 || value > 90)
{
logger.LogError("[PV] WebRTCAudioProcessor: new AGCCompressionGain value {0} not in range [0..90]", value);
}
else
{
agcCompressionGain = value;
if (proc != IntPtr.Zero)
{
setParam(Param.AGC_COMPRESSION_GAIN, value);
}
}
}
}
}
public int AGCTargetLevel
{
set
{
if (agcTargetLevel != value)
{
if (value > 31 || value < 0)
{
logger.LogError("[PV] WebRTCAudioProcessor: new AGCTargetLevel value {0} not in range [0..31]", value);
}
else
{
agcTargetLevel = value;
if (proc != IntPtr.Zero)
setParam(Param.AGC_TARGET_LEVEL_DBFS, value);
}
}
}
}
public bool AGC2 { set { if (agc2 != value) { agc2 = value; if (proc != IntPtr.Zero) setParam(Param.AGC2, value ? 1 : 0); } } }
public bool VAD { set { if (vad != value) { vad = value; if (proc != IntPtr.Zero) setParam(Param.VAD, value ? 1 : 0); } } }
public bool Bypass
{
set
{
if (bypass != value) logger.LogInfo("[PV] WebRTCAudioProcessor: setting bypass=" + value);
bypass = value;
}
private get { return bypass; }
}
bool bypass = false;
int inFrameSize; // frames passed to Process
int processFrameSize; // frames passed to webrtc_audio_processor_process
int samplingRate; // input sampling rate (the same for Process and webrtc_audio_processor_process)
int channels;
IntPtr proc;
bool disposed;
Framer<float> reverseFramer;
int reverseSamplingRate;
int reverseChannels;
ILogger logger;
// audio parameters supported by webrtc
const int supportedFrameLenMs = 10;
public static readonly int[] SupportedSamplingRates = { 8000, 16000, 32000, 48000 };
public WebRTCAudioProcessor(ILogger logger, int frameSize, int samplingRate, int channels, int reverseSamplingRate, int reverseChannels)
{
bool ok = false;
foreach (var s in SupportedSamplingRates)
{
if (samplingRate == s)
{
ok = true;
break;
}
}
if (!ok)
{
logger.LogError("[PV] WebRTCAudioProcessor: input sampling rate ({0}) must be 8000, 16000, 32000 or 48000", samplingRate);
disposed = true;
return;
}
this.logger = logger;
this.inFrameSize = frameSize;
this.processFrameSize = samplingRate * supportedFrameLenMs / 1000;
if (this.inFrameSize / this.processFrameSize * this.processFrameSize != this.inFrameSize)
{
logger.LogError("[PV] WebRTCAudioProcessor: input frame size ({0} samples / {1} ms) must be equal to or N times more than webrtc processing frame size ({2} samples / 10 ms)", this.inFrameSize, 1000f * this.inFrameSize / samplingRate, processFrameSize);
disposed = true;
return;
}
this.samplingRate = samplingRate;
this.channels = channels;
this.reverseSamplingRate = reverseSamplingRate;
this.reverseChannels = reverseChannels;
this.proc = webrtc_audio_processor_create(samplingRate, channels, this.processFrameSize, samplingRate /* reverseSamplingRate to be converted */, reverseChannels);
webrtc_audio_processor_init(this.proc);
logger.LogInfo("[PV] WebRTCAudioProcessor create sampling rate {0}, channels{1}, frame size {2}, frame samples {3}, reverseChannels {4}", samplingRate, channels, this.processFrameSize, this.inFrameSize / this.channels, this.reverseChannels);
}
bool aecInited;
private void InitReverseStream()
{
lock (this)
{
if (!aecInited)
{
if (disposed)
{
return;
}
int size = processFrameSize * reverseSamplingRate / samplingRate * reverseChannels;
reverseFramer = new Framer<float>(size);
reverseBufferFactory = new FactoryPrimitiveArrayPool<short>(REVERSE_BUFFER_POOL_CAPACITY, "WebRTCAudioProcessor Reverse Buffers", this.inFrameSize);
logger.LogInfo("[PV] WebRTCAudioProcessor Init reverse stream: frame size {0}, reverseSamplingRate {1}, reverseChannels {2}", size, reverseSamplingRate, reverseChannels);
if (!reverseStreamThreadRunning)
{
#if NETFX_CORE
Windows.System.Threading.ThreadPool.RunAsync((x) =>
{
ReverseStreamThread();
});
#else
var t = new Thread(ReverseStreamThread);
t.Start();
Util.SetThreadName(t, "[PV] WebRTCProcRevStream");
#endif
}
if (reverseSamplingRate != samplingRate)
{
logger.LogWarning("[PV] WebRTCAudioProcessor AEC: output sampling rate {0} != {1} capture sampling rate. For better AEC, set audio source (microphone) and audio output samping rates to the same value.", reverseSamplingRate, samplingRate);
}
aecInited = true;
}
}
}
public short[] Process(short[] buf)
{
if (Bypass) return buf;
if (disposed) return buf;
if (proc == IntPtr.Zero) return buf;
if (buf.Length != this.inFrameSize)
{
this.logger.LogError("[PV] WebRTCAudioProcessor Process: frame size expected: {0}, passed: {1}", this.inFrameSize, buf);
return buf;
}
bool voiceDetected = false;
for (int offset = 0; offset < inFrameSize; offset += processFrameSize)
{
bool vd = true;
int err = webrtc_audio_processor_process(proc, buf, offset, out vd);
if (vd)
voiceDetected = true;
if (lastProcessErr != err)
{
lastProcessErr = err;
this.logger.LogError("[PV] WebRTCAudioProcessor Process: webrtc_audio_processor_process() error {0}", err);
return buf;
}
}
if (vad && !voiceDetected)
{
return null;
}
else
{
return buf;
}
}
int lastProcessErr = 0;
int lastProcessReverseErr = 0;
public void OnAudioOutFrameFloat(float[] data)
{
if (disposed) return;
if (!aecInited) return;
if (proc == IntPtr.Zero) return;
foreach (var reverseBufFloat in reverseFramer.Frame(data))
{
var reverseBuf = reverseBufferFactory.New();
if (reverseBufFloat.Length != reverseBuf.Length)
{
AudioUtil.ResampleAndConvert(reverseBufFloat, reverseBuf, reverseBuf.Length, this.reverseChannels);
}
else
{
AudioUtil.Convert(reverseBufFloat, reverseBuf, reverseBuf.Length);
}
lock (reverseStreamQueue)
{
if (reverseStreamQueue.Count < REVERSE_BUFFER_POOL_CAPACITY - 1)
{
reverseStreamQueue.Enqueue(reverseBuf);
reverseStreamQueueReady.Set();
}
else
{
this.logger.LogError("[PV] WebRTCAudioProcessor Reverse stream queue overflow");
this.reverseBufferFactory.Free(reverseBuf);
}
}
}
}
private void ReverseStreamThread()
{
logger.LogInfo("[PV] WebRTCAudioProcessor: Starting reverse stream thread");
reverseStreamThreadRunning = true;
try
{
while (!disposed)
{
reverseStreamQueueReady.WaitOne(); // Wait until data is pushed to the queue or Dispose signals.
while (true) // Dequeue and process while the queue is not empty
{
short[] reverseBuf = null;
lock (reverseStreamQueue)
{
if (reverseStreamQueue.Count > 0)
{
reverseBuf = reverseStreamQueue.Dequeue();
}
}
if (reverseBuf != null)
{
int err = webrtc_audio_processor_process_reverse(proc, reverseBuf, reverseBuf.Length);
this.reverseBufferFactory.Free(reverseBuf);
if (lastProcessReverseErr != err)
{
lastProcessReverseErr = err;
this.logger.LogError("[PV] WebRTCAudioProcessor: OnAudioOutFrameFloat: webrtc_audio_processor_process_reverse() error {0}", err);
}
}
else
{
break;
}
}
}
}
catch (Exception e)
{
this.logger.LogError("[PV] WebRTCAudioProcessor: ReverseStreamThread Exceptions: " + e);
}
finally
{
logger.LogInfo("[PV] WebRTCAudioProcessor: Exiting reverse stream thread");
reverseStreamThreadRunning = false;
}
}
private int setParam(Param param, int v)
{
if (disposed) return 0;
logger.LogInfo("[PV] WebRTCAudioProcessor: setting param " + param + "=" + v);
return webrtc_audio_processor_set_param(proc, (int)param, v);
}
public void Dispose()
{
lock (this)
{
if (!disposed)
{
disposed = true;
logger.LogInfo("[PV] WebRTCAudioProcessor: destroying...");
reverseStreamQueueReady.Set();
if (proc != IntPtr.Zero)
{
while (reverseStreamThreadRunning)
{
#if WINDOWS_UWP || ENABLE_WINMD_SUPPORT
System.Threading.Tasks.Task.Delay(1).Wait();
#else
Thread.Sleep(1);
#endif
}
webrtc_audio_processor_destroy(proc);
logger.LogInfo("[PV] WebRTCAudioProcessor: destroyed");
}
}
}
}
}
public class WebRTCAudioLib
{
#if DLL_IMPORT_INTERNAL
const string lib_name = "__Internal";
#else
const string lib_name = "webrtc-audio";
#endif
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern IntPtr webrtc_audio_processor_create(int samplingRate, int channels, int frameSize, int revSamplingRate, int revChannels);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern int webrtc_audio_processor_init(IntPtr proc);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern int webrtc_audio_processor_set_param(IntPtr proc, int param, int v);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern int webrtc_audio_processor_process(IntPtr proc, short[] buffer, int offset, out bool voiceDetected);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern int webrtc_audio_processor_process_reverse(IntPtr proc, short[] buffer, int bufferSize);
[DllImport(lib_name, CallingConvention = CallingConvention.Cdecl, CharSet = CharSet.Ansi)]
public static extern void webrtc_audio_processor_destroy(IntPtr proc);
// library methods return webrtc error codes
public enum Error
{
// Fatal errors.
kNoError = 0,
kUnspecifiedError = -1,
kCreationFailedError = -2,
kUnsupportedComponentError = -3,
kUnsupportedFunctionError = -4,
kNullPointerError = -5,
kBadParameterError = -6,
kBadSampleRateError = -7,
kBadDataLengthError = -8,
kBadNumberChannelsError = -9,
kFileError = -10,
kStreamParameterNotSetError = -11,
kNotEnabledError = -12,
// Warnings are non-fatal.
// This results when a set_stream_ parameter is out of range. Processing
// will continue, but the parameter may have been truncated.
kBadStreamParameterWarning = -13
};
public enum Param
{
REVERSE_STREAM_DELAY_MS = 1,
AEC = 10,
AEC_HIGH_PASS_FILTER = 11,
AECM = 20,
HIGH_PASS_FILTER = 31,
NS = 41,
NS_LEVEL = 42,
AGC = 51,
// AGC_MODE = 52,
AGC_TARGET_LEVEL_DBFS = 55,
AGC_COMPRESSION_GAIN = 56,
AGC_LIMITER = 57,
VAD = 61,
VAD_FRAME_SIZE_MS = 62,
VAD_LIKELIHOOD = 63,
AGC2 = 71,
}
}
}
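// --- Usage sketch (illustrative addition, not part of the original file) ---
// A typical processor setup honoring the constraints above: the sampling rate
// must be in SupportedSamplingRates and the frame size a multiple of 10 ms.
// 'logger' can be any ILogger; all parameter values are illustrative.
namespace Photon.Voice.Docs
{
    static class WebRTCAudioProcessorExample
    {
        static WebRTCAudioProcessor Create(ILogger logger)
        {
            int rate = 48000;            // one of SupportedSamplingRates
            int frame = rate / 100 * 2;  // 2 x 10 ms = 960 samples, mono
            var proc = new WebRTCAudioProcessor(logger, frame, rate, 1, 48000, 2);
            proc.AEC = true;             // enables the reverse (playback) stream
            proc.AECStreamDelayMs = 100; // estimated playback-to-capture delay
            proc.NoiseSuppression = true;
            return proc;
        }

        static short[] OnCapturedFrame(WebRTCAudioProcessor proc, short[] frame)
        {
            // Process() returns null when VAD is enabled and no voice was detected;
            // playback audio must also be fed to OnAudioOutFrameFloat() for AEC to work.
            return proc.Process(frame);
        }
    }
}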

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 41e3ccdd78fce694686c717a662f424c
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,291 @@
// -----------------------------------------------------------------------
// <copyright file="LoadBalancingTransport.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2015 Exit Games GmbH
// </copyright>
// <summary>
// Extends Photon Realtime API with media streaming functionality.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
using System;
using System.Collections.Generic;
using System.Linq;
using ExitGames.Client.Photon;
using Photon.Realtime;
namespace Photon.Voice
{
class VoiceEvent
{
/// <summary>
/// Single event code used for voice communication.
/// Change it if it conflicts with other event codes used in the same Photon room.
/// </summary>
public const byte Code = 202; // all Photon Voice events use a single event code
public const byte FrameCode = 203; // LoadBalancingTransport2 uses a separate code for the frame event, serialized as byte[]
}
/// <summary>
/// Extends LoadBalancingClient with media streaming functionality.
/// </summary>
/// <remarks>
/// Use your normal LoadBalancing workflow to join a Voice room.
/// All standard LoadBalancing features are available.
/// Use <see cref="VoiceClient"/> to work with media streams.
/// </remarks>
public class LoadBalancingTransport : LoadBalancingClient, IVoiceTransport, ILogger, IDisposable
{
internal const int VOICE_CHANNEL = 0;
/// <summary>The <see cref="VoiceClient"></see> implementation associated with this LoadBalancingTransport.</summary>
public VoiceClient VoiceClient { get { return this.voiceClient; } }
protected VoiceClient voiceClient;
private PhotonTransportProtocol protocol;
public void LogError(string fmt, params object[] args) { this.DebugReturn(DebugLevel.ERROR, string.Format(fmt, args)); }
public void LogWarning(string fmt, params object[] args) { this.DebugReturn(DebugLevel.WARNING, string.Format(fmt, args)); }
public void LogInfo(string fmt, params object[] args) { this.DebugReturn(DebugLevel.INFO, string.Format(fmt, args)); }
public void LogDebug(string fmt, params object[] args) { this.DebugReturn(DebugLevel.ALL, string.Format(fmt, args)); }
// send different media types over different channels for efficiency
internal byte photonChannelForCodec(Codec c)
{
return (byte)(1 + Array.IndexOf(Enum.GetValues(typeof(Codec)), c));
}
public bool IsChannelJoined(int channelId) { return this.State == ClientState.Joined; }
/// <summary>
/// Initializes a new <see cref="LoadBalancingTransport"/>.
/// </summary>
/// <param name="logger">ILogger instance. If null, this instance LoadBalancingClient.DebugReturn implementation is used.<see cref="ConnectionProtocol"></see></param>
/// <param name="connectionProtocol">Connection protocol (UDP or TCP). <see cref="ConnectionProtocol"></see></param>
public LoadBalancingTransport(ILogger logger = null, ConnectionProtocol connectionProtocol = ConnectionProtocol.Udp) : base(connectionProtocol)
{
if (logger == null)
{
logger = this;
}
base.EventReceived += onEventActionVoiceClient;
base.StateChanged += onStateChangeVoiceClient;
this.voiceClient = new VoiceClient(this, logger);
var voiceChannelsCount = Enum.GetValues(typeof(Codec)).Length + 1; // channel per stream type, channel 0 is for user events
if (LoadBalancingPeer.ChannelCount < voiceChannelsCount)
{
this.LoadBalancingPeer.ChannelCount = (byte)voiceChannelsCount;
}
this.protocol = new PhotonTransportProtocol(voiceClient, logger);
}
/// <summary>
/// This method dispatches all available incoming commands and then sends this client's outgoing commands.
/// Call this method regularly (2 to 20 times a second).
/// </summary>
new public void Service()
{
base.Service();
this.voiceClient.Service();
}
[Obsolete("Use LoadBalancingPeer::OpChangeGroups().")]
public virtual bool ChangeAudioGroups(byte[] groupsToRemove, byte[] groupsToAdd)
{
return this.LoadBalancingPeer.OpChangeGroups(groupsToRemove, groupsToAdd);
}
[Obsolete("Use GlobalInterestGroup.")]
public byte GlobalAudioGroup
{
get { return GlobalInterestGroup; }
set { GlobalInterestGroup = value; }
}
/// <summary>
/// Sets the global interest group for this client: InterestGroup is set to the given value for existing local voices and for those created later.
/// The client listens to this group only, until LoadBalancingPeer.OpChangeGroups() is called. This method can be called at any time.
/// </summary>
/// <see cref="LocalVoice.InterestGroup"/>
/// <see cref="LoadBalancingPeer.OpChangeGroups(byte[], byte[])"/>
public byte GlobalInterestGroup
{
get { return this.voiceClient.GlobalInterestGroup; }
set
{
this.voiceClient.GlobalInterestGroup = value;
if (this.State == ClientState.Joined)
{
if (this.voiceClient.GlobalInterestGroup != 0)
{
this.LoadBalancingPeer.OpChangeGroups(new byte[0], new byte[] { this.voiceClient.GlobalInterestGroup });
}
else
{
this.LoadBalancingPeer.OpChangeGroups(new byte[0], null);
}
}
}
}
#region nonpublic
public void SendVoicesInfo(IEnumerable<LocalVoice> voices, int channelId, int targetPlayerId)
{
foreach (var codecVoices in voices.GroupBy(v => v.Info.Codec))
{
object content = protocol.buildVoicesInfo(codecVoices, true);
var sendOpt = new SendOptions()
{
Reliability = true,
Channel = photonChannelForCodec(codecVoices.Key),
};
var opt = new RaiseEventOptions();
if (targetPlayerId == -1)
{
opt.TargetActors = new int[] { this.LocalPlayer.ActorNumber };
}
else if (targetPlayerId != 0)
{
opt.TargetActors = new int[] { targetPlayerId };
}
this.OpRaiseEvent(VoiceEvent.Code, content, opt, sendOpt);
}
}
public void SendVoiceRemove(LocalVoice voice, int channelId, int targetPlayerId)
{
object content = protocol.buildVoiceRemoveMessage(voice);
var sendOpt = new SendOptions()
{
Reliability = true,
Channel = photonChannelForCodec(voice.Info.Codec),
};
var opt = new RaiseEventOptions();
if (targetPlayerId == -1)
{
opt.TargetActors = new int[] { this.LocalPlayer.ActorNumber };
}
else if (targetPlayerId != 0)
{
opt.TargetActors = new int[] { targetPlayerId };
}
if (voice.DebugEchoMode)
{
opt.Receivers = ReceiverGroup.All;
}
this.OpRaiseEvent(VoiceEvent.Code, content, opt, sendOpt);
}
public virtual void SendFrame(ArraySegment<byte> data, FrameFlags flags, byte evNumber, byte voiceId, int channelId, int targetPlayerId, bool reliable, LocalVoice localVoice)
{
object[] content = protocol.buildFrameMessage(voiceId, evNumber, data, flags);
var sendOpt = new SendOptions()
{
Reliability = reliable,
Channel = photonChannelForCodec(localVoice.Info.Codec),
Encrypt = localVoice.Encrypt
};
var opt = new RaiseEventOptions();
if (targetPlayerId == -1)
{
opt.TargetActors = new int[] { this.LocalPlayer.ActorNumber };
}
else if (targetPlayerId != 0)
{
opt.TargetActors = new int[] { targetPlayerId };
}
if (localVoice.DebugEchoMode)
{
opt.Receivers = ReceiverGroup.All;
}
opt.InterestGroup = localVoice.InterestGroup;
this.OpRaiseEvent(VoiceEvent.Code, content, opt, sendOpt);
while (this.LoadBalancingPeer.SendOutgoingCommands());
}
public string ChannelIdStr(int channelId) { return null; }
public string PlayerIdStr(int playerId) { return null; }
protected virtual void onEventActionVoiceClient(EventData ev)
{
// check for voice event first
if (ev.Code == VoiceEvent.Code)
{
// Payloads are arrays. If the first array element is 0, the next one is the event subcode. Otherwise, the event is a data frame with the voiceId in the first element.
protocol.onVoiceEvent(ev[(byte)ParameterCode.CustomEventContent], VOICE_CHANNEL, ev.Sender, ev.Sender == this.LocalPlayer.ActorNumber);
}
else
{
int playerId;
switch (ev.Code)
{
case (byte)EventCode.Join:
playerId = ev.Sender;
if (playerId != this.LocalPlayer.ActorNumber) // nothing to do for the local player's own join
{
this.voiceClient.onPlayerJoin(VOICE_CHANNEL, playerId);
}
break;
case (byte)EventCode.Leave:
{
playerId = ev.Sender;
if (playerId == this.LocalPlayer.ActorNumber)
{
this.voiceClient.onLeaveAllChannels();
}
else
{
this.voiceClient.onPlayerLeave(VOICE_CHANNEL, playerId);
}
}
break;
}
}
}
void onStateChangeVoiceClient(ClientState fromState, ClientState state)
{
switch (fromState)
{
case ClientState.Joined:
this.voiceClient.onLeaveChannel(VOICE_CHANNEL);
break;
}
switch (state)
{
case ClientState.Joined:
this.voiceClient.onJoinChannel(VOICE_CHANNEL);
if (this.voiceClient.GlobalInterestGroup != 0)
{
this.LoadBalancingPeer.OpChangeGroups(new byte[0], new byte[] { this.voiceClient.GlobalInterestGroup });
}
break;
}
}
#endregion
/// <summary>
/// Releases all resources used by the <see cref="LoadBalancingTransport"/> instance.
/// </summary>
public void Dispose()
{
this.voiceClient.Dispose();
}
}
}
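// --- Usage sketch (illustrative addition, not part of the original file) ---
// Driving the transport: Service() must be pumped regularly, and
// GlobalInterestGroup routes both sending and listening, as documented above.
// The call sites (update loop, UI handler) are assumed context.
namespace Photon.Voice.Docs
{
    static class TransportExample
    {
        static void Tick(LoadBalancingTransport transport)
        {
            // 2 to 20 times per second: dispatches incoming commands, sends
            // outgoing ones, and services the embedded VoiceClient.
            transport.Service();
        }

        static void SwitchGroup(LoadBalancingTransport transport)
        {
            // Applies to existing and future local voices; if already joined,
            // the group subscription is changed immediately.
            transport.GlobalInterestGroup = 1;
        }
    }
}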

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: ab0c658ad9190f54f8fd20e9ef3acd56
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,128 @@
// -----------------------------------------------------------------------
// <copyright file="LoadBalancingTransport2.cs" company="Exit Games GmbH">
// Photon Voice API Framework for Photon - Copyright (C) 2020 Exit Games GmbH
// </copyright>
// <summary>
// Extends Photon Realtime API with audio streaming functionality.
// </summary>
// <author>developer@photonengine.com</author>
// ----------------------------------------------------------------------------
namespace Photon.Voice
{
using System;
using ExitGames.Client.Photon;
using Realtime;
/// <summary>
/// Variant of LoadBalancingTransport. Aims to be non-alloc at the cost of breaking compatibility with older clients.
/// </summary>
public class LoadBalancingTransport2 : LoadBalancingTransport
{
public LoadBalancingTransport2(ILogger logger = null, ConnectionProtocol connectionProtocol = ConnectionProtocol.Udp) : base(logger, connectionProtocol)
{
this.LoadBalancingPeer.UseByteArraySlicePoolForEvents = true; // incoming byte[] events can be deserialized to a pooled ByteArraySlice
this.LoadBalancingPeer.ReuseEventInstance = true; // this class won't store references to the event anyway
}
const int DATA_OFFSET = 4;
public override void SendFrame(ArraySegment<byte> data, FrameFlags flags, byte evNumber, byte voiceId, int channelId, int targetPlayerId, bool reliable, LocalVoice localVoice)
{
// this uses a pooled slice, which is released inside the send method (the OpRaiseEvent call below)
ByteArraySlice frameData = this.LoadBalancingPeer.ByteArraySlicePool.Acquire(data.Count + DATA_OFFSET);
frameData.Buffer[0] = DATA_OFFSET;
frameData.Buffer[1] = voiceId;
frameData.Buffer[2] = evNumber;
frameData.Buffer[3] = (byte)flags;
Buffer.BlockCopy(data.Array, 0, frameData.Buffer, DATA_OFFSET, data.Count);
frameData.Count = data.Count + DATA_OFFSET; // need to set the count, as we manipulated the buffer directly
SendOptions sendOpt = new SendOptions() { Reliability = reliable, Channel = this.photonChannelForCodec(localVoice.Info.Codec), Encrypt = localVoice.Encrypt };
RaiseEventOptions opt = new RaiseEventOptions();
if (targetPlayerId == -1)
{
opt.TargetActors = new int[] { this.LocalPlayer.ActorNumber };
}
else if (targetPlayerId != 0)
{
opt.TargetActors = new int[] { targetPlayerId };
}
if (localVoice.DebugEchoMode)
{
opt.Receivers = ReceiverGroup.All;
}
opt.InterestGroup = localVoice.InterestGroup;
this.OpRaiseEvent(VoiceEvent.FrameCode, frameData, opt, sendOpt);
// each voice has its own connection? otherwise we could aggregate the data of several voices into fewer datagrams
while (this.LoadBalancingPeer.SendOutgoingCommands());
}
protected override void onEventActionVoiceClient(EventData ev)
{
if (ev.Code == VoiceEvent.FrameCode)
{
// Payloads are arrays. If the first array element is 0, the next one is the event subcode. Otherwise, the event is a data frame with the voiceId in the first element.
this.onVoiceFrameEvent(ev[(byte)ParameterCode.CustomEventContent], VOICE_CHANNEL, ev.Sender, this.LocalPlayer.ActorNumber);
}
else
{
base.onEventActionVoiceClient(ev);
}
}
internal void onVoiceFrameEvent(object content0, int channelId, int playerId, int localPlayerId)
{
byte[] content;
int contentLength;
int sliceOffset = 0;
ByteArraySlice slice = content0 as ByteArraySlice;
if (slice != null)
{
content = slice.Buffer;
contentLength = slice.Count;
sliceOffset = slice.Offset;
}
else
{
content = content0 as byte[];
contentLength = content.Length;
}
if (content == null || contentLength < 3)
{
this.LogError("[PV] onVoiceFrameEvent did not receive data (readable as byte[]) " + content0);
}
else
{
byte dataOffset = (byte)content[sliceOffset];
byte voiceId = (byte)content[sliceOffset + 1];
byte evNumber = (byte)content[sliceOffset + 2];
FrameFlags flags = 0;
if (dataOffset > 3)
{
flags = (FrameFlags)content[sliceOffset + 3]; // the flags byte also needs the slice offset
}
FrameBuffer buffer;
if (slice != null)
{
buffer = new FrameBuffer(slice.Buffer, slice.Offset + dataOffset, contentLength - dataOffset, flags, slice);
}
else
{
buffer = new FrameBuffer(content, dataOffset, contentLength - dataOffset, flags, null);
}
this.voiceClient.onFrame(channelId, playerId, voiceId, evNumber, ref buffer, playerId == localPlayerId);
buffer.Release();
}
}
}
}
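// --- Wire-format sketch (illustrative addition, not part of the original file) ---
// The frame layout written by SendFrame above and parsed by onVoiceFrameEvent:
// [0]=dataOffset (currently 4), [1]=voiceId, [2]=evNumber, [3]=flags, [4..]=payload.
// The Docs namespace and the standalone parser are hypothetical.
namespace Photon.Voice.Docs
{
    static class FrameLayoutExample
    {
        static void Parse(byte[] content)
        {
            byte dataOffset = content[0]; // header length; lets the header grow without breaking parsers
            byte voiceId = content[1];
            byte evNumber = content[2];
            FrameFlags flags = 0;
            if (dataOffset > 3)
            {
                flags = (FrameFlags)content[3];
            }
            int payloadLength = content.Length - dataOffset;
        }
    }
}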

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 3da79bc7318460e45a532a074bd776cc
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,162 @@
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
namespace Photon.Voice
{
// for convenience, it also calls VoiceClient payload handlers
internal class PhotonTransportProtocol
{
enum EventSubcode : byte
{
VoiceInfo = 1,
VoiceRemove = 2,
Frame = 3,
}
enum EventParam : byte
{
VoiceId = 1,
SamplingRate = 2,
Channels = 3,
FrameDurationUs = 4,
Bitrate = 5,
Width = 6,
Height = 7,
FPS = 8,
KeyFrameInt = 9,
UserData = 10,
EventNumber = 11,
Codec = 12,
}
private VoiceClient voiceClient;
private ILogger logger;
public PhotonTransportProtocol(VoiceClient voiceClient, ILogger logger)
{
this.voiceClient = voiceClient;
this.logger = logger;
}
internal object[] buildVoicesInfo(IEnumerable<LocalVoice> voicesToSend, bool logInfo)
{
object[] infos = new object[voicesToSend.Count()];
object[] content = new object[] { (byte)0, EventSubcode.VoiceInfo, infos };
int i = 0;
foreach (var v in voicesToSend)
{
infos[i] = new Dictionary<byte, object>() {
{ (byte)EventParam.VoiceId, v.ID },
{ (byte)EventParam.Codec, v.Info.Codec },
{ (byte)EventParam.SamplingRate, v.Info.SamplingRate },
{ (byte)EventParam.Channels, v.Info.Channels },
{ (byte)EventParam.FrameDurationUs, v.Info.FrameDurationUs },
{ (byte)EventParam.Bitrate, v.Info.Bitrate },
{ (byte)EventParam.Width, v.Info.Width },
{ (byte)EventParam.Height, v.Info.Height },
{ (byte)EventParam.FPS, v.Info.FPS },
{ (byte)EventParam.KeyFrameInt, v.Info.KeyFrameInt },
{ (byte)EventParam.UserData, v.Info.UserData },
{ (byte)EventParam.EventNumber, v.EvNumber }
};
i++;
if (logInfo)
{
logger.LogInfo(v.LogPrefix + " Sending info: " + v.Info.ToString() + " ev=" + v.EvNumber);
}
}
return content;
}
internal object[] buildVoiceRemoveMessage(LocalVoice v)
{
byte[] ids = new byte[] { v.ID };
object[] content = new object[] { (byte)0, EventSubcode.VoiceRemove, ids };
logger.LogInfo(v.LogPrefix + " remove sent");
return content;
}
internal object[] buildFrameMessage(byte voiceId, byte evNumber, ArraySegment<byte> data, FrameFlags flags)
{
return new object[] { voiceId, evNumber, data, (byte)flags };
}
// isLocalPlayer is required only for VoiceClient.RoundTripTime calculation
internal void onVoiceEvent(object content0, int channelId, int playerId, bool isLocalPlayer)
{
object[] content = (object[])content0;
if ((byte)content[0] == (byte)0)
{
switch ((byte)content[1])
{
case (byte)EventSubcode.VoiceInfo:
this.onVoiceInfo(channelId, playerId, content[2]);
break;
case (byte)EventSubcode.VoiceRemove:
this.onVoiceRemove(channelId, playerId, content[2]);
break;
default:
logger.LogError("[PV] Unknown sevent subcode " + content[1]);
break;
}
}
else
{
byte voiceId = (byte)content[0];
byte evNumber = (byte)content[1];
byte[] receivedBytes = (byte[])content[2];
FrameFlags flags = 0;
if (content.Length > 3)
{
flags = (FrameFlags)content[3];
}
var buffer = new FrameBuffer(receivedBytes, flags);
this.voiceClient.onFrame(channelId, playerId, voiceId, evNumber, ref buffer, isLocalPlayer);
buffer.Release();
}
}
private void onVoiceInfo(int channelId, int playerId, object payload)
{
foreach (var el in (object[])payload)
{
var h = (Dictionary<byte, Object>)el;
var voiceId = (byte)h[(byte)EventParam.VoiceId];
var eventNumber = (byte)h[(byte)EventParam.EventNumber];
var info = createVoiceInfoFromEventPayload(h);
voiceClient.onVoiceInfo(channelId, playerId, voiceId, eventNumber, info);
}
}
private void onVoiceRemove(int channelId, int playerId, object payload)
{
var voiceIds = (byte[])payload;
voiceClient.onVoiceRemove(channelId, playerId, voiceIds);
}
private VoiceInfo createVoiceInfoFromEventPayload(Dictionary<byte, object> h)
{
var i = new VoiceInfo();
i.Codec = (Codec)h[(byte)EventParam.Codec];
i.SamplingRate = (int)h[(byte)EventParam.SamplingRate];
i.Channels = (int)h[(byte)EventParam.Channels];
i.FrameDurationUs = (int)h[(byte)EventParam.FrameDurationUs];
i.Bitrate = (int)h[(byte)EventParam.Bitrate];
// check to keep compatibility with old clients
if (h.ContainsKey((byte)EventParam.Width)) i.Width = (int)h[(byte)EventParam.Width];
if (h.ContainsKey((byte)EventParam.Height)) i.Height = (int)h[(byte)EventParam.Height];
if (h.ContainsKey((byte)EventParam.FPS)) i.FPS = (int)h[(byte)EventParam.FPS];
if (h.ContainsKey((byte)EventParam.KeyFrameInt)) i.KeyFrameInt = (int)h[(byte)EventParam.KeyFrameInt];
i.UserData = h[(byte)EventParam.UserData];
return i;
}
}
}
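// --- Payload sketch (illustrative addition, not part of the original file) ---
// The two content shapes dispatched by onVoiceEvent above: a leading zero byte
// marks a control event (subcode + payload); any other first byte is a voiceId
// and the event is a data frame. All values below are illustrative.
namespace Photon.Voice.Docs
{
    static class ProtocolShapesExample
    {
        // control event: [ (byte)0, subcode, payload ] - here VoiceRemove (2) with one voice ID
        static readonly object[] RemoveEvent = { (byte)0, (byte)2, new byte[] { 7 } };
        // frame event: [ voiceId, evNumber, data, flags ] as built by buildFrameMessage
        static readonly object[] FrameEvent = { (byte)7, (byte)1, new byte[] { 1, 2, 3 }, (byte)0 };
    }
}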

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: a7b562e0f0770a8498e3e1e07993ddbe
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,10 @@
{
"name": "PhotonVoice.API",
"references": [
"PhotonRealtime"
],
"optionalUnityReferences": [],
"includePlatforms": [],
"excludePlatforms": [],
"allowUnsafeCode": true
}

View File

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 8e3c55029ca641543bf030857af4d525
timeCreated: 1538045250
licenseType: Store
DefaultImporter:
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: 926a0760dc579494ba14853e66dee0cc
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,8 @@
fileFormatVersion: 2
guid: bd72d08421bfd144c9627c0d3e37d0f6
folderAsset: yes
DefaultImporter:
externalObjects: {}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,77 @@
#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
using System;
using System.Collections;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice.MacOS
{
public class AudioInChangeNotifier : IAudioInChangeNotifier
{
public bool IsSupported => true;
const string lib_name = "AudioIn";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreateChangeNotifier(int instanceID, Action<int> callback);
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_DestroyChangeNotifier(IntPtr handle);
private delegate void CallbackDelegate(int instanceID);
IntPtr handle;
int instanceID;
Action callback;
public AudioInChangeNotifier(Action callback, ILogger logger)
{
this.callback = callback;
var handle = Photon_Audio_In_CreateChangeNotifier(instanceCnt, nativeCallback);
lock (instancePerHandle)
{
this.handle = handle;
this.instanceID = instanceCnt;
instancePerHandle.Add(instanceCnt++, this);
}
}
// IL2CPP does not support marshaling delegates that point to instance methods to native code.
// Using static method and per instance table.
static int instanceCnt;
private static Dictionary<int, AudioInChangeNotifier> instancePerHandle = new Dictionary<int, AudioInChangeNotifier>();
[MonoPInvokeCallbackAttribute(typeof(CallbackDelegate))]
private static void nativeCallback(int instanceID)
{
AudioInChangeNotifier instance;
bool ok;
lock (instancePerHandle)
{
ok = instancePerHandle.TryGetValue(instanceID, out instance);
}
if (ok)
{
instance.callback();
}
}
/// <summary>If not null, the notifier is in an invalid state.</summary>
public string Error { get; private set; }
/// <summary>Disposes the notifier.
/// Call it to free native resources.
/// </summary>
public void Dispose()
{
lock (instancePerHandle)
{
instancePerHandle.Remove(instanceID);
}
if (handle != IntPtr.Zero)
{
Photon_Audio_In_DestroyChangeNotifier(handle);
handle = IntPtr.Zero;
}
}
}
}
#endif
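#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
// --- Usage sketch (illustrative addition, not part of the original file) ---
// Reacting to input-device changes, e.g. to restart capture on the new device.
// 'restartRecording' is a hypothetical callback supplied by the application.
namespace Photon.Voice.MacOS.Docs
{
    static class ChangeNotifierExample
    {
        static IAudioInChangeNotifier Watch(System.Action restartRecording, ILogger logger)
        {
            // The callback fires whenever the audio input configuration changes
            // (device added or removed).
            var notifier = new AudioInChangeNotifier(restartRecording, logger);
            if (notifier.Error != null)
            {
                logger.LogError("[PV] change notifier failed: " + notifier.Error);
            }
            return notifier; // Dispose() it to free the native resources
        }
    }
}
#endif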

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 893b0d66c964e974798c7da6a2e88cd0
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,96 @@
#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
using System;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice.MacOS
{
public class MonoPInvokeCallbackAttribute : System.Attribute
{
private Type type;
public MonoPInvokeCallbackAttribute(Type t) { type = t; }
}
public class AudioInPusher : IAudioPusher<float>
{
const string lib_name = "AudioIn";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreatePusher(int instanceID, int deviceID, Action<int, IntPtr, int> pushCallback);
[DllImport(lib_name)]
private static extern void Photon_Audio_In_Destroy(IntPtr handler);
private delegate void CallbackDelegate(int instanceID, IntPtr buf, int len);
public AudioInPusher(int deviceID, ILogger logger)
{
this.deviceID = deviceID;
try
{
handle = Photon_Audio_In_CreatePusher(instanceCnt, deviceID, nativePushCallback);
instancePerHandle.Add(instanceCnt++, this);
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as a validity flag, make sure it's not null
{
Error = "Exception in AudioInPusher constructor";
}
logger.LogError("[PV] AudioInPusher: " + Error);
}
}
private int deviceID;
// IL2CPP does not support marshaling delegates that point to instance methods to native code.
// Using static method and per instance table.
static int instanceCnt;
private static Dictionary<int, AudioInPusher> instancePerHandle = new Dictionary<int, AudioInPusher>();
[MonoPInvokeCallbackAttribute(typeof(CallbackDelegate))]
private static void nativePushCallback(int instanceID, IntPtr buf, int len)
{
AudioInPusher instance;
if (instancePerHandle.TryGetValue(instanceID, out instance))
{
instance.push(buf, len);
}
}
IntPtr handle;
Action<float[]> pushCallback;
ObjectFactory<float[], int> bufferFactory;
// Supposed to be called once at voice initialization.
// Otherwise recreate native object (instead of adding 'set callback' method to native interface)
public void SetCallback(Action<float[]> callback, ObjectFactory<float[], int> bufferFactory)
{
this.bufferFactory = bufferFactory;
this.pushCallback = callback;
}
private void push(IntPtr buf, int len)
{
if (this.pushCallback != null)
{
var bufManaged = bufferFactory.New(len);
Marshal.Copy(buf, bufManaged, 0, len);
pushCallback(bufManaged);
}
}
public int Channels { get { return 1; } }
public int SamplingRate { get { return 44100; } }
public string Error { get; private set; }
public void Dispose()
{
if (handle != IntPtr.Zero)
{
Photon_Audio_In_Destroy(handle);
handle = IntPtr.Zero;
}
// TODO: Remove this from instancePerHandle
}
}
}
#endif

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: b0e2dc3e0f865a747b515b1a0450cc12
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,56 @@
#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
using System;
using System.Runtime.InteropServices;
namespace Photon.Voice.MacOS
{
public class AudioInReader : IAudioReader<float>
{
const string lib_name = "AudioIn";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreateReader(int deviceID);
[DllImport(lib_name)]
private static extern void Photon_Audio_In_Destroy(IntPtr handler);
[DllImport(lib_name)]
private static extern bool Photon_Audio_In_Read(IntPtr handle, float[] buf, int len);
IntPtr audioIn;
public AudioInReader(int deviceID, ILogger logger)
{
try
{
audioIn = Photon_Audio_In_CreateReader(deviceID);
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as a validity flag, make sure it's not null
{
Error = "Exception in AudioInReader constructor";
}
logger.LogError("[PV] AudioInReader: " + Error);
}
}
public int Channels { get { return 1; } }
public int SamplingRate { get { return 44100; } }
public string Error { get; private set; }
public void Dispose()
{
if (audioIn != IntPtr.Zero)
{
Photon_Audio_In_Destroy(audioIn);
audioIn = IntPtr.Zero;
}
}
public bool Read(float[] buf)
{
return audioIn != IntPtr.Zero && Photon_Audio_In_Read(audioIn, buf, buf.Length);
}
}
}
#endif
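#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
// --- Usage sketch (illustrative addition, not part of the original file) ---
// IAudioReader is pull-based: Read() fills the buffer and returns false once
// no complete buffer is available. The 10 ms frame sizing is an illustrative choice.
namespace Photon.Voice.MacOS.Docs
{
    static class AudioInReaderExample
    {
        static void Drain(AudioInReader reader)
        {
            // 10 ms of mono audio at the reader's rate: 44100 / 100 = 441 samples.
            var frame = new float[reader.SamplingRate / 100 * reader.Channels];
            while (reader.Read(frame))
            {
                // consume 'frame': push into a LocalVoice, meter levels, etc.
            }
        }
    }
}
#endif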

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 7f22e06cf781af34abf9e24bf6010fce
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,70 @@
#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
using System;
using System.Collections;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice.MacOS
{
/// <summary>Enumerates microphones available on the device.
/// </summary>
public class AudioInEnumerator : DeviceEnumeratorBase
{
const string lib_name = "AudioIn";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreateMicEnumerator();
[DllImport(lib_name)]
private static extern void Photon_Audio_In_DestroyMicEnumerator(IntPtr handle);
[DllImport(lib_name)]
private static extern int Photon_Audio_In_MicEnumerator_Count(IntPtr handle);
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_MicEnumerator_NameAtIndex(IntPtr handle, int idx);
[DllImport(lib_name)]
private static extern int Photon_Audio_In_MicEnumerator_IDAtIndex(IntPtr handle, int idx);
IntPtr handle;
public AudioInEnumerator(ILogger logger) : base(logger)
{
Refresh();
}
/// <summary>Refreshes the microphones list.
/// </summary>
public override void Refresh()
{
Dispose();
try
{
handle = Photon_Audio_In_CreateMicEnumerator();
var count = Photon_Audio_In_MicEnumerator_Count(handle);
devices = new List<DeviceInfo>();
for (int i = 0; i < count; i++)
{
devices.Add(new DeviceInfo(Photon_Audio_In_MicEnumerator_IDAtIndex(handle, i), Marshal.PtrToStringAuto(Photon_Audio_In_MicEnumerator_NameAtIndex(handle, i))));
}
Error = null;
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as a validity flag, make sure it's not null
{
Error = "Exception in AudioInEnumerator.Refresh()";
}
}
}
/// <summary>Disposes enumerator.
/// Call it to free native resources.
/// </summary>
public override void Dispose()
{
if (handle != IntPtr.Zero && Error == null)
{
Photon_Audio_In_DestroyMicEnumerator(handle);
handle = IntPtr.Zero;
}
}
}
}
#endif
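#if UNITY_EDITOR_OSX || UNITY_STANDALONE_OSX
// --- Usage sketch (illustrative addition, not part of the original file) ---
// Listing capture devices. DeviceEnumeratorBase is assumed to expose the
// refreshed devices list via IEnumerable<DeviceInfo>, as iterated below.
namespace Photon.Voice.MacOS.Docs
{
    static class EnumeratorExample
    {
        static void List(ILogger logger)
        {
            var mics = new AudioInEnumerator(logger); // calls Refresh() internally
            if (mics.Error == null)
            {
                foreach (var d in mics)
                {
                    logger.LogInfo("[PV] mic: " + d); // DeviceInfo: native ID + name
                }
            }
            mics.Dispose(); // frees the native enumerator handle
        }
    }
}
#endif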

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 7dcf1a594e0ea9845b5ec7ae2931428b
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,83 @@
#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
using System;
using System.Collections;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice.IOS
{
public class AudioInChangeNotifier : IAudioInChangeNotifier
{
public bool IsSupported => true;
const string lib_name = "__Internal";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreateChangeNotifier(int instanceID, Action<int> callback);
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_DestroyChangeNotifier(IntPtr handle);
private delegate void CallbackDelegate(int instanceID);
IntPtr handle;
int instanceID;
Action callback;
public AudioInChangeNotifier(Action callback, ILogger logger)
{
this.callback = callback;
var handle = Photon_Audio_In_CreateChangeNotifier(instanceCnt, nativeCallback);
lock (instancePerHandle)
{
this.handle = handle;
this.instanceID = instanceCnt;
instancePerHandle.Add(instanceCnt++, this);
}
}
public class MonoPInvokeCallbackAttribute : System.Attribute
{
private Type type;
public MonoPInvokeCallbackAttribute(Type t) { type = t; }
}
// IL2CPP does not support marshaling delegates that point to instance methods to native code.
// Using static method and per instance table.
static int instanceCnt;
private static Dictionary<int, AudioInChangeNotifier> instancePerHandle = new Dictionary<int, AudioInChangeNotifier>();
[MonoPInvokeCallbackAttribute(typeof(CallbackDelegate))]
private static void nativeCallback(int instanceID)
{
AudioInChangeNotifier instance;
bool ok;
lock (instancePerHandle)
{
ok = instancePerHandle.TryGetValue(instanceID, out instance);
}
if (ok)
{
instance.callback();
}
}
/// <summary>If not null, the notifier is in an invalid state.</summary>
public string Error { get; private set; }
/// <summary>Disposes the notifier.
/// Call it to free native resources.
/// </summary>
public void Dispose()
{
lock (instancePerHandle)
{
instancePerHandle.Remove(instanceID);
}
if (handle != IntPtr.Zero)
{
Photon_Audio_In_DestroyChangeNotifier(handle);
handle = IntPtr.Zero;
}
}
}
}
#endif

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 18c26ffb450f01444a49013a7455b8e7
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,139 @@
#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
using System;
using System.Threading;
using System.Collections.Generic;
using System.Runtime.InteropServices;
namespace Photon.Voice.IOS
{
public class MonoPInvokeCallbackAttribute : System.Attribute
{
private Type type;
public MonoPInvokeCallbackAttribute(Type t) { type = t; }
}
public class AudioInPusher : IAudioPusher<float>, IResettable
{
const string lib_name = "__Internal";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreatePusher(int instanceID, Action<int, IntPtr, int> pushCallback, int sessionCategory, int sessionMode, int sessionCategoryOptions);
[DllImport(lib_name)]
private static extern void Photon_Audio_In_Reset(IntPtr handler);
[DllImport(lib_name)]
private static extern void Photon_Audio_In_Destroy(IntPtr handler);
private delegate void CallbackDelegate(int instanceID, IntPtr buf, int len);
private bool initializationFinished;
public AudioInPusher(AudioSessionParameters sessParam, ILogger logger)
{
// initialize in a separate thread to avoid 0.5-1 s pauses in main thread execution
var t = new Thread(() =>
{
lock (instancePerHandle) // prevent concurrent initialization
{
try
{
var handle = Photon_Audio_In_CreatePusher(instanceCnt, nativePushCallback, (int)sessParam.Category, (int)sessParam.Mode, sessParam.CategoryOptionsToInt());
this.handle = handle;
this.instanceID = instanceCnt;
instancePerHandle.Add(instanceCnt++, this);
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as a validity flag, make sure it's not null
{
Error = "Exception in AudioInPusher constructor";
}
logger.LogError("[PV] AudioInPusher: " + Error);
}
finally
{
initializationFinished = true;
}
}
});
Util.SetThreadName(t, "[PV] IOSAudioInPusherCtr");
t.Start();
}
// IL2CPP does not support marshaling delegates that point to instance methods to native code.
// Using static method and per instance table.
static int instanceCnt;
private static Dictionary<int, AudioInPusher> instancePerHandle = new Dictionary<int, AudioInPusher>();
[MonoPInvokeCallbackAttribute(typeof(CallbackDelegate))]
private static void nativePushCallback(int instanceID, IntPtr buf, int len)
{
AudioInPusher instance;
bool ok;
lock (instancePerHandle)
{
ok = instancePerHandle.TryGetValue(instanceID, out instance);
}
if (ok)
{
instance.push(buf, len);
}
}
IntPtr handle;
int instanceID;
Action<float[]> pushCallback;
ObjectFactory<float[], int> bufferFactory;
// Supposed to be called once at voice initialization.
// Otherwise recreate native object (instead of adding 'set callback' method to native interface)
public void SetCallback(Action<float[]> callback, ObjectFactory<float[], int> bufferFactory)
{
this.bufferFactory = bufferFactory;
this.pushCallback = callback;
}
private void push(IntPtr buf, int len)
{
if (this.pushCallback != null)
{
var bufManaged = bufferFactory.New(len);
Marshal.Copy(buf, bufManaged, 0, len);
pushCallback(bufManaged);
}
}
public int Channels { get { return 1; } }
public int SamplingRate { get { return 48000; } }
public string Error { get; private set; }
public void Reset()
{
lock (instancePerHandle)
{
if (handle != IntPtr.Zero)
{
Photon_Audio_In_Reset(handle);
}
}
}
public void Dispose()
{
lock (instancePerHandle)
{
instancePerHandle.Remove(instanceID);
while (!initializationFinished) // should not loop: the lock guarantees initialization finished, provided the constructor thread started before Dispose()
{
Thread.Sleep(1);
}
if (handle != IntPtr.Zero)
{
Photon_Audio_In_Destroy(handle);
handle = IntPtr.Zero;
}
}
}
}
}
#endif
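#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
// --- Usage sketch (illustrative addition, not part of the original file) ---
// IAudioPusher is push-based: the native side invokes the callback with buffers
// taken from the supplied factory (see push() above). The factory instance and
// frame consumer are assumed to come from the surrounding voice setup.
namespace Photon.Voice.IOS.Docs
{
    static class AudioInPusherExample
    {
        static void Wire(AudioInPusher pusher, ObjectFactory<float[], int> bufferFactory, System.Action<float[]> onFrame)
        {
            // SetCallback is meant to be called once at voice initialization;
            // each delivered buffer is sized to the native callback's sample count.
            pusher.SetCallback(onFrame, bufferFactory);
        }
    }
}
#endif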

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: a173aa7f3a7bbc94aa78a6747d55f3f6
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,96 @@
#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
using System;
using System.Threading;
using System.Runtime.InteropServices;
namespace Photon.Voice.IOS
{
public class AudioInReader : IAudioReader<float>, IResettable
{
const string lib_name = "__Internal";
[DllImport(lib_name)]
private static extern IntPtr Photon_Audio_In_CreateReader(int sessionCategory, int sessionMode, int sessionCategoryOptions);
[DllImport(lib_name)]
private static extern void Photon_Audio_In_Reset(IntPtr handler);
[DllImport(lib_name)]
private static extern void Photon_Audio_In_Destroy(IntPtr handler);
[DllImport(lib_name)]
private static extern bool Photon_Audio_In_Read(IntPtr handle, float[] buf, int len);
IntPtr audioIn;
private volatile bool initializationFinished; // volatile: polled by Dispose() outside the lock
public AudioInReader(AudioSessionParameters sessParam, ILogger logger)
{
// initialization in a separate thread to avoid 0.5 - 1 sec. pauses in main thread execution
var t = new Thread(() =>
{
lock (this)
{
try
{
// the enclosing lock already guards the field, so assign directly
this.audioIn = Photon_Audio_In_CreateReader((int)sessParam.Category, (int)sessParam.Mode, sessParam.CategoryOptionsToInt());
}
catch (Exception e)
{
Error = e.ToString();
if (Error == null) // should never happen, but since Error is used as the validity flag, make sure it is not null
{
Error = "Exception in AudioInReader constructor";
}
logger.LogError("[PV] AudioInReader: " + Error);
}
finally
{
initializationFinished = true;
}
}
});
Util.SetThreadName(t, "[PV] IOSAudioInReaderCtr");
t.Start();
}
public int Channels { get { return 1; } }
public int SamplingRate { get { return 48000; } }
public string Error { get; private set; }
public void Reset()
{
lock (this)
{
if (audioIn != IntPtr.Zero)
{
Photon_Audio_In_Reset(audioIn);
}
}
}
public void Dispose()
{
    // wait for the constructor's thread outside the lock: spinning while holding the lock
    // would deadlock with the initialization code, which sets this flag under the same lock
    while (!initializationFinished)
    {
        Thread.Sleep(1);
    }
    lock (this)
    {
        if (audioIn != IntPtr.Zero)
        {
            Photon_Audio_In_Destroy(audioIn);
            audioIn = IntPtr.Zero;
        }
    }
}
public bool Read(float[] buf)
{
return audioIn != IntPtr.Zero && Photon_Audio_In_Read(audioIn, buf, buf.Length);
}
}
}
#endif
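
A matching sketch for the pull-based reader. Again illustrative: it assumes Read() fills the whole buffer and returns false while the native side has not yet buffered enough samples, and that the frame length is a multiple of Channels.

#if (UNITY_IOS && !UNITY_EDITOR) || __IOS__
using System;
using System.Threading;
using Photon.Voice;
using Photon.Voice.IOS;

// Illustrative sketch, not part of the package.
static class AudioInReaderUsageSketch
{
    public static void Pump(ILogger logger, Func<bool> keepRunning)
    {
        var reader = new AudioInReader(AudioSessionParametersPresets.VoIP, logger);
        // a 20 ms frame: 48000 samples/s / 50, times 1 channel
        var frame = new float[reader.SamplingRate / 50 * reader.Channels];
        while (reader.Error == null && keepRunning())
        {
            if (reader.Read(frame))
            {
                // frame now holds fresh mono samples; forward them to the encoder here
            }
            else
            {
                Thread.Sleep(10); // not enough samples buffered yet
            }
        }
        reader.Dispose();
    }
}
#endif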

View File

@@ -0,0 +1,11 @@
fileFormatVersion: 2
guid: 7c99541283e96384ea52e265269b1dee
MonoImporter:
externalObjects: {}
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
assetBundleName:
assetBundleVariant:

View File

@@ -0,0 +1,346 @@
namespace Photon.Voice.IOS
{
public enum AudioSessionCategory // values are the same as in AudioIn.mm enums
{
/// <summary>
/// Use this category for background sounds such as rain, car engine noise, etc.
/// Mixes with other music.
/// </summary>
/// <remarks>API_AVAILABLE(ios(3.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);</remarks>
Ambient = 0,
/// <summary> Use this category for background sounds. Other music will stop playing. </summary>
/// <remarks>API_AVAILABLE(ios(3.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);</remarks>
SoloAmbient = 1,
/// <summary> Use this category for music tracks. </summary>
/// <remarks>API_AVAILABLE(ios(3.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);</remarks>
Playback = 2,
/// <summary> Use this category when recording audio. </summary>
/// <remarks>API_AVAILABLE(ios(3.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);</remarks>
Record = 3,
/// <summary> Use this category when recording and playing back audio. </summary>
/// <remarks>API_AVAILABLE(ios(3.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);</remarks>
PlayAndRecord = 4,
/// <summary> Use this category when using a hardware codec or signal processor while
/// not playing or recording audio. </summary>
/// <remarks>API_DEPRECATED("No longer supported", ios(3.0, 10.0)) API_UNAVAILABLE(watchos, tvos) API_UNAVAILABLE(macos);</remarks>
AudioProcessing = 5,
/// <summary> Use this category to customize the usage of available audio accessories and built-in audio hardware.
/// For example, this category provides an application with the ability to use an available USB output
/// and headphone output simultaneously for separate, distinct streams of audio data. Use of
/// this category by an application requires a more detailed knowledge of, and interaction with,
/// the capabilities of the available audio routes. May be used for input, output, or both.
/// Note that not all output types and output combinations are eligible for multi-route. Input is limited
/// to the last-in input port. Eligible inputs consist of the following:
/// AVAudioSessionPortUSBAudio, AVAudioSessionPortHeadsetMic, and AVAudioSessionPortBuiltInMic.
/// Eligible outputs consist of the following:
/// AVAudioSessionPortUSBAudio, AVAudioSessionPortLineOut, AVAudioSessionPortHeadphones, AVAudioSessionPortHDMI,
/// and AVAudioSessionPortBuiltInSpeaker.
/// Note that AVAudioSessionPortBuiltInSpeaker is only allowed to be used when there are no other eligible
/// outputs connected. </summary>
/// <remarks>API_AVAILABLE(ios(6.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);</remarks>
MultiRoute = 6,
}
public enum AudioSessionMode // values are the same as in AudioIn.mm enums
{
/// <summary>
/// Modes modify the audio category in order to introduce behavior that is tailored to the specific
/// use of audio within an application. Available in iOS 5.0 and greater.
/// </summary>
/// <remarks>
/// The default mode
/// API_AVAILABLE(ios(5.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// </remarks>
Default = 0,
/// <summary>
/// Only valid with AVAudioSessionCategoryPlayAndRecord. Appropriate for Voice over IP
/// (VoIP) applications. Reduces the number of allowable audio routes to be only those
/// that are appropriate for VoIP applications and may engage appropriate system-supplied
/// signal processing. Has the side effect of setting AVAudioSessionCategoryOptionAllowBluetooth
/// </summary>
/// <remarks>
/// API_AVAILABLE(ios(5.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// </remarks>
VoiceChat = 1,
/* Set by Game Kit on behalf of an application that uses a GKVoiceChat object; valid
only with the AVAudioSessionCategoryPlayAndRecord category.
Do not set this mode directly. If you need similar behavior and are not using
a GKVoiceChat object, use AVAudioSessionModeVoiceChat instead. */
// GameChat = 2, // API_AVAILABLE(ios(5.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// <summary>
/// Only valid with AVAudioSessionCategoryPlayAndRecord or AVAudioSessionCategoryRecord.
/// Modifies the audio routing options and may engage appropriate system-supplied signal processing.
/// </summary>
/// <remarks>
/// API_AVAILABLE(ios(5.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// </remarks>
VideoRecording = 3,
/// <summary>
/// Appropriate for applications that wish to minimize the effect of system-supplied signal
/// processing for input and/or output audio signals.
/// </summary>
/// <remarks>
/// API_AVAILABLE(ios(5.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// </remarks>
Measurement = 4,
/// <summary>
/// Engages appropriate output signal processing for movie playback scenarios. Currently
/// only applied during playback over built-in speaker.
/// </summary>
/// <remarks>
/// API_AVAILABLE(ios(6.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// </remarks>
MoviePlayback = 5,
/// <summary>
/// Only valid with AVAudioSessionCategoryPlayAndRecord. Reduces the number of allowable audio
/// routes to be only those that are appropriate for video chat applications. May engage appropriate
/// system-supplied signal processing. Has the side effect of setting
/// AVAudioSessionCategoryOptionAllowBluetooth and AVAudioSessionCategoryOptionDefaultToSpeaker.
/// </summary>
/// <remarks>
/// API_AVAILABLE(ios(7.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/// </remarks>
VideoChat = 6,
/* Appropriate for applications which play spoken audio and wish to be paused (via audio session interruption) rather than ducked
if another app (such as a navigation app) plays a spoken audio prompt. Examples of apps that would use this are podcast players and
audio books. For more information, see the related category option AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers. */
// SpokenAudio = 7, // API_AVAILABLE(ios(9.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos);
/* Appropriate for applications which play audio using text to speech. Setting this mode allows for different routing behaviors when
connected to certain audio devices such as CarPlay. An example of an app that would use this mode is a turn by turn navigation app that
plays short prompts to the user. Typically, these same types of applications would also configure their session to use
AVAudioSessionCategoryOptionDuckOthers and AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers */
// VoicePrompt = 8, // API_AVAILABLE(ios(12.0), watchos(5.0), tvos(12.0)) API_UNAVAILABLE(macos);
}
public enum AudioSessionCategoryOption // values as defined in Apple Audio Session API
{
/*
Apple's notes on the four active options below (MixWithOthers, DuckOthers, AllowBluetooth,
DefaultToSpeaker) are kept on the corresponding enum members; the notes on the currently
commented-out options follow.
AVAudioSessionCategoryOptionInterruptSpokenAudioAndMixWithOthers --
If another app's audio session mode is set to AVAudioSessionModeSpokenAudio (podcast playback in the background for example),
then that other app's audio will be interrupted when the current application's audio session goes active. An example of this
is a navigation app that provides navigation prompts to its user (it pauses any spoken audio currently being played while it
plays the prompt). This defaults to off. Note that the other app's audio will be paused for as long as the current session is
active. You will need to deactivate your audio session to allow the other audio to resume playback.
Setting this option will also make your category mixable with others (AVAudioSessionCategoryOptionMixWithOthers
will be set). If you want other non-spoken audio apps to duck their audio when your app's session goes active, also set
AVAudioSessionCategoryOptionDuckOthers.
AVAudioSessionCategoryOptionAllowBluetoothA2DP --
This allows an application to change the default behaviour of some audio session categories with regards to showing
bluetooth Advanced Audio Distribution Profile (A2DP), i.e. stereo Bluetooth, devices as available routes. The current
category behavior is:
(1) AVAudioSessionCategoryPlayAndRecord
this will default to false, but can be set to true. This will allow a paired bluetooth A2DP device to show up as
an available route for output, while recording through the category-appropriate input
(2) AVAudioSessionCategoryMultiRoute and AVAudioSessionCategoryRecord
this will default to false, and cannot be set to true.
(3) Other categories
this defaults to true and cannot be changed (that is, bluetooth A2DP ports are always supported in output-only categories).
An application must be prepared for setting this option to fail as behaviour may change in future releases.
If an application changes their category or mode, they should reassert the override (it is not sticky
across category and mode changes).
Setting both AVAudioSessionCategoryOptionAllowBluetooth and AVAudioSessionCategoryOptionAllowBluetoothA2DP is allowed. In cases
where a single Bluetooth device supports both HFP and A2DP, the HFP ports will be given a higher priority for routing. For HFP
and A2DP ports on separate hardware devices, the last-in wins rule applies.
AVAudioSessionCategoryOptionAllowAirPlay --
This allows an application to change the default behaviour of some audio session categories with regards to showing
AirPlay devices as available routes. See the documentation of AVAudioSessionCategoryOptionAllowBluetoothA2DP for details on
how this option applies to specific categories.
*/
/// <summary>
/// This allows an application to set whether or not other active audio apps will be interrupted or mixed with
/// when your app's audio session goes active. The typical cases are:
/// (1) AVAudioSessionCategoryPlayAndRecord or AVAudioSessionCategoryMultiRoute
/// this will default to false, but can be set to true. This would allow other applications to play in the background
/// while an app had both audio input and output enabled
/// (2) AVAudioSessionCategoryPlayback
/// this will default to false, but can be set to true. This would allow other applications to play in the background,
/// but an app will still be able to play regardless of the setting of the ringer switch
/// (3) Other categories
/// this defaults to false and cannot be changed (that is, the mix with others setting of these categories
/// cannot be overridden). An application must be prepared for setting this property to fail as behaviour
/// may change in future releases. If an application changes their category, they should reassert the
/// option (it is not sticky across category changes).
/// MixWithOthers is only valid with AVAudioSessionCategoryPlayAndRecord, AVAudioSessionCategoryPlayback, and AVAudioSessionCategoryMultiRoute
/// </summary>
MixWithOthers = 0x1,
/// <summary>
/// This allows an application to set whether or not other active audio apps will be ducked when your app's audio
/// session goes active. An example of this is the Nike app, which provides periodic updates to its user (it reduces the
/// volume of any music currently being played while it provides its status). This defaults to off. Note that the other
/// audio will be ducked for as long as the current session is active. You will need to deactivate your audio
/// session when you want full volume playback of the other audio.
/// If your category is AVAudioSessionCategoryPlayback, AVAudioSessionCategoryPlayAndRecord, or
/// AVAudioSessionCategoryMultiRoute, by default the audio session will be non-mixable and non-ducking.
/// Setting this option will also make your category mixable with others (AVAudioSessionCategoryOptionMixWithOthers
/// will be set).
/// DuckOthers is only valid with AVAudioSessionCategoryAmbient, AVAudioSessionCategoryPlayAndRecord, AVAudioSessionCategoryPlayback, and AVAudioSessionCategoryMultiRoute
/// </summary>
DuckOthers = 0x2,
/// <summary>
/// This allows an application to change the default behaviour of some audio session categories with regards to showing
/// bluetooth Hands-Free Profile (HFP) devices as available routes. The current category behavior is:
/// (1) AVAudioSessionCategoryPlayAndRecord
/// this will default to false, but can be set to true. This will allow a paired bluetooth HFP device to show up as
/// an available route for input, while playing through the category-appropriate output
/// (2) AVAudioSessionCategoryRecord
/// this will default to false, but can be set to true. This will allow a paired bluetooth HFP device to show up
/// as an available route for input
/// (3) Other categories
/// this defaults to false and cannot be changed (that is, enabling bluetooth for input in these categories is
/// not allowed)
/// An application must be prepared for setting this option to fail as behaviour may change in future releases.
/// If an application changes their category or mode, they should reassert the override (it is not sticky
/// across category and mode changes).
/// AllowBluetooth is only valid with AVAudioSessionCategoryRecord and AVAudioSessionCategoryPlayAndRecord
/// </summary>
AllowBluetooth = 0x4, // API_UNAVAILABLE(tvos, watchos, macos)
/// <summary>
/// This allows an application to change the default behaviour of some audio session categories with regards to
/// the audio route. The current category behavior is:
/// (1) AVAudioSessionCategoryPlayAndRecord category
/// this will default to false, but can be set to true. this will route to Speaker (instead of Receiver)
/// when no other audio route is connected.
/// (2) Other categories
/// this defaults to false and cannot be changed (that is, the default to speaker setting of these
/// categories cannot be overridden)
/// An application must be prepared for setting this property to fail as behaviour may change in future releases.
/// If an application changes their category, they should reassert the override (it is not sticky across
/// category and mode changes).
/// DefaultToSpeaker is only valid with AVAudioSessionCategoryPlayAndRecord
/// </summary>
DefaultToSpeaker = 0x8, // API_UNAVAILABLE(tvos, watchos, macos)
/* InterruptSpokenAudioAndMixWithOthers is only valid with AVAudioSessionCategoryPlayAndRecord, AVAudioSessionCategoryPlayback, and AVAudioSessionCategoryMultiRoute */
// InterruptSpokenAudioAndMixWithOthers = 0x11, // API_AVAILABLE(ios(9.0), watchos(2.0), tvos(9.0)) API_UNAVAILABLE(macos)
/* AllowBluetoothA2DP is only valid with AVAudioSessionCategoryPlayAndRecord */
// AllowBluetoothA2DP = 0x20, // API_AVAILABLE(ios(10.0), watchos(3.0), tvos(10.0)) API_UNAVAILABLE(macos)
/* AllowAirPlay is only valid with AVAudioSessionCategoryPlayAndRecord */
// AllowAirPlay = 0x40, // API_AVAILABLE(ios(10.0), tvos(10.0)) API_UNAVAILABLE(watchos, macos)
}
[System.Serializable]
public struct AudioSessionParameters
{
public AudioSessionCategory Category; // = AudioSessionCategory.PlayAndRecord
public AudioSessionMode Mode; // = AudioSessionMode.Default
public AudioSessionCategoryOption[] CategoryOptions;
public int CategoryOptionsToInt()
{
int opt = 0;
if (CategoryOptions != null)
{
for (int i = 0; i < CategoryOptions.Length; i++)
{
opt |= (int)CategoryOptions[i];
}
}
return opt;
}
public override string ToString()
{
var opt = "[";
if (CategoryOptions != null)
{
for (int i = 0; i < CategoryOptions.Length; i++)
{
opt += CategoryOptions[i];
if (i != CategoryOptions.Length - 1)
{
opt += ", ";
}
}
}
opt += "]";
return string.Format("category = {0}, mode = {1}, options = {2}", Category, Mode, opt);
}
}
public static class AudioSessionParametersPresets
{
public static AudioSessionParameters Game = new AudioSessionParameters()
{
Category = AudioSessionCategory.PlayAndRecord,
Mode = AudioSessionMode.Default,
CategoryOptions = new AudioSessionCategoryOption[] { AudioSessionCategoryOption.DefaultToSpeaker, AudioSessionCategoryOption.AllowBluetooth }
};
public static AudioSessionParameters VoIP = new AudioSessionParameters()
{
Category = AudioSessionCategory.PlayAndRecord,
Mode = AudioSessionMode.VoiceChat,
// according to the docs, VoiceChat should have the side effect of setting AVAudioSessionCategoryOptionAllowBluetooth,
// but tests don't confirm this
CategoryOptions = new AudioSessionCategoryOption[] { AudioSessionCategoryOption.AllowBluetooth }
};
}
}
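
As a worked example of the option packing above: the VoIP preset carries the single option AllowBluetooth, so CategoryOptionsToInt() ORs the array into 0x4; adding DefaultToSpeaker (0x8) yields 0xC. A small illustrative snippet (not part of the package; AudioSessionParameters is a struct, so the preset itself is never modified):

using Photon.Voice.IOS;

static class AudioSessionParametersSketch
{
    public static void Demo()
    {
        var p = AudioSessionParametersPresets.VoIP; // struct copy
        int opt = p.CategoryOptionsToInt();         // { AllowBluetooth } -> 0x4
        p.CategoryOptions = new[]
        {
            AudioSessionCategoryOption.AllowBluetooth,
            AudioSessionCategoryOption.DefaultToSpeaker
        };
        opt = p.CategoryOptionsToInt();             // 0x4 | 0x8 -> 0xC
        // p.ToString() now renders:
        // "category = PlayAndRecord, mode = VoiceChat, options = [AllowBluetooth, DefaultToSpeaker]"
    }
}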

Some files were not shown because too many files have changed in this diff.