Initial (untested) push of Core Audio API

pull/133/head
dahall 2020-05-11 10:26:34 -06:00
parent 99a1f77520
commit e2d3411a86
16 changed files with 19978 additions and 0 deletions

View File

@@ -0,0 +1,91 @@
using System;
using System.Runtime.InteropServices;
namespace Vanara.PInvoke
{
public static partial class CoreAudio
{
/// <summary>Defines the buffer validation flags for the APO_CONNECTION_PROPERTY structure associated with each APO connection.</summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioapotypes/ne-audioapotypes-apo_buffer_flags typedef enum APO_BUFFER_FLAGS
// { BUFFER_INVALID, BUFFER_VALID, BUFFER_SILENT } ;
[PInvokeData("audioapotypes.h", MSDNShortId = "996b56d7-1187-4ed7-b5f5-7d77291113f6")]
public enum APO_BUFFER_FLAGS
{
/// <summary>
/// There is no valid data in the connection buffer. The buffer pointer is valid and the buffer is capable of holding the amount
/// of valid audio data specified in the APO_CONNECTION_PROPERTY structure. While processing audio data, the audio engine marks
/// every connection as BUFFER_INVALID before calling IAudioOutputEndpointRT::GetOutputDataPointer or IAudioInputEndpointRT::GetInputDataPointer.
/// </summary>
BUFFER_INVALID,
/// <summary>
/// The connection buffer contains valid data. This is the operational state of the connection buffer. The APO sets this flag
/// after it starts writing valid data into the buffer. Capture endpoints should set this flag in the GetInputDataPointer method
/// upon successful completion of the call.
/// </summary>
BUFFER_VALID,
/// <summary>
/// The connection buffer must be treated as if it contains silence. If the endpoint receives an input connection buffer that is
/// identified as BUFFER_SILENT, then the endpoint can assume the data represents silence. When capturing, the endpoint can also
/// set this flag, if necessary for a capture buffer.
/// </summary>
BUFFER_SILENT,
}
/// <summary>
/// The <c>AUDIO_CURVE_TYPE</c> enumeration defines constants that specify a curve algorithm to be applied to set a volume level.
/// </summary>
/// <remarks>
/// <para>
/// The linked documentation includes a snippet of pseudocode showing the logic for the algorithm that is applied to the volume
/// setting to reach the target volume level, along with a diagram giving a graphical representation of that pseudocode.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows-hardware/drivers/ddi/ksmedia/ne-ksmedia-audio_curve_type typedef enum {
// AUDIO_CURVE_TYPE_NONE, AUDIO_CURVE_TYPE_WINDOWS_FADE } AUDIO_CURVE_TYPE;
[PInvokeData("AudioAPITypes.h", MSDNShortId = "E3CE3385-8744-4E3F-A5EF-41AC4E3E4375")]
public enum AUDIO_CURVE_TYPE
{
/// <summary>
/// Specifies that no curve algorithm will be applied. When this curve is specified, the duration of the curve specified must be
/// equal to 0.
/// </summary>
AUDIO_CURVE_TYPE_NONE,
/// <summary>
/// Specifies that the algorithm that is applied to the volume setting must follow the curve shown in the diagram in the linked documentation.
/// </summary>
AUDIO_CURVE_TYPE_WINDOWS_FADE,
}
/// <summary>Contains the dynamically changing connection properties.</summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioapotypes/ns-audioapotypes-apo_connection_property typedef struct
// APO_CONNECTION_PROPERTY { UINT_PTR pBuffer; UINT32 u32ValidFrameCount; APO_BUFFER_FLAGS u32BufferFlags; UINT32 u32Signature; } APO_CONNECTION_PROPERTY;
[PInvokeData("audioapotypes.h", MSDNShortId = "dbf7ed62-445e-4f15-bc21-46117e694dc0")]
[StructLayout(LayoutKind.Sequential)]
public struct APO_CONNECTION_PROPERTY
{
/// <summary>A pointer to the connection buffer. Endpoints use this buffer to read and write audio data.</summary>
public IntPtr pBuffer;
/// <summary>
/// The number of valid frames in the connection buffer. An APO uses the valid frame count to determine the amount of data to
/// read and process in the input buffer. An APO sets the valid frame count after writing data into its output connection.
/// </summary>
public uint u32ValidFrameCount;
/// <summary>
/// The connection flags for this buffer. This indicates the validity status of the APOs. For more information about these
/// flags, see APO_BUFFER_FLAGS.
/// </summary>
public APO_BUFFER_FLAGS u32BufferFlags;
/// <summary>A tag that identifies a valid <c>APO_CONNECTION_PROPERTY</c> structure. A valid structure is marked as <c>APO_CONNECTION_PROPERTY_SIGNATURE</c>.</summary>
public uint u32Signature;
}
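// Usage sketch (hypothetical, untested): an endpoint implementation might fill in an
// APO_CONNECTION_PROPERTY before handing a buffer back to the audio engine. Note that
// APO_CONNECTION_PROPERTY_SIGNATURE comes from audioapotypes.h and is not declared in this file.
//
// var prop = new APO_CONNECTION_PROPERTY
// {
//     pBuffer = bufferPtr,                            // frame-aligned audio buffer
//     u32ValidFrameCount = framesWritten,             // frames actually written
//     u32BufferFlags = APO_BUFFER_FLAGS.BUFFER_VALID, // buffer now holds valid data
//     u32Signature = APO_CONNECTION_PROPERTY_SIGNATURE
// };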
}
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,43 @@
using System;
using System.Runtime.InteropServices;
namespace Vanara.PInvoke
{
/// <summary>Functions, structures and constants from the Windows Core Audio API.</summary>
public static partial class CoreAudio
{
/// <summary>Allows the application to specify which formats are reset.</summary>
[PInvokeData("audioendpoints.h", MSDNShortId = "7FF7DCF2-0580-4B50-8EA9-87DB9478B1E8")]
[Flags]
public enum ENDPOINT_RESET
{
/// <summary>Only reset the mix format. The endpoint's device format will not be reset if this flag is set.</summary>
ENDPOINT_FORMAT_RESET_MIX_ONLY = 0x00000001
}
/// <summary>Used for resetting the current audio endpoint device format.</summary>
/// <remarks>
/// This setting is exposed to the user through the "Sounds" control panel and can be read from the endpoint property store using PKEY_AudioEngine_DeviceFormat.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioendpoints/nn-audioendpoints-iaudioendpointformatcontrol
[PInvokeData("audioendpoints.h", MSDNShortId = "7FF7DCF2-0580-4B50-8EA9-87DB9478B1E8")]
[ComImport, Guid("784CFD40-9F89-456E-A1A6-873B006A664E"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointFormatControl
{
/// <summary>Resets the format to the default setting provided by the device manufacturer.</summary>
/// <param name="ResetFlags">
/// <para>
/// Allows the application to specify which formats are reset. If no flags are set, then this method reevaluates both the
/// endpoint's device format and mix format and sets them to their default values.
/// </para>
/// <para>
/// ENDPOINT_FORMAT_RESET_MIX_ONLY: Only reset the mix format. The endpoint's device format will not be reset if this flag is set.
/// </para>
/// </param>
/// <returns>If this method succeeds, it returns <c>S_OK</c>. Otherwise, it returns an <c>HRESULT</c> error code.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioendpoints/nf-audioendpoints-iaudioendpointformatcontrol-resettodefault
// HRESULT ResetToDefault( DWORD ResetFlags );
HRESULT ResetToDefault(ENDPOINT_RESET ResetFlags);
}
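// Usage sketch: assuming an IAudioEndpointFormatControl instance has been obtained elsewhere
// (e.g. activated on an endpoint device; that plumbing is outside this file), a reset of only
// the mix format looks like this:
//
// formatControl.ResetToDefault(ENDPOINT_RESET.ENDPOINT_FORMAT_RESET_MIX_ONLY).ThrowIfFailed();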
}
}

View File

@@ -0,0 +1,804 @@
using System;
using System.Runtime.InteropServices;
using Vanara.InteropServices;
using static Vanara.PInvoke.PropSys;
using IAudioMediaType = System.IntPtr; // TODO: Replace IAudioMediaType with reference to new library when done.
namespace Vanara.PInvoke
{
/// <summary>Functions, structures and constants from the Windows Core Audio API.</summary>
public static partial class CoreAudio
{
/// <summary>
/// <para>
/// The <c>APO_CONNECTION_BUFFER_TYPE</c> enumeration defines constants that indicate whether the audio engine allocates the
/// connection buffer or uses the buffer that is provided by the APO. These flags are used by the <c>Type</c> member of the
/// <c>APO_CONNECTION_DESCRIPTOR</c> structure that stores the configuration settings of an APO connection. These settings are
/// required by the audio engine when an APO connection is created.
/// </para>
/// </summary>
/// <remarks>The Terminal Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/previous-versions/dd408130(v=vs.85) typedef enum { APO_CONNECTION_BUFFER_TYPE_ALLOCATED = 0,
// APO_CONNECTION_BUFFER_TYPE_EXTERNAL = 1, APO_CONNECTION_BUFFER_TYPE_DEPENDANT = 2 } APO_CONNECTION_BUFFER_TYPE;
[PInvokeData("Audioenginebaseapo.h")]
public enum APO_CONNECTION_BUFFER_TYPE
{
/// <summary>The connection buffer is internally allocated by the audio engine.</summary>
APO_CONNECTION_BUFFER_TYPE_ALLOCATED = 0,
/// <summary>
/// The connection buffer is allocated by the APO, and the audio engine must use the connection buffer that is specified in the
/// pBuffer member of the APO_CONNECTION_DESCRIPTOR structure.
/// </summary>
APO_CONNECTION_BUFFER_TYPE_EXTERNAL = 1,
/// <summary>The connection buffer is extracted by the audio engine from another connection.</summary>
APO_CONNECTION_BUFFER_TYPE_DEPENDANT = 2,
}
/// <summary>
/// <para>The APO_FLAG enumeration defines constants that are used as flags by an audio processing object (APO).</para>
/// <para>This enumeration is used by the APO_REG_PROPERTIES structure to help describe the registration properties of an APO.</para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/ne-audioenginebaseapo-apo_flag typedef enum APO_FLAG {
// APO_FLAG_NONE, APO_FLAG_INPLACE, APO_FLAG_SAMPLESPERFRAME_MUST_MATCH, APO_FLAG_FRAMESPERSECOND_MUST_MATCH,
// APO_FLAG_BITSPERSAMPLE_MUST_MATCH, APO_FLAG_MIXER, APO_FLAG_DEFAULT } ;
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "42134625-A351-4CB6-B83C-3F2E662D1938")]
[Flags]
public enum APO_FLAG
{
/// <summary>Indicates that there are no flags enabled for this APO.</summary>
APO_FLAG_NONE = 0x00,
/// <summary>
/// Indicates that this APO can perform in-place processing. This allows the processor to use a common buffer for input and output.
/// </summary>
APO_FLAG_INPLACE = 0x01,
/// <summary>Indicates that the samples per frame for the input and output connections must match.</summary>
APO_FLAG_SAMPLESPERFRAME_MUST_MATCH = 0x02,
/// <summary>Indicates that the frames per second for the input and output connections must match.</summary>
APO_FLAG_FRAMESPERSECOND_MUST_MATCH = 0x04,
/// <summary>Indicates that bits per sample AND bytes per sample container for the input and output connections must match.</summary>
APO_FLAG_BITSPERSAMPLE_MUST_MATCH = 0x08,
/// <summary/>
APO_FLAG_MIXER = 0x10,
/// <summary>
/// The value of this member is determined by the logical OR result of the three preceding members. In other words:
/// APO_FLAG_DEFAULT = ( APO_FLAG_SAMPLESPERFRAME_MUST_MATCH | APO_FLAG_FRAMESPERSECOND_MUST_MATCH | APO_FLAG_BITSPERSAMPLE_MUST_MATCH ).
/// </summary>
APO_FLAG_DEFAULT = APO_FLAG_SAMPLESPERFRAME_MUST_MATCH | APO_FLAG_FRAMESPERSECOND_MUST_MATCH | APO_FLAG_BITSPERSAMPLE_MUST_MATCH,
}
/// <summary>
/// <para>
/// System Effects Audio Processing Objects (sAPOs) are typically used in or called from real-time process threads. However, it is
/// sometimes necessary to use an sAPO in a non real-time mode. For example, when an sAPO is initialized, it is called from a non
/// real-time thread. But when audio processing begins, the sAPO is called from a real-time thread. The interface exposes methods
/// that enable a client to access the non real-time compliant parts of an sAPO.
/// </para>
/// <para>The interface supports the following methods:</para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nn-audioenginebaseapo-iaudioprocessingobject
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "71be0151-20dd-40e3-a478-d67e4d8d9c36")]
[ComImport, Guid("FD7F2B29-24D0-4b5c-B177-592C39F9CA10"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioProcessingObject
{
/// <summary>
/// The Reset method resets the APO to its original state. This method does not cause any changes in the connection objects that
/// are attached to the input or the output of the APO.
/// </summary>
/// <remarks>
/// This method is not real-time compliant and must not be called from a real-time processing thread. The implementation of this
/// method does not and must not touch paged memory. Additionally, it must not call any blocking system routines.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-reset
// HRESULT Reset();
void Reset();
/// <summary>
/// The GetLatency method returns the latency for this APO. Latency is the amount of time it takes a frame to traverse the
/// processing pass of an APO.
/// </summary>
/// <returns>
/// An HNSTIME value that receives the number of units of delay that this APO introduces. Each unit of delay represents
/// 100 nanoseconds.
/// </returns>
/// <remarks>
/// <para>
/// If the client that is calling this APO knows the sampling rate, the client can calculate the latency in terms of the number
/// of frames. To get the total latency of the entire audio signal processing stream, the client must query every APO in the
/// processing chain and add up the results.
/// </para>
/// <para><c>Important</c> This method is not real-time compliant and must not be called from a real-time processing thread.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-getlatency
// HRESULT GetLatency( HNSTIME *pTime );
long GetLatency();
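// Usage sketch: the latency is expressed in 100-ns (HNSTIME) units, so a client that knows
// the sample rate can convert it to frames (variable names here are illustrative):
//
// long hns = apo.GetLatency();
// double latencyInFrames = hns / 1e7 * sampleRate; // 10,000,000 HNSTIME units per second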
/// <summary>GetRegistrationProperties returns the registration properties of the audio processing object (APO).</summary>
/// <returns>The registration properties of the APO. This parameter is of type APO_REG_PROPERTIES.</returns>
/// <remarks>
/// <para>The caller must free the memory returned by this method; disposing the returned <c>SafeCoTaskMemHandle</c> does so.</para>
/// <para><c>Note</c> This method must not be called from a real-time processing thread.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-getregistrationproperties
// HRESULT GetRegistrationProperties( APO_REG_PROPERTIES **ppRegProps );
SafeCoTaskMemHandle GetRegistrationProperties();
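// Usage sketch: the returned CoTaskMem block begins with an APO_REG_PROPERTIES structure
// (defined later in this file) and can be converted with Vanara's ToStructure helper:
//
// using (var mem = apo.GetRegistrationProperties())
// {
//     APO_REG_PROPERTIES props = mem.ToStructure<APO_REG_PROPERTIES>();
// }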
/// <summary>The Initialize method initializes the APO and supports data of variable length.</summary>
/// <param name="cbDataSize">This is the size, in bytes, of the initialization data.</param>
/// <param name="pbyData">This is initialization data that is specific to this APO.</param>
/// <remarks>
/// If this method is used to initialize an APO without the need to initialize any data, it is acceptable to supply a
/// <c>NULL</c> as the value of the pbyData parameter and a 0 (zero) as the value of the cbDataSize parameter. The data that is
/// supplied is of variable length and must begin with an APOInitBaseStruct header; see the linked documentation for the full format.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-initialize
// HRESULT Initialize( UINT32 cbDataSize, BYTE *pbyData );
void Initialize(uint cbDataSize, [In, MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] byte[] pbyData);
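// Usage sketch (hypothetical, untested): initialization data starts with an APOInitBaseStruct
// header (defined later in this file), flattened to a byte array for this call:
//
// int cb = Marshal.SizeOf(typeof(APOInitBaseStruct));
// var init = new APOInitBaseStruct { cbSize = (uint)cb, clsid = apoClsid };
// var bytes = new byte[cb];
// IntPtr tmp = Marshal.AllocCoTaskMem(cb);
// try { Marshal.StructureToPtr(init, tmp, false); Marshal.Copy(tmp, bytes, 0, cb); }
// finally { Marshal.FreeCoTaskMem(tmp); }
// apo.Initialize((uint)cb, bytes);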
/// <summary>
/// This method negotiates with the Windows Vista audio engine to establish a data format for the stream of audio data.
/// </summary>
/// <param name="pOppositeFormat">
/// A pointer to an IAudioMediaType interface. This parameter is used to indicate the output format of the data. The value of
/// pOppositeFormat must be set to <c>NULL</c> to indicate that the output format can be any type.
/// </param>
/// <param name="pRequestedInputFormat">
/// A pointer to an IAudioMediaType interface. This parameter is used to indicate the input format that is to be verified.
/// </param>
/// <param name="ppSupportedInputFormat">
/// This parameter indicates the supported format that is closest to the format to be verified.
/// </param>
/// <returns>
/// <para>
/// If the call completed successfully, the ppSupportedInputFormat parameter returns a pRequestedInputFormat pointer and the
/// IsInputFormatSupported method returns a value of S_OK. Otherwise, this method returns one of the following error codes:
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>S_FALSE</term>
/// <term>The format of the input/output format pair is not supported. ppSupportedInputFormat returns a suggested new format.</term>
/// </item>
/// <item>
/// <term>APOERR_FORMAT_NOT_SUPPORTED</term>
/// <term>The format to be verified is not supported. The value of ppSupportedInputFormat does not change.</term>
/// </item>
/// <item>
/// <term>E_POINTER</term>
/// <term>Invalid pointer that is passed to the method. The value of ppSupportedInputFormat does not change.</term>
/// </item>
/// <item>
/// <term>Other HRESULT values</term>
/// <term>These additional error conditions are tracked by the audio engine.</term>
/// </item>
/// </list>
/// </returns>
/// <remarks>
/// <para>
/// There are differences in the implementation of the method by the different APOs. For example, with certain implementations,
/// the output can only be of type float when the input format is of type integer.
/// </para>
/// <para>
/// To initiate format negotiation, the audio service first sets the output of the LFX sAPO to the default float32-based format.
/// The audio service then calls the method of the LFX sAPO, suggests the default format, and monitors the HRESULT response of
/// this method. If the input of the LFX sAPO can support the suggested format, it returns S_OK, together with a reference to
/// the supported format. If the input of the LFX sAPO cannot support the suggested format, it returns S_FALSE together with a
/// reference to a format that is the closest match to the suggested one. If the LFX sAPO cannot support the suggested format
/// and does not have a close match, it returns APOERR_FORMAT_NOT_SUPPORTED. The GFX sAPO works with the output format of the
/// LFX sAPO. So the GFX sAPO is not involved in the format negotiation process.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-isinputformatsupported
// HRESULT IsInputFormatSupported( IAudioMediaType *pOppositeFormat, IAudioMediaType *pRequestedInputFormat, IAudioMediaType
// **ppSupportedInputFormat );
[PreserveSig]
HRESULT IsInputFormatSupported(IAudioMediaType pOppositeFormat, IAudioMediaType pRequestedInputFormat, out IAudioMediaType ppSupportedInputFormat);
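// Usage sketch: because this method is [PreserveSig], callers branch on the returned HRESULT
// rather than relying on exceptions; S_FALSE means a closest-match format was returned
// (IAudioMediaType is aliased to IntPtr at the top of this file):
//
// HRESULT hr = apo.IsInputFormatSupported(IntPtr.Zero, requestedFormat, out var supported);
// if (hr == HRESULT.S_FALSE)
// {
//     // 'supported' holds the suggested alternative input format
// }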
/// <summary>The method is used to verify that a specific output format is supported.</summary>
/// <param name="pOppositeFormat">
/// A pointer to an IAudioMediaType interface. This parameter indicates the output format. This parameter must be set to
/// <c>NULL</c> to indicate that the output format can be any type.
/// </param>
/// <param name="pRequestedOutputFormat">
/// A pointer to an <c>IAudioMediaType</c> interface. This parameter indicates the output format that is to be verified.
/// </param>
/// <param name="ppSupportedOutputFormat">
/// This parameter indicates the supported output format that is closest to the format to be verified.
/// </param>
/// <returns>
/// <para>
/// If the call completes successfully, the ppSupportedOutputFormat parameter returns a pRequestedOutputFormat pointer and the
/// IsOutputFormatSupported method returns a value of S_OK. Otherwise, this method returns one of the following error codes:
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>S_FALSE</term>
/// <term>
/// The format of Input/output format pair is not supported. The ppSupportedOutPutFormat parameter returns a suggested new format.
/// </term>
/// </item>
/// <item>
/// <term>APOERR_FORMAT_NOT_SUPPORTED</term>
/// <term>The format is not supported. The value of ppSupportedOutputFormat does not change.</term>
/// </item>
/// <item>
/// <term>E_POINTER</term>
/// <term>An invalid pointer was passed to the function. The value of ppSupportedOutputFormat does not change.</term>
/// </item>
/// <item>
/// <term>Other HRESULT values</term>
/// <term>These additional error conditions are tracked by the audio engine.</term>
/// </item>
/// </list>
/// </returns>
/// <remarks>
/// There are differences in the implementation of the method by the different APOs. For example, with certain implementations,
/// the output can only be of type float when the input format is of type integer.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-isoutputformatsupported
// HRESULT IsOutputFormatSupported( IAudioMediaType *pOppositeFormat, IAudioMediaType *pRequestedOutputFormat, IAudioMediaType
// **ppSupportedOutputFormat );
[PreserveSig]
HRESULT IsOutputFormatSupported(IAudioMediaType pOppositeFormat, IAudioMediaType pRequestedOutputFormat, out IAudioMediaType ppSupportedOutputFormat);
/// <summary>GetInputChannelCount returns the input channel count (samples-per-frame) for this APO.</summary>
/// <returns>The input channel count.</returns>
/// <remarks>The input channel count that is returned refers to the input side of the APO.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobject-getinputchannelcount
// HRESULT GetInputChannelCount( UINT32 *pu32ChannelCount );
uint GetInputChannelCount();
}
/// <summary>
/// <para>The interface is used to configure the APO. This interface uses its methods to lock and unlock the APO for processing.</para>
/// <para>The interface supports the following methods:</para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nn-audioenginebaseapo-iaudioprocessingobjectconfiguration
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "6311a5d1-b9d3-4c62-99aa-8feda32b4a2f")]
[ComImport, Guid("0E5ED805-ABA6-49c3-8F9A-2B8C889C4FA8"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioProcessingObjectConfiguration
{
/// <summary>The method is used to verify that the APO is locked and ready to process data.</summary>
/// <param name="u32NumInputConnections">Number of input connections that are attached to this APO.</param>
/// <param name="ppInputConnections">Connection descriptor for each input connection that is attached to this APO.</param>
/// <param name="u32NumOutputConnections">Number of output connections that are attached to this APO.</param>
/// <param name="ppOutputConnections">Connection descriptor for each output connection that is attached to this APO.</param>
/// <remarks>
/// When the method is called, it first performs an internal check to see if the APO has been initialized and is ready to
/// process data. Each APO has different initialization requirements so each APO must define its own Initialize method if needed.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobjectconfiguration-lockforprocess
// HRESULT LockForProcess( UINT32 u32NumInputConnections, APO_CONNECTION_DESCRIPTOR **ppInputConnections, UINT32
// u32NumOutputConnections, APO_CONNECTION_DESCRIPTOR **ppOutputConnections );
void LockForProcess([In] uint u32NumInputConnections, [In, MarshalAs(UnmanagedType.LPArray, ArraySubType = UnmanagedType.LPStruct, SizeParamIndex = 0)] APO_CONNECTION_DESCRIPTOR[] ppInputConnections,
[In] uint u32NumOutputConnections, [In, MarshalAs(UnmanagedType.LPArray, ArraySubType = UnmanagedType.LPStruct, SizeParamIndex = 2)] APO_CONNECTION_DESCRIPTOR[] ppOutputConnections);
/// <summary>The method releases the lock that was imposed on the APO by the LockForProcess method.</summary>
/// <remarks>
/// The method places the APO in a mode that makes configuration changes possible. These changes include Add, Remove, and Swap
/// of the input and output connections that are attached to the APO.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobjectconfiguration-unlockforprocess
// HRESULT UnlockForProcess();
void UnlockForProcess();
}
/// <summary>
/// <para>
/// This interface can operate in real-time mode and its methods can be called from real-time processing threads. The implementation
/// of the methods for this interface must not block or touch paged memory. Additionally, you must not call any blocking system
/// routines in the implementation of the methods.
/// </para>
/// <para>The interface includes the following methods:</para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nn-audioenginebaseapo-iaudioprocessingobjectrt
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "640ac817-16f2-47c8-87e9-1ae0136e6e55")]
[ComImport, Guid("9E1D6A6D-DDBC-4E95-A4C7-AD64BA37846C"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioProcessingObjectRT
{
/// <summary>The APOProcess method causes the APO to make a processing pass.</summary>
/// <param name="u32NumInputConnections">The number of input connections that are attached to this APO.</param>
/// <param name="ppInputConnections">An array of input connection property structures. There is one structure per input connection.</param>
/// <param name="u32NumOutputConnections">The number of output connections that are attached to this APO.</param>
/// <param name="ppOutputConnections">
/// An array of output connection property structures. There is one structure per output connection.
/// </param>
/// <returns>None</returns>
/// <remarks>
/// <para>
/// The method must not change the data in the ppOutputConnections array. But it must set the properties of the output
/// connections after processing.
/// </para>
/// <para>
/// The method is called from a real-time processing thread. The implementation of this method must not touch paged memory and
/// it should not call any system blocking routines.
/// </para>
/// <para>
/// For a detailed look at an implementation of this method, see the Swap sample code and refer to the Swapapolfx.cpp file.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobjectrt-apoprocess
// void APOProcess( UINT32 u32NumInputConnections, APO_CONNECTION_PROPERTY **ppInputConnections, UINT32 u32NumOutputConnections,
// APO_CONNECTION_PROPERTY **ppOutputConnections );
[PreserveSig]
void APOProcess([In] uint u32NumInputConnections, [In, Out, MarshalAs(UnmanagedType.LPArray, ArraySubType = UnmanagedType.LPStruct, SizeParamIndex = 0)] APO_CONNECTION_PROPERTY[] ppInputConnections,
[In] uint u32NumOutputConnections, [In, Out, MarshalAs(UnmanagedType.LPArray, ArraySubType = UnmanagedType.LPStruct, SizeParamIndex = 2)] APO_CONNECTION_PROPERTY[] ppOutputConnections);
/// <summary>The method returns the number of input frames that an APO requires to generate a given number of output frames.</summary>
/// <param name="u32OutputFrameCount">This is a count of the number of output frames.</param>
/// <returns>The number of input frames that are required to generate the given number of output frames.</returns>
/// <remarks>
/// The method is called from a real-time processing thread. The implementation of this method must not touch paged memory and
/// it should not call any system blocking routines.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobjectrt-calcinputframes
// UINT32 CalcInputFrames( UINT32 u32OutputFrameCount );
[PreserveSig]
uint CalcInputFrames(uint u32OutputFrameCount);
/// <summary>The method returns the number of output frames that an APO requires for a given number of input frames.</summary>
/// <param name="u32InputFrameCount">This is a count of the number of input frames.</param>
/// <returns>The number of output frames that an APO will generate for the given number of input frames.</returns>
/// <remarks>
/// The method can be called from a real-time processing thread. The implementation of this method must not block or touch paged
/// memory and it should not call any system blocking routines.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudioprocessingobjectrt-calcoutputframes
// UINT32 CalcOutputFrames( UINT32 u32InputFrameCount );
[PreserveSig]
uint CalcOutputFrames([In] uint u32InputFrameCount);
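// Usage sketch: a client sizes its transfers with these two methods; e.g. to produce
// 480 output frames (10 ms at 48 kHz), the required input is:
//
// uint inputFramesNeeded = apoRT.CalcInputFrames(480);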
}
/// <summary>
/// <para>
/// The IAudioSystemEffects interface uses the basic methods that are inherited from <c>IUnknown</c>, and must implement an
/// <c>Initialize</c> method. The parameters that are passed to this <c>Initialize</c> method must be passed directly to the
/// <c>IAudioProcessingObject::Initialize</c> method.
/// </para>
/// <para>
/// Refer to the IAudioProcessingObject::Initialize method for information about the structure and the parameters that are required
/// to implement the <c>IAudioSystemEffects::Initialize</c> method.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nn-audioenginebaseapo-iaudiosystemeffects
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "86429c51-6831-4266-9774-1547dc04bcb0")]
[ComImport, Guid("5FA00F27-ADD6-499a-8A9D-6B98521FA75B"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioSystemEffects
{
}
/// <summary>
/// The <c>IAudioSystemEffects2</c> interface was introduced with Windows 8.1 for retrieving information about the processing
/// objects in a given mode.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nn-audioenginebaseapo-iaudiosystemeffects2
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "5989BAFB-6B2D-4186-9A8D-96C8974E0D18")]
[ComImport, Guid("BAFE99D2-7436-44CE-9E0E-4D89AFBFFF56"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioSystemEffects2 : IAudioSystemEffects
{
/// <summary>
/// The GetEffectsList method is used for retrieving the list of audio processing effects that are currently active, and stores
/// an event to be signaled if the list changes.
/// </summary>
/// <param name="ppEffectsIds">
/// Pointer to the list of GUIDs that represent audio processing effects. The caller is responsible for freeing this memory by
/// calling CoTaskMemFree.
/// </param>
/// <param name="pcEffects">A count of the audio processing effects in the list.</param>
/// <param name="Event">The HANDLE of the event that will be signaled if the list changes.</param>
/// <remarks>
/// <para>
/// The APO signals the specified event when the list of audio processing effects changes from the list that was returned by
/// <c>GetEffectsList</c>. The APO uses this event until either <c>GetEffectsList</c> is called again, or the APO is destroyed.
/// The passed handle can be NULL, in which case the APO stops using any previous handle and does not signal an event.
/// </para>
/// <para>
/// An APO implements this method to allow Windows to discover the current effects applied by the APO. The list of effects may
/// depend on the processing mode that the APO initialized, and on any end user configuration. The processing mode is indicated
/// by the AudioProcessingMode member of APOInitSystemEffects2.
/// </para>
/// <para>
/// APOs should identify effects using GUIDs defined by Windows, such as AUDIO_EFFECT_TYPE_ACOUSTIC_ECHO_CANCELLATION. An APO
/// should only define and return a custom GUID in rare cases where the type of effect is clearly different from the ones
/// defined by Windows.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudiosystemeffects2-geteffectslist
// HRESULT GetEffectsList( LPGUID *ppEffectsIds, UINT *pcEffects, HANDLE Event );
void GetEffectsList(out SafeCoTaskMemHandle ppEffectsIds, out uint pcEffects, [In] IntPtr Event);
}
/// <summary>
/// <para>
/// The interface is supported in Windows Vista and later versions of Windows. When you develop an audio processing object (APO) to
/// drive an audio adapter with an atypical format, the APO must support the interface.
/// </para>
/// <para>
/// The Windows operating system can instantiate your APO outside the audio engine and use the interface to retrieve information
/// about the atypical format. The associated user interface displays the data that is retrieved.
/// </para>
/// <para>
/// <c>Important</c> Although the interface continues to be supported in Windows, note that the type of APO to which you can apply
/// this interface depends on the version of Windows you are targeting. The following table provides more information:
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Target OS</term>
/// <term>Target APO type</term>
/// </listheader>
/// <item>
/// <term>Windows Vista</term>
/// <term>Global effects (GFX)</term>
/// </item>
/// <item>
/// <term>Windows 7</term>
/// <term>Global effects (GFX)</term>
/// </item>
/// <item>
/// <term>Windows 8</term>
/// <term>Global effects (GFX)</term>
/// </item>
/// <item>
/// <term>Windows 8.1</term>
/// <term>Endpoint effects (EFX)</term>
/// </item>
/// </list>
/// <para>The interface inherits from <c>IUnknown</c> and also supports the following methods:</para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nn-audioenginebaseapo-iaudiosystemeffectscustomformats
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "29b758c0-5bbe-489c-9950-bc92a185fbaf")]
[ComImport, Guid("B1176E34-BB7F-4f05-BEBD-1B18A534E097"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioSystemEffectsCustomFormats
{
/// <summary>The method retrieves the number of custom formats supported by the system effects audio processing object (sAPO).</summary>
/// <returns>
/// The number of formats supported by the sAPO.
/// </returns>
/// <remarks>For more information about sAPOs, see System Effects Audio Processing Objects.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudiosystemeffectscustomformats-getformatcount
// HRESULT GetFormatCount( UINT *pcFormats );
uint GetFormatCount();
/// <summary>The method retrieves an IAudioMediaType representation of a custom format.</summary>
/// <param name="nFormat">
/// Specifies the index of a supported format. This parameter can be any value in the range from zero to one less than the
/// return value of GetFormatCount. In other words, any value in the range from zero to GetFormatCount( ) - 1.
/// </param>
/// <returns>
/// Specifies a pointer to a pointer to an <c>IAudioMediaType</c> interface. It is the responsibility of the caller to release
/// the <c>IAudioMediaType</c> interface to which the ppFormat parameter points.
/// </returns>
/// <remarks>
/// When the audio system calls the method, the sAPO creates an audio media type object and returns an <c>IAudioMediaType</c>
/// interface. The sAPO implementation can use the CreateAudioMediaType utility function to create the audio media type object.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudiosystemeffectscustomformats-getformat
// HRESULT GetFormat( UINT nFormat, IAudioMediaType **ppFormat );
IAudioMediaType GetFormat(uint nFormat);
/// <summary>The method retrieves a string representation of the custom format so that it can be displayed in a user interface.</summary>
/// <param name="nFormat">
/// Specifies the index of a supported format. This parameter can be any value in the range from zero to one less than the
/// return value of GetFormatCount. In other words, any value in the range from zero to GetFormatCount( ) - 1.
/// </param>
/// <param name="ppwstrFormatRep">
/// Specifies the address of the buffer that receives a NULL-terminated Unicode string that describes the custom format.
/// </param>
/// <returns>
/// <para>
/// The method returns S_OK when the call is successful. Otherwise, it returns one of the error codes shown in the following table.
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>E_POINTER</term>
/// <term>Invalid pointer passed to function</term>
/// </item>
/// <item>
/// <term>E_OUTOFMEMORY</term>
/// <term>Return buffer cannot be allocated</term>
/// </item>
/// <item>
/// <term>E_INVALIDARG</term>
/// <term>nFormat is out of range</term>
/// </item>
/// </list>
/// </returns>
/// <remarks>
/// The sAPO uses CoTaskMemAlloc to allocate the returned string. The caller must use CoTaskMemFree to delete the buffer that is
/// pointed to by the ppwstrFormatRep parameter.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/nf-audioenginebaseapo-iaudiosystemeffectscustomformats-getformatrepresentation
// HRESULT GetFormatRepresentation( UINT nFormat, LPWSTR *ppwstrFormatRep );
void GetFormatRepresentation(uint nFormat, out SafeCoTaskMemString ppwstrFormatRep);
}
/// <summary>
/// The GetEffectsList method is used for retrieving the list of audio processing effects that are currently active, and stores an
/// event to be signaled if the list changes.
/// </summary>
/// <param name="ase2">The <c>IAudioSystemEffects2</c> instance.</param>
/// <param name="Event">The HANDLE of the event that will be signaled if the list changes.</param>
/// <returns>The list of GUIDs that represent audio processing effects.</returns>
/// <remarks>
/// <para>
/// The APO signals the specified event when the list of audio processing effects changes from the list that was returned by
/// <c>GetEffectsList</c>. The APO uses this event until either <c>GetEffectsList</c> is called again, or the APO is destroyed. The
/// passed handle can be NULL, in which case the APO stops using any previous handle and does not signal an event.
/// </para>
/// <para>
/// An APO implements this method to allow Windows to discover the current effects applied by the APO. The list of effects may
/// depend on the processing mode that the APO initialized, and on any end user configuration. The processing mode is indicated by
/// the AudioProcessingMode member of APOInitSystemEffects2.
/// </para>
/// <para>
/// APOs should identify effects using GUIDs defined by Windows, such as AUDIO_EFFECT_TYPE_ACOUSTIC_ECHO_CANCELLATION. An APO should
/// only define and return a custom GUID in rare cases where the type of effect is clearly different from the ones defined by Windows.
/// </para>
/// </remarks>
public static Guid[] GetEffectsList(this IAudioSystemEffects2 ase2, [In] IntPtr Event)
{
ase2.GetEffectsList(out var ids, out var i, Event);
using (ids)
return ids.ToArray<Guid>((int)i);
}
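// Usage sketch: passing IntPtr.Zero as the event handle stops any previous change
// notification, per the remarks above:
//
// Guid[] effects = systemEffects2.GetEffectsList(IntPtr.Zero);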
/// <summary>The method retrieves a string representation of the custom format so that it can be displayed in a user interface.</summary>
/// <param name="fmts">The <c>IAudioSystemEffectsCustomFormats</c> instance.</param>
/// <param name="nFormat">
/// Specifies the index of a supported format. This parameter can be any value in the range from zero to one less than the return
/// value of GetFormatCount. In other words, any value in the range from zero to GetFormatCount( ) - 1.
/// </param>
/// <returns>A string that describes the custom format.</returns>
public static string GetFormatRepresentation(this IAudioSystemEffectsCustomFormats fmts, uint nFormat)
{
fmts.GetFormatRepresentation(nFormat, out var rep);
using (rep)
return (string)rep;
}
/// <summary>The APO_CONNECTION_DESCRIPTOR structure stores the description of an APO connection buffer.</summary>
/// <remarks>The Terminal Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
[PInvokeData("Audioenginebaseapo.h")]
[StructLayout(LayoutKind.Sequential)]
public struct APO_CONNECTION_DESCRIPTOR
{
/// <summary>
/// A value of the APO_CONNECTION_BUFFER_TYPE enumeration that indicates how the connection buffer inside the APO connection is
/// allocated. This member is set only by the APO connection during initialization. It is a private member that should be
/// cleared before creating the connection.
/// </summary>
public APO_CONNECTION_BUFFER_TYPE Type;
/// <summary>
/// A pointer to the buffer to be used for the APO connection. If this member is NULL, the audio engine allocates memory for the
/// buffer and the Type member is set to APO_CONNECTION_BUFFER_TYPE_ALLOCATED. Otherwise, the audio engine uses the specified
/// memory region as the connection buffer. The buffer to be used for the APO connection must be frame aligned or 128-bit
/// aligned, both at the beginning of the buffer and at the start of the audio buffer section. The buffer to be used for the APO
/// connection must be large enough to hold the number of frames indicated in u32MaxFrameCount. This member must point to the
/// beginning of the audio buffer area. If the audio engine must use the memory pointed by this member, the Type member is set
/// to APO_CONNECTION_BUFFER_TYPE_EXTERNAL.
/// </summary>
public IntPtr pBuffer;
/// <summary>
/// The maximum number of frames that the connection buffer can hold. The actual space allocated depends on the exact format of
/// the audio data specified by the pFormat member.
/// </summary>
public uint u32MaxFrameCount;
/// <summary>
/// A pointer to the audio media object that specifies the format of the connection. This also represents the format of the data
/// in the connection buffer.
/// </summary>
public IntPtr pFormat;
/// <summary>A tag that identifies a valid APO_CONNECTION_DESCRIPTOR structure. A valid structure is marked as APO_CONNECTION_DESCRIPTOR_SIGNATURE.</summary>
public uint u32Signature;
}
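// Usage sketch (hypothetical values): leaving pBuffer as IntPtr.Zero asks the audio engine
// to allocate the connection buffer, after which it sets Type to APO_CONNECTION_BUFFER_TYPE_ALLOCATED:
//
// var desc = new APO_CONNECTION_DESCRIPTOR
// {
//     pBuffer = IntPtr.Zero,  // engine-allocated
//     u32MaxFrameCount = 480, // e.g. 10 ms at 48 kHz
//     pFormat = formatPtr,    // IAudioMediaType describing the connection format
// };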
/// <summary>
/// The APO_REG_PROPERTIES structure is used by IAudioProcessingObject::GetRegistrationProperties for returning the registration
/// properties of an audio processing object (APO).
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/ns-audioenginebaseapo-apo_reg_properties typedef struct
// APO_REG_PROPERTIES { CLSID clsid; APO_FLAG Flags; WCHAR szFriendlyName[256]; WCHAR szCopyrightInfo[256]; UINT32 u32MajorVersion;
// UINT32 u32MinorVersion; UINT32 u32MinInputConnections; UINT32 u32MaxInputConnections; UINT32 u32MinOutputConnections; UINT32
// u32MaxOutputConnections; UINT32 u32MaxInstances; UINT32 u32NumAPOInterfaces; IID iidAPOInterfaceList[1]; } APO_REG_PROPERTIES, *PAPO_REG_PROPERTIES;
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "466215E5-5345-4570-A29B-086562882F5D")]
[StructLayout(LayoutKind.Sequential, CharSet = CharSet.Unicode)]
public struct APO_REG_PROPERTIES
{
/// <summary>The class ID for this APO.</summary>
public Guid clsid;
/// <summary>The flags for this APO. This parameter is an enumerated constant of type APO_FLAG.</summary>
public APO_FLAG Flags;
/// <summary>The friendly name of this APO. This is a string of characters with a max length of 256.</summary>
[MarshalAs(UnmanagedType.ByValTStr, SizeConst = 256)]
public string szFriendlyName;
/// <summary>The copyright info for this APO. This is a string of characters with a max length of 256.</summary>
[MarshalAs(UnmanagedType.ByValTStr, SizeConst = 256)]
public string szCopyrightInfo;
/// <summary>The major version number for this APO.</summary>
public uint u32MajorVersion;
/// <summary>The minor version number for this APO.</summary>
public uint u32MinorVersion;
/// <summary>The minimum number of input connections for this APO.</summary>
public uint u32MinInputConnections;
/// <summary>The maximum number of input connections for this APO.</summary>
public uint u32MaxInputConnections;
/// <summary>The minimum number of output connections for this APO.</summary>
public uint u32MinOutputConnections;
/// <summary>The maximum number of output connections for this APO.</summary>
public uint u32MaxOutputConnections;
/// <summary>The maximum number of instances of this APO.</summary>
public uint u32MaxInstances;
/// <summary>The number of interfaces for this APO.</summary>
public uint u32NumAPOInterfaces;
/// <summary>An array of interface IDs (IIDs) of the APO interfaces implemented by this APO. The count of valid entries is given by u32NumAPOInterfaces.</summary>
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 1)]
public Guid[] iidAPOInterfaceList;
}
/// <summary>
/// The APOInitBaseStruct structure is the base initialization header that must precede other initialization data in IAudioProcessingObject::Initialize.
/// </summary>
/// <remarks>
/// If the specified CLSID does not match, then the APOInitBaseStruct structure was not designed for this APO, and this is an error
/// condition. And if the CLSID of the APO changes between versions, then the CLSID may also be used for version management. In the
/// case where the CLSID is used for version management, a previous version may still be supported by the APO.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/ns-audioenginebaseapo-apoinitbasestruct typedef struct
// APOInitBaseStruct { UINT32 cbSize; CLSID clsid; } APOInitBaseStruct;
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "15C973AE-B0E8-42FD-9F34-671A6A915B47")]
[StructLayout(LayoutKind.Sequential)]
public struct APOInitBaseStruct
{
/// <summary>The total size of the structure in bytes.</summary>
public uint cbSize;
/// <summary>The Class ID (CLSID) of the APO.</summary>
public Guid clsid;
}
/// <summary>The APOInitSystemEffects structure gets passed to the system effects APO for initialization.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/ns-audioenginebaseapo-apoinitsystemeffects typedef struct
// APOInitSystemEffects { APOInitBaseStruct APOInit; IPropertyStore *pAPOEndpointProperties; IPropertyStore
// *pAPOSystemEffectsProperties; void *pReserved; IMMDeviceCollection *pDeviceCollection; } APOInitSystemEffects;
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "E33B1F94-4E3A-4EC1-AFB5-FD803FA391BC")]
[StructLayout(LayoutKind.Sequential)]
public struct APOInitSystemEffects
{
/// <summary>An APOInitBaseStruct structure.</summary>
public APOInitBaseStruct APOInit;
/// <summary>A pointer to an IPropertyStore object.</summary>
public IPropertyStore pAPOEndpointProperties;
/// <summary>A pointer to an IPropertyStore object.</summary>
public IPropertyStore pAPOSystemEffectsProperties;
/// <summary>Reserved for future use.</summary>
public IntPtr pReserved;
/// <summary>A pointer to an IMMDeviceCollection object.</summary>
public IMMDeviceCollection pDeviceCollection;
}
/// <summary>
/// The APOInitSystemEffects2 structure was introduced with Windows 8.1, to make it possible to provide additional initialization
/// context to the audio processing object (APO) for initialization.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/ns-audioenginebaseapo-apoinitsystemeffects2 typedef struct
// APOInitSystemEffects2 { APOInitBaseStruct APOInit; IPropertyStore *pAPOEndpointProperties; IPropertyStore
// *pAPOSystemEffectsProperties; void *pReserved; IMMDeviceCollection *pDeviceCollection; UINT nSoftwareIoDeviceInCollection; UINT
// nSoftwareIoConnectorIndex; GUID AudioProcessingMode; BOOL InitializeForDiscoveryOnly; } APOInitSystemEffects2;
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "87E59FCE-1965-4B23-B1F5-F54FEDD5A83E")]
[StructLayout(LayoutKind.Sequential)]
public struct APOInitSystemEffects2
{
/// <summary>An APOInitBaseStruct structure.</summary>
public APOInitBaseStruct APOInit;
/// <summary>A pointer to an IPropertyStore object.</summary>
public IPropertyStore pAPOEndpointProperties;
/// <summary>A pointer to an IPropertyStore object.</summary>
public IPropertyStore pAPOSystemEffectsProperties;
/// <summary>Reserved for future use.</summary>
public IntPtr pReserved;
/// <summary>A pointer to an IMMDeviceCollection object.</summary>
public IMMDeviceCollection pDeviceCollection;
/// <summary>
/// Specifies the MMDevice that implements the DeviceTopology that includes the software connector for which the APO is
/// initializing. The MMDevice is contained in pDeviceCollection.
/// </summary>
public uint nSoftwareIoDeviceInCollection;
/// <summary>Specifies the index of a Software_IO connector in the DeviceTopology.</summary>
public uint nSoftwareIoConnectorIndex;
/// <summary>Specifies the processing mode for the audio graph.</summary>
public Guid AudioProcessingMode;
/// <summary>Indicates whether the audio system is initializing the APO for effects discovery only.</summary>
[MarshalAs(UnmanagedType.Bool)]
public bool InitializeForDiscoveryOnly;
}
/// <summary>The AudioFXExtensionParams structure is passed to the system effects ControlPanel Extension PropertyPage via IShellPropSheetExt::AddPages.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioenginebaseapo/ns-audioenginebaseapo-audiofxextensionparams typedef struct
// __MIDL___MIDL_itf_audioenginebaseapo_0000_0008_0001 { LPARAM AddPageParam; LPWSTR pwstrEndpointID; IPropertyStore *pFxProperties;
// } AudioFXExtensionParams;
[PInvokeData("audioenginebaseapo.h", MSDNShortId = "832F1190-ED3E-4059-AB45-18C23D98663B")]
[StructLayout(LayoutKind.Sequential)]
public struct AudioFXExtensionParams
{
/// <summary>Parameters for the Property Page extension.</summary>
public IntPtr AddPageParam;
/// <summary>The ID for the audio endpoint.</summary>
[MarshalAs(UnmanagedType.LPWStr)]
public string pwstrEndpointID;
/// <summary>An IPropertyStore object.</summary>
public IPropertyStore pFxProperties;
}
}
}

View File

@@ -0,0 +1,615 @@
using System;
using System.Runtime.InteropServices;
using Vanara.InteropServices;
using static Vanara.PInvoke.Winmm;
namespace Vanara.PInvoke
{
/// <summary>Functions, structures and constants from the Windows Core Audio API.</summary>
public static partial class CoreAudio
{
/// <summary>
/// Defines constants for the AE_CURRENT_POSITION structure. These constants describe the degree of validity of the current position.
/// </summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/ne-audioengineendpoint-ae_position_flags typedef enum
// AE_POSITION_FLAGS { POSITION_INVALID, POSITION_DISCONTINUOUS, POSITION_CONTINUOUS, POSITION_QPC_ERROR } ;
[PInvokeData("audioengineendpoint.h", MSDNShortId = "09edc9ae-923c-4f57-9479-c0331588dd92")]
[Flags]
public enum AE_POSITION_FLAGS
{
/// <summary>The position is not valid and must not be used.</summary>
POSITION_INVALID = 0,
/// <summary>
/// The position is valid; however, there has been a disruption such as a glitch or state transition. This current position is
/// not correlated with the previous position. The start of a stream should not reflect a discontinuity.
/// </summary>
POSITION_DISCONTINUOUS = 1,
/// <summary>The position is valid. The previous packet and the current packet are both synchronized with the timeline.</summary>
POSITION_CONTINUOUS = 2,
/// <summary>
/// The query performance counter (QPC) timer value associated with this position is not accurate. This flag is set when a
/// position error is encountered and the implementation is unable to compute an accurate QPC value that correlates with the position.
/// </summary>
POSITION_QPC_ERROR = 4,
}
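// Usage sketch: a reader would trust a reported position only when it is continuous and
// its QPC timestamp is accurate:
//
// bool usable = (flags & AE_POSITION_FLAGS.POSITION_CONTINUOUS) != 0
//     && (flags & AE_POSITION_FLAGS.POSITION_QPC_ERROR) == 0;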
/// <summary>Used by <see cref="AUDIO_ENDPOINT_SHARED_CREATE_PARAMS"/>.</summary>
[PInvokeData("audioengineendpoint.h")]
public enum EndpointConnectorType
{
/// <summary/>
eHostProcessConnector = 0,
/// <summary/>
eOffloadConnector,
/// <summary/>
eLoopbackConnector,
/// <summary/>
eKeywordDetectorConnector,
/// <summary/>
eConnectorCount
}
[ComImport, Guid("D4952F5A-A0B2-4cc4-8B82-9358488DD8AC"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioDeviceEndpoint
{
HRESULT SetBuffer(long MaxPeriod, uint u32LatencyCoefficient);
HRESULT GetRTCaps(out BOOL pbIsRTCapable);
HRESULT GetEventDrivenCapable(out BOOL pbisEventCapable);
HRESULT WriteExclusiveModeParametersToSharedMemory([In] IntPtr hTargetProcess, long hnsPeriod, long hnsBufferDuration, uint u32LatencyCoefficient, out uint pu32SharedMemorySize, out IntPtr phSharedMemory);
}
/// <summary>Provides information to the audio engine about an audio endpoint. This interface is implemented by an audio endpoint.</summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpoint
[PInvokeData("audioengineendpoint.h", MSDNShortId = "a1bb3fe4-6051-4b9c-8270-70375e700f01")]
[ComImport, Guid("30A99515-1527-4451-AF9F-00C5F0234DAF"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpoint
{
/// <summary>
/// The <c>GetFrameFormat</c> method retrieves the format of the audio endpoint.
/// </summary>
/// <param name="ppFormat">Receives a pointer to a <c>WAVEFORMATEX</c> structure that contains the format information for the device that the audio
/// endpoint represents. The implementation must allocate memory for the structure by using <c>CoTaskMemAlloc</c>. The caller
/// must free the buffer by using <c>CoTaskMemFree</c>. For information about CoTaskMemAlloc and CoTaskMemFree, see the Windows
/// SDK documentation.</param>
/// <remarks>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpoint-getframeformat
// HRESULT GetFrameFormat( WAVEFORMATEX **ppFormat );
void GetFrameFormat(out SafeCoTaskMemHandle ppFormat);
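// Usage sketch: the returned CoTaskMem block holds a WAVEFORMATEX (from Vanara.PInvoke.Winmm,
// imported at the top of this file) and can be read like this:
//
// endpoint.GetFrameFormat(out var mem);
// using (mem)
// {
//     WAVEFORMATEX fmt = mem.ToStructure<WAVEFORMATEX>();
// }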
/// <summary>The <c>GetFramesPerPacket</c> method gets the maximum number of frames per packet that the audio endpoint can support, based on the endpoint's period and the sample rate.</summary>
/// <returns>Receives the maximum number of frames per packet that the endpoint can support.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpoint-getframesperpacket
// HRESULT GetFramesPerPacket( UINT32 *pFramesPerPacket );
uint GetFramesPerPacket();
/// <summary>The <c>GetLatency</c> method gets the latency of the audio endpoint.</summary>
/// <returns>A pointer to an <c>HNSTIME</c> variable that receives the latency that is added to the stream by the audio endpoint.</returns>
/// <remarks>
/// <para>There is some latency for an endpoint so that the buffer can stay ahead of the data already committed for input/output (I/O) transfer (playback or capture). For example, if an audio endpoint is using 5-millisecond buffers to stay ahead of the I/O transfer, the latency returned by this method is 5 milliseconds.</para>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpoint-getlatency
// HRESULT GetLatency( HNSTIME *pLatency );
long GetLatency();
/// <summary>
/// The <c>SetStreamFlags</c> method sets the stream configuration flags on the audio endpoint.
/// </summary>
/// <param name="streamFlags">A bitwise <c>OR</c> of one or more of the AUDCLNT_STREAMFLAGS_XXX constants.</param>
/// <remarks>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpoint-setstreamflags
// HRESULT SetStreamFlags( DWORD streamFlags );
void SetStreamFlags(AUDCLNT_STREAMFLAGS streamFlags);
/// <summary>
/// The <c>SetEventHandle</c> method sets the handle for the event that the endpoint uses to signal that it has completed processing of a buffer.
/// </summary>
/// <param name="eventHandle">The event handle used to invoke a buffer completion callback.</param>
/// <remarks>
/// <para>The <c>SetEventHandle</c> method sets the audio engine event handle on the endpoint. In this implementation, the caller should receive an error response of <c>AEERR_NOT_INITIALIZED</c> if the audio endpoint is not initialized or the buffer is not set by the SetBuffer method.</para>
/// <para>To get event notifications, the audio engine will have set the <c>AUDCLNT_STREAMFLAGS_EVENTCALLBACK</c> flag on the endpoint. To set this flag, the audio engine calls the IAudioEndpoint::SetStreamFlags method.</para>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpoint-seteventhandle
// HRESULT SetEventHandle( HANDLE eventHandle );
void SetEventHandle([In] IntPtr eventHandle);
}
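// Illustrative only: a minimal, hypothetical sketch (not part of the API) of the sequence described above:
// query the format, then set AUDCLNT_STREAMFLAGS_EVENTCALLBACK before supplying the event handle. The
// 'endpoint' and 'bufferDoneEvent' values are assumptions, and ToStructure<T> is assumed from Vanara's
// SafeCoTaskMemHandle helpers.
private static void ConfigureEndpointForEventedIO(IAudioEndpoint endpoint, IntPtr bufferDoneEvent)
{
    // The SafeCoTaskMemHandle frees the CoTaskMemAlloc'd WAVEFORMATEX when disposed.
    endpoint.GetFrameFormat(out var fmtMem);
    using (fmtMem)
    {
        var fmt = fmtMem.ToStructure<WAVEFORMATEX>();
        // ... inspect fmt.nChannels, fmt.nSamplesPerSec, etc. ...
    }
    uint framesPerPacket = endpoint.GetFramesPerPacket(); // upper bound for later data-pointer requests
    long latencyHns = endpoint.GetLatency();              // HNSTIME, i.e. 100-nanosecond units
    // Per the SetEventHandle remarks, the event-callback flag must be set for notifications to be raised.
    endpoint.SetStreamFlags(AUDCLNT_STREAMFLAGS.AUDCLNT_STREAMFLAGS_EVENTCALLBACK);
    endpoint.SetEventHandle(bufferDoneEvent);
}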
/// <summary>Controls the stream state of an endpoint.</summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpointcontrol
[PInvokeData("audioengineendpoint.h", MSDNShortId = "4514521a-e9a9-4f39-ab7d-4ef7e514bd10")]
[ComImport, Guid("C684B72A-6DF4-4774-BDF9-76B77509B653"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointControl
{
/// <summary>
/// The <c>Start</c> method starts the endpoint stream.
/// </summary>
/// <remarks>
/// <para>The implementation of this method can differ depending on the type of device that the endpoint represents.</para>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointcontrol-start
// HRESULT Start();
void Start();
/// <summary>
/// The <c>Reset</c> method resets the endpoint stream.
/// </summary>
/// <remarks>
/// <para>
/// <c>Reset</c> discards all data that has not been processed yet. The implementation of this method may differ depending on the type of device that the endpoint represents.</para>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointcontrol-reset
// HRESULT Reset();
void Reset();
/// <summary>
/// The <c>Stop</c> method stops the endpoint stream.
/// </summary>
/// <remarks>
/// <para>The implementation of this method can differ depending on the type of device that the endpoint represents.</para>
/// <para>This method must not be called from a real-time processing thread.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointcontrol-stop
// HRESULT Stop();
void Stop();
}
/// <summary>Provides functionality to allow an offload stream client to notify the endpoint that the last buffer has been sent only partially filled.</summary>
/// <remarks>This is an optional interface on an endpoint.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpointlastbuffercontrol
[PInvokeData("audioengineendpoint.h", MSDNShortId = "79f4b370-fd04-41a9-ad74-54f7edd084c2")]
[ComImport, Guid("F8520DD3-8F9D-4437-9861-62F584C33DD6"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointLastBufferControl
{
/// <summary>Indicates if last buffer control is supported.</summary>
/// <returns><c>true</c> if last buffer control is supported; otherwise, <c>false</c>.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointlastbuffercontrol-islastbuffercontrolsupported
// BOOL IsLastBufferControlSupported();
[PreserveSig]
[return: MarshalAs(UnmanagedType.Bool)]
bool IsLastBufferControlSupported();
/// <summary>Releases the output data pointer for the last buffer.</summary>
/// <param name="pConnectionProperty">The APO connection property.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointlastbuffercontrol-releaseoutputdatapointerforlastbuffer
// void ReleaseOutputDataPointerForLastBuffer( const APO_CONNECTION_PROPERTY *pConnectionProperty );
[PreserveSig]
void ReleaseOutputDataPointerForLastBuffer(in APO_CONNECTION_PROPERTY pConnectionProperty);
}
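// A short, hypothetical sketch (untested) of the guard pattern this optional interface implies: probe for
// the interface and check support before notifying the endpoint about a final, partially filled buffer.
private static void NotifyLastBuffer(object endpointUnknown, in APO_CONNECTION_PROPERTY lastBuffer)
{
    if (endpointUnknown is IAudioEndpointLastBufferControl ctl && ctl.IsLastBufferControlSupported())
        ctl.ReleaseOutputDataPointerForLastBuffer(lastBuffer);
}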
/// <summary>The <c>IAudioEndpointOffloadStreamMeter</c> interface retrieves general information about the audio channels in the offloaded audio stream.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpointoffloadstreammeter
[PInvokeData("audioengineendpoint.h", MSDNShortId = "B19413F9-1DE9-4940-B0A1-11E5278F084B")]
[ComImport, Guid("E1546DCE-9DD1-418B-9AB2-348CED161C86"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointOffloadStreamMeter
{
/// <summary>Gets the number of available audio channels in the offloaded stream that can be metered.</summary>
/// <returns>The number of available audio channels in the offloaded stream that can be metered.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreammeter-getmeterchannelcount
// HRESULT GetMeterChannelCount( UINT32 *pu32ChannelCount );
uint GetMeterChannelCount();
/// <summary>The <c>GetMeteringData</c> method retrieves the peak meter values for the available audio channels in the offloaded stream.</summary>
/// <param name="u32ChannelCount">Indicates the number of available audio channels in the offloaded audio stream.</param>
/// <param name="pf32PeakValues">Receives the peak values for the audio channels in the offloaded audio stream. The array must hold at least <paramref name="u32ChannelCount"/> elements.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreammeter-getmeteringdata
// HRESULT GetMeteringData( UINT32 u32ChannelCount, FLOAT32 *pf32PeakValues );
void GetMeteringData(uint u32ChannelCount, [Out, MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] float[] pf32PeakValues);
}
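// A hypothetical helper (untested) showing the two-step metering pattern: size the array from
// GetMeterChannelCount, then let GetMeteringData fill one peak value per channel.
private static float[] ReadPeakValues(IAudioEndpointOffloadStreamMeter meter)
{
    uint channels = meter.GetMeterChannelCount();
    var peaks = new float[channels];
    meter.GetMeteringData(channels, peaks);
    return peaks;
}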
/// <summary>The <c>IAudioEndpointOffloadStreamMute</c> interface allows a client to manipulate the mute status of the offloaded audio stream.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpointoffloadstreammute
[ComImport, Guid("DFE21355-5EC2-40E0-8D6B-710AC3C00249"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointOffloadStreamMute
{
/// <summary>
/// The <c>SetMute</c> method sets the mute status of the offloaded audio stream.
/// </summary>
/// <param name="bMuted">Indicates whether or not the offloaded audio stream is to be muted. A value of <c>TRUE</c> mutes the stream, and a value of <c>FALSE</c> sets the stream to a non-muted state.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreammute-setmute
// HRESULT SetMute( boolean bMuted );
void SetMute([MarshalAs(UnmanagedType.U1)] bool bMuted);
/// <summary>
/// The <c>GetMute</c> method retrieves the mute status of the offloaded audio stream.
/// </summary>
/// <returns><c>true</c> if the offloaded audio stream is muted; otherwise, <c>false</c>.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreammute-getmute
// HRESULT GetMute( boolean *pbMuted );
[return: MarshalAs(UnmanagedType.U1)]
bool GetMute();
}
/// <summary>The <c>IAudioEndpointOffloadStreamVolume</c> interface allows the client application to manipulate the volume level of the offloaded audio stream.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpointoffloadstreamvolume
[ComImport, Guid("64F1DD49-71CA-4281-8672-3A9EDDD1D0B6"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointOffloadStreamVolume
{
/// <summary>The <c>GetVolumeChannelCount</c> method retrieves the number of available audio channels in the offloaded stream.</summary>
/// <returns>The number of available audio channels in the offloaded stream.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreamvolume-getvolumechannelcount
// HRESULT GetVolumeChannelCount( UINT32 *pu32ChannelCount );
uint GetVolumeChannelCount();
/// <summary>
/// The <c>SetChannelVolumes</c> method sets the volume levels for the various audio channels in the offloaded stream.
/// </summary>
/// <param name="u32ChannelCount">Indicates the number of available audio channels in the offloaded stream.</param>
/// <param name="pf32Volumes">A pointer to the volume levels for the various audio channels in the offloaded stream.</param>
/// <param name="u32CurveType">A value from the AUDIO_CURVE_TYPE enumeration specifying the curve to use when changing the channel volumes.</param>
/// <param name="pCurveDuration">A <c>LONGLONG</c> value specifying the curve duration in hundred nanosecond units.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreamvolume-setchannelvolumes
// HRESULT SetChannelVolumes( UINT32 u32ChannelCount, FLOAT32 *pf32Volumes, AUDIO_CURVE_TYPE u32CurveType, HNSTIME *pCurveDuration );
void SetChannelVolumes(uint u32ChannelCount, [In, MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] float[] pf32Volumes, AUDIO_CURVE_TYPE u32CurveType, in long pCurveDuration);
/// <summary>
/// The <c>GetChannelVolumes</c> method retrieves the volume levels for the various audio channels in the offloaded stream.
/// </summary>
/// <param name="u32ChannelCount">Indicates the numer of available audio channels in the offloaded stream.</param>
/// <param name="pf32Volumes">A pointer to the volume levels for the various audio channels in the offloaded stream.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointoffloadstreamvolume-getchannelvolumes
// HRESULT GetChannelVolumes( UINT32 u32ChannelCount, FLOAT32 *pf32Volumes );
void GetChannelVolumes(uint u32ChannelCount, [Out, MarshalAs(UnmanagedType.LPArray, SizeParamIndex = 0)] float[] pf32Volumes);
}
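// A minimal sketch (assumption: the same level is wanted on every channel) showing how the channel count,
// per-channel volume array, curve type, and HNSTIME duration fit together in SetChannelVolumes.
private static void FadeAllChannelsTo(IAudioEndpointOffloadStreamVolume volume, float level, long durationHns)
{
    uint channels = volume.GetVolumeChannelCount();
    var levels = new float[channels];
    for (int i = 0; i < levels.Length; i++) levels[i] = level;
    // AUDIO_CURVE_TYPE_WINDOWS_FADE applies the system fade curve over the given 100-nanosecond duration.
    volume.SetChannelVolumes(channels, levels, AUDIO_CURVE_TYPE.AUDIO_CURVE_TYPE_WINDOWS_FADE, durationHns);
}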
/// <summary>
/// <para>Gets the difference between the current read and write positions in the endpoint buffer. The <c>IAudioEndpointRT</c> interface is used by the audio engine.</para>
/// <para><c>IAudioEndpointRT</c> methods can be called from a real-time processing thread. The implementation of the methods of this interface must not block, access paged memory, or call any blocking system routines.</para>
/// </summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioendpointrt
[PInvokeData("audioengineendpoint.h", MSDNShortId = "3fb05ce4-a3be-4c84-8e03-71213f453f74")]
[ComImport, Guid("DFD2005F-A6E5-4d39-A265-939ADA9FBB4D"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioEndpointRT
{
/// <summary>
/// The <c>GetCurrentPadding</c> method gets the amount, in 100-nanosecond units, of data that is queued up in the endpoint.
/// </summary>
/// <param name="pPadding">Receives the number of frames available in the endpoint buffer.</param>
/// <param name="pAeCurrentPosition">Receives information about the position of the current frame in the endpoint buffer in an AE_CURRENT_POSITION structure specified by the caller.</param>
/// <remarks>
/// <para>The audio engine uses this information to calculate the amount of data that requires processing. This calculation depends on the implementation. The value of the pPadding parameter specifies the number of audio frames that are queued up to play in the endpoint buffer. Before writing to the endpoint buffer, the audio engine can calculate the amount of available space in the buffer by subtracting the padding value from the buffer length. For a CaptureStream endpoint, the padding value reported by the <c>GetCurrentPadding</c> method specifies the number of frames of capture data that are available in the next packet in the endpoint buffer and that might be ready for the audio engine to read from the buffer.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointrt-getcurrentpadding
// void GetCurrentPadding( HNSTIME *pPadding, AE_CURRENT_POSITION *pAeCurrentPosition );
[PreserveSig]
void GetCurrentPadding(out long pPadding, out AE_CURRENT_POSITION pAeCurrentPosition);
/// <summary>The <c>ProcessingComplete</c> method notifies the endpoint that a processing pass has been completed.</summary>
/// <returns>None</returns>
/// <remarks>
/// <para>This method enables the audio engine to call into the endpoint to set an event that indicates that a processing pass had been completed and that there is audio data ready to be retrieved or passed to the endpoint device.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointrt-processingcomplete
// void ProcessingComplete();
[PreserveSig]
void ProcessingComplete();
/// <summary>
/// The <c>SetPinInactive</c> method notifies the endpoint that it must change the state of the underlying stream resources to an inactive state.
/// </summary>
/// <remarks>
/// <para>This method enables the audio engine to call into the endpoint to indicate that the endpoint can pause the underlying stream resources. In most cases, this method can simply return <c>S_OK</c>.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointrt-setpininactive
// HRESULT SetPinInactive();
void SetPinInactive();
/// <summary>
/// The <c>SetPinActive</c> method notifies the endpoint that it must change the state of the underlying streaming resources to an active state.
/// </summary>
/// <remarks>
/// <para>This method enables the audio engine to call into the endpoint to indicate that the endpoint must prepare any audio stream resources. In most cases, this method can simply return <c>S_OK</c>.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioendpointrt-setpinactive
// HRESULT SetPinActive();
void SetPinActive();
}
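// Hypothetical arithmetic (untested) from the GetCurrentPadding remarks above: before writing, the engine
// can compute the free space as the buffer length minus the queued padding. 'bufferFrames' is an assumption.
private static long WritableFrames(IAudioEndpointRT rt, long bufferFrames)
{
    rt.GetCurrentPadding(out long padding, out AE_CURRENT_POSITION position);
    // 'position' also reports the device/stream frame positions if the caller needs them.
    return bufferFrames - padding;
}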
/// <summary>Gets the input buffer for each processing pass. The <c>IAudioInputEndpointRT</c> interface is used by the audio engine.</summary>
/// <remarks>
/// <para><c>IAudioInputEndpointRT</c> methods can be called from a real-time processing thread. The implementation of the methods of this interface must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudioinputendpointrt
[PInvokeData("audioengineendpoint.h", MSDNShortId = "f9638dea-f61d-45f6-b91d-72e4fc1b4a92")]
[ComImport, Guid("8026AB61-92B2-43c1-A1DF-5C37EBD08D82"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioInputEndpointRT
{
/// <summary>The <c>GetInputDataPointer</c> method gets a pointer to the buffer from which data will be read by the audio engine.</summary>
/// <param name="pConnectionProperty">
/// <para>A pointer to an APO_CONNECTION_PROPERTY structure.</para>
/// <para>The caller sets the member values as follows:</para>
/// <list type="bullet">
/// <item>
/// <term><c>pBuffer</c> is set to <c>NULL</c>.</term>
/// </item>
/// <item>
/// <term><c>u32ValidFrameCount</c> contains the number of frames that need to be in the retrieved data pointer. The endpoint object must not cache this information. The audio engine can change this number depending on its processing needs.</term>
/// </item>
/// <item>
/// <term><c>u32BufferFlags</c> is set to <c>BUFFER_INVALID</c>.</term>
/// </item>
/// </list>
/// <para>If this call completes successfully, the endpoint must set the member values as follows:</para>
/// <list type="bullet">
/// <item>
/// <term><c>pBuffer</c> points to valid memory where the data has been read. This could include silence depending on the flags that were set in the <c>u32BufferFlags</c> member.</term>
/// </item>
/// <item>
/// <term><c>u32ValidFrameCount</c> is unchanged.</term>
/// </item>
/// <item>
/// <term><c>u32BufferFlags</c> is set to <c>BUFFER_VALID</c> if the data pointer contains valid data or to <c>BUFFER_SILENT</c> if the data pointer contains only silent data. The data in the buffer does not actually need to be silence, but the buffer specified in <c>pBuffer</c> must be capable of holding all the frames of silence contained in <c>u32ValidFrameCount</c> to match the required frame count.</term>
/// </item>
/// </list>
/// </param>
/// <param name="pAeTimeStamp">A pointer to an AE_CURRENT_POSITION structure that contains the time stamp of the data that is captured in the buffer. This parameter is optional.</param>
/// <returns>None</returns>
/// <remarks>
/// <para>This method returns a pointer from the endpoint to the buffer pConnectionProperty-&gt;<c>pBuffer</c>, which contains data that needs to be passed into the engine as input. The data and the buffer pointer must remain valid until the IAudioInputEndpointRT::ReleaseInputDataPointer method is called. The endpoint object must set the requested amount of information and insert silence if no valid data exists. The buffer pointer, pConnectionProperty-&gt;<c>pBuffer</c>, returned by the endpoint object must be frame aligned. Endpoints do not support the extra space, which may be available in the APO_CONNECTION_PROPERTY associated with the connection properties passed in the pConnectionProperty parameter.</para>
/// <para>Passing zero in the <c>u32ValidFrameCount</c> member is a valid request. In this case, the input pointer must be valid but the endpoint does not read from it. The pConnectionProperty-&gt;<c>u32ValidFrameCount</c> value must be less than or equal to the maximum frame count supported by the endpoint. To get the supported number of frames, call the IAudioEndpoint::GetFramesPerPacket method.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioinputendpointrt-getinputdatapointer
// void GetInputDataPointer( APO_CONNECTION_PROPERTY *pConnectionProperty, AE_CURRENT_POSITION *pAeTimeStamp );
[PreserveSig]
void GetInputDataPointer(ref APO_CONNECTION_PROPERTY pConnectionProperty, ref AE_CURRENT_POSITION pAeTimeStamp);
/// <summary>The <c>ReleaseInputDataPointer</c> method releases the acquired data pointer.</summary>
/// <param name="u32FrameCount">The number of frames that have been consumed by the audio engine. This count might not be the same as the value returned by the IAudioInputEndpointRT::GetInputDataPointer method in the pConnectionProperty-&gt;<c>u32ValidFrameCount</c> member.</param>
/// <param name="pDataPointer">The pointer to the buffer retrieved by the GetInputDataPointer method received in the pConnectionProperty-&gt;<c>pBuffer</c> member.</param>
/// <returns>None</returns>
/// <remarks>
/// <para><c>ReleaseInputDataPointer</c> notifies the endpoint that the audio engine no longer requires the input data pointer and also indicates the number of frames used during the session. For example, an endpoint, which represents a looped buffer, is connected to the input of the audio engine and can advance its read pointer by using the actual frame count. If <c>u32FrameCount</c> is zero, this indicates that the client did not use any data from the specified input buffer. The <c>u32FrameCount</c> must be less than or equal to the maximum frame count supported by the endpoint. To get the supported number of frames, the audio engine calls the IAudioEndpoint::GetFramesPerPacket method.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioinputendpointrt-releaseinputdatapointer
// void ReleaseInputDataPointer( UINT32 u32FrameCount, UINT_PTR pDataPointer );
[PreserveSig]
void ReleaseInputDataPointer(uint u32FrameCount, [In] IntPtr pDataPointer);
/// <summary>The <c>PulseEndpoint</c> method is reserved.</summary>
/// <returns>None</returns>
/// <remarks>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudioinputendpointrt-pulseendpoint
// void PulseEndpoint();
[PreserveSig]
void PulseEndpoint();
}
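// A hypothetical engine-side sketch (untested) of the acquire/release contract described above. The field
// names follow the documented APO_CONNECTION_PROPERTY members; everything else is a caller-supplied assumption.
private static void ReadOneInputPacket(IAudioInputEndpointRT input, uint frameCount)
{
    var prop = new APO_CONNECTION_PROPERTY
    {
        pBuffer = IntPtr.Zero,                            // must be NULL on entry
        u32ValidFrameCount = frameCount,                  // frames requested; <= IAudioEndpoint.GetFramesPerPacket()
        u32BufferFlags = APO_BUFFER_FLAGS.BUFFER_INVALID, // marked invalid before the call
    };
    var timeStamp = new AE_CURRENT_POSITION();
    input.GetInputDataPointer(ref prop, ref timeStamp);
    // ... read prop.u32ValidFrameCount frames from prop.pBuffer, treating BUFFER_SILENT as silence ...
    input.ReleaseInputDataPointer(frameCount, prop.pBuffer);
}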
/// <summary>The <c>IAudioLfxControl</c> interface allows the client to apply or remove local effects from the offloaded audio stream.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudiolfxcontrol
[PInvokeData("audioengineendpoint.h", MSDNShortId = "E4290AE9-7F2E-4D0B-BEAF-F01D95B3E03D")]
[ComImport, Guid("076A6922-D802-4F83-BAF6-409D9CA11BFE"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioLfxControl
{
/// <summary>
/// The <c>SetLocalEffectsState</c> method sets the local effects state that is to be applied to the offloaded audio stream.
/// </summary>
/// <param name="bEnabled">Indicates the local effects state that is to be applied to the offloaded audio stream. A value of <c>TRUE</c> enables local effects, and the local effects in the audio graph are applied to the stream. A value of <c>FALSE</c> disables local effects, so that the local effects in the audio graph are not applied to the audio stream.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudiolfxcontrol-setlocaleffectsstate
// HRESULT SetLocalEffectsState( BOOL bEnabled );
void SetLocalEffectsState([MarshalAs(UnmanagedType.Bool)] bool bEnabled);
/// <summary>The <c>GetLocalEffectsState</c> method retrieves the local effects state that is currently applied to the offloaded audio stream.</summary>
/// <returns>A Boolean value that indicates the state of the local effects that have been applied to the offloaded audio stream. A value of <c>TRUE</c> indicates that local effects have been enabled and applied to the stream. A value of <c>FALSE</c> indicates that local effects have been disabled.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudiolfxcontrol-getlocaleffectsstate
// HRESULT GetLocalEffectsState( BOOL *pbEnabled );
[return: MarshalAs(UnmanagedType.Bool)]
bool GetLocalEffectsState();
}
/// <summary>Gets the output buffer for each processing pass. The <c>IAudioOutputEndpointRT</c> interface is used by the audio engine.</summary>
/// <remarks>
/// <para><c>IAudioOutputEndpointRT</c> methods can be called from a real-time processing thread. The implementation of the methods of this interface must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-iaudiooutputendpointrt
[PInvokeData("audioengineendpoint.h", MSDNShortId = "b881b2f9-ffe9-46ff-94aa-eef0af172a3e")]
[ComImport, Guid("8FA906E4-C31C-4e31-932E-19A66385E9AA"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IAudioOutputEndpointRT
{
/// <summary>The <c>GetOutputDataPointer</c> method returns a pointer to the output buffer in which data will be written by the audio engine.</summary>
/// <param name="u32FrameCount">The number of frames in the output buffer pointed to by the data pointer that is returned by this method. The endpoint must not cache this information because this can be changed by the audio engine depending on its processing requirements. For more information, see Remarks.</param>
/// <param name="pAeTimeStamp">A pointer to an AE_CURRENT_POSITION structure that specifies the time stamp of the data that is rendered. This parameter is optional.</param>
/// <returns>A pointer to the buffer to which data will be written.</returns>
/// <remarks>
/// <para>This method returns a pointer to a buffer in which the audio engine writes data. The data is not valid until the IAudioOutputEndpointRT::ReleaseOutputDataPointer method is called. The returned pointer must be frame-aligned.</para>
/// <para>The frame count passed in <c>u32FrameCount</c> must be less than or equal to the maximum number of frames supported by the endpoint. To get the maximum frame count that the endpoint can support, the audio engine calls the IAudioEndpoint::GetFramesPerPacket method.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudiooutputendpointrt-getoutputdatapointer
// UINT_PTR GetOutputDataPointer( UINT32 u32FrameCount, AE_CURRENT_POSITION *pAeTimeStamp );
[PreserveSig]
IntPtr GetOutputDataPointer(uint u32FrameCount, [In] IntPtr pAeTimeStamp);
/// <summary>The <c>ReleaseOutputDataPointer</c> method releases the pointer to the output buffer.</summary>
/// <param name="pConnectionProperty">
/// <para>A pointer to an APO_CONNECTION_PROPERTY structure. The values in the structure must not be changed. The caller sets the members as follows:</para>
/// <list type="bullet">
/// <item>
/// <term><c>pBuffer</c> is set to the pointer to the output data buffer returned by the IAudioOutputEndpointRT::GetOutputDataPointer method.</term>
/// </item>
/// <item>
/// <term><c>u32ValidFrameCount</c> is set to the actual number of frames that have been generated by the audio engine. The value might not be the same as the frame count passed in the u32FrameCount parameter of the GetOutputDataPointer method.</term>
/// </item>
/// <item>
/// <term><c>u32BufferFlags</c> is set to <c>BUFFER_VALID</c> if the output buffer pointed to by the <c>pBuffer</c> member contains valid data. <c>u32BufferFlags</c> is set to <c>BUFFER_SILENT</c> if the output buffer contains only silent data. The data in the buffer does not actually need to be silence, but the buffer specified in the <c>pBuffer</c> member must be capable of holding all the frames of silence contained in the <c>u32ValidFrameCount</c> member. Therefore, if <c>BUFFER_SILENT</c> is specified, the endpoint should write silence in the output buffer.</term>
/// </item>
/// </list>
/// </param>
/// <returns>None</returns>
/// <remarks>
/// <para><c>ReleaseOutputDataPointer</c> notifies the endpoint that the audio engine has completed the task of writing data in the output buffer and no longer requires the data pointer. This method also relays information such as the time that corresponds to the audio samples in the output buffer, the number of frames generated by the audio engine, and whether the buffer is full of valid data or silence data. Based on this information, an endpoint that represents a looped buffer and is attached to the output of the audio engine can advance its write position in the buffer. A value of zero in the u32FrameCount parameter of the GetOutputDataPointer method indicates that the audio engine did not write any valid data in the output buffer. The u32FrameCount parameter value must be less than or equal to the frame count specified in <c>GetOutputDataPointer</c>. The endpoint must not assume that all data requested by <c>GetOutputDataPointer</c> was actually generated.</para>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudiooutputendpointrt-releaseoutputdatapointer
// void ReleaseOutputDataPointer( const APO_CONNECTION_PROPERTY *pConnectionProperty );
[PreserveSig]
void ReleaseOutputDataPointer(in APO_CONNECTION_PROPERTY pConnectionProperty);
/// <summary>
/// <para>The <c>PulseEndpoint</c> method is reserved.</para>
/// <para>This method is called by the audio engine at the end of a processing pass. The event handle is set by calling the IAudioEndpoint::SetEventHandle method.</para>
/// </summary>
/// <returns>None</returns>
/// <remarks>
/// <para>This method can be called from a real-time processing thread. The implementation of this method must not block, access paged memory, or call any blocking system routines.</para>
/// <para>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-iaudiooutputendpointrt-pulseendpoint
// void PulseEndpoint();
[PreserveSig]
void PulseEndpoint();
}
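// A hypothetical engine-side sketch (untested) of the render contract described above; the frame count and
// the buffer-filling step stand in for whatever the caller's processing pass actually produces.
private static void WriteOneOutputPacket(IAudioOutputEndpointRT output, uint frameCount)
{
    IntPtr buffer = output.GetOutputDataPointer(frameCount, IntPtr.Zero); // the timestamp is optional
    // ... write up to frameCount frames of audio into 'buffer' ...
    var prop = new APO_CONNECTION_PROPERTY
    {
        pBuffer = buffer,
        u32ValidFrameCount = frameCount,                // frames actually generated this pass
        u32BufferFlags = APO_BUFFER_FLAGS.BUFFER_VALID, // or BUFFER_SILENT if nothing audible was produced
    };
    output.ReleaseOutputDataPointer(prop);
}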
/// <summary>The <c>IHardwareAudioEngineBase</c> interface is implemented by audio endpoints for the audio stack to use to configure and retrieve information about the hardware audio engine.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nn-audioengineendpoint-ihardwareaudioenginebase
[PInvokeData("audioengineendpoint.h", MSDNShortId = "6FB9BEDB-111B-4F0A-B8BB-B0BA2024EB24")]
[ComImport, Guid("EDDCE3E4-F3C1-453a-B461-223563CBD886"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface IHardwareAudioEngineBase
{
/// <summary>The <c>GetAvailableOffloadConnectorCount</c> method retrieves the number of available endpoints that can handle offloaded streams on the hardware audio engine.</summary>
/// <param name="_pwstrDeviceId">A pointer to the device ID of the hardware audio engine device.</param>
/// <param name="_uConnectorId">The identifier for the endpoint connector.</param>
/// <returns>The number of available endpoint connectors that can handle offloaded audio streams.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-ihardwareaudioenginebase-getavailableoffloadconnectorcount
// HRESULT GetAvailableOffloadConnectorCount( LPWSTR _pwstrDeviceId, UINT32 _uConnectorId, UINT32 *_pAvailableConnectorInstanceCount );
uint GetAvailableOffloadConnectorCount([MarshalAs(UnmanagedType.LPWStr)] string _pwstrDeviceId, uint _uConnectorId);
/// <summary>The <c>GetEngineFormat</c> method retrieves the current data format of the offloaded audio stream.</summary>
/// <param name="pDevice">A pointer to an IMMDevice interface.</param>
/// <param name="_bRequestDeviceFormat">A Boolean variable that indicates whether or not the <c>IMMDevice</c> interface is being accessed to retrieve the device format.</param>
/// <param name="_ppwfxFormat">A pointer to a pointer to a WAVEFORMATEX structure that provides information about the hardware audio engine. This includes the waveform audio format type, the number of audio channels, and the sample rate of the audio engine.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-ihardwareaudioenginebase-getengineformat
// HRESULT GetEngineFormat( IMMDevice *pDevice, BOOL _bRequestDeviceFormat, WAVEFORMATEX **_ppwfxFormat );
void GetEngineFormat(IMMDevice pDevice, [MarshalAs(UnmanagedType.Bool)] bool _bRequestDeviceFormat, out SafeCoTaskMemHandle _ppwfxFormat);
/// <summary>
/// The <c>SetEngineDeviceFormat</c> method sets the waveform audio format for the hardware audio engine.
/// </summary>
/// <param name="pDevice">A pointer to an IMMDevice interface.</param>
/// <param name="_pwfxFormat">A pointer to a WAVEFORMATEX structure that provides information about the hardware audio engine.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-ihardwareaudioenginebase-setenginedeviceformat
// HRESULT SetEngineDeviceFormat( IMMDevice *pDevice, WAVEFORMATEX *_pwfxFormat );
void SetEngineDeviceFormat(IMMDevice pDevice, in WAVEFORMATEX _pwfxFormat);
/// <summary>
/// The <c>SetGfxState</c> method sets the GFX state of the offloaded audio stream.
/// </summary>
/// <param name="pDevice">Pointer to an IMMDevice interface.</param>
/// <param name="_bEnable">Pointer to a boolean variable.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-ihardwareaudioenginebase-setgfxstate
// HRESULT SetGfxState( IMMDevice *pDevice, BOOL _bEnable );
void SetGfxState(IMMDevice pDevice, [MarshalAs(UnmanagedType.Bool)] bool _bEnable);
/// <summary>The <c>GetGfxState</c> method retrieves the GFX state of the offloaded audio stream.</summary>
/// <param name="pDevice">Pointer to an IMMDevice interface.</param>
/// <returns>A Boolean value that indicates the GFX state of the offloaded audio stream: <c>true</c> if GFX processing is enabled; otherwise, <c>false</c>.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/nf-audioengineendpoint-ihardwareaudioenginebase-getgfxstate
// HRESULT GetGfxState( IMMDevice *pDevice, BOOL *_pbEnable );
[return: MarshalAs(UnmanagedType.Bool)]
bool GetGfxState(IMMDevice pDevice);
}
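// A small, hypothetical convenience wrapper (untested) for GetEngineFormat. ToStructure<T> is assumed from
// Vanara's SafeCoTaskMemHandle helpers; disposing the handle frees the WAVEFORMATEX allocated by the engine.
private static WAVEFORMATEX GetHardwareEngineFormat(IHardwareAudioEngineBase engine, IMMDevice device, bool requestDeviceFormat)
{
    engine.GetEngineFormat(device, requestDeviceFormat, out var fmtMem);
    using (fmtMem)
        return fmtMem.ToStructure<WAVEFORMATEX>();
}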
/// <summary>Reports the current frame position from the device to the clients.</summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audioengineendpoint/ns-audioengineendpoint-ae_current_position typedef struct
// AE_CURRENT_POSITION { UINT64 u64DevicePosition; UINT64 u64StreamPosition; UINT64 u64PaddingFrames; long hnsQPCPosition; float
// f32FramesPerSecond; AE_POSITION_FLAGS Flag; } AE_CURRENT_POSITION, *PAE_CURRENT_POSITION;
[PInvokeData("audioengineendpoint.h", MSDNShortId = "2e239114-1af7-455a-a60f-2054b05e1414")]
[StructLayout(LayoutKind.Sequential)]
public struct AE_CURRENT_POSITION
{
/// <summary>The device position, in frames.</summary>
public ulong u64DevicePosition;
/// <summary>
/// The stream position, in frames, used to determine the starting point for audio capture and the render device position
/// relative to the stream.
/// </summary>
public ulong u64StreamPosition;
/// <summary>The amount of padding, in frames, between the current position and the stream fill point.</summary>
public ulong u64PaddingFrames;
/// <summary>
/// A translated quality performance counter (QPC) timer value taken at the time that the <c>u64DevicePosition</c> member was checked.
/// </summary>
public long hnsQPCPosition;
/// <summary>The calculated data rate at the point in time the position was set.</summary>
public float f32FramesPerSecond;
/// <summary>A value of the AE_POSITION_FLAGS enumeration that indicates the validity of the position information.</summary>
public AE_POSITION_FLAGS Flag;
}
/// <summary>Contains creation parameters for the endpoint used in shared mode.</summary>
/// <remarks>The Remote Desktop Services AudioEndpoint API is for use in Remote Desktop scenarios; it is not for client applications.</remarks>
// https://docs.microsoft.com/en-us/previous-versions/dd408134(v=vs.85) typedef struct _AUDIO_ENDPOINT_SHARED_CREATE_PARAMS { uint
// u32Size; uint u32TSSessionId; EndpointConnectorType targetEndpointConnectorType; WAVEFORMATEX wfxDeviceFormat; }
// AUDIO_ENDPOINT_SHARED_CREATE_PARAMS, *PAUDIO_ENDPOINT_SHARED_CREATE_PARAMS;
[PInvokeData("Audioengineendpoint.h")]
[StructLayout(LayoutKind.Sequential)]
public struct AUDIO_ENDPOINT_SHARED_CREATE_PARAMS
{
/// <summary>The size of this structure.</summary>
public uint u32Size;
/// <summary>The session identifier.</summary>
public uint u32TSSessionId;
/// <summary>The type of the endpoint.</summary>
public EndpointConnectorType targetEndpointConnectorType;
/// <summary>The format of the device that is represented by the endpoint.</summary>
public WAVEFORMATEX wfxDeviceFormat;
}
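// A minimal initialization sketch (untested): u32Size is conventionally the marshaled size of the structure
// itself. The session id and device format are caller-supplied assumptions, and the connector type is left
// at its default here because the appropriate EndpointConnectorType member depends on the scenario.
private static AUDIO_ENDPOINT_SHARED_CREATE_PARAMS MakeSharedCreateParams(uint tsSessionId, in WAVEFORMATEX deviceFormat) =>
    new AUDIO_ENDPOINT_SHARED_CREATE_PARAMS
    {
        u32Size = (uint)Marshal.SizeOf(typeof(AUDIO_ENDPOINT_SHARED_CREATE_PARAMS)),
        u32TSSessionId = tsSessionId,
        targetEndpointConnectorType = default, // choose the EndpointConnectorType member for the target scenario
        wfxDeviceFormat = deviceFormat,
    };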
}
}

File diff suppressed because it is too large

View File

@ -0,0 +1,289 @@
using System;
namespace Vanara.PInvoke
{
/// <summary>Functions, structures and constants from Windows Core Audio Api.</summary>
public static partial class CoreAudio
{
/// <summary>
/// <para>
/// The AUDCLNT_SESSIONFLAGS_XXX constants indicate characteristics of an audio session associated with the stream. A client can
/// specify these options during the initialization of the stream through the StreamFlags parameter of the
/// <c>IAudioClient::Initialize</c> method.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/coreaudio/audclnt-sessionflags-xxx-constants
[PInvokeData("Audiosessiontypes.h", MSDNShortId = "5745d5bc-71e8-4b33-8227-c1c84226b6ee")]
public enum AUDCLNT_SESSIONFLAGS : uint
{
/// <summary>The session expires when there are no associated streams and owning session control objects holding references.</summary>
AUDCLNT_SESSIONFLAGS_EXPIREWHENUNOWNED = 0x10000000,
/// <summary>
/// The volume control is hidden in the volume mixer user interface when the audio session is created. If the session associated
/// with the stream already exists before IAudioClient::Initialize opens the stream, the volume control is displayed in the
/// volume mixer.
/// </summary>
AUDCLNT_SESSIONFLAGS_DISPLAY_HIDE = 0x20000000,
/// <summary>The volume control is hidden in the volume mixer user interface after the session expires.</summary>
AUDCLNT_SESSIONFLAGS_DISPLAY_HIDEWHENEXPIRED = 0x40000000,
}
/// <summary>
/// The <c>AUDCLNT_SHAREMODE</c> enumeration defines constants that indicate whether an audio stream will run in shared mode or in
/// exclusive mode.
/// </summary>
/// <remarks>
/// <para>
/// The IAudioClient::Initialize and IAudioClient::IsFormatSupported methods use the constants defined in the
/// <c>AUDCLNT_SHAREMODE</c> enumeration.
/// </para>
/// <para>
/// In shared mode, the client can share the audio endpoint device with clients that run in other user-mode processes. The audio
/// engine always supports formats for client streams that match the engine's mix format. In addition, the audio engine might
/// support another format if the Windows audio service can insert system effects into the client stream to convert the client
/// format to the mix format.
/// </para>
/// <para>
/// In exclusive mode, the Windows audio service attempts to establish a connection in which the client has exclusive access to the
/// audio endpoint device. In this mode, the audio engine inserts no system effects into the local stream to aid in the creation of
/// the connection point. Either the audio device can handle the specified format directly or the method fails.
/// </para>
/// <para>For more information about shared-mode and exclusive-mode streams, see User-Mode Audio Components.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audiosessiontypes/ne-audiosessiontypes-audclnt_sharemode typedef enum
// _AUDCLNT_SHAREMODE { AUDCLNT_SHAREMODE_SHARED, AUDCLNT_SHAREMODE_EXCLUSIVE } AUDCLNT_SHAREMODE;
[PInvokeData("audiosessiontypes.h", MSDNShortId = "f4870d0f-85d1-48ad-afe0-2f5a960c08fb")]
public enum AUDCLNT_SHAREMODE
{
/// <summary>The audio stream will run in shared mode. For more information, see Remarks.</summary>
AUDCLNT_SHAREMODE_SHARED,
/// <summary>The audio stream will run in exclusive mode. For more information, see Remarks.</summary>
AUDCLNT_SHAREMODE_EXCLUSIVE,
}
/// <summary>Specifies characteristics that a client can assign to an audio stream during the initialization of the stream.</summary>
/// <remarks>
/// The <c>IAudioClient::Initialize</c> method and the <c>DIRECTX_AUDIO_ACTIVATION_PARAMS</c> structure use the
/// AUDCLNT_STREAMFLAGS_XXX constants.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/coreaudio/audclnt-streamflags-xxx-constants
[PInvokeData("Audiosessiontypes.h", MSDNShortId = "7b2267c3-79f5-4ada-a7ce-78dd514f8487")]
[Flags]
public enum AUDCLNT_STREAMFLAGS : uint
{
/// <summary>
/// The audio stream will be a member of a cross-process audio session.
/// <para>
/// The AUDCLNT_STREAMFLAGS_CROSSPROCESS flag indicates that the audio session for the stream is a cross-process session. A
/// cross-process session can accept streams from more than one process. If two applications in two separate processes call
/// <c>IAudioClient::Initialize</c> with identical session GUIDs, and both applications set the AUDCLNT_STREAMFLAGS_CROSSPROCESS
/// flag, then the audio engine assigns their streams to the same cross-process session. This flag overrides the default
/// behavior, which is to assign the stream to a process-specific session rather than a cross-process session. The
/// AUDCLNT_STREAMFLAGS_CROSSPROCESS flag bit is incompatible with exclusive mode. For more information about cross-process
/// sessions, see Audio Sessions.
/// </para>
/// </summary>
AUDCLNT_STREAMFLAGS_CROSSPROCESS = 0x00010000,
/// <summary>
/// The audio stream will operate in loopback mode.
/// <para>
/// The AUDCLNT_STREAMFLAGS_LOOPBACK flag enables loopback recording. In loopback recording, the audio engine copies the audio
/// stream that is being played by a rendering endpoint device into an audio endpoint buffer so that a WASAPI client can capture
/// the stream. If this flag is set, the <c>IAudioClient::Initialize</c> method attempts to open a capture buffer on the
/// rendering device. This flag is valid only for a rendering device and only if the <c>Initialize</c> call sets the ShareMode
/// parameter to AUDCLNT_SHAREMODE_SHARED. Otherwise the <c>Initialize</c> call will fail. If the call succeeds, the client can
/// call the <c>IAudioClient::GetService</c> method to obtain an <c>IAudioCaptureClient</c> interface on the rendering device.
/// For more information, see Loopback Recording.
/// </para>
/// </summary>
AUDCLNT_STREAMFLAGS_LOOPBACK = 0x00020000,
/// <summary>
/// Processing of the audio buffer by the client will be event driven.
/// <para>
/// The AUDCLNT_STREAMFLAGS_EVENTCALLBACK flag enables event-driven buffering. If a client sets this flag in the call to
/// <c>IAudioClient::Initialize</c> that initializes a stream, the client must subsequently call the
/// <c>IAudioClient::SetEventHandle</c> method to supply an event handle for the stream. After the stream starts, the audio
/// engine will signal the event handle to notify the client each time a buffer becomes ready for the client to process. WASAPI
/// supports event-driven buffering for both rendering and capture buffers. Both shared-mode and exclusive-mode streams can use
/// event-driven buffering. For a code example that uses the AUDCLNT_STREAMFLAGS_EVENTCALLBACK flag, see Exclusive-Mode Streams.
/// </para>
/// </summary>
AUDCLNT_STREAMFLAGS_EVENTCALLBACK = 0x00040000,
/// <summary>
/// The volume and mute settings for an audio session will not persist across system restarts.
/// <para>
/// The AUDCLNT_STREAMFLAGS_NOPERSIST flag disables persistence of the volume and mute settings for a session that contains
/// rendering streams. By default, the volume level and muting state for a rendering session are persistent across system
/// restarts. The volume level and muting state for a capture session are never persistent. For more information about the
/// persistence of session volume and mute settings, see Audio Sessions.
/// </para>
/// </summary>
AUDCLNT_STREAMFLAGS_NOPERSIST = 0x00080000,
/// <summary>
/// This constant is new in Windows 7. The sample rate of the stream is adjusted to a rate specified by an application.
/// <para>
/// The AUDCLNT_STREAMFLAGS_RATEADJUST flag enables an application to get a reference to the <c>IAudioClockAdjustment</c>
/// interface that is used to set the sample rate for the stream. To get a pointer to this interface, an application must
/// initialize the audio client with this flag and then call <c>IAudioClient::GetService</c> by specifying the
/// <c>IID_IAudioClockAdjustment</c> identifier. To set the new sample rate, call <c>IAudioClockAdjustment::SetSampleRate</c>.
/// This flag is valid only for a rendering device. Otherwise the <c>GetService</c> call fails with the error code
/// AUDCLNT_E_WRONG_ENDPOINT_TYPE. The application must also set the ShareMode parameter to AUDCLNT_SHAREMODE_SHARED during the
/// <c>Initialize</c> call. <c>SetSampleRate</c> fails if the audio client is not in shared mode.
/// </para>
/// </summary>
AUDCLNT_STREAMFLAGS_RATEADJUST = 0x00100000,
/// <summary>
/// Prevents the render stream from being included in any application loopback streams. Note that this stream will continue to
/// be included in the endpoint loopback stream. This has no effect on Exclusive-Mode Streams. This constant is available
/// starting with Windows 10, version 1803.
/// </summary>
AUDCLNT_STREAMFLAGS_PREVENT_LOOPBACK_CAPTURE = 0x01000000,
/// <summary>
/// A channel matrixer and a sample rate converter are inserted as necessary to convert between the uncompressed format supplied
/// to IAudioClient::Initialize and the audio engine mix format.
/// </summary>
AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM = 0x80000000,
/// <summary>
/// When used with AUDCLNT_STREAMFLAGS_AUTOCONVERTPCM, a sample rate converter with better quality than the default conversion
/// but with a higher performance cost is used. This should be used if the audio is ultimately intended to be heard by humans as
/// opposed to other scenarios such as pumping silence or populating a meter.
/// </summary>
AUDCLNT_STREAMFLAGS_SRC_DEFAULT_QUALITY = 0x08000000,
}
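// For illustration only: a plausible combination for a shared-mode, event-driven render stream whose session
// volume should not persist across restarts. The constant name is hypothetical, not part of the API.
internal const AUDCLNT_STREAMFLAGS ExampleEventDrivenRenderFlags =
    AUDCLNT_STREAMFLAGS.AUDCLNT_STREAMFLAGS_EVENTCALLBACK | AUDCLNT_STREAMFLAGS.AUDCLNT_STREAMFLAGS_NOPERSIST;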
/// <summary>Specifies the category of an audio stream.</summary>
/// <remarks>
/// <para>Note that only a subset of the audio stream categories are valid for certain stream types.</para>
/// <list type="table">
/// <listheader>
/// <term>Stream type</term>
/// <term>Valid categories</term>
/// </listheader>
/// <item>
/// <term>Render stream</term>
/// <term>All categories are valid.</term>
/// </item>
/// <item>
/// <term>Capture stream</term>
/// <term>AudioCategory_Communications, AudioCategory_Speech, AudioCategory_Other</term>
/// </item>
/// <item>
/// <term>Loopback stream</term>
/// <term>AudioCategory_Other</term>
/// </item>
/// </list>
/// <para>
/// Games should categorize their music streams as <c>AudioCategory_GameMedia</c> so that game music mutes automatically if another
/// application plays music in the background. Music or video applications should categorize their streams as
/// <c>AudioCategory_Media</c> or <c>AudioCategory_Movie</c> so they will take priority over <c>AudioCategory_GameMedia</c> streams.
/// Game audio for in-game cinematics or cutscenes should also be categorized as <c>Media</c> or <c>Movie</c> when the audio is
/// premixed or when, for creative reasons, it should take priority over background audio.
/// </para>
/// <para>
/// The values <c>AudioCategory_ForegroundOnlyMedia</c> and <c>AudioCategory_BackgroundCapableMedia</c> are deprecated. For Windows
/// Store apps, these values will continue to function the same when running on Windows 10 as they did on Windows 8.1. Attempting to
/// use these values in a Universal Windows Platform (UWP) app will result in compilation errors and an exception at runtime. Using
/// these values in a Windows desktop application built with the Windows 10 SDK will result in a compilation error.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audiosessiontypes/ne-audiosessiontypes-audio_stream_category typedef enum
// _AUDIO_STREAM_CATEGORY { AudioCategory_Other, AudioCategory_ForegroundOnlyMedia, AudioCategory_BackgroundCapableMedia,
// AudioCategory_Communications, AudioCategory_Alerts, AudioCategory_SoundEffects, AudioCategory_GameEffects,
// AudioCategory_GameMedia, AudioCategory_GameChat, AudioCategory_Speech, AudioCategory_Movie, AudioCategory_Media } AUDIO_STREAM_CATEGORY;
[PInvokeData("audiosessiontypes.h", MSDNShortId = "B6B9195A-2704-4633-AFCF-B01CED6B6DB4")]
public enum AUDIO_STREAM_CATEGORY
{
/// <summary>Other audio stream.</summary>
AudioCategory_Other,
/// <summary>
/// Media that will only stream when the app is in the foreground. This enumeration value has been deprecated. For more
/// information, see the Remarks section.
/// </summary>
AudioCategory_ForegroundOnlyMedia,
/// <summary>
/// Media that can be streamed when the app is in the background. This enumeration value has been deprecated. For more
/// information, see the Remarks section.
/// </summary>
AudioCategory_BackgroundCapableMedia,
/// <summary>Real-time communications, such as VOIP or chat.</summary>
AudioCategory_Communications,
/// <summary>Alert sounds.</summary>
AudioCategory_Alerts,
/// <summary>Sound effects.</summary>
AudioCategory_SoundEffects,
/// <summary>Game sound effects.</summary>
AudioCategory_GameEffects,
/// <summary>Background audio for games.</summary>
AudioCategory_GameMedia,
/// <summary>
/// Game chat audio. Similar to AudioCategory_Communications except that AudioCategory_GameChat will not attenuate other streams.
/// </summary>
AudioCategory_GameChat,
/// <summary>Speech.</summary>
AudioCategory_Speech,
/// <summary>Stream that includes audio with dialog.</summary>
AudioCategory_Movie,
/// <summary>Stream that includes audio without dialog.</summary>
AudioCategory_Media,
}
/// <summary>The <c>AudioSessionState</c> enumeration defines constants that indicate the current state of an audio session.</summary>
/// <remarks>
/// <para>
/// When a client opens a session by assigning the first stream to the session (by calling the IAudioClient::Initialize method), the
/// initial session state is inactive. The session state changes from inactive to active when a stream in the session begins running
/// (because the client has called the IAudioClient::Start method). The session changes from active to inactive when the client
/// stops the last running stream in the session (by calling the IAudioClient::Stop method). The session state changes to expired
/// when the client destroys the last stream in the session by releasing all references to the stream object.
/// </para>
/// <para>
/// The system volume-control program, Sndvol, displays volume controls for both active and inactive sessions. Sndvol stops
/// displaying the volume control for a session when the session state changes to expired. For more information about Sndvol, see
/// Audio Sessions.
/// </para>
/// <para>
/// The IAudioSessionControl::GetState and IAudioSessionEvents::OnStateChanged methods use the constants defined in the
/// <c>AudioSessionState</c> enumeration.
/// </para>
/// <para>For more information about session states, see Audio Sessions.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/audiosessiontypes/ne-audiosessiontypes-audiosessionstate typedef enum
// _AudioSessionState { AudioSessionStateInactive, AudioSessionStateActive, AudioSessionStateExpired } AudioSessionState;
[PInvokeData("audiosessiontypes.h", MSDNShortId = "a972fed6-425f-46c8-b0cc-6538460bb104")]
public enum AudioSessionState
{
/// <summary>
/// The audio session is inactive. (It contains at least one stream, but none of the streams in the session is currently running.)
/// </summary>
AudioSessionStateInactive,
/// <summary>The audio session is active. (At least one of the streams in the session is running.)</summary>
AudioSessionStateActive,
/// <summary>The audio session has expired. (It contains no streams.)</summary>
AudioSessionStateExpired,
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

PInvoke/CoreAudio/MmReg.cs Normal file

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -0,0 +1,674 @@
using System;
using System.Runtime.InteropServices;
namespace Vanara.PInvoke
{
/// <summary>Functions, structures and constants from Windows Core Audio Api.</summary>
public static partial class CoreAudio
{
/// <summary>Specifies the shape in which sound is emitted by an ISpatialAudioObjectForHrtf.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ne-spatialaudiohrtf-spatialaudiohrtfdirectivitytype typedef
// enum SpatialAudioHrtfDirectivityType { SpatialAudioHrtfDirectivity_OmniDirectional, SpatialAudioHrtfDirectivity_Cardioid,
// SpatialAudioHrtfDirectivity_Cone } ;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "3A1426B5-F4FF-4CF0-9E0A-3096371B3D2E")]
public enum SpatialAudioHrtfDirectivityType
{
/// <summary>The sound is emitted in all directions.</summary>
SpatialAudioHrtfDirectivity_OmniDirectional,
/// <summary>The sound is emitted in a cardioid shape.</summary>
SpatialAudioHrtfDirectivity_Cardioid,
/// <summary>The sound is emitted in a cone shape.</summary>
SpatialAudioHrtfDirectivity_Cone,
}
/// <summary>
/// Specifies the type of decay applied over distance from the position of an ISpatialAudioObjectForHrtf to the position of the listener.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ne-spatialaudiohrtf-spatialaudiohrtfdistancedecaytype typedef
// enum SpatialAudioHrtfDistanceDecayType { SpatialAudioHrtfDistanceDecay_NaturalDecay, SpatialAudioHrtfDistanceDecay_CustomDecay } ;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "EF4ACEB1-E802-4337-AA76-467BCB90D7C6")]
public enum SpatialAudioHrtfDistanceDecayType
{
/// <summary>
/// A natural decay over distance, as constrained by minimum and maximum gain distance limits. The output drops to silent at the
/// distance specified by SpatialAudioHrtfDistanceDecay.CutoffDistance.
/// </summary>
SpatialAudioHrtfDistanceDecay_NaturalDecay,
/// <summary>A custom gain curve, within the maximum and minimum gain limit.</summary>
SpatialAudioHrtfDistanceDecay_CustomDecay,
}
/// <summary>Specifies the type of acoustic environment that is simulated when audio is processed for an ISpatialAudioObjectForHrtf.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ne-spatialaudiohrtf-spatialaudiohrtfenvironmenttype typedef
// enum SpatialAudioHrtfEnvironmentType { SpatialAudioHrtfEnvironment_Small, SpatialAudioHrtfEnvironment_Medium,
// SpatialAudioHrtfEnvironment_Large, SpatialAudioHrtfEnvironment_Outdoors, SpatialAudioHrtfEnvironment_Average } ;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "017FC8D4-2B74-4B13-AF5B-D7FFF97A7E45")]
public enum SpatialAudioHrtfEnvironmentType
{
/// <summary>A small room.</summary>
SpatialAudioHrtfEnvironment_Small,
/// <summary>A medium-sized room.</summary>
SpatialAudioHrtfEnvironment_Medium,
/// <summary>A large room.</summary>
SpatialAudioHrtfEnvironment_Large,
/// <summary>An outdoor space.</summary>
SpatialAudioHrtfEnvironment_Outdoors,
/// <summary>Reserved for Microsoft use. Apps should not use this value.</summary>
SpatialAudioHrtfEnvironment_Average,
}
/// <summary>
/// <para>
/// Represents an object that provides audio data to be rendered from a position in 3D space, relative to the user, using a
/// head-relative transfer function (HRTF). Spatial audio objects can be static or dynamic, which you specify with the type
/// parameter to the ISpatialAudioObjectRenderStreamForHrtf::ActivateSpatialAudioObjectForHrtf method. Dynamic audio objects can
/// be placed in an arbitrary position in space and can be moved over time. Static audio objects are assigned to one or more
/// channels, defined in the AudioObjectType enumeration, that each correlate to a fixed speaker location that may be a physical
/// or a virtualized speaker.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
/// <remarks>
/// <c>Note</c> Many of the methods provided by this interface are implemented in the inherited ISpatialAudioObjectBase interface.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nn-spatialaudiohrtf-ispatialaudioobjectforhrtf
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "E69F1D09-B937-4BCC-A040-18EF8A838289")]
[ComImport, Guid("D7436ADE-1978-4E14-ABA0-555BD8EB83B4"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioObjectForHrtf : ISpatialAudioObjectBase
{
/// <summary>Gets a buffer that is used to supply the audio data for the ISpatialAudioObject.</summary>
/// <param name="buffer">The buffer into which audio data is written.</param>
/// <param name="bufferLength">
/// The length of the buffer in bytes. This length will be the value returned in the frameCountPerBuffer parameter to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects multiplied by the value of the <c>nBlockAlign</c> field of the
/// WAVEFORMATEX structure passed in the SpatialAudioObjectRenderStreamActivationParams parameter to ISpatialAudioClient::ActivateSpatialAudioStream.
/// </param>
/// <remarks>
/// <para>
/// The first time <c>GetBuffer</c> is called after the ISpatialAudioObject is activated with a call to
/// ISpatialAudioObjectRenderStream::ActivateSpatialAudioObject, the lifetime of the spatial audio object starts. To keep the
/// spatial audio object alive after that, <c>GetBuffer</c> must be called on every processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects). If
/// <c>GetBuffer</c> is not called within an audio processing pass, SetEndOfStream is called implicitly on the audio object to
/// deactivate it, and the audio object can only be reused after calling Release on the object and then reactivating it by
/// calling <c>ActivateSpatialAudioObject</c> again.
/// </para>
/// <para>
/// The pointers retrieved by <c>GetBuffer</c> should not be used after ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects
/// has been called.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-getbuffer
// HRESULT GetBuffer( BYTE **buffer, UINT32 *bufferLength );
new void GetBuffer(out IntPtr buffer, out uint bufferLength);
/// <summary>
/// Instructs the system that the final block of audio data has been submitted for the ISpatialAudioObject so that the object
/// can be deactivated and its resources reused.
/// </summary>
/// <param name="frameCount">
/// The number of audio frames in the audio buffer that should be included in the final processing pass. This number may be
/// smaller than or equal to the value returned in the frameCountPerBuffer parameter to ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects.
/// </param>
/// <returns>
/// <para>
/// If the method succeeds, it returns S_OK. If it fails, possible return codes include, but are not limited to, the values
/// shown in the following table.
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>SPTLAUDCLNT_E_OUT_OF_ORDER</term>
/// <term>ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects was not called before the call to SetEndOfStream.</term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_RESOURCES_INVALIDATED</term>
/// <term>
/// SetEndOfStream was called either explicitly or implicitly in a previous audio processing pass. SetEndOfStream is called
/// implicitly by the system if GetBuffer is not called within an audio processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects).
/// </term>
/// </item>
/// </list>
/// </returns>
/// <remarks>Call Release after calling <c>SetEndOfStream</c> to free the audio object resources for future use.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-setendofstream
// HRESULT SetEndOfStream( UINT32 frameCount );
new void SetEndOfStream([In] uint frameCount);
/// <summary>Gets a boolean value indicating whether the ISpatialAudioObject is valid.</summary>
/// <returns><c>TRUE</c> if the audio object is currently valid; otherwise, <c>FALSE</c>.</returns>
/// <remarks>
/// <para>If this value is false, you should call Release to make the audio object resource available in the future.</para>
/// <para>
/// <c>IsActive</c> will be set to false after SetEndOfStream is called implicitly or explicitly. <c>SetEndOfStream</c> is
/// called implicitly by the system if GetBuffer is not called within an audio processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects).
/// </para>
/// <para>
/// The rendering engine will also deactivate the audio object, setting <c>IsActive</c> to false, when audio object resources
/// become unavailable. In this case, a notification is sent via ISpatialAudioObjectRenderStreamNotify before the object is
/// deactivated. The value returned in the availableDynamicObjectCount parameter to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects indicates how many objects will be processed for each pass.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-isactive
// HRESULT IsActive( BOOL *isActive );
[return: MarshalAs(UnmanagedType.Bool)]
new bool IsActive();
/// <summary>
/// Gets a value specifying the type of audio object that is represented by the ISpatialAudioObject. This value indicates if the
/// object is dynamic or static. If the object is static, one and only one of the static audio channel values to which the
/// object is assigned is returned.
/// </summary>
/// <returns>A value specifying the type of audio object that is represented.</returns>
/// <remarks>
/// Set the type of the audio object with the type parameter to the ISpatialAudioObjectRenderStream::ActivateSpatialAudioObject method.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-getaudioobjecttype
// HRESULT GetAudioObjectType( AudioObjectType *audioObjectType );
new AudioObjectType GetAudioObjectType();
/// <summary>
/// Sets the position in 3D space, relative to the listener, from which the ISpatialAudioObjectForHrtf audio data will be rendered.
/// </summary>
/// <param name="x">
/// The x position of the audio object, in meters, relative to the listener. Positive values are to the right of the listener
/// and negative values are to the left.
/// </param>
/// <param name="y">
/// The y position of the audio object, in meters, relative to the listener. Positive values are above the listener and negative
/// values are below.
/// </param>
/// <param name="z">
/// The z position of the audio object, in meters, relative to the listener. Positive values are behind the listener and
/// negative values are in front.
/// </param>
/// <remarks>
/// <para>
/// This method can only be called on an ISpatialAudioObjectForHrtf that is of type <c>AudioObjectType_Dynamic</c>. Set the type
/// of the audio object with the type parameter to the ISpatialAudioObjectRenderStreamForHrtf::ActivateSpatialAudioObjectForHrtf method.
/// </para>
/// <para>
/// Position values use a right-handed Cartesian coordinate system, where each unit represents 1 meter. The coordinate system is
/// relative to the listener where the origin (x=0.0, y=0.0, z=0.0) represents the center point between the listener's ears.
/// </para>
/// <para>
/// If <c>SetPosition</c> is never called, the origin (x=0.0, y=0.0, z=0.0) is used as the default position. After
/// <c>SetPosition</c> is called, the position that is set will be used for the audio object until the position is changed with
/// another call to <c>SetPosition</c>.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectforhrtf-setposition
// HRESULT SetPosition( float x, float y, float z );
void SetPosition([In] float x, [In] float y, [In] float z);
/// <summary>Sets the gain for the ISpatialAudioObjectForHrtf.</summary>
/// <param name="gain">The gain for the ISpatialAudioObjectForHrtf.</param>
/// <remarks>
/// <para>
/// This is valid only for spatial audio objects configured to use the SpatialAudioHrtfDistanceDecay_CustomDecay decay type. Set
/// the decay type of an ISpatialAudioObjectForHrtf object by calling SetDistanceDecay. Set the default decay type for all
/// objects in an HRTF render stream by setting the <c>DistanceDecay</c> field of the SpatialAudioHrtfActivationParams passed
/// into ISpatialAudioClient::ActivateSpatialAudioStream.
/// </para>
/// <para>
/// If <c>SetGain</c> is never called, the default value of 0.0 is used. After <c>SetGain</c> is called, the gain that is set
/// will be used for the audio object until the gain is changed with another call to <c>SetGain</c>.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectforhrtf-setgain
// HRESULT SetGain( float gain );
void SetGain([In] float gain);
/// <summary>
/// Sets the orientation in 3D space, relative to the listener's frame of reference, from which the ISpatialAudioObjectForHrtf
/// audio data will be rendered.
/// </summary>
/// <param name="orientation">An array of floats defining row-major 3x3 rotation matrix.</param>
/// <remarks>
/// If <c>SetOrientation</c> is never called, the default value of an identity matrix is used. After <c>SetOrientation</c> is
/// called, the orientation that is set will be used for the audio object until the orientation is changed with another call to <c>SetOrientation</c>.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectforhrtf-setorientation
// HRESULT SetOrientation( const SpatialAudioHrtfOrientation *orientation );
void SetOrientation(in SpatialAudioHrtfOrientation orientation);
/// <summary>Sets the type of acoustic environment that is simulated when audio is processed for the ISpatialAudioObjectForHrtf.</summary>
/// <param name="environment">
/// A value specifying the type of acoustic environment that is simulated when audio is processed for the ISpatialAudioObjectForHrtf.
/// </param>
/// <remarks>If <c>SetEnvironment</c> is not called, the default value of SpatialAudioHrtfEnvironment_Small is used.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectforhrtf-setenvironment
// HRESULT SetEnvironment( SpatialAudioHrtfEnvironmentType environment );
void SetEnvironment([In] SpatialAudioHrtfEnvironmentType environment);
/// <summary>
/// Sets the decay model that is applied over distance from the position of an ISpatialAudioObjectForHrtf to the position of the listener.
/// </summary>
/// <param name="distanceDecay">The decay model.</param>
/// <remarks>If <c>SetDistanceDecay</c> is not called, the default values are used.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectforhrtf-setdistancedecay
// HRESULT SetDistanceDecay( SpatialAudioHrtfDistanceDecay *distanceDecay );
void SetDistanceDecay(in SpatialAudioHrtfDistanceDecay distanceDecay);
/// <summary>Sets the spatial audio directivity model for the ISpatialAudioObjectForHrtf.</summary>
/// <param name="directivity">
/// <para>The spatial audio directivity model. This value can be one of the following structures:</para>
/// <list type="bullet">
/// <item>
/// <term>SpatialAudioHrtfDirectivity</term>
/// </item>
/// <item>
/// <term>SpatialAudioHrtfDirectivityCardioid</term>
/// </item>
/// <item>
/// <term>SpatialAudioHrtfDirectivityCone</term>
/// </item>
/// </list>
/// </param>
/// <remarks>
/// <para>
/// The SpatialAudioHrtfDirectivity structure represents an omnidirectional model that can be linearly interpolated with a
/// cardioid or cone model.
/// </para>
/// <para>
/// If <c>SetDirectivity</c> is not called, the default type of SpatialAudioHrtfDirectivity_OmniDirectional is used with no interpolation.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectforhrtf-setdirectivity
// HRESULT SetDirectivity( SpatialAudioHrtfDirectivityUnion *directivity );
void SetDirectivity(in SpatialAudioHrtfDirectivityUnion directivity);
}
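/* Example (hypothetical sketch): feeding one processing pass of a dynamic HRTF object.
   'hrtfObject' is assumed to be an ISpatialAudioObjectForHrtf activated with
   AudioObjectType_Dynamic, and 'pcmData' a byte[] holding exactly one pass of audio in the
   stream's WAVEFORMATEX format; both names are illustrative only.

   hrtfObject.SetPosition(1.0f, 0.0f, -2.0f); // one meter to the right, two meters in front
   hrtfObject.SetEnvironment(SpatialAudioHrtfEnvironmentType.SpatialAudioHrtfEnvironment_Medium);
   hrtfObject.GetBuffer(out IntPtr buffer, out uint bufferLength);
   Marshal.Copy(pcmData, 0, buffer, (int)Math.Min(bufferLength, (uint)pcmData.Length));
*/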
/// <summary>
/// <para>
/// Provides methods for controlling an Hrtf spatial audio object render stream, including starting, stopping, and resetting the
/// stream. Also provides methods for activating new ISpatialAudioObjectForHrtf instances and notifying the system when you are
/// beginning and ending the process of updating activated spatial audio objects and data.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
/// <remarks>
/// <c>Note</c> Many of the methods provided by this interface are implemented in the inherited ISpatialAudioObjectRenderStreamBase interface.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nn-spatialaudiohrtf-ispatialaudioobjectrenderstreamforhrtf
[ComImport, Guid("E08DEEF9-5363-406E-9FDC-080EE247BBE0"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioObjectRenderStreamForHrtf : ISpatialAudioObjectRenderStreamBase
{
/// <summary>Gets the number of dynamic spatial audio objects that are currently available.</summary>
/// <returns>The number of dynamic spatial audio objects that are currently available.</returns>
/// <remarks>
/// <para>
/// A dynamic ISpatialAudioObject is one that was activated by setting the type parameter to the ActivateSpatialAudioObject
/// method to <c>AudioObjectType_Dynamic</c>. The system has a limit of the maximum number of dynamic spatial audio objects that
/// can be activated at one time. Call Release on an <c>ISpatialAudioObject</c> when it is no longer being used to free up the
/// resource to create new dynamic spatial audio objects.
/// </para>
/// <para>
/// You should not call this method after streaming has started, as the value is already provided by
/// ISpatialAudioObjectRenderStreamBase::BeginUpdatingAudioObjects. This method should only be called before streaming has
/// started, which occurs after ISpatialAudioObjectRenderStreamBase::Start is called.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-getavailabledynamicobjectcount
// HRESULT GetAvailableDynamicObjectCount( UINT32 *value );
new uint GetAvailableDynamicObjectCount();
/// <summary>Gets additional services from the <c>ISpatialAudioObjectRenderStream</c>.</summary>
/// <param name="riid">
/// <para>The interface ID for the requested service. The client should set this parameter to one of the following REFIID values:</para>
/// <para>IID_IAudioClock</para>
/// <para>IID_IAudioClock2</para>
/// <para>IID_IAudioStreamVolume</para>
/// </param>
/// <param name="service">
/// Pointer to a pointer variable into which the method writes the address of an instance of the requested interface. Through
/// this method, the caller obtains a counted reference to the interface. The caller is responsible for releasing the interface,
/// when it is no longer needed, by calling the interface's Release method. If the <c>GetService</c> call fails, the service
/// parameter is set to NULL.
/// </param>
/// <returns>
/// <para>
/// If the method succeeds, it returns S_OK. If it fails, possible return codes include, but are not limited to, the values
/// shown in the following table.
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>E_POINTER</term>
/// <term>The service parameter is NULL.</term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_DESTROYED</term>
/// <term>The ISpatialAudioClient associated with the spatial audio stream has been destroyed.</term>
/// </item>
/// <item>
/// <term>AUDCLNT_E_DEVICE_INVALIDATED</term>
/// <term>
/// The audio endpoint device has been unplugged, or the audio hardware or associated hardware resources have been reconfigured,
/// disabled, removed, or otherwise made unavailable for use.
/// </term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_INTERNAL</term>
/// <term>An internal error has occurred.</term>
/// </item>
/// <item>
/// <term>AUDCLNT_E_UNSUPPORTED_FORMAT</term>
/// <term>The media associated with the spatial audio stream uses an unsupported format.</term>
/// </item>
/// </list>
/// </returns>
/// <remarks>
/// <para>The <c>GetService</c> method supports the following service interfaces:</para>
/// <list type="bullet">
/// <item>
/// <term>IAudioClock</term>
/// </item>
/// <item>
/// <term>IAudioClock2</term>
/// </item>
/// <item>
/// <term>IAudioStreamVolume</term>
/// </item>
/// </list>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-getservice
// HRESULT GetService( REFIID riid, void **service );
[PreserveSig]
new HRESULT GetService(in Guid riid, [MarshalAs(UnmanagedType.IUnknown, IidParameterIndex = 0)] out object service);
/// <summary>Starts the spatial audio stream.</summary>
/// <remarks>
/// <para>
/// Starting the stream causes data flow between the endpoint buffer and the audio engine. The first time this method is called,
/// the stream's audio clock position will be at 0. Otherwise, the clock resumes from its position at the time that the stream
/// was last paused with a call to Stop. Call Reset to reset the clock position to 0 and cause all active ISpatialAudioObject
/// instances to be revoked.
/// </para>
/// <para>The stream must have been previously stopped with a call to Stop or the method will fail and return SPTLAUDCLNT_E_STREAM_NOT_STOPPED.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-start
// HRESULT Start();
new void Start();
/// <summary>Stops a running audio stream.</summary>
/// <remarks>
/// Stopping the stream causes data to stop flowing between the endpoint buffer and the audio engine. You can think of this
/// operation as pausing the stream because it leaves the stream's audio clock at its current stream position and does not reset
/// it to 0. A subsequent call to Start causes the stream to resume running from the current position. Call Reset to reset the
/// clock position to 0 and cause all active ISpatialAudioObject instances to be revoked.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-stop
// HRESULT Stop();
new void Stop();
/// <summary>Reset a stopped audio stream.</summary>
/// <remarks>
/// <para>
/// Resetting the audio stream flushes all pending data and resets the audio clock stream position to 0. Resetting the stream
/// also causes all active ISpatialAudioObject instances to be revoked. A subsequent call to Start causes the stream to start
/// from position 0.
/// </para>
/// <para>The stream must have been previously stopped with a call to Stop or the method will fail and return SPTLAUDCLNT_E_STREAM_NOT_STOPPED.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-reset
// HRESULT Reset();
new void Reset();
/// <summary>
/// Puts the system into the state where audio object data can be submitted for processing and the ISpatialAudioObject state can
/// be modified.
/// </summary>
/// <param name="availableDynamicObjectCount">
/// The number of dynamic audio objects that are available to be rendered for the current processing pass. All allocated static
/// audio objects can be rendered in every pass. For information on audio object types, see AudioObjectType.
/// </param>
/// <param name="frameCountPerBuffer">The size, in audio frames, of the buffer returned by GetBuffer.</param>
/// <remarks>
/// <para>
/// This method must be called each time the event passed in the SpatialAudioObjectRenderStreamActivationParams parameter to
/// ISpatialAudioClient::ActivateSpatialAudioStream is signaled, even if there is no audio object data to submit.
/// </para>
/// <para>
/// For each <c>BeginUpdatingAudioObjects</c> call, there should be a corresponding call to EndUpdatingAudioObjects. If
/// <c>BeginUpdatingAudioObjects</c> is called twice without a call to <c>EndUpdatingAudioObjects</c> between them, the second call
/// to <c>BeginUpdatingAudioObjects</c> will return SPTLAUDCLNT_E_OUT_OF_ORDER.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-beginupdatingaudioobjects
// HRESULT BeginUpdatingAudioObjects( UINT32 *availableDynamicObjectCount, UINT32 *frameCountPerBuffer );
new void BeginUpdatingAudioObjects(out uint availableDynamicObjectCount, out uint frameCountPerBuffer);
/// <summary>
/// Notifies the system that the app has finished supplying audio data for the spatial audio objects activated with ActivateSpatialAudioObject.
/// </summary>
/// <remarks>The pointers retrieved with ISpatialAudioObjectBase::GetBuffer can no longer be used after this method is called.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-endupdatingaudioobjects
// HRESULT EndUpdatingAudioObjects();
new void EndUpdatingAudioObjects();
/// <summary>Activates an ISpatialAudioObjectForHrtf for audio rendering.</summary>
/// <param name="type">
/// The type of audio object to activate. For dynamic audio objects, this value must be <c>AudioObjectType_Dynamic</c>. For
/// static audio objects, specify one of the static audio channel values from the enumeration. Specifying
/// <c>AudioObjectType_None</c> will produce an audio object that is not spatialized.
/// </param>
/// <returns>Receives a pointer to the activated interface.</returns>
/// <remarks>
/// A dynamic ISpatialAudioObjectForHrtf is one that was activated by setting the type parameter to the
/// <c>ActivateSpatialAudioObjectForHrtf</c> method to <c>AudioObjectType_Dynamic</c>. The client has a limit of the maximum
/// number of dynamic spatial audio objects that can be activated at one time. After the limit has been reached, attempting to
/// activate additional audio objects will result in this method returning an SPTLAUDCLNT_E_NO_MORE_OBJECTS error. To avoid
/// this, call Release on each dynamic <c>ISpatialAudioObjectForHrtf</c> after it is no longer being used to free up the
/// resource so that it can be reallocated. See ISpatialAudioObjectBase::IsActive and ISpatialAudioObjectBase::SetEndOfStream
/// for more information on managing the lifetime of spatial audio objects.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/nf-spatialaudiohrtf-ispatialaudioobjectrenderstreamforhrtf-activatespatialaudioobjectforhrtf
// HRESULT ActivateSpatialAudioObjectForHrtf( AudioObjectType type, ISpatialAudioObjectForHrtf **audioObject );
ISpatialAudioObjectForHrtf ActivateSpatialAudioObjectForHrtf([In] AudioObjectType type);
}
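/* Example (hypothetical sketch): a minimal render loop for an HRTF stream. 'stream' is assumed to
   be an ISpatialAudioObjectRenderStreamForHrtf returned by ISpatialAudioClient activation, and
   'bufferEvent' a System.Threading.AutoResetEvent wrapping the HANDLE supplied in the activation
   parameters; 'rendering' is an illustrative loop flag.

   stream.Start();
   while (rendering)
   {
       bufferEvent.WaitOne(); // signaled by the audio engine once per processing pass
       stream.BeginUpdatingAudioObjects(out uint dynamicObjectCount, out uint frameCountPerBuffer);
       // ...call GetBuffer/SetPosition on each active ISpatialAudioObjectForHrtf here...
       stream.EndUpdatingAudioObjects();
   }
   stream.Stop();
   stream.Reset(); // optional: rewind the clock to 0 and revoke all active objects
*/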
/// <summary>Specifies the activation parameters for an ISpatialAudioRenderStreamForHrtf.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ns-spatialaudiohrtf-spatialaudiohrtfactivationparams typedef
// struct SpatialAudioHrtfActivationParams { const WAVEFORMATEX *ObjectFormat; AudioObjectType StaticObjectTypeMask; UINT32
// MinDynamicObjectCount; UINT32 MaxDynamicObjectCount; AUDIO_STREAM_CATEGORY Category; HANDLE EventHandle;
// ISpatialAudioObjectRenderStreamNotify *NotifyObject; SpatialAudioHrtfDistanceDecay *DistanceDecay;
// SpatialAudioHrtfDirectivityUnion *Directivity; SpatialAudioHrtfEnvironmentType *Environment; SpatialAudioHrtfOrientation
// *Orientation; } SpatialAudioHrtfActivationParams;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "6A549BFB-993A-4A20-AFAB-B38D03EAE35C")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioHrtfActivationParams
{
/// <summary>
/// Format descriptor for spatial audio objects associated with the stream. All objects must have the same format and must be of
/// type WAVEFORMATEX or WAVEFORMATEXTENSIBLE.
/// </summary>
public IntPtr ObjectFormat;
/// <summary>
/// A bitwise combination of <c>AudioObjectType</c> values indicating the set of static spatial audio channels that will be
/// allowed by the activated stream.
/// </summary>
public AudioObjectType StaticObjectTypeMask;
/// <summary>
/// The minimum number of concurrent dynamic objects. If this number of dynamic audio objects can't be activated simultaneously,
/// no dynamic audio objects will be activated.
/// </summary>
public uint MinDynamicObjectCount;
/// <summary>The maximum number of concurrent dynamic objects that can be activated with ISpatialAudioRenderStreamForHrtf.</summary>
public uint MaxDynamicObjectCount;
/// <summary>The category of the audio stream and its spatial audio objects.</summary>
public AUDIO_STREAM_CATEGORY Category;
/// <summary>
/// The event that will signal the client to provide more audio data. This handle will be duplicated internally before it is used.
/// </summary>
public IntPtr EventHandle;
/// <summary>
/// The object that provides notifications for spatial audio clients to respond to changes in the state of an
/// ISpatialAudioRenderStreamForHrtf. This object is used to notify clients that the number of dynamic spatial audio objects
/// that can be activated concurrently is about to change.
/// </summary>
public IntPtr NotifyObject;
/// <summary>
/// Optional default value for the decay model used for ISpatialAudioObjectForHrtf objects associated with the stream.
/// <c>nullptr</c> if unused.
/// </summary>
public IntPtr DistanceDecay;
/// <summary>
/// Optional default value for the spatial audio directivity model used for ISpatialAudioObjectForHrtf objects associated with
/// the stream. <c>nullptr</c> if unused.
/// </summary>
public IntPtr Directivity;
/// <summary>
/// Optional default value for the type of environment that is simulated when audio is processed for ISpatialAudioObjectForHrtf
/// objects associated with the stream. <c>nullptr</c> if unused.
/// </summary>
public IntPtr Environment;
/// <summary>
/// Optional default value for the orientation of ISpatialAudioObjectForHrtf objects associated with the stream. <c>nullptr</c>
/// if unused.
/// </summary>
public IntPtr Orientation;
}
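/* Example (hypothetical sketch): filling in SpatialAudioHrtfActivationParams. The WAVEFORMATEX
   ('format') must live in unmanaged memory for the duration of the activation call; 'format' and
   'bufferEvent' are illustrative, and the category value is one assumed to exist in this
   library's AUDIO_STREAM_CATEGORY enum.

   IntPtr formatPtr = Marshal.AllocHGlobal(Marshal.SizeOf(format));
   Marshal.StructureToPtr(format, formatPtr, false);
   var activationParams = new SpatialAudioHrtfActivationParams
   {
       ObjectFormat = formatPtr,
       StaticObjectTypeMask = AudioObjectType.AudioObjectType_None, // dynamic objects only
       MinDynamicObjectCount = 1,
       MaxDynamicObjectCount = 16,
       Category = AUDIO_STREAM_CATEGORY.AudioCategory_GameEffects,
       EventHandle = bufferEvent.SafeWaitHandle.DangerousGetHandle(),
       DistanceDecay = IntPtr.Zero, // optional overrides left unused
   };
*/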
/// <summary>
/// Represents an omnidirectional model for an ISpatialAudioObjectForHrtf. The omnidirectional emission is interpolated linearly
/// with the directivity model specified in the <c>Type</c> field based on the value of the <c>Scaling</c> field.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ns-spatialaudiohrtf-spatialaudiohrtfdirectivity typedef
// struct SpatialAudioHrtfDirectivity { SpatialAudioHrtfDirectivityType Type; float Scaling; } SpatialAudioHrtfDirectivity;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "A3D149E0-F2C1-47C7-8858-35C5F51C7F75")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioHrtfDirectivity
{
/// <summary>The type of shape in which sound is emitted by an ISpatialAudioObjectForHrtf.</summary>
public SpatialAudioHrtfDirectivityType Type;
/// <summary>
/// The amount of linear interpolation applied between omnidirectional sound and the directivity specified in the <c>Type</c>
/// field. This is a normalized value between 0 and 1.0 where 0 is omnidirectional and 1.0 is full directivity using the
/// specified type.
/// </summary>
public float Scaling;
}
/// <summary>Represents a cardioid-shaped directivity model for an ISpatialAudioObjectForHrtf.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ns-spatialaudiohrtf-spatialaudiohrtfdirectivitycardioid
// typedef struct SpatialAudioHrtfDirectivityCardioid { SpatialAudioHrtfDirectivity directivity; float Order; } SpatialAudioHrtfDirectivityCardioid;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "71E2E152-14DC-472B-B582-82D4412EAA85")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioHrtfDirectivityCardioid
{
/// <summary>A structure that expresses the direction in which sound is emitted by an ISpatialAudioObjectForHrtf.</summary>
public SpatialAudioHrtfDirectivity directivity;
/// <summary>The order of the cardioid.</summary>
public float Order;
}
/// <summary>Represents a cone-shaped directivity model for an ISpatialAudioObjectForHrtf.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ns-spatialaudiohrtf-spatialaudiohrtfdirectivitycone typedef
// struct SpatialAudioHrtfDirectivityCone { SpatialAudioHrtfDirectivity directivity; float InnerAngle; float OuterAngle; } SpatialAudioHrtfDirectivityCone;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "C34F26C2-4979-4C06-8EAC-64547745238F")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioHrtfDirectivityCone
{
/// <summary>A structure that expresses the direction in which sound is emitted by an ISpatialAudioObjectForHrtf.</summary>
public SpatialAudioHrtfDirectivity directivity;
/// <summary>The inner angle of the cone.</summary>
public float InnerAngle;
/// <summary>The outer angle of the cone.</summary>
public float OuterAngle;
}
/// <summary>Defines a spatial audio directivity model for an ISpatialAudioObjectForHrtf.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ns-spatialaudiohrtf-spatialaudiohrtfdirectivityunion typedef
// union SpatialAudioHrtfDirectivityUnion { SpatialAudioHrtfDirectivityCone Cone; SpatialAudioHrtfDirectivityCardioid Cardiod;
// SpatialAudioHrtfDirectivity Omni; } SpatialAudioHrtfDirectivityUnion;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "BBBE4B0B-59C2-44E0-9BB4-B10CE5CE12E3")]
[StructLayout(LayoutKind.Explicit)]
public struct SpatialAudioHrtfDirectivityUnion
{
/// <summary>A cone-shaped directivity model</summary>
[FieldOffset(0)]
public SpatialAudioHrtfDirectivityCone Cone;
/// <summary>A cardioid-shaped directivity model.</summary>
[FieldOffset(0)]
public SpatialAudioHrtfDirectivityCardioid Cardiod;
/// <summary>An omni-direction directivity model that can be interpolated linearly with one of the other directivity models.</summary>
[FieldOffset(0)]
public SpatialAudioHrtfDirectivity Omni;
}
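/* Example (hypothetical sketch): a cone directivity blended halfway with the omnidirectional
   model, applied to an HRTF object; 'hrtfObject' is illustrative.

   var directivity = new SpatialAudioHrtfDirectivityUnion
   {
       Cone = new SpatialAudioHrtfDirectivityCone
       {
           directivity = new SpatialAudioHrtfDirectivity
           {
               Type = SpatialAudioHrtfDirectivityType.SpatialAudioHrtfDirectivity_Cone,
               Scaling = 0.5f, // 0 = fully omnidirectional, 1.0 = fully cone-shaped
           },
           InnerAngle = 30.0f,
           OuterAngle = 60.0f,
       }
   };
   hrtfObject.SetDirectivity(directivity);
*/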
/// <summary>
/// Represents the decay model that is applied over distance from the position of an ISpatialAudioObjectForHrtf to the position of
/// the listener.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiohrtf/ns-spatialaudiohrtf-spatialaudiohrtfdistancedecay typedef
// struct SpatialAudioHrtfDistanceDecay { SpatialAudioHrtfDistanceDecayType Type; float MaxGain; float MinGain; float
// UnityGainDistance; float CutoffDistance; } SpatialAudioHrtfDistanceDecay;
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "2EBAE322-2A5F-4610-B64F-F1B8CE2DFD2D")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioHrtfDistanceDecay
{
/// <summary>The type of decay, natural or custom. The default value for this field is <c>SpatialAudioHrtfDistanceDecay_NaturalDecay</c>.</summary>
public SpatialAudioHrtfDistanceDecayType Type;
/// <summary>The maximum gain limit, in decibels, applied over distance.</summary>
public float MaxGain;
/// <summary>The minimum gain limit, in decibels, applied over distance.</summary>
public float MinGain;
/// <summary>The distance, in meters, at which the gain is 0 dB.</summary>
public float UnityGainDistance;
/// <summary>The distance, in meters, beyond which the output is silent.</summary>
public float CutoffDistance;
}
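/* Example (hypothetical sketch): a custom decay curve. The field values below are illustrative,
   not API defaults; SetGain only has an effect once this decay type is applied to the object.

   var decay = new SpatialAudioHrtfDistanceDecay
   {
       Type = SpatialAudioHrtfDistanceDecayType.SpatialAudioHrtfDistanceDecay_CustomDecay,
       MaxGain = 12.0f,          // upper gain limit, in dB
       MinGain = -96.0f,         // lower gain limit, in dB
       UnityGainDistance = 1.0f, // distance, in meters, at which gain is 0 dB
       CutoffDistance = 50.0f,   // distance, in meters, beyond which output is silent
   };
   hrtfObject.SetDistanceDecay(decay);
*/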
/// <summary>Represents the orientation of an <c>ISpatialAudioObjectForHrtf</c>.</summary>
// https://docs.microsoft.com/en-us/windows/win32/coreaudio/spatialaudiohrtforientation
[PInvokeData("spatialaudiohrtf.h", MSDNShortId = "BDC1C409-F461-4903-A411-3F0647C59DBA")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioHrtfOrientation
{
/// <summary>A row-major 3x3 rotation matrix.</summary>
[MarshalAs(UnmanagedType.ByValArray, SizeConst = 9)]
public float[] Orientation;
}
}
}

View File

@ -0,0 +1,998 @@
using System;
using System.Runtime.InteropServices;
namespace Vanara.PInvoke
{
/// <summary>Functions, structures and constants from Windows Core Audio Api.</summary>
public static partial class CoreAudio
{
/// <summary>Specifies the copy mode used when calling ISpatialAudioMetadataCopier::CopyMetadataForFrames.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/ne-spatialaudiometadata-spatialaudiometadatacopymode
// typedef enum SpatialAudioMetadataCopyMode { SpatialAudioMetadataCopy_Overwrite, SpatialAudioMetadataCopy_Append,
// SpatialAudioMetadataCopy_AppendMergeWithLast, SpatialAudioMetadataCopy_AppendMergeWithFirst } ;
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "2E9C2C66-26EB-43E8-A518-25980B287542")]
public enum SpatialAudioMetadataCopyMode
{
/// <summary>
/// Creates a direct copy of the number of metadata items specified with the copyFrameCount parameter into the destination buffer,
/// overwriting any previously existing data.
/// </summary>
SpatialAudioMetadataCopy_Overwrite,
/// <summary>
/// Performs an append operation which will fail if the resulting ISpatialAudioMetadataItemsBuffer has too many items.
/// </summary>
SpatialAudioMetadataCopy_Append,
/// <summary>
/// Performs an append operation, and if overflow occurs, extra items are merged into the last item, adopting the last merged
/// item's offset value.
/// </summary>
SpatialAudioMetadataCopy_AppendMergeWithLast,
/// <summary>
/// Performs an append operation, and if overflow occurs, extra items are merged, assigning the offset to the offset of the
/// first non-overflow item.
/// </summary>
SpatialAudioMetadataCopy_AppendMergeWithFirst,
}
/// <summary>
/// Specifies the desired behavior when an ISpatialAudioMetadataWriter attempts to write more items into the metadata buffer than
/// was specified when the client was initialized.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/ne-spatialaudiometadata-spatialaudiometadatawriteroverflowmode
// typedef enum SpatialAudioMetadataWriterOverflowMode { SpatialAudioMetadataWriterOverflow_Fail,
// SpatialAudioMetadataWriterOverflow_MergeWithNew, SpatialAudioMetadataWriterOverflow_MergeWithLast } ;
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "B61C8D75-FCC3-42A6-84DE-01DBA7492962")]
public enum SpatialAudioMetadataWriterOverflowMode
{
/// <summary>The write operation will fail.</summary>
SpatialAudioMetadataWriterOverflow_Fail,
/// <summary>
/// The write operation will succeed; the overflow item will be merged with the previous item and adopt the frame offset of the
/// newest item.
/// </summary>
SpatialAudioMetadataWriterOverflow_MergeWithNew,
/// <summary>
/// The write operation will succeed; the overflow item will be merged with the previous item and keep the existing frame offset.
/// </summary>
SpatialAudioMetadataWriterOverflow_MergeWithLast,
}
/// <summary>
/// <para>
/// Provides a class factory for creating ISpatialAudioMetadataItems, ISpatialAudioMetadataWriter, ISpatialAudioMetadataReader, and
/// ISpatialAudioMetadataCopier objects. When an <c>ISpatialAudioMetadataItems</c> is activated, a metadata format ID is specified,
/// which defines the metadata format enforced for all objects created from this factory. If the specified format is not supported
/// by the current audio render endpoint, the class factory will not successfully activate the interface and will return an error.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudiometadataclient
[ComImport, Guid("777D4A3B-F6FF-4A26-85DC-68D7CDEDA1D4"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioMetadataClient
{
/// <summary>Creates an ISpatialAudioMetadataItems object for storing spatial audio metadata items.</summary>
/// <param name="maxItemCount">The maximum number of metadata items that can be stored in the returned ISpatialAudioMetadataItems.</param>
/// <param name="frameCount">The valid range of frame offset positions for metadata items stored in the returned ISpatialAudioMetadataItems.</param>
/// <param name="metadataItemsBuffer">
/// If a pointer is supplied, returns an ISpatialAudioMetadataItemsBuffer interface which provides methods for attaching
/// caller-provided memory for storage of metadata items. If this parameter is NULL, the object will allocate internal storage
/// for the items. This interface cannot be obtained via QueryInterface.
/// </param>
/// <param name="metadataItems">
/// Receives an instance of an ISpatialAudioMetadataItems object, which can be populated with metadata items by an
/// ISpatialAudioMetadataWriter or ISpatialAudioMetadataCopier and can be read with an ISpatialAudioMetadataReader.
/// </param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataclient-activatespatialaudiometadataitems
// HRESULT ActivateSpatialAudioMetadataItems( UINT16 maxItemCount, UINT16 frameCount, ISpatialAudioMetadataItemsBuffer
// **metadataItemsBuffer, ISpatialAudioMetadataItems **metadataItems );
void ActivateSpatialAudioMetadataItems([In] ushort maxItemCount, [In] ushort frameCount, out ISpatialAudioMetadataItemsBuffer metadataItemsBuffer, out ISpatialAudioMetadataItems metadataItems);
/// <summary>
/// Gets the length of the buffer required to store the specified number of spatial audio metadata items. Use this method to
/// determine the correct buffer size to use when attaching caller-provided memory through the ISpatialAudioMetadataItemsBuffer interface.
/// </summary>
/// <param name="maxItemCount">The maximum number of metadata items to be stored in an ISpatialAudioMetadataItems object.</param>
/// <returns>
/// The length of the buffer required to store the number of spatial audio metadata items specified in the maxItemCount parameter.
/// </returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataclient-getspatialaudiometadataitemsbufferlength
// HRESULT GetSpatialAudioMetadataItemsBufferLength( UINT16 maxItemCount, UINT32 *bufferLength );
uint GetSpatialAudioMetadataItemsBufferLength([In] ushort maxItemCount);
/// <summary>
/// Creates an ISpatialAudioMetadataWriter object for writing spatial audio metadata items to an ISpatialAudioMetadataItems object.
/// </summary>
/// <param name="overflowMode">
/// A value that specifies the behavior when attempting to write more metadata items to the ISpatialAudioMetadataItems than the
/// maximum number of items specified when calling ActivateSpatialAudioMetadataItems.
/// </param>
/// <returns>Receives a pointer to an instance of ISpatialAudioMetadataWriter.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataclient-activatespatialaudiometadatawriter
// HRESULT ActivateSpatialAudioMetadataWriter( SpatialAudioMetadataWriterOverflowMode overflowMode, ISpatialAudioMetadataWriter
// **metadataWriter );
ISpatialAudioMetadataWriter ActivateSpatialAudioMetadataWriter([In] SpatialAudioMetadataWriterOverflowMode overflowMode);
/// <summary>
/// Creates an ISpatialAudioMetadataCopier object for copying spatial audio metadata items from one ISpatialAudioMetadataItems
/// object to another.
/// </summary>
/// <returns>Receives a pointer to an instance of ISpatialAudioMetadataCopier.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataclient-activatespatialaudiometadatacopier
// HRESULT ActivateSpatialAudioMetadataCopier( ISpatialAudioMetadataCopier **metadataCopier );
ISpatialAudioMetadataCopier ActivateSpatialAudioMetadataCopier();
/// <summary>
/// Creates an ISpatialAudioMetadataReader object for reading spatial audio metadata items from an ISpatialAudioMetadataItems object.
/// </summary>
/// <returns>Receives a pointer to an instance of ISpatialAudioMetadataReader.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataclient-activatespatialaudiometadatareader
// HRESULT ActivateSpatialAudioMetadataReader( ISpatialAudioMetadataReader **metadataReader );
ISpatialAudioMetadataReader ActivateSpatialAudioMetadataReader();
}
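/* Example (hypothetical sketch): activating the metadata helper objects. 'metadataClient' stands
   in for an ISpatialAudioMetadataClient obtained from a metadata-capable spatial audio stream;
   the item and frame counts are illustrative.

   metadataClient.ActivateSpatialAudioMetadataItems(16, 480, out var itemsBuffer, out var items);
   ISpatialAudioMetadataWriter writer = metadataClient.ActivateSpatialAudioMetadataWriter(
       SpatialAudioMetadataWriterOverflowMode.SpatialAudioMetadataWriterOverflow_Fail);
   ISpatialAudioMetadataCopier copier = metadataClient.ActivateSpatialAudioMetadataCopier();
   ISpatialAudioMetadataReader reader = metadataClient.ActivateSpatialAudioMetadataReader();
*/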
/// <summary>
/// <para>
/// Provides methods for copying all or subsets of metadata items from a source SpatialAudioMetadataItems into a destination
/// <c>SpatialAudioMetadataItems</c>. The <c>SpatialAudioMetadataItems</c> object, which is populated using an
/// ISpatialAudioMetadataWriter or <c>ISpatialAudioMetadataCopier</c>, has a frame count, specified with the frameCount parameter to
/// ActivateSpatialAudioMetadataItems, that represents the valid range of metadata item offsets. <c>ISpatialAudioMetadataReader</c>
/// enables copying groups of items within a subrange of the total frame count. The object maintains an internal read position,
/// which is advanced by the number of frames specified when a copy operation is performed.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudiometadatacopier
[ComImport, Guid("D224B233-E251-4FD0-9CA2-D5ECF9A68404"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioMetadataCopier
{
/// <summary>Opens an ISpatialAudioMetadataItems object for copying.</summary>
/// <param name="metadataItems">A pointer to an ISpatialAudioMetadataItems object to be opened for copying</param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatacopier-open
// HRESULT Open( ISpatialAudioMetadataItems *metadataItems );
void Open([In] ISpatialAudioMetadataItems metadataItems);
/// <summary>
/// Copies metadata items from the source ISpatialAudioMetadataItems, provided to the Open method, object to the destination
/// <c>ISpatialAudioMetadataItems</c> object, specified with the dstMetadataItems parameter. Each call advances the internal
/// copy position by the number of frames in the copyFrameCount parameter.
/// </summary>
/// <param name="copyFrameCount">
/// The number of frames from the current copy position for which metadata items are copied. After the copy, the internal copy
/// position within the source <c>SpatialAudioMetadataItems</c> is advanced the value specified in this parameter. Set this
/// value to 0 to copy the entire frame range contained in the source <c>SpatialAudioMetadataItems</c>.
/// </param>
/// <param name="copyMode">A value that specifies the copy mode for the operation.</param>
/// <param name="dstMetadataItems">A pointer to the destination <c>SpatialAudioMetadataItems</c> for the copy operation.</param>
/// <param name="itemsCopied">Receives number of metadata items copied in the operation.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatacopier-copymetadataforframes
// HRESULT CopyMetadataForFrames( UINT16 copyFrameCount, SpatialAudioMetadataCopyMode copyMode, ISpatialAudioMetadataItems
// *dstMetadataItems, UINT16 *itemsCopied );
void CopyMetadataForFrames([In] ushort copyFrameCount, [In] SpatialAudioMetadataCopyMode copyMode, [In] ISpatialAudioMetadataItems dstMetadataItems, out ushort itemsCopied);
/// <summary>Completes any necessary operations on the SpatialAudioMetadataItems object and releases the object.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatacopier-close
// HRESULT Close();
void Close();
}
/// <summary>
/// <para>
/// Represents a buffer of spatial audio metadata items. Metadata commands and values can be written to, read from, and copied
/// between ISpatialAudioMetadataItems using the ISpatialAudioMetadataWriter, ISpatialAudioMetadataReader, and
/// ISpatialAudioMetadataCopier interfaces. Use caller-allocated memory to store metadata items by creating an ISpatialAudioMetadataItemsBuffer.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
/// <remarks>Get an instance of this interface by calling ISpatialAudioMetadataClient::ActivateSpatialAudioMetadataItems.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudiometadataitems
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "54A6B7DE-A41E-4214-AF02-CC19250B9037")]
[ComImport, Guid("BCD7C78F-3098-4F22-B547-A2F25A381269"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioMetadataItems
{
/// <summary>Gets the total frame count of the ISpatialAudioMetadataItems, which defines valid item offsets.</summary>
/// <returns>The total frame count.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitems-getframecount
// HRESULT GetFrameCount( UINT16 *frameCount );
ushort GetFrameCount();
/// <summary>The current number of items stored by the ISpatialAudioMetadataItems.</summary>
/// <returns>The current number of stored items.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitems-getitemcount
// HRESULT GetItemCount( UINT16 *itemCount );
ushort GetItemCount();
/// <summary>The maximum number of items allowed by the ISpatialAudioMetadataItems, defined when the object is created.</summary>
/// <returns>The maximum number of items allowed.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitems-getmaxitemcount
// HRESULT GetMaxItemCount( UINT16 *maxItemCount );
ushort GetMaxItemCount();
/// <summary>The size of the largest command value defined by the metadata format for the ISpatialAudioMetadataItems.</summary>
/// <returns>The size of the largest command value defined by the metadata format.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitems-getmaxvaluebufferlength
// HRESULT GetMaxValueBufferLength( UINT32 *maxValueBufferLength );
uint GetMaxValueBufferLength();
/// <summary>Gets information about the ISpatialAudioMetadataItems object.</summary>
/// <returns>A SpatialAudioMetadataItemsInfo structure describing the ISpatialAudioMetadataItems object.</returns>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitems-getinfo
// HRESULT GetInfo( SpatialAudioMetadataItemsInfo *info );
SpatialAudioMetadataItemsInfo GetInfo();
}
/// <summary>
/// <para>
/// Provides methods for attaching buffers to SpatialAudioMetadataItems for in-place storage of data. Get an instance of this object
/// by passing a pointer to the interface into ActivateSpatialAudioMetadataItems. The buffer will be associated with the returned
/// <c>SpatialAudioMetadataItems</c>. This interface allows you to attach a buffer and reset its contents to the empty set of
/// metadata items or attach a previously-populated buffer and retain the data stored in the buffer.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudiometadataitemsbuffer
[ComImport, Guid("42640A16-E1BD-42D9-9FF6-031AB71A2DBA"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioMetadataItemsBuffer
{
/// <summary>Attaches caller-provided memory for storage of ISpatialAudioMetadataItems objects.</summary>
/// <param name="buffer">A pointer to memory to use for storage.</param>
/// <param name="bufferLength">
/// The length of the supplied buffer. This size must match the length required for the metadata format and maximum metadata
/// item count.
/// </param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitemsbuffer-attachtobuffer
// HRESULT AttachToBuffer( BYTE *buffer, UINT32 bufferLength );
void AttachToBuffer([Out] IntPtr buffer, [In] uint bufferLength);
/// <summary>
/// Attaches a previously populated buffer for storage of ISpatialAudioMetadataItems objects. The metadata items already in the
/// buffer are retained.
/// </summary>
/// <param name="buffer">A pointer to memory to use for storage.</param>
/// <param name="bufferLength">
/// The length of the supplied buffer. This size must match the length required for the metadata format and maximum metadata
/// item count.
/// </param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitemsbuffer-attachtopopulatedbuffer
// HRESULT AttachToPopulatedBuffer( BYTE *buffer, UINT32 bufferLength );
void AttachToPopulatedBuffer([Out] IntPtr buffer, [In] uint bufferLength);
/// <summary>Detaches the buffer. Memory can only be attached to a single metadata item at a time.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadataitemsbuffer-detachbuffer
// HRESULT DetachBuffer();
void DetachBuffer();
}
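/* Example (hypothetical sketch): backing an ISpatialAudioMetadataItems with caller-allocated
   memory. The required length comes from
   ISpatialAudioMetadataClient::GetSpatialAudioMetadataItemsBufferLength; 'metadataClient' and
   'itemsBuffer' are illustrative.

   const ushort maxItemCount = 16;
   uint length = metadataClient.GetSpatialAudioMetadataItemsBufferLength(maxItemCount);
   IntPtr storage = Marshal.AllocHGlobal((int)length);
   itemsBuffer.AttachToBuffer(storage, length); // resets the buffer to an empty item set
   // ...write and read metadata items...
   itemsBuffer.DetachBuffer();
   Marshal.FreeHGlobal(storage);
*/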
/// <summary>
/// <para>
/// Provides methods for extracting spatial audio metadata items and item command value pairs from an ISpatialAudioMetadataItems
/// object. The <c>SpatialAudioMetadataItems</c> object, which is populated using an ISpatialAudioMetadataWriter or
/// ISpatialAudioMetadataCopier, has a frame count, specified with the frameCount parameter to ActivateSpatialAudioMetadataItems,
/// that represents the valid range of metadata item offsets. <c>ISpatialAudioMetadataReader</c> enables reading back groups of
/// items within a subrange of the total frame count. The object maintains an internal read position, which is advanced by the
/// number of frames specified when read operation is performed.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio, which includes integrated
/// spatial sound on Xbox and Windows.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudiometadatareader
[ComImport, Guid("B78E86A2-31D9-4C32-94D2-7DF40FC7EBEC"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioMetadataReader
{
/// <summary>Opens an ISpatialAudioMetadataItems object for reading.</summary>
/// <param name="metadataItems">A pointer to an ISpatialAudioMetadataItems object to be opened for reading</param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatareader-open
// HRESULT Open( ISpatialAudioMetadataItems *metadataItems );
void Open([In] ISpatialAudioMetadataItems metadataItems);
/// <summary>Gets the number of commands and the sample offset for the metadata item being read.</summary>
/// <param name="commandCount">Receives the number of command/value pairs in the metadata item being read.</param>
/// <param name="frameOffset">Gets the frame offset associated with the metadata item being read.</param>
/// <remarks>
/// <para>
/// Before calling <c>ReadNextItem</c>, you must open the ISpatialAudioMetadataReader for reading by calling Open after the
/// object is created and after Close has been called. You must also call ReadItemCountInFrames before calling <c>ReadNextItem</c>.
/// </para>
/// <para>
/// The ISpatialAudioMetadataReader keeps an internal pointer to the current position within the total range of frames contained
/// by the ISpatialAudioMetadataItems with which the reader is associated. Each call to ReadItemCountInFrames causes the pointer
/// to be advanced by the number of frames specified in its readFrameCount parameter.
/// </para>
/// <para>
/// The process for reading commands and the associated values is recursive. After each call to <c>ReadItemCountInFrames</c>,
/// call <c>ReadNextItem</c> to get the number of commands in the next item. After every call to <c>ReadNextItem</c>, call
/// ReadNextItemCommand to read each command for the item. Repeat this process until the entire frame range of the
/// <c>ISpatialAudioMetadataItems</c> has been read.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatareader-readnextitem
// HRESULT ReadNextItem( UINT8 *commandCount, UINT16 *frameOffset );
void ReadNextItem(out byte commandCount, out ushort frameOffset);
/// <summary>Reads metadata commands and value data for the current item.</summary>
/// <param name="commandID">Receives the command ID for the current command.</param>
/// <param name="valueBuffer">
/// A pointer to a buffer which receives data specific to the command as specified by the metadata format definition. The buffer
/// must be at least maxValueBufferLength to ensure all commands can be successfully retrieved.
/// </param>
/// <param name="maxValueBufferLength">The maximum size of a command value.</param>
/// <param name="valueBufferLength">The size, in bytes, of the data written to the valueBuffer parameter.</param>
/// <remarks>
/// <para>
/// Before calling <c>ReadNextItemCommand</c>, you must open the ISpatialAudioMetadataReader for reading by calling Open after
/// the object is created and after Close has been called. You must also call ReadItemCountInFrames and then call ReadNextItem
/// before calling <c>ReadNextItemCommand</c>.
/// </para>
/// <para>
/// The ISpatialAudioMetadataReader keeps an internal pointer to the current position within the total range of frames contained
/// by the ISpatialAudioMetadataItems with which the reader is associated. Each call to this method causes the pointer to be
/// advanced by the number of frames specified in the readFrameCount parameter.
/// </para>
/// <para>
/// The process for reading commands and the associated values is recursive. After each call to <c>ReadItemCountInFrames</c>,
/// call ReadNextItem to get the number of commands in the next item. After every call to <c>ReadNextItem</c>, call
/// <c>ReadNextItemCommand</c> to read each command for the item. Repeat this process until the entire frame range of the
/// <c>ISpatialAudioMetadataItems</c> has been read.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatareader-readnextitemcommand
// HRESULT ReadNextItemCommand( BYTE *commandID, void *valueBuffer, UINT32 maxValueBufferLength, UINT32 *valueBufferLength );
void ReadNextItemCommand(out byte commandID, [In] IntPtr valueBuffer, [In] uint maxValueBufferLength, out uint valueBufferLength);
/// <summary>Completes any necessary operations on the SpatialAudioMetadataItems object and releases the object.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatareader-close
// HRESULT Close();
void Close();
}
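// A hedged, untested sketch of the read-back protocol described in the remarks above:
// Open, ReadItemCountInFrames, then ReadNextItem/ReadNextItemCommand for each item, and
// finally Close. 'reader', 'items', 'readFrameCount', 'valueBuffer', and 'maxLen' are
// illustrative names; 'valueBuffer' is assumed to be at least MaxValueBufferLength bytes.
//
// reader.Open(items);
// reader.ReadItemCountInFrames(readFrameCount, out ushort itemCount);
// for (ushort i = 0; i < itemCount; i++)
// {
//     reader.ReadNextItem(out byte commandCount, out ushort frameOffset);
//     for (byte c = 0; c < commandCount; c++)
//         reader.ReadNextItemCommand(out byte commandID, valueBuffer, maxLen, out uint valueLen);
// }
// reader.Close();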
/// <summary>
/// <para>
/// Provides methods for storing spatial audio metadata items positioned within a range of corresponding audio frames. Each metadata
/// item has a zero-based offset position within the specified frame. Each item can contain one or more commands specific to the
/// metadata format ID provided in the SpatialAudioObjectRenderStreamForMetadataActivationParams when the
/// ISpatialAudioMetadataClient was created. This object does not allocate storage for the metadata it is provided; the caller is
/// expected to manage the allocation of memory used to store the packed data. Multiple metadata items can be placed in the
/// ISpatialAudioMetadataItems object. For each item, call WriteNextItem followed by a call to WriteNextItemCommand.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio which includes integrated spatial
/// sound on Xbox and Windows.
/// </para>
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudiometadatawriter
[ComImport, Guid("1B17CA01-2955-444D-A430-537DC589A844"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioMetadataWriter
{
/// <summary>Opens an ISpatialAudioMetadataItems object for writing.</summary>
/// <param name="metadataItems">A pointer to an ISpatialAudioMetadataItems object to be opened for writing.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatawriter-open
// HRESULT Open( ISpatialAudioMetadataItems *metadataItems );
void Open([In] ISpatialAudioMetadataItems metadataItems);
/// <summary>Starts a new metadata item at the specified offset.</summary>
/// <param name="frameOffset">The frame offset of the item within the range specified with the frameCount parameter to ActivateSpatialAudioMetadataItems.</param>
/// <remarks>
/// <para>
/// Before calling <c>WriteNextItem</c>, you must open the ISpatialAudioMetadataWriter for writing by calling Open after the
/// object is created and after Close has been called. During a writing session demarcated by calls to <c>Open</c> and
/// <c>Close</c>, the value of the frameOffset parameter must be greater than the value in the preceding call.
/// </para>
/// <para>
/// Within a single writing session, you must not use <c>WriteNextItem</c> to write more items than the value supplied in the
/// <c>MaxMetadataItemCount</c> field in the SpatialAudioObjectRenderStreamForMetadataActivationParam passed into
/// ISpatialAudioClient::ActivateSpatialAudioStream or an SPTLAUD_MD_CLNT_E_FRAMEOFFSET_OUT_OF_RANGE error will occur.
/// </para>
/// <para>
/// If the overflow mode is set to <c>SpatialAudioMetadataWriterOverflow_Fail</c>, the value of the frameOffset parameter must
/// be less than the value of the frameCount parameter to ActivateSpatialAudioMetadataItems or an
/// SPTLAUD_MD_CLNT_E_FRAMEOFFSET_OUT_OF_RANGE error will occur.
/// </para>
/// <para>After calling <c>WriteNextItem</c>, call WriteNextItemCommand to write metadata commands and value data for the item.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatawriter-writenextitem
// HRESULT WriteNextItem( UINT16 frameOffset );
void WriteNextItem([In] ushort frameOffset);
/// <summary>Writes metadata commands and value data to the current item.</summary>
/// <param name="commandID">
/// A command supported by the metadata format of the object. The call will fail if the command is not defined by the metadata
/// format. Each command can only be written once per item.
/// Each command can only be written once per item.
/// </param>
/// <param name="valueBuffer">
/// A pointer to a buffer which stores data specific to the command as specified by the metadata format definition.
/// </param>
/// <param name="valueBufferLength">
/// The size, in bytes, of the command data supplied in the valueBuffer parameter. The size must match the command definition
/// specified by the metadata format or the call will fail.
/// </param>
/// <remarks>
/// You must open the ISpatialAudioMetadataWriter for writing by calling Open, and set the current metadata item offset by
/// calling WriteNextItem before calling <c>WriteNextItemCommand</c>.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatawriter-writenextitemcommand
// HRESULT WriteNextItemCommand( BYTE commandID, const void *valueBuffer, UINT32 valueBufferLength );
void WriteNextItemCommand([In] byte commandID, [In] IntPtr valueBuffer, [In] uint valueBufferLength);
/// <summary>
/// Completes any needed operations on the metadata buffer and releases the specified ISpatialAudioMetadataItems object.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudiometadatawriter-close
// HRESULT Close();
void Close();
}
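// An illustrative, untested writing session following the remarks above. 'writer' and
// 'items' are assumed to come from ISpatialAudioMetadataClient; 'cmd', 'valuePtr', and
// 'valueLen' must match a command defined by the stream's metadata format.
//
// writer.Open(items);
// writer.WriteNextItem(0); // frame offsets must increase within a writing session
// writer.WriteNextItemCommand(cmd, valuePtr, valueLen);
// writer.Close();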
/// <summary>
/// <para>
/// Used to write metadata commands for spatial audio. Valid commands and value lengths are defined by the metadata format specified
/// in the SpatialAudioObjectRenderStreamForMetadataActivationParams when the ISpatialAudioObjectRenderStreamForMetadata was created.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio which includes integrated spatial
/// sound on Xbox and Windows.
/// </para>
/// </summary>
/// <remarks>
/// <c>Note</c> Many of the methods provided by this interface are implemented in the inherited ISpatialAudioObjectBase interface.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudioobjectformetadatacommands
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "B142D5CC-7321-4F3C-804D-50E728C37D10")]
[ComImport, Guid("0DF2C94B-F5F9-472D-AF6B-C46E0AC9CD05"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioObjectForMetadataCommands : ISpatialAudioObjectBase
{
/// <summary>Gets a buffer that is used to supply the audio data for the ISpatialAudioObject.</summary>
/// <param name="buffer">The buffer into which audio data is written.</param>
/// <param name="bufferLength">
/// The length of the buffer in bytes. This length will be the value returned in the frameCountPerBuffer parameter to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects multiplied by the value of the <c>nBlockAlign</c> field of the
/// WAVEFORMATEX structure passed in the SpatialAudioObjectRenderStreamActivationParams parameter to ISpatialAudioClient::ActivateSpatialAudioStream.
/// </param>
/// <remarks>
/// <para>
/// The first time <c>GetBuffer</c> is called after the ISpatialAudioObject is activated with a call to
/// ISpatialAudioObjectRenderStream::ActivateSpatialAudioObject, the lifetime of the spatial audio object starts. To keep the
/// spatial audio object alive after that, <c>GetBuffer</c> must be called on every processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects). If
/// <c>GetBuffer</c> is not called within an audio processing pass, SetEndOfStream is called implicitly on the audio object to
/// deactivate it, and the audio object can only be reused after calling Release on the object and then reactivating it by
/// calling <c>ActivateSpatialAudioObject</c> again.
/// </para>
/// <para>
/// The pointers retrieved by <c>GetBuffer</c> should not be used after ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects
/// has been called.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-getbuffer
// HRESULT GetBuffer( BYTE **buffer, UINT32 *bufferLength );
new void GetBuffer(out IntPtr buffer, out uint bufferLength);
/// <summary>
/// Instructs the system that the final block of audio data has been submitted for the ISpatialAudioObject so that the object
/// can be deactivated and its resources reused.
/// </summary>
/// <param name="frameCount">
/// The number of audio frames in the audio buffer that should be included in the final processing pass. This number may be
/// smaller than or equal to the value returned in the frameCountPerBuffer parameter to ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects.
/// </param>
/// <returns>
/// <para>
/// If the method succeeds, it returns S_OK. If it fails, possible return codes include, but are not limited to, the values
/// shown in the following table.
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>SPTLAUDCLNT_E_OUT_OF_ORDER</term>
/// <term>ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects was not called before the call to SetEndOfStream.</term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_RESOURCES_INVALIDATED</term>
/// <term>
/// SetEndOfStream was called either explicitly or implicitly in a previous audio processing pass. SetEndOfStream is called
/// implicitly by the system if GetBuffer is not called within an audio processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects).
/// </term>
/// </item>
/// </list>
/// </returns>
/// <remarks>Call Release after calling <c>SetEndOfStream</c> to free the audio object resources for future use.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-setendofstream
// HRESULT SetEndOfStream( UINT32 frameCount );
new void SetEndOfStream([In] uint frameCount);
/// <summary>Gets a boolean value indicating whether the ISpatialAudioObject is valid.</summary>
/// <returns><c>TRUE</c> if the audio object is currently valid; otherwise, <c>FALSE</c>.</returns>
/// <remarks>
/// <para>If this value is false, you should call Release to make the audio object resource available in the future.</para>
/// <para>
/// <c>IsActive</c> will be set to false after SetEndOfStream is called implicitly or explicitly. <c>SetEndOfStream</c> is
/// called implicitly by the system if GetBuffer is not called within an audio processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects).
/// </para>
/// <para>
/// The rendering engine will also deactivate the audio object, setting <c>IsActive</c> to false, when audio object resources
/// become unavailable. In this case, a notification is sent via ISpatialAudioObjectRenderStreamNotify before the object is
/// deactivated. The value returned in the availableDynamicObjectCount parameter to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects indicates how many objects will be processed for each pass.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-isactive
// HRESULT IsActive( BOOL *isActive );
[return: MarshalAs(UnmanagedType.Bool)]
new bool IsActive();
/// <summary>
/// Gets a value specifying the type of audio object that is represented by the ISpatialAudioObject. This value indicates if the
/// object is dynamic or static. If the object is static, one and only one of the static audio channel values to which the
/// object is assigned is returned.
/// </summary>
/// <returns>A value specifying the type of audio object that is represented.</returns>
/// <remarks>
/// Set the type of the audio object with the type parameter to the ISpatialAudioObjectRenderStream::ActivateSpatialAudioObject method.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-getaudioobjecttype
// HRESULT GetAudioObjectType( AudioObjectType *audioObjectType );
new AudioObjectType GetAudioObjectType();
/// <summary>
/// Writes a metadata command to the spatial audio object, each command may only be added once per object per processing cycle.
/// Valid commands and value lengths are defined by the metadata format specified in the
/// SpatialAudioObjectRenderStreamForMetadataActivationParams when the ISpatialAudioObjectRenderStreamForMetadata was created.
/// </summary>
/// <param name="commandID">The ID of the metadata command.</param>
/// <param name="valueBuffer">The buffer containing the value data for the metadata command.</param>
/// <param name="valueBufferLength">The length of the valueBuffer.</param>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudioobjectformetadatacommands-writenextmetadatacommand
// HRESULT WriteNextMetadataCommand( BYTE commandID, void *valueBuffer, UINT32 valueBufferLength );
void WriteNextMetadataCommand([In] byte commandID, [In] IntPtr valueBuffer, [In] uint valueBufferLength);
}
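// A hedged sketch of writing one metadata command during a processing pass. The object
// 'metadataObject' is assumed to have been activated via
// ISpatialAudioObjectRenderStreamForMetadata.ActivateSpatialAudioObjectForMetadataCommands;
// 'cmd', 'valuePtr', and 'valueLen' must follow the stream's metadata format definition.
//
// metadataObject.GetBuffer(out IntPtr audioBuffer, out uint audioBufferLength); // keeps the object active this pass
// metadataObject.WriteNextMetadataCommand(cmd, valuePtr, valueLen);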
/// <summary>
/// <para>
/// Used to write spatial audio metadata for applications that require multiple metadata items per buffer with frame-accurate
/// placement. The data written via this interface must adhere to the format defined by the metadata format specified in the
/// SpatialAudioObjectRenderStreamForMetadataActivationParams when the ISpatialAudioObjectRenderStreamForMetadata was created.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio which includes integrated spatial
/// sound on Xbox and Windows.
/// </para>
/// </summary>
/// <remarks>
/// <c>Note</c> Many of the methods provided by this interface are implemented in the inherited ISpatialAudioObjectBase interface.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudioobjectformetadataitems
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "4861D2AA-E685-4A72-BE98-6FEEB72ACF67")]
[ComImport, Guid("DDEA49FF-3BC0-4377-8AAD-9FBCFD808566"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioObjectForMetadataItems : ISpatialAudioObjectBase
{
/// <summary>Gets a buffer that is used to supply the audio data for the ISpatialAudioObject.</summary>
/// <param name="buffer">The buffer into which audio data is written.</param>
/// <param name="bufferLength">
/// The length of the buffer in bytes. This length will be the value returned in the frameCountPerBuffer parameter to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects multiplied by the value of the <c>nBlockAlign</c> field of the
/// WAVEFORMATEX structure passed in the SpatialAudioObjectRenderStreamActivationParams parameter to ISpatialAudioClient::ActivateSpatialAudioStream.
/// </param>
/// <remarks>
/// <para>
/// The first time <c>GetBuffer</c> is called after the ISpatialAudioObject is activated with a call to
/// ISpatialAudioObjectRenderStream::ActivateSpatialAudioObject, the lifetime of the spatial audio object starts. To keep the
/// spatial audio object alive after that, <c>GetBuffer</c> must be called on every processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects). If
/// <c>GetBuffer</c> is not called within an audio processing pass, SetEndOfStream is called implicitly on the audio object to
/// deactivate it, and the audio object can only be reused after calling Release on the object and then reactivating it by
/// calling <c>ActivateSpatialAudioObject</c> again.
/// </para>
/// <para>
/// The pointers retrieved by <c>GetBuffer</c> should not be used after ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects
/// has been called.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-getbuffer
// HRESULT GetBuffer( BYTE **buffer, UINT32 *bufferLength );
new void GetBuffer(out IntPtr buffer, out uint bufferLength);
/// <summary>
/// Instructs the system that the final block of audio data has been submitted for the ISpatialAudioObject so that the object
/// can be deactivated and its resources reused.
/// </summary>
/// <param name="frameCount">
/// The number of audio frames in the audio buffer that should be included in the final processing pass. This number may be
/// smaller than or equal to the value returned in the frameCountPerBuffer parameter to ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects.
/// </param>
/// <returns>
/// <para>
/// If the method succeeds, it returns S_OK. If it fails, possible return codes include, but are not limited to, the values
/// shown in the following table.
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>SPTLAUDCLNT_E_OUT_OF_ORDER</term>
/// <term>ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects was not called before the call to SetEndOfStream.</term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_RESOURCES_INVALIDATED</term>
/// <term>
/// SetEndOfStream was called either explicitly or implicitly in a previous audio processing pass. SetEndOfStream is called
/// implicitly by the system if GetBuffer is not called within an audio processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects).
/// </term>
/// </item>
/// </list>
/// </returns>
/// <remarks>Call Release after calling <c>SetEndOfStream</c> to free the audio object resources for future use.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-setendofstream
// HRESULT SetEndOfStream( UINT32 frameCount );
new void SetEndOfStream([In] uint frameCount);
/// <summary>Gets a boolean value indicating whether the ISpatialAudioObject is valid.</summary>
/// <returns><c>TRUE</c> if the audio object is currently valid; otherwise, <c>FALSE</c>.</returns>
/// <remarks>
/// <para>If this value is false, you should call Release to make the audio object resource available in the future.</para>
/// <para>
/// <c>IsActive</c> will be set to false after SetEndOfStream is called implicitly or explicitly. <c>SetEndOfStream</c> is
/// called implicitly by the system if GetBuffer is not called within an audio processing pass (between calls to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects and ISpatialAudioObjectRenderStream::EndUpdatingAudioObjects).
/// </para>
/// <para>
/// The rendering engine will also deactivate the audio object, setting <c>IsActive</c> to false, when audio object resources
/// become unavailable. In this case, a notification is sent via ISpatialAudioObjectRenderStreamNotify before the object is
/// deactivated. The value returned in the availableDynamicObjectCount parameter to
/// ISpatialAudioObjectRenderStream::BeginUpdatingAudioObjects indicates how many objects will be processed for each pass.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-isactive
// HRESULT IsActive( BOOL *isActive );
[return: MarshalAs(UnmanagedType.Bool)]
new bool IsActive();
/// <summary>
/// Gets a value specifying the type of audio object that is represented by the ISpatialAudioObject. This value indicates if the
/// object is dynamic or static. If the object is static, one and only one of the static audio channel values to which the
/// object is assigned is returned.
/// </summary>
/// <returns>A value specifying the type of audio object that is represented.</returns>
/// <remarks>
/// Set the type of the audio object with the type parameter to the ISpatialAudioObjectRenderStream::ActivateSpatialAudioObject method.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectbase-getaudioobjecttype
// HRESULT GetAudioObjectType( AudioObjectType *audioObjectType );
new AudioObjectType GetAudioObjectType();
/// <summary>Gets a pointer to the ISpatialAudioMetadataItems object which stores metadata items for the ISpatialAudioObjectForMetadataItems.</summary>
/// <returns>Receives a pointer to the ISpatialAudioMetadataItems associated with the ISpatialAudioObjectForMetadataItems.</returns>
/// <remarks>The client must free this object when it is no longer being used by calling Release.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudioobjectformetadataitems-getspatialaudiometadataitems
// HRESULT GetSpatialAudioMetadataItems( ISpatialAudioMetadataItems **metadataItems );
ISpatialAudioMetadataItems GetSpatialAudioMetadataItems();
}
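// An untested sketch combining this interface with the writer shown earlier: fetch the
// item store from an activated object, then populate it. 'itemsObject' and 'writer' are
// illustrative names, with 'writer' assumed to come from ISpatialAudioMetadataClient.
//
// ISpatialAudioMetadataItems items = itemsObject.GetSpatialAudioMetadataItems();
// writer.Open(items);
// writer.WriteNextItem(frameOffset);
// writer.WriteNextItemCommand(cmd, valuePtr, valueLen);
// writer.Close();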
/// <summary>
/// <para>
/// Provides methods for controlling a spatial audio object render stream for metadata, including starting, stopping, and resetting
/// the stream. Also provides methods for activating new ISpatialAudioObjectForMetadataCommands and
/// ISpatialAudioObjectForMetadataItems instances and notifying the system when you are beginning and ending the process of updating
/// activated spatial audio objects and data.
/// </para>
/// <para>
/// This interface is a part of Windows Sonic, Microsoft's audio platform for more immersive audio which includes integrated spatial
/// sound on Xbox and Windows.
/// </para>
/// </summary>
/// <remarks>
/// <c>Note</c> Many of the methods provided by this interface are implemented in the inherited ISpatialAudioObjectRenderStreamBase interface.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nn-spatialaudiometadata-ispatialaudioobjectrenderstreamformetadata
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "1623B280-FC12-4C19-9D4A-D8463D1A1046")]
[ComImport, Guid("BBC9C907-48D5-4A2E-A0C7-F7F0D67C1FB1"), InterfaceType(ComInterfaceType.InterfaceIsIUnknown)]
public interface ISpatialAudioObjectRenderStreamForMetadata : ISpatialAudioObjectRenderStreamBase
{
/// <summary>Gets the number of dynamic spatial audio objects that are currently available.</summary>
/// <returns>The number of dynamic spatial audio objects that are currently available.</returns>
/// <remarks>
/// <para>
/// A dynamic ISpatialAudioObject is one that was activated by setting the type parameter to the ActivateSpatialAudioObject
/// method to <c>AudioObjectType_Dynamic</c>. The system has a limit on the number of dynamic spatial audio objects that
/// can be activated at one time. Call Release on an <c>ISpatialAudioObject</c> when it is no longer being used to free up the
/// resource to create new dynamic spatial audio objects.
/// </para>
/// <para>
/// You should not call this method after streaming has started, as the value is already provided by
/// ISpatialAudioObjectRenderStreamBase::BeginUpdatingAudioObjects. This method should only be called before streaming has
/// started, which occurs after ISpatialAudioObjectRenderStreamBase::Start is called.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-getavailabledynamicobjectcount
// HRESULT GetAvailableDynamicObjectCount( UINT32 *value );
new uint GetAvailableDynamicObjectCount();
/// <summary>Gets additional services from the <c>ISpatialAudioObjectRenderStream</c>.</summary>
/// <param name="riid">
/// <para>The interface ID for the requested service. The client should set this parameter to one of the following REFIID values:</para>
/// <para>IID_IAudioClock</para>
/// <para>IID_IAudioClock2</para>
/// <para>IID_IAudioStreamVolume</para>
/// </param>
/// <param name="service">
/// Pointer to a pointer variable into which the method writes the address of an instance of the requested interface. Through
/// this method, the caller obtains a counted reference to the interface. The caller is responsible for releasing the interface,
/// when it is no longer needed, by calling the interface's Release method. If the <c>GetService</c> call fails, the service
/// parameter is set to NULL.
/// </param>
/// <returns>
/// <para>
/// If the method succeeds, it returns S_OK. If it fails, possible return codes include, but are not limited to, the values
/// shown in the following table.
/// </para>
/// <list type="table">
/// <listheader>
/// <term>Return code</term>
/// <term>Description</term>
/// </listheader>
/// <item>
/// <term>E_POINTER</term>
/// <term>The service parameter is NULL.</term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_DESTROYED</term>
/// <term>The ISpatialAudioClient associated with the spatial audio stream has been destroyed.</term>
/// </item>
/// <item>
/// <term>AUDCLNT_E_DEVICE_INVALIDATED</term>
/// <term>
/// The audio endpoint device has been unplugged, or the audio hardware or associated hardware resources have been reconfigured,
/// disabled, removed, or otherwise made unavailable for use.
/// </term>
/// </item>
/// <item>
/// <term>SPTLAUDCLNT_E_INTERNAL</term>
/// <term>An internal error has occurred.</term>
/// </item>
/// <item>
/// <term>AUDCLNT_E_UNSUPPORTED_FORMAT</term>
/// <term>The media associated with the spatial audio stream uses an unsupported format.</term>
/// </item>
/// </list>
/// </returns>
/// <remarks>
/// <para>The <c>GetService</c> method supports the following service interfaces:</para>
/// <list type="bullet">
/// <item>
/// <term>IAudioClock</term>
/// </item>
/// <item>
/// <term>IAudioClock2</term>
/// </item>
/// <item>
/// <term>IAudioStreamVolume</term>
/// </item>
/// </list>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-getservice
// HRESULT GetService( REFIID riid, void **service );
[PreserveSig]
new HRESULT GetService(in Guid riid, [MarshalAs(UnmanagedType.IUnknown, IidParameterIndex = 0)] out object service);
/// <summary>Starts the spatial audio stream.</summary>
/// <remarks>
/// <para>
/// Starting the stream causes data flow between the endpoint buffer and the audio engine. The first time this method is called,
/// the stream's audio clock position will be at 0. Otherwise, the clock resumes from its position at the time that the stream
/// was last paused with a call to Stop. Call Reset to reset the clock position to 0 and cause all active ISpatialAudioObject
/// instances to be revoked.
/// </para>
/// <para>The stream must have been previously stopped with a call to Stop or the method will fail and return SPTLAUDCLNT_E_STREAM_NOT_STOPPED.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-start
// HRESULT Start();
new void Start();
/// <summary>Stops a running audio stream.</summary>
/// <remarks>
/// Stopping the stream causes data to stop flowing between the endpoint buffer and the audio engine. You can consider this
/// operation a pause of the stream because it leaves the stream's audio clock at its current stream position and does not reset
/// it to 0. A subsequent call to Start causes the stream to resume running from the current position. Call Reset to reset the
/// clock position to 0 and cause all active ISpatialAudioObject instances to be revoked.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-stop
// HRESULT Stop();
new void Stop();
/// <summary>Reset a stopped audio stream.</summary>
/// <remarks>
/// <para>
/// Resetting the audio stream flushes all pending data and resets the audio clock stream position to 0. Resetting the stream
/// also causes all active ISpatialAudioObject instances to be revoked. A subsequent call to Start causes the stream to start
/// from position 0.
/// </para>
/// <para>The stream must have been previously stopped with a call to Stop or the method will fail and return SPTLAUDCLNT_E_STREAM_NOT_STOPPED.</para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-reset
// HRESULT Reset();
new void Reset();
/// <summary>
/// Puts the system into the state where audio object data can be submitted for processing and the ISpatialAudioObject state can
/// be modified.
/// </summary>
/// <param name="availableDynamicObjectCount">
/// The number of dynamic audio objects that are available to be rendered for the current processing pass. All allocated static
/// audio objects can be rendered in every pass. For information on audio object types, see AudioObjectType.
/// </param>
/// <param name="frameCountPerBuffer">The size, in audio frames, of the buffer returned by GetBuffer.</param>
/// <remarks>
/// <para>
/// This method must be called each time the event passed in the SpatialAudioObjectRenderStreamActivationParams to
/// ISpatialAudioClient::ActivateSpatialAudioStream is signaled, even if there is no audio object data to submit.
/// </para>
/// <para>
/// For each <c>BeginUpdatingAudioObjects</c> call, there should be a corresponding call to EndUpdatingAudioObjects. If
/// <c>BeginUpdatingAudioObjects</c> is called twice without a call to <c>EndUpdatingAudioObjects</c> between them, the second call
/// to <c>BeginUpdatingAudioObjects</c> will return SPTLAUDCLNT_E_OUT_OF_ORDER.
/// </para>
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-beginupdatingaudioobjects
// HRESULT BeginUpdatingAudioObjects( UINT32 *availableDynamicObjectCount, UINT32 *frameCountPerBuffer );
new void BeginUpdatingAudioObjects(out uint availableDynamicObjectCount, out uint frameCountPerBuffer);
/// <summary>
/// Notifies the system that the app has finished supplying audio data for the spatial audio objects activated with ActivateSpatialAudioObject.
/// </summary>
/// <remarks>The pointers retrieved with ISpatialAudioObjectBase::GetBuffer can no longer be used after this method is called.</remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudioclient/nf-spatialaudioclient-ispatialaudioobjectrenderstreambase-endupdatingaudioobjects
// HRESULT EndUpdatingAudioObjects();
new void EndUpdatingAudioObjects();
/// <summary>Activate an ISpatialAudioObjectForMetadataCommands for rendering.</summary>
/// <param name="type">
/// The type of audio object to activate. For dynamic audio objects, this value must be <c>AudioObjectType_Dynamic</c>. For
/// static audio objects, specify one of the static audio channel values from the enumeration. Specifying
/// <c>AudioObjectType_None</c> will produce an audio object that is not spatialized.
/// </param>
/// <returns>Receives a pointer to the activated interface.</returns>
/// <remarks>
/// A dynamic ISpatialAudioObjectForMetadataCommands is one that was activated by setting the type parameter to the
/// <c>ActivateSpatialAudioObjectForMetadataCommands</c> method to <c>AudioObjectType_Dynamic</c>. The client has a limit on the
/// number of dynamic spatial audio objects that can be activated at one time. After the limit has been reached,
/// attempting to activate additional audio objects will result in this method returning an SPTLAUDCLNT_E_NO_MORE_OBJECTS error.
/// To avoid this, call Release on each dynamic <c>ISpatialAudioObjectForMetadataCommands</c> after it is no longer being used
/// to free up the resource so that it can be reallocated. See ISpatialAudioObjectBase::IsActive and
/// ISpatialAudioObjectBase::SetEndOfStream for more information on managing the lifetime of spatial audio objects.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudioobjectrenderstreamformetadata-activatespatialaudioobjectformetadatacommands
// HRESULT ActivateSpatialAudioObjectForMetadataCommands( AudioObjectType type, ISpatialAudioObjectForMetadataCommands
// **audioObject );
ISpatialAudioObjectForMetadataCommands ActivateSpatialAudioObjectForMetadataCommands([In] AudioObjectType type);
/// <summary>Activate an ISpatialAudioObjectForMetadataItems for rendering.</summary>
/// <param name="type">
/// The type of audio object to activate. For dynamic audio objects, this value must be <c>AudioObjectType_Dynamic</c>. For
/// static audio objects, specify one of the static audio channel values from the enumeration. Specifying
/// <c>AudioObjectType_None</c> will produce an audio object that is not spatialized.
/// </param>
/// <returns>Receives a pointer to the activated interface.</returns>
/// <remarks>
/// A dynamic ISpatialAudioObjectForMetadataItems is one that was activated by setting the type parameter to the
/// <c>ActivateSpatialAudioObjectForMetadataItems</c> method to <c>AudioObjectType_Dynamic</c>. The client has a limit on the
/// number of dynamic spatial audio objects that can be activated at one time. After the limit has been reached,
/// attempting to activate additional audio objects will result in this method returning an SPTLAUDCLNT_E_NO_MORE_OBJECTS error.
/// To avoid this, call Release on each dynamic <c>ISpatialAudioObjectForMetadataItems</c> after it is no longer being used to
/// free up the resource so that it can be reallocated. See ISpatialAudioObjectBase::IsActive and
/// ISpatialAudioObjectBase::SetEndOfStream for more information on managing the lifetime of spatial audio objects.
/// </remarks>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/nf-spatialaudiometadata-ispatialaudioobjectrenderstreamformetadata-activatespatialaudioobjectformetadataitems
// HRESULT ActivateSpatialAudioObjectForMetadataItems( AudioObjectType type, ISpatialAudioObjectForMetadataItems **audioObject );
ISpatialAudioObjectForMetadataItems ActivateSpatialAudioObjectForMetadataItems([In] AudioObjectType type);
}
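// A condensed, untested sketch of a processing-pass loop for this stream. 'stream' is
// assumed to have been activated via ISpatialAudioClient.ActivateSpatialAudioStream with
// the activation parameters below, and 'bufferCompletionEvent' is the event supplied in
// those parameters; 'rendering' is an illustrative loop condition.
//
// stream.Start();
// while (rendering)
// {
//     bufferCompletionEvent.WaitOne();
//     stream.BeginUpdatingAudioObjects(out uint dynamicObjectCount, out uint frameCountPerBuffer);
//     // ... call GetBuffer and WriteNextMetadataCommand on each active object ...
//     stream.EndUpdatingAudioObjects();
// }
// stream.Stop();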
/// <summary>Provides information about an ISpatialAudioMetadataItems object. Get a copy of this structure by calling GetInfo.</summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/ns-spatialaudiometadata-spatialaudiometadataitemsinfo
// typedef struct SpatialAudioMetadataItemsInfo { UINT16 FrameCount; UINT16 ItemCount; UINT16 MaxItemCount; UINT32
// MaxValueBufferLength; } SpatialAudioMetadataItemsInfo;
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "EC694B26-988B-4765-8B9F-130FCF614166")]
[StructLayout(LayoutKind.Sequential, Pack = 1)]
public struct SpatialAudioMetadataItemsInfo
{
/// <summary>The total frame count, which defines valid item offsets.</summary>
public ushort FrameCount;
/// <summary>The current number of items stored.</summary>
public ushort ItemCount;
/// <summary>The maximum number of items allowed.</summary>
public ushort MaxItemCount;
/// <summary>The size of the largest command value defined by the metadata format.</summary>
public uint MaxValueBufferLength;
}
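// Illustrative only: sizing a command-value buffer from this structure, assuming 'items'
// is an ISpatialAudioMetadataItems whose GetInfo method fills in a
// SpatialAudioMetadataItemsInfo (the exact wrapper signature is an assumption here).
//
// items.GetInfo(out SpatialAudioMetadataItemsInfo info);
// IntPtr valueBuffer = Marshal.AllocCoTaskMem((int)info.MaxValueBufferLength);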
/// <summary>
/// Represents activation parameters for a spatial audio render stream for metadata. Pass this structure to
/// ISpatialAudioClient::ActivateSpatialAudioStream when activating a stream.
/// </summary>
// https://docs.microsoft.com/en-us/windows/win32/api/spatialaudiometadata/ns-spatialaudiometadata-spatialaudioobjectrenderstreamformetadataactivationparams
// typedef struct SpatialAudioObjectRenderStreamForMetadataActivationParams { const WAVEFORMATEX *ObjectFormat; AudioObjectType
// StaticObjectTypeMask; UINT32 MinDynamicObjectCount; UINT32 MaxDynamicObjectCount; AUDIO_STREAM_CATEGORY Category; HANDLE
// EventHandle; GUID MetadataFormatId; UINT16 MaxMetadataItemCount; const PROPVARIANT *MetadataActivationParams;
// ISpatialAudioObjectRenderStreamNotify *NotifyObject; } SpatialAudioObjectRenderStreamForMetadataActivationParams;
[PInvokeData("spatialaudiometadata.h", MSDNShortId = "5B92F521-537F-4296-B9A7-7EC6985530B3")]
[StructLayout(LayoutKind.Sequential)]
public struct SpatialAudioObjectRenderStreamForMetadataActivationParams
{
/// <summary>
/// Format descriptor for a single spatial audio object. All objects used by the stream must have the same format and the format
/// must be of type WAVEFORMATEX or WAVEFORMATEXTENSIBLE.
/// </summary>
public IntPtr ObjectFormat;
/// <summary>
/// A bitwise combination of <c>AudioObjectType</c> values indicating the set of static spatial audio channels that will be
/// allowed by the activated stream.
/// </summary>
public AudioObjectType StaticObjectTypeMask;
/// <summary>
/// The minimum number of concurrent dynamic objects. If this number of dynamic audio objects can't be activated simultaneously,
/// ISpatialAudioClient::ActivateSpatialAudioStream will fail with the error <c>SPTLAUDCLNT_E_NO_MORE_OBJECTS</c>.
/// </summary>
public uint MinDynamicObjectCount;
/// <summary>The maximum number of concurrent dynamic objects that can be activated with ISpatialAudioObjectRenderStream.</summary>
public uint MaxDynamicObjectCount;
/// <summary>The category of the audio stream and its spatial audio objects.</summary>
public AUDIO_STREAM_CATEGORY Category;
/// <summary>
/// The event that will signal the client to provide more audio data. This handle will be duplicated internally before it is used.
/// </summary>
public IntPtr EventHandle;
/// <summary>The identifier of the metadata format for the currently active spatial rendering engine.</summary>
public Guid MetadataFormatId;
/// <summary>The maximum number of metadata items per frame.</summary>
public ushort MaxMetadataItemCount;
/// <summary>Additional activation parameters.</summary>
public IntPtr MetadataActivationParams;
/// <summary>
/// The object that provides notifications for spatial audio clients to respond to changes in the state of an
/// ISpatialAudioObjectRenderStream. This object is used to notify clients that the number of dynamic spatial audio objects that
/// can be activated concurrently is about to change.
/// </summary>
public IntPtr NotifyObject;
}
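// A hedged example of populating these activation parameters. 'fmtPtr' (a marshaled
// WAVEFORMATEX), 'metadataFormatGuid', and 'eventHandle' are placeholders the caller must
// supply; the field values and native-style enum member names are illustrative, not recommendations.
//
// var activationParams = new SpatialAudioObjectRenderStreamForMetadataActivationParams
// {
//     ObjectFormat = fmtPtr,
//     StaticObjectTypeMask = AudioObjectType.AudioObjectType_None,
//     MinDynamicObjectCount = 0,
//     MaxDynamicObjectCount = 4,
//     Category = AUDIO_STREAM_CATEGORY.AudioCategory_GameEffects,
//     EventHandle = eventHandle,
//     MetadataFormatId = metadataFormatGuid,
//     MaxMetadataItemCount = 4,
// };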
}
}

View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="utf-8"?>
<Project Sdk="Microsoft.NET.Sdk">
<ProjectExtensions>
<SupportedDlls>Mmdevapi.dll</SupportedDlls>
</ProjectExtensions>
<PropertyGroup>
<Description>PInvoke API (interfaces, methods, structures and constants) imported from Windows Core Audio Api.</Description>
<AssemblyTitle>$(AssemblyName)</AssemblyTitle>
<TargetFrameworks>net20;net35;net40;net45;netstandard2.0;netcoreapp2.0;netcoreapp2.1;netcoreapp3.0;netcoreapp3.1</TargetFrameworks>
<AssemblyName>Vanara.PInvoke.CoreAudio</AssemblyName>
<PackageId>$(AssemblyName)</PackageId>
<PackageTags>pinvoke;vanara;net-extensions;interop;CoreAudio;windows;audio</PackageTags>
<AllowUnsafeBlocks>True</AllowUnsafeBlocks>
<PackageReleaseNotes />
</PropertyGroup>
<ItemGroup>
<ProjectReference Include="..\..\Core\Vanara.Core.csproj" />
<ProjectReference Include="..\Ole\Vanara.PInvoke.Ole.csproj" />
<ProjectReference Include="..\Shared\Vanara.PInvoke.Shared.csproj" />
</ItemGroup>
</Project>

View File

@ -225,6 +225,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "CldApi", "UnitTests\PInvoke
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Vanara.PInvoke.ProjectedFSLib", "PInvoke\ProjectedFSLib\Vanara.PInvoke.ProjectedFSLib.csproj", "{30F2727D-0B8E-4364-8F0E-9EEB4CD9CB0E}"
EndProject
Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Vanara.PInvoke.CoreAudio", "PInvoke\CoreAudio\Vanara.PInvoke.CoreAudio.csproj", "{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug (no Unit Tests)|Any CPU = Debug (no Unit Tests)|Any CPU
@ -721,6 +723,12 @@ Global
{30F2727D-0B8E-4364-8F0E-9EEB4CD9CB0E}.Debug|Any CPU.Build.0 = Debug|Any CPU
{30F2727D-0B8E-4364-8F0E-9EEB4CD9CB0E}.Release|Any CPU.ActiveCfg = Release|Any CPU
{30F2727D-0B8E-4364-8F0E-9EEB4CD9CB0E}.Release|Any CPU.Build.0 = Release|Any CPU
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}.Debug (no Unit Tests)|Any CPU.ActiveCfg = Debug|Any CPU
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}.Debug (no Unit Tests)|Any CPU.Build.0 = Debug|Any CPU
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}.Debug|Any CPU.Build.0 = Debug|Any CPU
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}.Release|Any CPU.ActiveCfg = Release|Any CPU
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2}.Release|Any CPU.Build.0 = Release|Any CPU
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
@ -816,6 +824,7 @@ Global
{DDFCBC19-B6CE-4DD4-A1BB-96EE86C54D93} = {212ABBD0-B724-4CFA-9D6D-E3891547FA90}
{CCEE0CAA-27BF-43B3-8609-2279BEDA3F61} = {385CAD2D-0A5E-4F80-927B-D5499D126B90}
{30F2727D-0B8E-4364-8F0E-9EEB4CD9CB0E} = {212ABBD0-B724-4CFA-9D6D-E3891547FA90}
{F9C2B1C3-079D-47D8-996E-1575D2F5C4D2} = {212ABBD0-B724-4CFA-9D6D-E3891547FA90}
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {543FAC75-2AF1-4EF1-9609-B242B63FEED4}