/*
* Copyright (C) 2009 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "APM_AudioPolicyManager"
//#define LOG_NDEBUG 0
//#define VERY_VERBOSE_LOGGING
#ifdef VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif
#define AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH 128
#define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
#define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
"audio_policy_configuration_a2dp_offload_disabled.xml"
#include <inttypes.h>
#include <math.h>
#include <AudioPolicyManagerInterface.h>
#include <AudioPolicyEngineInstance.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <media/AudioParameter.h>
#include <media/AudioPolicyHelper.h>
#include <soundtrigger/SoundTrigger.h>
#include <system/audio.h>
#include <audio_policy_conf.h>
#include "AudioPolicyManager.h"
#ifndef USE_XML_AUDIO_POLICY_CONF
#include <ConfigParsingUtils.h>
#include <StreamDescriptor.h>
#endif
#include <Serializer.h>
#include "TypeConverter.h"
#include <policy.h>
namespace android {
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
#define TOUCH_SOUND_FIXED_DELAY_MS 100
// Largest difference in dB on earpiece in call between the voice volume and another
// media / notification / system volume.
constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f;
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
// Array of all surround formats.
static const audio_format_t SURROUND_FORMATS[] = {
AUDIO_FORMAT_AC3,
AUDIO_FORMAT_E_AC3,
AUDIO_FORMAT_DTS,
AUDIO_FORMAT_DTS_HD,
AUDIO_FORMAT_AAC_LC,
AUDIO_FORMAT_DOLBY_TRUEHD,
AUDIO_FORMAT_E_AC3_JOC,
};
// Array of all AAC formats. When AAC is enabled by users, all AAC formats should be enabled.
static const audio_format_t AAC_FORMATS[] = {
AUDIO_FORMAT_AAC_LC,
AUDIO_FORMAT_AAC_HE_V1,
AUDIO_FORMAT_AAC_HE_V2,
AUDIO_FORMAT_AAC_ELD,
AUDIO_FORMAT_AAC_XHE,
};
// ----------------------------------------------------------------------------
// AudioPolicyInterface implementation
// ----------------------------------------------------------------------------
status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
const char *device_name)
{
status_t status = setDeviceConnectionStateInt(device, state, device_address, device_name);
nextAudioPortGeneration();
return status;
}
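// Notifies the audio HAL of a device connection or disconnection by sending a set-parameters
// command carrying the device type keyed by connect/disconnect state and the device address.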
void AudioPolicyManager::broadcastDeviceConnectionState(audio_devices_t device,
audio_policy_dev_state_t state,
const String8 &device_address)
{
AudioParameter param(device_address);
const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect);
param.addInt(key, device);
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
}
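// Common handler for output and input device connection state changes: updates the available
// device lists, notifies the HAL and the policy engine, opens or closes the affected outputs
// and inputs, and re-evaluates routing (including call routing) for the new configuration.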
status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
audio_policy_dev_state_t state,
const char *device_address,
const char *device_name)
{
ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
device, state, device_address, device_name);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
sp<DeviceDescriptor> devDesc =
mHwModules.getDeviceDescriptor(device, device_address, device_name);
// handle output devices
if (audio_is_output_device(device)) {
SortedVector <audio_io_handle_t> outputs;
ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
// save a copy of the opened output descriptors before any output is opened or closed
// by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
mPreviousOutputs = mOutputs;
switch (state)
{
// handle output device connection
case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
if (index >= 0) {
ALOGW("setDeviceConnectionState() device already connected: %x", device);
return INVALID_OPERATION;
}
ALOGV("setDeviceConnectionState() connecting device %x", device);
// register new device as available
index = mAvailableOutputDevices.add(devDesc);
if (index >= 0) {
sp<HwModule> module = mHwModules.getModuleForDevice(device);
if (module == 0) {
ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
device);
mAvailableOutputDevices.remove(devDesc);
return INVALID_OPERATION;
}
mAvailableOutputDevices[index]->attach(module);
} else {
return NO_MEMORY;
}
// Before checking outputs, broadcast connect event to allow HAL to retrieve dynamic
// parameters on newly connected devices (instead of opening the outputs...)
broadcastDeviceConnectionState(device, state, devDesc->mAddress);
if (checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress) != NO_ERROR) {
mAvailableOutputDevices.remove(devDesc);
broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
devDesc->mAddress);
return INVALID_OPERATION;
}
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
// outputs should never be empty here
ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
"checkOutputsForDevice() returned no outputs but status OK");
ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
outputs.size());
} break;
// handle output device disconnection
case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
if (index < 0) {
ALOGW("setDeviceConnectionState() device not connected: %x", device);
return INVALID_OPERATION;
}
ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
// Send Disconnect to HALs
broadcastDeviceConnectionState(device, state, devDesc->mAddress);
// remove device from available output devices
mAvailableOutputDevices.remove(devDesc);
checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
} break;
default:
ALOGE("setDeviceConnectionState() invalid state: %x", state);
return BAD_VALUE;
}
// checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
// output is suspended before any tracks are moved to it
checkA2dpSuspend();
checkOutputForAllStrategies();
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have been
// opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
closeOutput(output);
}
}
// check again after closing A2DP output to reset mA2dpSuspended if needed
checkA2dpSuspend();
}
updateDevicesAndOutputs();
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
// do not force device change on duplicated output because if device is 0, it will
// also force a device 0 for the two outputs it is duplicated to, which may override
// a valid device selection on those outputs.
bool force = !desc->isDuplicated()
&& (!device_distinguishes_on_address(device)
// always force when disconnecting (a non-duplicated device)
|| (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
setOutputDevice(desc, newDevice, force, 0);
}
}
if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
cleanUpForDevice(devDesc);
}
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
} // end if is output device
// handle input devices
if (audio_is_input_device(device)) {
SortedVector <audio_io_handle_t> inputs;
ssize_t index = mAvailableInputDevices.indexOf(devDesc);
switch (state)
{
// handle input device connection
case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
if (index >= 0) {
ALOGW("setDeviceConnectionState() device already connected: %d", device);
return INVALID_OPERATION;
}
sp<HwModule> module = mHwModules.getModuleForDevice(device);
if (module == NULL) {
ALOGW("setDeviceConnectionState(): could not find HW module for device %08x",
device);
return INVALID_OPERATION;
}
// Before checking inputs, broadcast connect event to allow HAL to retrieve dynamic
// parameters on newly connected devices (instead of opening the inputs...)
broadcastDeviceConnectionState(device, state, devDesc->mAddress);
if (checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress) != NO_ERROR) {
broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
devDesc->mAddress);
return INVALID_OPERATION;
}
index = mAvailableInputDevices.add(devDesc);
if (index >= 0) {
mAvailableInputDevices[index]->attach(module);
} else {
return NO_MEMORY;
}
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
} break;
// handle input device disconnection
case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
if (index < 0) {
ALOGW("setDeviceConnectionState() device not connected: %d", device);
return INVALID_OPERATION;
}
ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
// Set Disconnect to HALs
broadcastDeviceConnectionState(device, state, devDesc->mAddress);
checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress);
mAvailableInputDevices.remove(devDesc);
// Propagate device availability to Engine
mEngine->setDeviceConnectionState(devDesc, state);
} break;
default:
ALOGE("setDeviceConnectionState() invalid state: %x", state);
return BAD_VALUE;
}
closeAllInputs();
// As the input device list can impact the output device selection, update
// getDeviceForStrategy() cache
updateDevicesAndOutputs();
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
updateCallRouting(newDevice);
}
if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) {
cleanUpForDevice(devDesc);
}
mpClientInterface->onAudioPortListUpdate();
return NO_ERROR;
} // end if is input device
ALOGW("setDeviceConnectionState() invalid device: %x", device);
return BAD_VALUE;
}
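// Reports whether a device of the given type (and address, when one is provided) is present
// in the available output or input device list.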
audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
const char *device_address)
{
sp<DeviceDescriptor> devDesc =
mHwModules.getDeviceDescriptor(device, device_address, "",
(strlen(device_address) != 0)/*matchAddress*/);
if (devDesc == 0) {
ALOGW("getDeviceConnectionState() undeclared device, type %08x, address: %s",
device, device_address);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
DeviceVector *deviceVector;
if (audio_is_output_device(device)) {
deviceVector = &mAvailableOutputDevices;
} else if (audio_is_input_device(device)) {
deviceVector = &mAvailableInputDevices;
} else {
ALOGW("getDeviceConnectionState() invalid device type %08x", device);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
return (deviceVector->getDevice(device, String8(device_address)) != 0) ?
AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
status_t AudioPolicyManager::handleDeviceConfigChange(audio_devices_t device,
const char *device_address,
const char *device_name)
{
status_t status;
String8 reply;
AudioParameter param;
int isReconfigA2dpSupported = 0;
ALOGV("handleDeviceConfigChange(() device: 0x%X, address %s name %s",
device, device_address, device_name);
// connect/disconnect only 1 device at a time
if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
// Check if the device is currently connected
sp<DeviceDescriptor> devDesc =
mHwModules.getDeviceDescriptor(device, device_address, device_name);
ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
if (index < 0) {
// Nothing to do: device is not connected
return NO_ERROR;
}
// For offloaded A2DP, Hw modules may have the capability to
// configure codecs. Check if any of the loaded hw modules
// supports this.
// If supported, send a set parameter to configure A2DP codecs
// and return. No need to toggle device state.
if (device & AUDIO_DEVICE_OUT_ALL_A2DP) {
reply = mpClientInterface->getParameters(
AUDIO_IO_HANDLE_NONE,
String8(AudioParameter::keyReconfigA2dpSupported));
AudioParameter repliedParameters(reply);
repliedParameters.getInt(
String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported);
if (isReconfigA2dpSupported) {
const String8 key(AudioParameter::keyReconfigA2dp);
param.add(key, String8("true"));
mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
return NO_ERROR;
}
}
// Toggle the device state: UNAVAILABLE -> AVAILABLE
// This will force the device configuration to be read again
status = setDeviceConnectionState(device,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
device_address, device_name);
if (status != NO_ERROR) {
ALOGW("handleDeviceConfigChange() error disabling connection state: %d",
status);
return status;
}
status = setDeviceConnectionState(device,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
device_address, device_name);
if (status != NO_ERROR) {
ALOGW("handleDeviceConfigChange() error enabling connection state: %d",
status);
return status;
}
return NO_ERROR;
}
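// Re-evaluates the voice call RX and TX paths: when the RX device is reachable from the
// primary output, legacy routing via setOutputDevice() is used; otherwise dedicated telephony
// audio patches are created for the RX and/or TX paths. Returns the required mute wait time.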
uint32_t AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, uint32_t delayMs)
{
bool createTxPatch = false;
uint32_t muteWaitMs = 0;
if(!hasPrimaryOutput() || mPrimaryOutput->device() == AUDIO_DEVICE_OUT_STUB) {
return muteWaitMs;
}
audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
// release existing RX patch if any
if (mCallRxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
mCallRxPatch.clear();
}
// release TX patch if any
if (mCallTxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
mCallTxPatch.clear();
}
// If the RX device is on the primary HW module, then use legacy routing method for voice calls
// via setOutputDevice() on primary output.
// Otherwise, create two audio patches for TX and RX path.
if (availablePrimaryOutputDevices() & rxDevice) {
muteWaitMs = setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
// If the TX device is also on the primary HW module, setOutputDevice() will take care
// of it due to legacy implementation. If not, create a patch.
if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
== AUDIO_DEVICE_NONE) {
createTxPatch = true;
}
} else { // create RX path audio patch
mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevice, delayMs);
createTxPatch = true;
}
if (createTxPatch) { // create TX path audio patch
mCallTxPatch = createTelephonyPatch(false /*isRx*/, txDevice, delayMs);
}
return muteWaitMs;
}
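// Builds a device-to-device audio patch for the voice call RX or TX path, reusing an already
// opened output stream as an additional patch source when one reaches the target device, and
// asks AudioFlinger to create it. Returns the resulting AudioPatch, or null on failure.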
sp<AudioPatch> AudioPolicyManager::createTelephonyPatch(
bool isRx, audio_devices_t device, uint32_t delayMs) {
struct audio_patch patch;
patch.num_sources = 1;
patch.num_sinks = 1;
sp<DeviceDescriptor> txSourceDeviceDesc;
if (isRx) {
fillAudioPortConfigForDevice(mAvailableOutputDevices, device, &patch.sinks[0]);
fillAudioPortConfigForDevice(
mAvailableInputDevices, AUDIO_DEVICE_IN_TELEPHONY_RX, &patch.sources[0]);
} else {
txSourceDeviceDesc = fillAudioPortConfigForDevice(
mAvailableInputDevices, device, &patch.sources[0]);
fillAudioPortConfigForDevice(
mAvailableOutputDevices, AUDIO_DEVICE_OUT_TELEPHONY_TX, &patch.sinks[0]);
}
audio_devices_t outputDevice = isRx ? device : AUDIO_DEVICE_OUT_TELEPHONY_TX;
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(outputDevice, mOutputs);
audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
// request to reuse existing output stream if one is already opened to reach the target device
if (output != AUDIO_IO_HANDLE_NONE) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
ALOG_ASSERT(!outputDesc->isDuplicated(),
"%s() %#x device output %d is duplicated", __func__, outputDevice, output);
outputDesc->toAudioPortConfig(&patch.sources[1]);
patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
patch.num_sources = 2;
}
if (!isRx) {
// terminate active capture if on the same HW module as the call TX source device
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch and logic here must be
// symmetric to the one in startInput()
for (const auto& activeDesc : mInputs.getActiveInputs()) {
if (activeDesc->hasSameHwModuleAs(txSourceDeviceDesc)) {
AudioSessionCollection activeSessions =
activeDesc->getAudioSessions(true /*activeOnly*/);
for (size_t j = 0; j < activeSessions.size(); j++) {
audio_session_t activeSession = activeSessions.keyAt(j);
stopInput(activeDesc->mIoHandle, activeSession);
releaseInput(activeDesc->mIoHandle, activeSession);
}
}
}
}
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
status_t status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, delayMs);
ALOGW_IF(status != NO_ERROR,
"%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX");
sp<AudioPatch> audioPatch;
if (status == NO_ERROR) {
audioPatch = new AudioPatch(&patch, mUidCached);
audioPatch->mAfPatchHandle = afPatchHandle;
audioPatch->mUid = mUidCached;
}
return audioPatch;
}
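// Picks the first available device descriptor of the requested type from the given device
// vector and fills the supplied audio_port_config from it, for use as a patch source or sink.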
sp<DeviceDescriptor> AudioPolicyManager::fillAudioPortConfigForDevice(
const DeviceVector& devices, audio_devices_t device, audio_port_config *config) {
DeviceVector deviceList = devices.getDevicesFromType(device);
ALOG_ASSERT(!deviceList.isEmpty(),
"%s() selected device type %#x is not in devices list", __func__, device);
sp<DeviceDescriptor> deviceDesc = deviceList.itemAt(0);
deviceDesc->toAudioPortConfig(config);
return deviceDesc;
}
void AudioPolicyManager::setPhoneState(audio_mode_t state)
{
ALOGV("setPhoneState() state %d", state);
// store previous phone state for management of sonification strategy below
int oldState = mEngine->getPhoneState();
if (mEngine->setPhoneState(state) != NO_ERROR) {
ALOGW("setPhoneState() invalid or same state %d", state);
return;
}
/// Open question: can these lines be executed after the switch of volume curves?
// if leaving call state, handle special case of active streams
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, false, true);
}
// force reevaluating accessibility routing when call stops
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
/**
* Switching to or from the in-call state, or switching between telephony and VoIP, leads to
* a forced routing command.
*/
bool force = ((is_state_in_call(oldState) != is_state_in_call(state))
|| (is_state_in_call(state) && (state != oldState)));
// check for device and output changes triggered by new phone state
checkA2dpSuspend();
checkOutputForAllStrategies();
updateDevicesAndOutputs();
int delayMs = 0;
if (isStateInCall(state)) {
nsecs_t sysTime = systemTime();
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
// mute media and sonification strategies and delay device switch by the largest
// latency of any output where either strategy is active.
// This avoids sending the ring tone or music tail into the earpiece or headset.
if ((isStrategyActive(desc, STRATEGY_MEDIA,
SONIFICATION_HEADSET_MUSIC_DELAY,
sysTime) ||
isStrategyActive(desc, STRATEGY_SONIFICATION,
SONIFICATION_HEADSET_MUSIC_DELAY,
sysTime)) &&
(delayMs < (int)desc->latency()*2)) {
delayMs = desc->latency()*2;
}
setStrategyMute(STRATEGY_MEDIA, true, desc);
setStrategyMute(STRATEGY_MEDIA, false, desc, MUTE_TIME_MS,
getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
setStrategyMute(STRATEGY_SONIFICATION, true, desc);
setStrategyMute(STRATEGY_SONIFICATION, false, desc, MUTE_TIME_MS,
getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
}
}
if (hasPrimaryOutput()) {
// Note that despite the fact that getNewOutputDevice() is called on the primary output,
// the device returned is not necessarily reachable via this output
audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
// force routing command to audio hardware when ending call
// even if no device change is needed
if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
rxDevice = mPrimaryOutput->device();
}
if (state == AUDIO_MODE_IN_CALL) {
updateCallRouting(rxDevice, delayMs);
} else if (oldState == AUDIO_MODE_IN_CALL) {
if (mCallRxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
mCallRxPatch.clear();
}
if (mCallTxPatch != 0) {
mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
mCallTxPatch.clear();
}
setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
} else {
setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
}
}
// reevaluate routing on all outputs in case tracks have been started during the call
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) {
setOutputDevice(desc, newDevice, (newDevice != AUDIO_DEVICE_NONE), 0 /*delayMs*/);
}
}
// if entering in call state, handle special case of active streams
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, true, true);
}
// force reevaluating accessibility routing when call starts
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
// Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
if (state == AUDIO_MODE_RINGTONE &&
isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
mLimitRingtoneVolume = true;
} else {
mLimitRingtoneVolume = false;
}
}
audio_mode_t AudioPolicyManager::getPhoneState() {
return mEngine->getPhoneState();
}
void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config)
{
ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState());
if (config == mEngine->getForceUse(usage)) {
return;
}
if (mEngine->setForceUse(usage, config) != NO_ERROR) {
ALOGW("setForceUse() could not set force cfg %d for usage %d", config, usage);
return;
}
bool forceVolumeReeval = (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ||
(usage == AUDIO_POLICY_FORCE_FOR_DOCK) ||
(usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
// check for device and output changes triggered by new force usage
checkA2dpSuspend();
checkOutputForAllStrategies();
updateDevicesAndOutputs();
//FIXME: workaround for truncated touch sounds
// to be removed when the problem is handled by system UI
uint32_t delayMs = 0;
uint32_t waitMs = 0;
if (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) {
delayMs = TOUCH_SOUND_FIXED_DELAY_MS;
}
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
waitMs = updateCallRouting(newDevice, delayMs);
}
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
audio_devices_t newDevice = getNewOutputDevice(outputDesc, true /*fromCache*/);
if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
waitMs = setOutputDevice(outputDesc, newDevice, (newDevice != AUDIO_DEVICE_NONE),
delayMs);
}
if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
applyStreamVolumes(outputDesc, newDevice, waitMs, true);
}
}
for (const auto& activeDesc : mInputs.getActiveInputs()) {
audio_devices_t newDevice = getNewInputDevice(activeDesc);
// Force new input selection if the new device cannot be reached via the current input
if (activeDesc->mProfile->getSupportedDevices().types() &
(newDevice & ~AUDIO_DEVICE_BIT_IN)) {
setInputDevice(activeDesc->mIoHandle, newDevice);
} else {
closeInput(activeDesc->mIoHandle);
}
}
}
void AudioPolicyManager::setSystemProperty(const char* property, const char* value)
{
ALOGV("setSystemProperty() property %s, value %s", property, value);
}
// Find a direct output profile compatible with the parameters passed, even if the input flags do
// not explicitly request a direct output
sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
audio_devices_t device,
uint32_t samplingRate,
audio_format_t format,
audio_channel_mask_t channelMask,
audio_output_flags_t flags)
{
// only retain flags that will drive the direct output profile selection
// if explicitly requested
static const uint32_t kRelevantFlags =
(AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD |
AUDIO_OUTPUT_FLAG_VOIP_RX);
flags =
(audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
sp<IOProfile> profile;
for (const auto& hwModule : mHwModules) {
for (const auto& curProfile : hwModule->getOutputProfiles()) {
if (!curProfile->isCompatibleProfile(device, String8(""),
samplingRate, NULL /*updatedSamplingRate*/,
format, NULL /*updatedFormat*/,
channelMask, NULL /*updatedChannelMask*/,
flags)) {
continue;
}
// reject profiles not corresponding to a device currently available
if ((mAvailableOutputDevices.types() & curProfile->getSupportedDevicesType()) == 0) {
continue;
}
// if several profiles are compatible, give priority to one with offload capability
if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
continue;
}
profile = curProfile;
if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
break;
}
}
}
return profile;
}
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
routing_strategy strategy = getStrategy(stream);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
// Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput().
// We use selectOutput() here since we don't have the desired AudioTrack sample rate,
// format, flags, etc. This may result in some discrepancy for functions that utilize
// getOutput() solely on audio_stream_type such as AudioSystem::getOutputFrameCount()
// and AudioSystem::getOutputSamplingRate().
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
audio_io_handle_t output = selectOutput(outputs, AUDIO_OUTPUT_FLAG_NONE, AUDIO_FORMAT_INVALID);
ALOGV("getOutput() stream %d selected device %08x, output %d", stream, device, output);
return output;
}
status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
uid_t uid,
const audio_config_t *config,
audio_output_flags_t *flags,
audio_port_handle_t *selectedDeviceId,
audio_port_handle_t *portId)
{
audio_attributes_t attributes;
if (attr != NULL) {
if (!isValidAttributes(attr)) {
ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
attr->usage, attr->content_type, attr->flags,
attr->tags);
return BAD_VALUE;
}
attributes = *attr;
} else {
if (*stream < AUDIO_STREAM_MIN || *stream >= AUDIO_STREAM_PUBLIC_CNT) {
ALOGE("getOutputForAttr(): invalid stream type");
return BAD_VALUE;
}
stream_type_to_audio_attributes(*stream, &attributes);
}
// TODO: check for existing client for this port ID
if (*portId == AUDIO_PORT_HANDLE_NONE) {
*portId = AudioPort::getNextUniqueId();
}
sp<SwAudioOutputDescriptor> desc;
if (mPolicyMixes.getOutputForAttr(attributes, uid, desc) == NO_ERROR) {
ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
if (!audio_has_proportional_frames(config->format)) {
return BAD_VALUE;
}
*stream = streamTypefromAttributesInt(&attributes);
*output = desc->mIoHandle;
ALOGV("getOutputForAttr() returns output %d", *output);
return NO_ERROR;
}
if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
return BAD_VALUE;
}
ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
" session %d selectedDeviceId %d",
attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
session, *selectedDeviceId);
*stream = streamTypefromAttributesInt(&attributes);
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
deviceDesc = mAvailableOutputDevices.getDeviceFromId(*selectedDeviceId);
}
mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
}
ALOGV("getOutputForAttr() device 0x%x, sampling rate %d, format %#x, channel mask %#x, "
"flags %#x",
device, config->sample_rate, config->format, config->channel_mask, *flags);
*output = getOutputForDevice(device, session, *stream, config, flags);
if (*output == AUDIO_IO_HANDLE_NONE) {
mOutputRoutes.removeRoute(session);
return INVALID_OPERATION;
}
DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
*selectedDeviceId = outputDevices.size() > 0 ? outputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
ALOGV(" getOutputForAttr() returns output %d selectedDeviceId %d", *output, *selectedDeviceId);
return NO_ERROR;
}
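// Selects the output used to reach the given device for this request: opens (or reuses) a
// direct output when the flags, format or configuration call for one and a compatible profile
// exists, otherwise falls back to one of the already opened mixed outputs via selectOutput().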
audio_io_handle_t AudioPolicyManager::getOutputForDevice(
audio_devices_t device,
audio_session_t session,
audio_stream_type_t stream,
const audio_config_t *config,
audio_output_flags_t *flags)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
// open a direct output if required by specified parameters
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
// this should normally be set appropriately in the policy configuration file
if ((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
*flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
} else if (/* stream == AUDIO_STREAM_MUSIC && */
*flags == AUDIO_OUTPUT_FLAG_NONE &&
property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
// use DEEP_BUFFER as default output for music stream type
*flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
}
if (stream == AUDIO_STREAM_TTS) {
*flags = AUDIO_OUTPUT_FLAG_TTS;
} else if (stream == AUDIO_STREAM_VOICE_CALL &&
audio_is_linear_pcm(config->format)) {
*flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX |
AUDIO_OUTPUT_FLAG_DIRECT);
ALOGV("Set VoIP and Direct output flags for PCM format");
} else if (device == AUDIO_DEVICE_OUT_TELEPHONY_TX &&
stream == AUDIO_STREAM_MUSIC &&
audio_is_linear_pcm(config->format) &&
isInCall()) {
*flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
}
sp<IOProfile> profile;
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
if (((*flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
goto non_direct_output;
}
// Do not allow offloading if one non offloadable effect is enabled or MasterMono is enabled.
// This prevents creating an offloaded track and tearing it down immediately after start
// when audioflinger detects there is an active non offloadable effect.
// FIXME: We should check the audio session here but we do not have it in this context.
// This may prevent offloading in rare situations where effects are left active by apps
// in the background.
if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
!(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) {
profile = getProfileForDirectOutput(device,
config->sample_rate,
config->format,
config->channel_mask,
(audio_output_flags_t)*flags);
}
if (profile != 0) {
// exclusive outputs for MMAP and Offload are enforced by different session ids.
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (!desc->isDuplicated() && (profile == desc->mProfile)) {
// reuse direct output if currently open by the same client
// and configured with same parameters
if ((config->sample_rate == desc->mSamplingRate) &&
(config->format == desc->mFormat) &&
(config->channel_mask == desc->mChannelMask) &&
(session == desc->mDirectClientSession)) {
desc->mDirectOpenCount++;
ALOGI("getOutputForDevice() reusing direct output %d for session %d",
mOutputs.keyAt(i), session);
return mOutputs.keyAt(i);
}
}
}
if (!profile->canOpenNewIo()) {
goto non_direct_output;
}
sp<SwAudioOutputDescriptor> outputDesc =
new SwAudioOutputDescriptor(profile, mpClientInterface);
DeviceVector outputDevices = mAvailableOutputDevices.getDevicesFromType(device);
String8 address = outputDevices.size() > 0 ? outputDevices.itemAt(0)->mAddress
: String8("");
status = outputDesc->open(config, device, address, stream, *flags, &output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
(config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
(config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
(config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
ALOGV("getOutputForDevice() failed opening direct output: output %d sample rate %d %d,"
"format %d %d, channel mask %04x %04x", output, config->sample_rate,
outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
config->channel_mask, outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
outputDesc->close();
}
// fall back to mixer output if possible when the direct output could not be opened
if (audio_is_linear_pcm(config->format) &&
config->sample_rate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
}
outputDesc->mRefCount[stream] = 0;
outputDesc->mStopTime[stream] = 0;
outputDesc->mDirectOpenCount = 1;
outputDesc->mDirectClientSession = session;
addOutput(output, outputDesc);
mPreviousOutputs = mOutputs;
ALOGV("getOutputForDevice() returns new direct output %d", output);
mpClientInterface->onAudioPortListUpdate();
return output;
}
non_direct_output:
// A request for HW A/V sync cannot fallback to a mixed output because time
// stamps are embedded in audio data
if ((*flags & (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ)) != 0) {
return AUDIO_IO_HANDLE_NONE;
}
// ignoring channel mask due to downmix capability in mixer
// open a non direct output
// for non direct outputs, only PCM is supported
if (audio_is_linear_pcm(config->format)) {
// get which output is suitable for the specified stream. The actual
// routing change will happen when startOutput() will be called
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
output = selectOutput(outputs, *flags, config->format);
}
ALOGW_IF((output == 0), "getOutputForDevice() could not find output for stream %d, "
"sampling rate %d, format %#x, channels %#x, flags %#x",
stream, config->sample_rate, config->format, config->channel_mask, *flags);
return output;
}
audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
audio_output_flags_t flags,
audio_format_t format)
{
// select one output among several that provide a path to a particular device or set of
// devices (the list was previously built by getOutputsForDevice()).
// The priority is as follows:
// 1: the output with the highest number of requested policy flags
// 2: the output with the bit depth the closest to the requested one
// 3: the primary output
// 4: the first output in the list
if (outputs.size() == 0) {
return AUDIO_IO_HANDLE_NONE;
}
if (outputs.size() == 1) {
return outputs[0];
}
int maxCommonFlags = 0;
audio_io_handle_t outputForFlags = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputForPrimary = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputForFormat = AUDIO_IO_HANDLE_NONE;
audio_format_t bestFormat = AUDIO_FORMAT_INVALID;
audio_format_t bestFormatForFlags = AUDIO_FORMAT_INVALID;
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
if (!outputDesc->isDuplicated()) {
// if a valid format is specified, skip output if not compatible
if (format != AUDIO_FORMAT_INVALID) {
if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (format != outputDesc->mFormat) {
continue;
}
} else if (!audio_is_linear_pcm(format)) {
continue;
}
if (AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormat, format)) {
outputForFormat = output;
bestFormat = outputDesc->mFormat;
}
}
int commonFlags = popcount(outputDesc->mProfile->getFlags() & flags);
if (commonFlags >= maxCommonFlags) {
if (commonFlags == maxCommonFlags) {
if (format != AUDIO_FORMAT_INVALID
&& AudioPort::isBetterFormatMatch(
outputDesc->mFormat, bestFormatForFlags, format)) {
outputForFlags = output;
bestFormatForFlags = outputDesc->mFormat;
}
} else {
outputForFlags = output;
maxCommonFlags = commonFlags;
bestFormatForFlags = outputDesc->mFormat;
}
ALOGV("selectOutput() commonFlags for output %d, %04x", output, commonFlags);
}
if (outputDesc->mProfile->getFlags() & AUDIO_OUTPUT_FLAG_PRIMARY) {
outputForPrimary = output;
}
}
}
if (outputForFlags != AUDIO_IO_HANDLE_NONE) {
return outputForFlags;
}
if (outputForFormat != AUDIO_IO_HANDLE_NONE) {
return outputForFormat;
}
if (outputForPrimary != AUDIO_IO_HANDLE_NONE) {
return outputForPrimary;
}
return outputs[0];
}
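// Marks a stream as starting on the given output: updates the session route activity, resolves
// the new device for explicit routes or policy mixes, delegates to startSource() and, when the
// output feeds a MIX_TYPE_RECORDERS re-routing mix, enables the matching remote submix input.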
status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session)
{
ALOGV("startOutput() output %d, stream %d, session %d",
output, stream, session);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
ALOGW("startOutput() unknown output %d", output);
return BAD_VALUE;
}
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
status_t status = outputDesc->start();
if (status != NO_ERROR) {
return status;
}
// Routing?
mOutputRoutes.incRouteActivity(session);
audio_devices_t newDevice;
AudioMix *policyMix = NULL;
const char *address = NULL;
if (outputDesc->mPolicyMix != NULL) {
policyMix = outputDesc->mPolicyMix;
address = policyMix->mDeviceAddress.string();
if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
newDevice = policyMix->mDeviceType;
} else {
newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
}
} else if (mOutputRoutes.getAndClearRouteChanged(session)) {
newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
if (newDevice != outputDesc->device()) {
checkStrategyRoute(getStrategy(stream), output);
}
} else {
newDevice = AUDIO_DEVICE_NONE;
}
uint32_t delayMs = 0;
status = startSource(outputDesc, stream, newDevice, address, &delayMs);
if (status != NO_ERROR) {
mOutputRoutes.decRouteActivity(session);
outputDesc->stop();
return status;
}
// Automatically enable the remote submix input when output is started on a re-routing mix
// of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(newDevice) && policyMix != NULL &&
policyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address,
"remote-submix");
}
if (delayMs != 0) {
usleep(delayMs * 1000);
}
return status;
}
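// Applies the routing, muting and volume handling needed when a stream becomes active on an
// output: increments the stream refcount, forces a device change when required, accounts for
// the latency of other active outputs for sonification, and returns any extra delay in *delayMs.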
status_t AudioPolicyManager::startSource(const sp<AudioOutputDescriptor>& outputDesc,
audio_stream_type_t stream,
audio_devices_t device,
const char *address,
uint32_t *delayMs)
{
// cannot start playback of STREAM_TTS if any other output is being used
uint32_t beaconMuteLatency = 0;
*delayMs = 0;
if (stream == AUDIO_STREAM_TTS) {
ALOGV("\t found BEACON stream");
if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
return INVALID_OPERATION;
} else {
beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
}
} else {
// some playback other than beacon starts
beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
}
// force device change if the output is inactive and no audio patch is already present.
// check active before incrementing usage count
bool force = !outputDesc->isActive() &&
(outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
// requiresMuteCheck is false when we can bypass mute strategy.
// It covers a common case when there is no materially active audio
// and muting would result in unnecessary delay and dropped audio.
const uint32_t outputLatencyMs = outputDesc->latency();
bool requiresMuteCheck = outputDesc->isActive(outputLatencyMs * 2); // account for drain
// increment usage count for this stream on the requested output:
// NOTE that the usage count is the same for duplicated output and hardware output which is
// necessary for a correct control of hardware output routing by startOutput() and stopOutput()
outputDesc->changeRefCount(stream, 1);
if (stream == AUDIO_STREAM_MUSIC) {
selectOutputForMusicEffects();
}
if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
// starting an output being rerouted?
if (device == AUDIO_DEVICE_NONE) {
device = getNewOutputDevice(outputDesc, false /*fromCache*/);
}
routing_strategy strategy = getStrategy(stream);
bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
(strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
(beaconMuteLatency > 0);
uint32_t waitMs = beaconMuteLatency;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc) {
// An output has a shared device if
// - managed by the same hw module
// - supports the currently selected device
const bool sharedDevice = outputDesc->sharesHwModuleWith(desc)
&& (desc->supportedDevices() & device) != AUDIO_DEVICE_NONE;
// force a device change if any other output is:
// - managed by the same hw module
// - supports currently selected device
// - has a current device selection that differs from selected device.
// - has an active audio patch
// In this case, the audio HAL must receive the new device selection so that it can
// change the device currently selected by the other output.
if (sharedDevice &&
desc->device() != device &&
desc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
force = true;
}
// wait for audio on other active outputs to be presented when starting
// a notification so that the audio focus effect can propagate, or when a mute/unmute
// event occurred for the beacon
const uint32_t latencyMs = desc->latency();
const bool isActive = desc->isActive(latencyMs * 2); // account for drain
if (shouldWait && isActive && (waitMs < latencyMs)) {
waitMs = latencyMs;
}
// Require mute check if another output is on a shared device
// and currently active to have proper drain and avoid pops.
// Note restoring AudioTracks onto this output needs to invoke
// a volume ramp if there is no mute.
requiresMuteCheck |= sharedDevice && isActive;
}
}
const uint32_t muteWaitMs =
setOutputDevice(outputDesc, device, force, 0, NULL, address, requiresMuteCheck);
// handle special case for sonification while in call
if (isInCall()) {
handleIncallSonification(stream, true, false);
}
// apply volume rules for current stream and device if necessary
checkAndSetVolume(stream,
mVolumeCurves->getVolumeIndex(stream, outputDesc->device()),
outputDesc,
outputDesc->device());
// update the outputs if starting an output with a stream that can affect notification
// routing
handleNotificationRoutingForStream(stream);
// force reevaluating accessibility routing when ringtone or alarm starts
if (strategy == STRATEGY_SONIFICATION) {
mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
}
if (waitMs > muteWaitMs) {
*delayMs = waitMs - muteWaitMs;
}
// FIXME: A device change (muteWaitMs > 0) likely introduces a volume change.
// A volume change enacted by APM with 0 delay is not synchronous, as it goes
// via AudioCommandThread to AudioFlinger. Hence it is possible that the volume
// change occurs after the MixerThread starts and causes a stream volume
// glitch.
//
// We do not introduce additional delay here.
}
if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
setStrategyMute(STRATEGY_SONIFICATION, true, outputDesc);
}
return NO_ERROR;
}
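// Counterpart of startOutput(): disables the remote submix input when the last reference on a
// MIX_TYPE_RECORDERS re-routing mix stops, updates the session route activity and delegates the
// refcount and routing update to stopSource().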
status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session)
{
ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
ALOGW("stopOutput() unknown output %d", output);
return BAD_VALUE;
}
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
if (outputDesc->mRefCount[stream] == 1) {
// Automatically disable the remote submix input when output is stopped on a
// re-routing mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(outputDesc->mDevice) &&
outputDesc->mPolicyMix != NULL &&
outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
outputDesc->mPolicyMix->mDeviceAddress,
"remote-submix");
}
}
// Routing?
bool forceDeviceUpdate = false;
if (outputDesc->mRefCount[stream] > 0) {
int activityCount = mOutputRoutes.decRouteActivity(session);
forceDeviceUpdate = (mOutputRoutes.hasRoute(session) && (activityCount == 0));
if (forceDeviceUpdate) {
checkStrategyRoute(getStrategy(stream), AUDIO_IO_HANDLE_NONE);
}
}
status_t status = stopSource(outputDesc, stream, forceDeviceUpdate);
if (status == NO_ERROR ) {
outputDesc->stop();
}
return status;
}
status_t AudioPolicyManager::stopSource(const sp<AudioOutputDescriptor>& outputDesc,
audio_stream_type_t stream,
bool forceDeviceUpdate)
{
// always handle stream stop, check which stream type is stopping
handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
// handle special case for sonification while in call
if (isInCall()) {
handleIncallSonification(stream, false, false);
}
if (outputDesc->mRefCount[stream] > 0) {
// decrement usage count of this stream on the output
outputDesc->changeRefCount(stream, -1);
// store time at which the stream was stopped - see isStreamActive()
if (outputDesc->mRefCount[stream] == 0 || forceDeviceUpdate) {
outputDesc->mStopTime[stream] = systemTime();
audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
// delay the device switch by twice the latency because stopOutput() is executed when
// the track stop() command is received and at that time the audio track buffer can
// still contain data that needs to be drained. The latency only covers the audio HAL
// and kernel buffers. Also the latency does not always include additional delay in the
// audio path (audio DSP, CODEC ...)
setOutputDevice(outputDesc, newDevice, false, outputDesc->latency()*2);
// force restoring the device selection on other active outputs if it differs from the
// one being selected for this output
uint32_t delayMs = outputDesc->latency()*2;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
if (desc != outputDesc &&
desc->isActive() &&
outputDesc->sharesHwModuleWith(desc) &&
(newDevice != desc->device())) {
audio_devices_t newDevice2 = getNewOutputDevice(desc, false /*fromCache*/);
bool force = desc->device() != newDevice2;
setOutputDevice(desc,
newDevice2,
force,
delayMs);
// re-apply device specific volume if not done by setOutputDevice()
if (!force) {
applyStreamVolumes(desc, newDevice2, delayMs);
}
}
}
// update the outputs if stopping one with a stream that can affect notification routing
handleNotificationRoutingForStream(stream);
}
if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE &&
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
setStrategyMute(STRATEGY_SONIFICATION, false, outputDesc);
}
if (stream == AUDIO_STREAM_MUSIC) {
selectOutputForMusicEffects();
}
return NO_ERROR;
} else {
ALOGW("stopOutput() refcount is already 0");
return INVALID_OPERATION;
}
}
void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
audio_stream_type_t stream __unused,
audio_session_t session __unused)
{
ALOGV("releaseOutput() %d", output);
ssize_t index = mOutputs.indexOfKey(output);
if (index < 0) {
ALOGW("releaseOutput() releasing unknown output %d", output);
return;
}
// Routing
mOutputRoutes.removeRoute(session);
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index);
if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (desc->mDirectOpenCount <= 0) {
ALOGW("releaseOutput() invalid open count %d for output %d",
desc->mDirectOpenCount, output);
return;
}
if (--desc->mDirectOpenCount == 0) {
closeOutput(output);
mpClientInterface->onAudioPortListUpdate();
}
}
}
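// Resolves the capture device and input stream for the given attributes: handles explicit
// routing, policy mixes, remote submix addresses and MMAP input reuse, then delegates the
// profile selection and input opening to getInputForDevice().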
status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
audio_io_handle_t *input,
audio_session_t session,
uid_t uid,
const audio_config_base_t *config,
audio_input_flags_t flags,
audio_port_handle_t *selectedDeviceId,
input_type_t *inputType,
audio_port_handle_t *portId)
{
ALOGV("getInputForAttr() source %d, sampling rate %d, format %#x, channel mask %#x,"
"session %d, flags %#x",
attr->source, config->sample_rate, config->format, config->channel_mask, session, flags);
status_t status = NO_ERROR;
// handle legacy remote submix case where the address was not always specified
String8 address = String8("");
audio_source_t halInputSource;
audio_source_t inputSource = attr->source;
AudioMix *policyMix = NULL;
DeviceVector inputDevices;
if (inputSource == AUDIO_SOURCE_DEFAULT) {
inputSource = AUDIO_SOURCE_MIC;
}
// Explicit routing?
sp<DeviceDescriptor> deviceDesc;
if (*selectedDeviceId != AUDIO_PORT_HANDLE_NONE) {
deviceDesc = mAvailableInputDevices.getDeviceFromId(*selectedDeviceId);
}
mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
// special case for mmap capture: if an input IO handle is specified, we reuse this input if
// possible
if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) == AUDIO_INPUT_FLAG_MMAP_NOIRQ &&
*input != AUDIO_IO_HANDLE_NONE) {
ssize_t index = mInputs.indexOfKey(*input);
if (index < 0) {
ALOGW("getInputForAttr() unknown MMAP input %d", *input);
status = BAD_VALUE;
goto error;
}
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("getInputForAttr() unknown session %d on input %d", session, *input);
status = BAD_VALUE;
goto error;
}
// For MMAP mode, the first call to getInputForAttr() is made on behalf of audioflinger.
// The second call is for the first active client and sets the UID. Any further call
// corresponds to a new client and is only permitted from the same UID.
// If the first UID is silenced, allow a new UID connection and replace with new UID
if (audioSession->openCount() == 1) {
audioSession->setUid(uid);
} else if (audioSession->uid() != uid) {
if (!audioSession->isSilenced()) {
ALOGW("getInputForAttr() bad uid %d for session %d uid %d",
uid, session, audioSession->uid());
status = INVALID_OPERATION;
goto error;
}
audioSession->setUid(uid);
audioSession->setSilenced(false);
}
audioSession->changeOpenCount(1);
*inputType = API_INPUT_LEGACY;
if (*portId == AUDIO_PORT_HANDLE_NONE) {
*portId = AudioPort::getNextUniqueId();
}
inputDevices = mAvailableInputDevices.getDevicesFromType(inputDesc->mDevice);
*selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session);
return NO_ERROR;
}
*input = AUDIO_IO_HANDLE_NONE;
*inputType = API_INPUT_INVALID;
halInputSource = inputSource;
// TODO: check for existing client for this port ID
if (*portId == AUDIO_PORT_HANDLE_NONE) {
*portId = AudioPort::getNextUniqueId();
}
audio_devices_t device;
if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
status = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
if (status != NO_ERROR) {
goto error;
}
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
address = String8(attr->tags + strlen("addr="));
} else {
device = getDeviceAndMixForInputSource(inputSource, &policyMix);
if (device == AUDIO_DEVICE_NONE) {
ALOGW("getInputForAttr() could not find device for source %d", inputSource);
status = BAD_VALUE;
goto error;
}
if (policyMix != NULL) {
address = policyMix->mDeviceAddress;
if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
// there is an external policy, but this input is attached to a mix of recorders,
// meaning it receives audio injected into the framework, so the recorder doesn't
// know about it and is therefore considered "legacy"
*inputType = API_INPUT_LEGACY;
} else {
// recording a mix of players defined by an external policy, we're rerouting for
// an external policy
*inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
}
} else if (audio_is_remote_submix_device(device)) {
address = String8("0");
*inputType = API_INPUT_MIX_CAPTURE;
} else if (device == AUDIO_DEVICE_IN_TELEPHONY_RX) {
*inputType = API_INPUT_TELEPHONY_RX;
} else {
*inputType = API_INPUT_LEGACY;
}
}
*input = getInputForDevice(device, address, session, uid, inputSource,
config, flags,
policyMix);
if (*input == AUDIO_IO_HANDLE_NONE) {
status = INVALID_OPERATION;
goto error;
}
inputDevices = mAvailableInputDevices.getDevicesFromType(device);
*selectedDeviceId = inputDevices.size() > 0 ? inputDevices.itemAt(0)->getId()
: AUDIO_PORT_HANDLE_NONE;
ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d",
*input, *inputType, *selectedDeviceId);
return NO_ERROR;
error:
mInputRoutes.removeRoute(session);
return status;
}
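// Finds an input profile compatible with the requested device and configuration, progressively
// relaxing the input flags when no exact match exists, then opens a new input on that profile
// and registers the audio session on it.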
audio_io_handle_t AudioPolicyManager::getInputForDevice(audio_devices_t device,
String8 address,
audio_session_t session,
uid_t uid,
audio_source_t inputSource,
const audio_config_base_t *config,
audio_input_flags_t flags,
AudioMix *policyMix)
{
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
audio_source_t halInputSource = inputSource;
bool isSoundTrigger = false;
if (inputSource == AUDIO_SOURCE_HOTWORD) {
ssize_t index = mSoundTriggerSessions.indexOfKey(session);
if (index >= 0) {
input = mSoundTriggerSessions.valueFor(session);
isSoundTrigger = true;
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
ALOGV("SoundTrigger capture on session %d input %d", session, input);
} else {
halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
}
} else if (inputSource == AUDIO_SOURCE_VOICE_COMMUNICATION &&
audio_is_linear_pcm(config->format)) {
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX);
}
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
// sampling rate and flags may be updated by getInputProfile
uint32_t profileSamplingRate = (config->sample_rate == 0) ?
SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
audio_format_t profileFormat;
audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
for (;;) {
profileFormat = config->format; // reset each time through loop, in case it is updated
profile = getInputProfile(device, address,
profileSamplingRate, profileFormat, profileChannelMask,
profileFlags);
if (profile != 0) {
break; // success
} else if (profileFlags & AUDIO_INPUT_FLAG_RAW) {
profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry
} else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
} else { // fail
ALOGW("getInputForDevice() could not find profile for device 0x%X, "
"sampling rate %u, format %#x, channel mask 0x%X, flags %#x",
device, config->sample_rate, config->format, config->channel_mask, flags);
return input;
}
}
// Pick input sampling rate if not specified by client
uint32_t samplingRate = config->sample_rate;
if (samplingRate == 0) {
samplingRate = profileSamplingRate;
}
if (profile->getModuleHandle() == 0) {
ALOGE("getInputForDevice(): HW module %s not opened", profile->getModuleName());
return input;
}
sp<AudioSession> audioSession = new AudioSession(session,
inputSource,
config->format,
samplingRate,
config->channel_mask,
flags,
uid,
isSoundTrigger,
policyMix, mpClientInterface);
// FIXME: disable concurrent capture until UI is ready
#if 0
// reuse an open input if possible
sp<AudioInputDescriptor> reusedInputDesc;
for (size_t i = 0; i < mInputs.size(); i++) {
sp<AudioInputDescriptor> desc = mInputs.valueAt(i);
// reuse input if:
// - it shares the same profile
// AND
// - it is not a reroute submix input
// AND
// - it is: not used for sound trigger
// OR
// used for sound trigger and all clients use the same session ID
//
if ((profile == desc->mProfile) &&
(isSoundTrigger == desc->isSoundTrigger()) &&
!is_virtual_input_device(device)) {
sp<AudioSession> as = desc->getAudioSession(session);
if (as != 0) {
// do not allow unmatching properties on same session
if (as->matches(audioSession)) {
as->changeOpenCount(1);
} else {
ALOGW("getInputForDevice() record with different attributes"
" exists for session %d", session);
continue;
}
} else if (isSoundTrigger) {
continue;
}
// Reuse the already opened input stream on this profile if:
// - the new capture source is background OR
// - the path requested configurations match OR
// - the new source priority is less than the highest source priority on this input
// If the input stream cannot be reused, close it before opening a new stream
// on the same profile for the new client so that the requested path configuration
// can be selected.
if (!isConcurrentSource(inputSource) &&
((desc->mSamplingRate != samplingRate ||
desc->mChannelMask != config->channel_mask ||
!audio_formats_match(desc->mFormat, config->format)) &&
(source_priority(desc->getHighestPrioritySource(false /*activeOnly*/)) <
source_priority(inputSource)))) {
reusedInputDesc = desc;
continue;
} else {
desc->addAudioSession(session, audioSession);
ALOGV("%s: reusing input %d", __FUNCTION__, mInputs.keyAt(i));
return mInputs.keyAt(i);
}
}
}
if (reusedInputDesc != 0) {
AudioSessionCollection sessions = reusedInputDesc->getAudioSessions(false /*activeOnly*/);
for (size_t j = 0; j < sessions.size(); j++) {
audio_session_t currentSession = sessions.keyAt(j);
stopInput(reusedInputDesc->mIoHandle, currentSession);
releaseInput(reusedInputDesc->mIoHandle, currentSession);
}
}
#endif
if (!profile->canOpenNewIo()) {
return AUDIO_IO_HANDLE_NONE;
}
sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface);
audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER;
lConfig.sample_rate = profileSamplingRate;
lConfig.channel_mask = profileChannelMask;
lConfig.format = profileFormat;
if (address == "") {
DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(device);
// the inputs vector must be of size >= 1, but we don't want to crash here
address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress : String8("");
}
status_t status = inputDesc->open(&lConfig, device, address,
halInputSource, profileFlags, &input);
// only accept input with the exact requested set of parameters
if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE ||
(profileSamplingRate != lConfig.sample_rate) ||
!audio_formats_match(profileFormat, lConfig.format) ||
(profileChannelMask != lConfig.channel_mask)) {
ALOGW("getInputForDevice() failed opening input: sampling rate %d"
", format %#x, channel mask %#x",
profileSamplingRate, profileFormat, profileChannelMask);
if (input != AUDIO_IO_HANDLE_NONE) {
inputDesc->close();
}
return AUDIO_IO_HANDLE_NONE;
}
inputDesc->mPolicyMix = policyMix;
inputDesc->addAudioSession(session, audioSession);
addInput(input, inputDesc);
mpClientInterface->onAudioPortListUpdate();
return input;
}
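// Capture sources that may remain active concurrently with another capture.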
//static
bool AudioPolicyManager::isConcurrentSource(audio_source_t source)
{
return (source == AUDIO_SOURCE_HOTWORD) ||
(source == AUDIO_SOURCE_VOICE_RECOGNITION) ||
(source == AUDIO_SOURCE_FM_TUNER);
}
bool AudioPolicyManager::isConcurentCaptureAllowed(const sp<AudioInputDescriptor>& inputDesc,
const sp<AudioSession>& audioSession)
{
// Do not allow capture if an active voice call is using a software patch and
// the call TX source device is on the same HW module.
// FIXME: would be better to refine to only inputs whose profile connects to the
// call TX device but this information is not in the audio patch
if (mCallTxPatch != 0 &&
inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
return false;
}
// starting concurrent capture is allowed if:
// 1) capturing for re-routing (virtual input device)
// 2) capturing from a concurrent source (HOTWORD, VOICE_RECOGNITION or FM TUNER)
// 3) all other active captures are either for re-routing or from a concurrent source
if (is_virtual_input_device(inputDesc->mDevice) ||
isConcurrentSource(audioSession->inputSource())) {
return true;
}
for (const auto& activeInput : mInputs.getActiveInputs()) {
if (!isConcurrentSource(activeInput->inputSource(true)) &&
!is_virtual_input_device(activeInput->mDevice)) {
return false;
}
}
return true;
}
// FIXME: remove when concurrent capture is ready. This is a hack to work around bug b/63083537.
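// Queries the sound trigger HAL modules once and caches whether all of them report
// support for concurrent capture.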
bool AudioPolicyManager::soundTriggerSupportsConcurrentCapture() {
if (!mHasComputedSoundTriggerSupportsConcurrentCapture) {
bool soundTriggerSupportsConcurrentCapture = false;
unsigned int numModules = 0;
struct sound_trigger_module_descriptor* nModules = NULL;
status_t status = SoundTrigger::listModules(nModules, &numModules);
if (status == NO_ERROR && numModules != 0) {
nModules = (struct sound_trigger_module_descriptor*) calloc(
numModules, sizeof(struct sound_trigger_module_descriptor));
if (nModules == NULL) {
// We failed to allocate the buffer, so just say no for now, and hope that we have
// more memory the next time this function is called.
ALOGE("Failed to allocate buffer for module descriptors");
return false;
}
status = SoundTrigger::listModules(nModules, &numModules);
if (status == NO_ERROR) {
soundTriggerSupportsConcurrentCapture = true;
for (size_t i = 0; i < numModules; ++i) {
soundTriggerSupportsConcurrentCapture &=
nModules[i].properties.concurrent_capture;
}
}
free(nModules);
}
mSoundTriggerSupportsConcurrentCapture = soundTriggerSupportsConcurrentCapture;
mHasComputedSoundTriggerSupportsConcurrentCapture = true;
}
return mSoundTriggerSupportsConcurrentCapture;
}
status_t AudioPolicyManager::startInput(audio_io_handle_t input,
audio_session_t session,
bool silenced,
concurrency_type__mask_t *concurrency)
{
ALOGV("AudioPolicyManager::startInput(input:%d, session:%d, silenced:%d, concurrency:%d)",
input, session, silenced, *concurrency);
*concurrency = API_INPUT_CONCURRENCY_NONE;
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("startInput() unknown input %d", input);
return BAD_VALUE;
}
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("startInput() unknown session %d on input %d", session, input);
return BAD_VALUE;
}
// FIXME: disable concurrent capture until UI is ready
#if 0
if (!isConcurentCaptureAllowed(inputDesc, audioSession)) {
ALOGW("startInput(%d) failed: other input already started", input);
return INVALID_OPERATION;
}
if (isInCall()) {
*concurrency |= API_INPUT_CONCURRENCY_CALL;
}
if (mInputs.activeInputsCountOnDevices() != 0) {
*concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
}
#else
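// Concurrent capture is disabled: reject the start request if it conflicts with an
// active call TX patch or another active capture, preempting HOTWORD captures where
// allowed.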
if (!is_virtual_input_device(inputDesc->mDevice)) {
if (mCallTxPatch != 0 &&
inputDesc->getModuleHandle() == mCallTxPatch->mPatch.sources[0].ext.device.hw_module) {
ALOGW("startInput(%d) failed: call in progress", input);
*concurrency |= API_INPUT_CONCURRENCY_CALL;
return INVALID_OPERATION;
}
Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs();
// If a UID is idle and records silence and another not silenced recording starts
// from another UID (idle or active) we stop the current idle UID recording in
// favor of the new one - "There can be only one" TM
if (!silenced) {
for (const auto& activeDesc : activeInputs) {
if ((audioSession->flags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
activeDesc->getId() == inputDesc->getId()) {
continue;
}
AudioSessionCollection activeSessions = activeDesc->getAudioSessions(
true /*activeOnly*/);
sp<AudioSession> activeSession = activeSessions.valueAt(0);
if (activeSession->isSilenced()) {
audio_io_handle_t activeInput = activeDesc->mIoHandle;
audio_session_t activeSessionId = activeSession->session();
stopInput(activeInput, activeSessionId);
releaseInput(activeInput, activeSessionId);
ALOGV("startInput(%d) stopping silenced input %d", input, activeInput);
activeInputs = mInputs.getActiveInputs();
}
}
}
for (const auto& activeDesc : activeInputs) {
if ((audioSession->flags() & AUDIO_INPUT_FLAG_MMAP_NOIRQ) != 0 &&
activeDesc->getId() == inputDesc->getId()) {
continue;
}
audio_source_t activeSource = activeDesc->inputSource(true);
if (audioSession->inputSource() == AUDIO_SOURCE_HOTWORD) {
if (activeSource == AUDIO_SOURCE_HOTWORD) {
if (activeDesc->hasPreemptedSession(session)) {
ALOGW("startInput(%d) failed for HOTWORD: "
"other input %d already started for HOTWORD",
input, activeDesc->mIoHandle);
*concurrency |= API_INPUT_CONCURRENCY_HOTWORD;
return INVALID_OPERATION;
}
} else {
ALOGV("startInput(%d) failed for HOTWORD: other input %d already started",
input, activeDesc->mIoHandle);
*concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
return INVALID_OPERATION;
}
} else {
if (activeSource != AUDIO_SOURCE_HOTWORD) {
ALOGW("startInput(%d) failed: other input %d already started",
input, activeDesc->mIoHandle);
*concurrency |= API_INPUT_CONCURRENCY_CAPTURE;
return INVALID_OPERATION;
}
}
}
// We only need to check if the sound trigger session supports concurrent capture if the
// input is also a sound trigger input. Otherwise, we should preempt any hotword stream
// that's running.
const bool allowConcurrentWithSoundTrigger =
inputDesc->isSoundTrigger() ? soundTriggerSupportsConcurrentCapture() : false;
// if capture is allowed, preempt currently active HOTWORD captures
for (const auto& activeDesc : activeInputs) {
if (allowConcurrentWithSoundTrigger && activeDesc->isSoundTrigger()) {
continue;
}
audio_source_t activeSource = activeDesc->inputSource(true);
if (activeSource == AUDIO_SOURCE_HOTWORD) {
AudioSessionCollection activeSessions =
activeDesc->getAudioSessions(true /*activeOnly*/);
audio_session_t activeSession = activeSessions.keyAt(0);
audio_io_handle_t activeHandle = activeDesc->mIoHandle;
SortedVector<audio_session_t> sessions = activeDesc->getPreemptedSessions();
*concurrency |= API_INPUT_CONCURRENCY_PREEMPT;
sessions.add(activeSession);
inputDesc->setPreemptedSessions(sessions);
stopInput(activeHandle, activeSession);
releaseInput(activeHandle, activeSession);
ALOGV("startInput(%d) for HOTWORD preempting HOTWORD input %d",
input, activeDesc->mIoHandle);
}
}
}
#endif
// Make sure we start with the correct silence state
audioSession->setSilenced(silenced);
// increment activity count before calling getNewInputDevice() below as only active sessions
// are considered for device selection
audioSession->changeActiveCount(1);
// Routing?
mInputRoutes.incRouteActivity(session);
if (audioSession->activeCount() == 1 || mInputRoutes.getAndClearRouteChanged(session)) {
// indicate active capture to sound trigger service if starting capture from a mic on
// primary HW module
audio_devices_t device = getNewInputDevice(inputDesc);
setInputDevice(input, device, true /* force */);
status_t status = inputDesc->start();
if (status != NO_ERROR) {
mInputRoutes.decRouteActivity(session);
audioSession->changeActiveCount(-1);
return status;
}
if (inputDesc->getAudioSessionCount(true/*activeOnly*/) == 1) {
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
MIX_STATE_MIXING);
}
audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
mInputs.activeInputsCountOnDevices(primaryInputDevices) == 1) {
SoundTrigger::setCaptureState(true);
}
// automatically enable the remote submix output when input is started if not
// used by a policy mix of type MIX_TYPE_RECORDERS
// For remote submix (a virtual device), we open only one input per capture request.
if (audio_is_remote_submix_device(inputDesc->mDevice)) {
String8 address = String8("");
if (inputDesc->mPolicyMix == NULL) {
address = String8("0");
} else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
address = inputDesc->mPolicyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address, "remote-submix");
}
}
}
}
ALOGV("AudioPolicyManager::startInput() input source = %d", audioSession->inputSource());
return NO_ERROR;
}
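// Decrements the session activity count on the input. When the input becomes
// inactive, routing is reset, dynamic policy mixes are notified and the remote
// submix output is disconnected if it was enabled by startInput().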
status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
audio_session_t session)
{
ALOGV("stopInput() input %d", input);
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("stopInput() unknown input %d", input);
return BAD_VALUE;
}
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("stopInput() unknown session %d on input %d", session, input);
return BAD_VALUE;
}
if (audioSession->activeCount() == 0) {
ALOGW("stopInput() input %d already stopped", input);
return INVALID_OPERATION;
}
audioSession->changeActiveCount(-1);
// Routing?
mInputRoutes.decRouteActivity(session);
if (audioSession->activeCount() == 0) {
inputDesc->stop();
if (inputDesc->isActive()) {
setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
} else {
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
MIX_STATE_IDLE);
}
// automatically disable the remote submix output when input is stopped if not
// used by a policy mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(inputDesc->mDevice)) {
String8 address = String8("");
if (inputDesc->mPolicyMix == NULL) {
address = String8("0");
} else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
address = inputDesc->mPolicyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
address, "remote-submix");
}
}
audio_devices_t device = inputDesc->mDevice;
resetInputDevice(input);
// indicate inactive capture to sound trigger service if stopping capture from a mic on
// primary HW module
audio_devices_t primaryInputDevices = availablePrimaryInputDevices();
if (((device & primaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) {
SoundTrigger::setCaptureState(false);
}
inputDesc->clearPreemptedSessions();
}
}
return NO_ERROR;
}
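// Decrements the session open count on the input; the input is closed once no
// session keeps it open.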
void AudioPolicyManager::releaseInput(audio_io_handle_t input,
audio_session_t session)
{
ALOGV("releaseInput() %d", input);
ssize_t index = mInputs.indexOfKey(input);
if (index < 0) {
ALOGW("releaseInput() releasing unknown input %d", input);
return;
}
// Routing
mInputRoutes.removeRoute(session);
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
ALOG_ASSERT(inputDesc != 0);
sp<AudioSession> audioSession = inputDesc->getAudioSession(session);
if (audioSession == 0) {
ALOGW("releaseInput() unknown session %d on input %d", session, input);
return;
}
if (audioSession->openCount() == 0) {
ALOGW("releaseInput() invalid open count %d on session %d",
audioSession->openCount(), session);
return;
}
if (audioSession->changeOpenCount(-1) == 0) {
inputDesc->removeAudioSession(session);
}
if (inputDesc->getOpenRefCount() > 0) {
ALOGV("releaseInput() exit > 0");
return;
}
closeInput(input);
mpClientInterface->onAudioPortListUpdate();
ALOGV("releaseInput() exit");
}
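// Closes all inputs, releasing any audio patches attached to them, and clears
// input routes and the sound trigger capture state.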
void AudioPolicyManager::closeAllInputs() {
bool patchRemoved = false;
for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
(void) /*status_t status*/ mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
mAudioPatches.removeItemsAt(patch_index);
patchRemoved = true;
}
inputDesc->close();
}
mInputRoutes.clear();
mInputs.clear();
SoundTrigger::setCaptureState(false);
nextAudioPortGeneration();
if (patchRemoved) {
mpClientInterface->onAudioPatchListUpdate();
}
}
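// Initializes the volume index range for this stream and for all streams sharing
// the same volume curves.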
void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
int indexMin,
int indexMax)
{
ALOGV("initStreamVolume() stream %d, min %d, max %d", stream, indexMin, indexMax);
mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
// initialize other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
}
}
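// Stores the new volume index for the stream (and streams sharing its volume
// curves) on the requested device, then applies it to every output where one of
// these streams is active and routed to that device.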
status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
int index,
audio_devices_t device)
{
// VOICE_CALL stream has minVolumeIndex > 0 but can be muted directly by an
// app that has MODIFY_PHONE_STATE permission.
if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
!(stream == AUDIO_STREAM_VOICE_CALL && index == 0)) ||
(index > mVolumeCurves->getVolumeIndexMax(stream))) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
// Force max volume if stream cannot be muted
if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
stream, device, index);
// update other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
}
// update volume on all outputs and streams matching the following:
// - The requested stream (or a stream matching for volume control) is active on the output
// - The device (or devices) selected by the strategy corresponding to this stream includes
// the requested device
// - For non default requested device, currently selected device on the output is either the
// requested device or one of the devices selected by the strategy
// - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if
// no specific device volume value exists for currently selected device.
status_t status = NO_ERROR;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
(isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curStreamDevice = Volume::getDeviceForVolume(getDeviceForStrategy(
curStrategy, false /*fromCache*/));
if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
((curStreamDevice & device) == 0)) {
continue;
}
bool applyVolume;
if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
curStreamDevice |= device;
applyVolume = (curDevice & curStreamDevice) != 0;
} else {
applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
stream, curStreamDevice);
}
if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
status_t volStatus =
checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
(stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
}
}
}
}
return status;
}
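// Returns the current volume index for the stream on the given device, after
// mapping the device to the group used for volume curves.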
status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
int *index,
audio_devices_t device)
{
if (index == NULL) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
// if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device corresponding to
// the strategy the stream belongs to.
if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
}
device = Volume::getDeviceForVolume(device);
*index = mVolumeCurves->getVolumeIndex(stream, device);
ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
return NO_ERROR;
}
audio_io_handle_t AudioPolicyManager::selectOutputForMusicEffects()
{
// select one output among several suitable for global effects.
// The priority is as follows:
// 1: An offloaded output. If the effect ends up not being offloadable,
// AudioFlinger will invalidate the track and the offloaded output
// will be closed causing the effect to be moved to a PCM output.
// 2: A deep buffer output
// 3: The primary output
// 4: The first output in the list
routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
if (outputs.size() == 0) {
return AUDIO_IO_HANDLE_NONE;
}
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
bool activeOnly = true;
while (output == AUDIO_IO_HANDLE_NONE) {
audio_io_handle_t outputOffloaded = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputDeepBuffer = AUDIO_IO_HANDLE_NONE;
audio_io_handle_t outputPrimary = AUDIO_IO_HANDLE_NONE;
for (audio_io_handle_t output : outputs) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
if (activeOnly && !desc->isStreamActive(AUDIO_STREAM_MUSIC)) {
continue;
}
ALOGV("selectOutputForMusicEffects activeOnly %d output %d flags 0x%08x",
activeOnly, output, desc->mFlags);
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
outputOffloaded = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
outputDeepBuffer = output;
}
if ((desc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) != 0) {
outputPrimary = output;
}
}
if (outputOffloaded != AUDIO_IO_HANDLE_NONE) {
output = outputOffloaded;
} else if (outputDeepBuffer != AUDIO_IO_HANDLE_NONE) {
output = outputDeepBuffer;
} else if (outputPrimary != AUDIO_IO_HANDLE_NONE) {
output = outputPrimary;
} else {
output = outputs[0];
}
activeOnly = false;
}
if (output != mMusicEffectOutput) {
mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mMusicEffectOutput, output);
mMusicEffectOutput = output;
}
ALOGV("selectOutputForMusicEffects selected output %d", output);
return output;
}
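// Global music effects follow the output chosen by selectOutputForMusicEffects().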
audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc __unused)
{
return selectOutputForMusicEffects();
}
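// Registers an effect on an existing output or input; the io handle must be known
// to the policy manager.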
status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
int session,
int id)
{
ssize_t index = mOutputs.indexOfKey(io);
if (index < 0) {
index = mInputs.indexOfKey(io);
if (index < 0) {
ALOGW("registerEffect() unknown io %d", io);
return INVALID_OPERATION;
}
}
return mEffects.registerEffect(desc, io, strategy, session, id);
}
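// A stream is reported active if it, or any stream sharing its volume curves, has
// been active on an output within the past inPastMs milliseconds.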
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
bool active = false;
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT && !active; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
}
return active;
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
{
return mOutputs.isStreamActiveRemotely(stream, inPastMs);
}
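// Returns true if at least one input is currently capturing from the given source.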
bool AudioPolicyManager::isSourceActive(audio_source_t source) const
{
for (size_t i = 0; i < mInputs.size(); i++) {
const sp<AudioInputDescriptor> inputDescriptor = mInputs.valueAt(i);
if (inputDescriptor->isSourceActive(source)) {
return true;
}
}
return false;
}
// Register a list of custom mixes with their attributes and format.
// When a mix is registered, corresponding input and output profiles are
// added to the remote submix hw module. The profile contains only the
// parameters (sampling rate, format...) specified by the mix.
// The corresponding input remote submix device is also connected.
//
// When a remote submix device is connected, the address is checked to select the
// appropriate profile and the corresponding input or output stream is opened.
//
// When capture starts, getInputForAttr() will:
// - 1 look for a mix matching the address passed in attributes tags if any
// - 2 if none found, getDeviceForInputSource() will:
// - 2.1 look for a mix matching the attributes source
// - 2.2 if none found, default to device selection by policy rules
// At this time, the corresponding output remote submix device is also connected,
// and active playback use cases can be transferred to this mix if needed when
// reconnecting after AudioTracks are invalidated
//
// When playback starts, getOutputForAttr() will:
// - 1 look for a mix matching the address passed in attributes tags if any
// - 2 if none found, look for a mix matching the attributes usage
// - 3 if none found, default to device and output selection by policy rules.
status_t AudioPolicyManager::registerPolicyMixes(const Vector<AudioMix>& mixes)
{
ALOGV("registerPolicyMixes() %zu mix(es)", mixes.size());
status_t res = NO_ERROR;
sp<HwModule> rSubmixModule;
// examine each mix's route type
for (size_t i = 0; i < mixes.size(); i++) {
// we only support MIX_ROUTE_FLAG_LOOP_BACK or MIX_ROUTE_FLAG_RENDER, not the combination
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_ALL) == MIX_ROUTE_FLAG_ALL) {
res = INVALID_OPERATION;
break;
}
if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
if (rSubmixModule == 0) {
rSubmixModule = mHwModules.getModuleFromName(
AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX);
if (rSubmixModule == 0) {
ALOGE(" Unable to find audio module for submix, aborting mix %zu registration",
i);
res = INVALID_OPERATION;
break;
}
}
String8 address = mixes[i].mDeviceAddress;
if (mPolicyMixes.registerMix(address, mixes[i], 0 /*output desc*/) != NO_ERROR) {
ALOGE(" Error registering mix %zu for address %s", i, address.string());
res = INVALID_OPERATION;
break;
}
audio_config_t outputConfig = mixes[i].mFormat;
audio_config_t inputConfig = mixes[i].mFormat;
// NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
// stereo and let audio flinger do the channel conversion if needed.
outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
rSubmixModule->addOutputProfile(address, &outputConfig,
AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
rSubmixModule->addInputProfile(address, &inputConfig,
AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.string(), "remote-submix");
} else {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
address.string(), "remote-submix");
}
} else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
String8 address = mixes[i].mDeviceAddress;
audio_devices_t device = mixes[i].mDeviceType;
ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
i, mixes.size(), device, address.string());
bool foundOutput = false;
for (size_t j = 0 ; j < mOutputs.size() ; j++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
if ((patch != 0) && (patch->mPatch.num_sinks != 0)
&& (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
&& (patch->mPatch.sinks[0].ext.device.type == device)
&& (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
if (mPolicyMixes.registerMix(address, mixes[i], desc) != NO_ERROR) {
res = INVALID_OPERATION;
} else {
foundOutput = true;
}
break;
}
}
if (res != NO_ERROR) {
ALOGE(" Error registering mix %zu for device 0x%X addr %s",
i, device, address.string());
res = INVALID_OPERATION;
break;
} else if (!foundOutput) {
ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
i, device, address.string());
res = INVALID_OPERATION;
break;
}
}
}
if (res != NO_ERROR) {
unregisterPolicyMixes(mixes);
}
return res;
}
status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)