| /* |
| * Copyright (C) 2009 The Android Open Source Project |
| * |
| * Licensed under the Apache License, Version 2.0 (the "License"); |
| * you may not use this file except in compliance with the License. |
| * You may obtain a copy of the License at |
| * |
| * http://www.apache.org/licenses/LICENSE-2.0 |
| * |
| * Unless required by applicable law or agreed to in writing, software |
| * distributed under the License is distributed on an "AS IS" BASIS, |
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| * See the License for the specific language governing permissions and |
| * limitations under the License. |
| */ |
| |
| #define LOG_TAG "APM_AudioPolicyManager" |
| |
| // Need to keep the log statements even in production builds |
| // to enable VERBOSE logging dynamically. |
| // You can enable VERBOSE logging as follows: |
| // adb shell setprop log.tag.APM_AudioPolicyManager V |
| #define LOG_NDEBUG 0 |
| |
| //#define VERY_VERBOSE_LOGGING |
| #ifdef VERY_VERBOSE_LOGGING |
| #define ALOGVV ALOGV |
| #else |
| #define ALOGVV(a...) do { } while(0) |
| #endif |
| |
| #define AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH 128 |
| #define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml" |
| #define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \ |
| "audio_policy_configuration_a2dp_offload_disabled.xml" |
| #define AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME \ |
| "audio_policy_configuration_bluetooth_legacy_hal.xml" |
| |
| #include <algorithm> |
| #include <inttypes.h> |
| #include <math.h> |
| #include <set> |
| #include <unordered_set> |
| #include <vector> |
| #include <AudioPolicyManagerInterface.h> |
| #include <AudioPolicyEngineInstance.h> |
| #include <cutils/properties.h> |
| #include <utils/Log.h> |
| #include <media/AudioParameter.h> |
| #include <private/android_filesystem_config.h> |
| #include <soundtrigger/SoundTrigger.h> |
| #include <system/audio.h> |
| #include <audio_policy_conf.h> |
| #include "AudioPolicyManager.h" |
| #include <Serializer.h> |
| #include "TypeConverter.h" |
| #include <policy.h> |
| |
| namespace android { |
| |
| //FIXME: workaround for truncated touch sounds |
| // to be removed when the problem is handled by system UI |
| #define TOUCH_SOUND_FIXED_DELAY_MS 100 |
| |
| // Largest difference in dB on earpiece in call between the voice volume and another |
| // media / notification / system volume. |
| constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f; |
| |
| // Compressed formats for MSD module, ordered from most preferred to least preferred. |
| static const std::vector<audio_format_t> compressedFormatsOrder = {{ |
| AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3, |
| AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }}; |
| // Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred). |
| static const std::vector<audio_channel_mask_t> surroundChannelMasksOrder = {{ |
| AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2, |
| AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2, |
| AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }}; |
| |
| // ---------------------------------------------------------------------------- |
| // AudioPolicyInterface implementation |
| // ---------------------------------------------------------------------------- |
| |
| status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device, |
| audio_policy_dev_state_t state, |
| const char *device_address, |
| const char *device_name, |
| audio_format_t encodedFormat) |
| { |
| status_t status = setDeviceConnectionStateInt(device, state, device_address, |
| device_name, encodedFormat); |
| nextAudioPortGeneration(); |
| return status; |
| } |
| |
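| // Notify the audio HAL of a device connect/disconnect event by sending the device type and |
| // address as a set_parameters key/value pair. |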
| void AudioPolicyManager::broadcastDeviceConnectionState(const sp<DeviceDescriptor> &device, |
| audio_policy_dev_state_t state) |
| { |
| AudioParameter param(device->address()); |
| const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ? |
| AudioParameter::keyStreamConnect : AudioParameter::keyStreamDisconnect); |
| param.addInt(key, device->type()); |
| mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString()); |
| } |
| |
| status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t deviceType, |
| audio_policy_dev_state_t state, |
| const char *device_address, |
| const char *device_name, |
| audio_format_t encodedFormat) |
| { |
| ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s format 0x%X", |
| deviceType, state, device_address, device_name, encodedFormat); |
| |
| // connect/disconnect only 1 device at a time |
| if (!audio_is_output_device(deviceType) && !audio_is_input_device(deviceType)) return BAD_VALUE; |
| |
| sp<DeviceDescriptor> device = |
| mHwModules.getDeviceDescriptor(deviceType, device_address, device_name, encodedFormat, |
| state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE); |
| if (device == 0) { |
| return INVALID_OPERATION; |
| } |
| |
| // handle output devices |
| if (audio_is_output_device(deviceType)) { |
| SortedVector <audio_io_handle_t> outputs; |
| |
| ssize_t index = mAvailableOutputDevices.indexOf(device); |
| |
| // save a copy of the opened output descriptors before any output is opened or closed |
| // by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies() |
| mPreviousOutputs = mOutputs; |
| switch (state) |
| { |
| // handle output device connection |
| case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: { |
| if (index >= 0) { |
| ALOGW("%s() device already connected: %s", __func__, device->toString().c_str()); |
| return INVALID_OPERATION; |
| } |
| ALOGV("%s() connecting device %s format %x", |
| __func__, device->toString().c_str(), encodedFormat); |
| |
| // register new device as available |
| if (mAvailableOutputDevices.add(device) < 0) { |
| return NO_MEMORY; |
| } |
| |
| // Before checking outputs, broadcast connect event to allow HAL to retrieve dynamic |
| // parameters on newly connected devices (instead of opening the outputs...) |
| broadcastDeviceConnectionState(device, state); |
| |
| if (checkOutputsForDevice(device, state, outputs) != NO_ERROR) { |
| mAvailableOutputDevices.remove(device); |
| |
| mHwModules.cleanUpForDevice(device); |
| |
| broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE); |
| return INVALID_OPERATION; |
| } |
| |
| // outputs should never be empty here |
| ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():" |
| "checkOutputsForDevice() returned no outputs but status OK"); |
| ALOGV("%s() checkOutputsForDevice() returned %zu outputs", __func__, outputs.size()); |
| |
| } break; |
| // handle output device disconnection |
| case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: { |
| if (index < 0) { |
| ALOGW("%s() device not connected: %s", __func__, device->toString().c_str()); |
| return INVALID_OPERATION; |
| } |
| |
| ALOGV("%s() disconnecting output device %s", __func__, device->toString().c_str()); |
| |
| // Send Disconnect to HALs |
| broadcastDeviceConnectionState(device, state); |
| |
| // remove device from available output devices |
| mAvailableOutputDevices.remove(device); |
| |
| mOutputs.clearSessionRoutesForDevice(device); |
| |
| checkOutputsForDevice(device, state, outputs); |
| |
| // Reset active device codec |
| device->setEncodedFormat(AUDIO_FORMAT_DEFAULT); |
| |
| } break; |
| |
| default: |
| ALOGE("%s() invalid state: %x", __func__, state); |
| return BAD_VALUE; |
| } |
| |
| // Propagate device availability to Engine |
| setEngineDeviceConnectionState(device, state); |
| |
| // No need to evaluate playback routing when connecting a remote submix |
| // output device used by a dynamic policy of type recorder as no |
| // playback use case is affected. |
| bool doCheckForDeviceAndOutputChanges = true; |
| if (device->type() == AUDIO_DEVICE_OUT_REMOTE_SUBMIX |
| && strncmp(device_address, "0", AUDIO_DEVICE_MAX_ADDRESS_LEN) != 0) { |
| for (audio_io_handle_t output : outputs) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output); |
| sp<AudioPolicyMix> policyMix = desc->mPolicyMix.promote(); |
| if (policyMix != nullptr |
| && policyMix->mMixType == MIX_TYPE_RECORDERS |
| && strncmp(device_address, |
| policyMix->mDeviceAddress.string(), |
| AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) { |
| doCheckForDeviceAndOutputChanges = false; |
| break; |
| } |
| } |
| } |
| |
| auto checkCloseOutputs = [&]() { |
| // outputs must be closed after checkOutputForAllStrategies() is executed |
| if (!outputs.isEmpty()) { |
| for (audio_io_handle_t output : outputs) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output); |
| // close unused outputs after device disconnection or direct outputs that have |
| // been opened by checkOutputsForDevice() to query dynamic parameters |
| if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) || |
| (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) && |
| (desc->mDirectOpenCount == 0))) { |
| closeOutput(output); |
| } |
| } |
| // check A2DP again after closing A2DP output to reset mA2dpSuspended if needed |
| return true; |
| } |
| return false; |
| }; |
| |
| if (doCheckForDeviceAndOutputChanges) { |
| checkForDeviceAndOutputChanges(checkCloseOutputs); |
| } else { |
| checkCloseOutputs(); |
| } |
| |
| if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) { |
| DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/); |
| updateCallRouting(newDevices); |
| } |
| const DeviceVector msdOutDevices = getMsdAudioOutDevices(); |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) { |
| DeviceVector newDevices = getNewOutputDevices(desc, true /*fromCache*/); |
| // do not force device change on duplicated output because if device is 0, it will |
| // also force a device 0 for the two outputs it is duplicated to, which may override |
| // a valid device selection on those outputs. |
| bool force = (msdOutDevices.isEmpty() || msdOutDevices != desc->devices()) |
| && !desc->isDuplicated() |
| && (!device_distinguishes_on_address(deviceType) |
| // always force when disconnecting (a non-duplicated device) |
| || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)); |
| setOutputDevices(desc, newDevices, force, 0); |
| } |
| } |
| |
| if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) { |
| cleanUpForDevice(device); |
| } |
| |
| mpClientInterface->onAudioPortListUpdate(); |
| return NO_ERROR; |
| } // end if is output device |
| |
| // handle input devices |
| if (audio_is_input_device(deviceType)) { |
| ssize_t index = mAvailableInputDevices.indexOf(device); |
| switch (state) |
| { |
| // handle input device connection |
| case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: { |
| if (index >= 0) { |
| ALOGW("%s() device already connected: %s", __func__, device->toString().c_str()); |
| return INVALID_OPERATION; |
| } |
| |
| if (mAvailableInputDevices.add(device) < 0) { |
| return NO_MEMORY; |
| } |
| |
| // Before checking inputs, broadcast connect event to allow HAL to retrieve dynamic |
| // parameters on newly connected devices (instead of opening the inputs...) |
| broadcastDeviceConnectionState(device, state); |
| |
| if (checkInputsForDevice(device, state) != NO_ERROR) { |
| mAvailableInputDevices.remove(device); |
| |
| broadcastDeviceConnectionState(device, AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE); |
| |
| mHwModules.cleanUpForDevice(device); |
| |
| return INVALID_OPERATION; |
| } |
| |
| } break; |
| |
| // handle input device disconnection |
| case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: { |
| if (index < 0) { |
| ALOGW("%s() device not connected: %s", __func__, device->toString().c_str()); |
| return INVALID_OPERATION; |
| } |
| |
| ALOGV("%s() disconnecting input device %s", __func__, device->toString().c_str()); |
| |
| // Send Disconnect to HALs |
| broadcastDeviceConnectionState(device, state); |
| |
| mAvailableInputDevices.remove(device); |
| |
| checkInputsForDevice(device, state); |
| } break; |
| |
| default: |
| ALOGE("%s() invalid state: %x", __func__, state); |
| return BAD_VALUE; |
| } |
| |
| // Propagate device availability to Engine |
| setEngineDeviceConnectionState(device, state); |
| |
| checkCloseInputs(); |
| // As the input device list can impact the output device selection, update |
| // getDeviceForStrategy() cache |
| updateDevicesAndOutputs(); |
| |
| if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) { |
| DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/); |
| updateCallRouting(newDevices); |
| } |
| |
| if (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) { |
| cleanUpForDevice(device); |
| } |
| |
| mpClientInterface->onAudioPortListUpdate(); |
| return NO_ERROR; |
| } // end if is input device |
| |
| ALOGW("%s() invalid device: %s", __func__, device->toString().c_str()); |
| return BAD_VALUE; |
| } |
| |
| void AudioPolicyManager::setEngineDeviceConnectionState(const sp<DeviceDescriptor> device, |
| audio_policy_dev_state_t state) { |
| |
| // the Engine does not have to know about remote submix devices used by dynamic audio policies |
| if (audio_is_remote_submix_device(device->type()) && device->address() != "0") { |
| return; |
| } |
| mEngine->setDeviceConnectionState(device, state); |
| } |
| |
| |
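| // Return AVAILABLE if a device matching the given type (and address, when provided) is present |
| // in the available output or input device lists, UNAVAILABLE otherwise. |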
| audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device, |
| const char *device_address) |
| { |
| sp<DeviceDescriptor> devDesc = |
| mHwModules.getDeviceDescriptor(device, device_address, "", AUDIO_FORMAT_DEFAULT, |
| false /* allowToCreate */, |
| (strlen(device_address) != 0)/*matchAddress*/); |
| |
| if (devDesc == 0) { |
| ALOGV("getDeviceConnectionState() undeclared device, type %08x, address: %s", |
| device, device_address); |
| return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; |
| } |
| |
| DeviceVector *deviceVector; |
| |
| if (audio_is_output_device(device)) { |
| deviceVector = &mAvailableOutputDevices; |
| } else if (audio_is_input_device(device)) { |
| deviceVector = &mAvailableInputDevices; |
| } else { |
| ALOGW("%s() invalid device type %08x", __func__, device); |
| return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; |
| } |
| |
| return (deviceVector->getDevice( |
| device, String8(device_address), AUDIO_FORMAT_DEFAULT) != 0) ? |
| AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE; |
| } |
| |
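| // Called when the configuration of a connected device changes (e.g. an A2DP codec change). |
| // If the HAL supports A2DP reconfiguration, just forward the new codec; otherwise toggle the |
| // device connection state so that its configuration is read again. |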
| status_t AudioPolicyManager::handleDeviceConfigChange(audio_devices_t device, |
| const char *device_address, |
| const char *device_name, |
| audio_format_t encodedFormat) |
| { |
| status_t status; |
| String8 reply; |
| AudioParameter param; |
| int isReconfigA2dpSupported = 0; |
| |
| ALOGV("handleDeviceConfigChange() device: 0x%X, address %s name %s encodedFormat: 0x%X", |
| device, device_address, device_name, encodedFormat); |
| |
| // connect/disconnect only 1 device at a time |
| if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE; |
| |
| // Check if the device is currently connected |
| DeviceVector deviceList = mAvailableOutputDevices.getDevicesFromTypeMask(device); |
| if (deviceList.empty()) { |
| // Nothing to do: device is not connected |
| return NO_ERROR; |
| } |
| sp<DeviceDescriptor> devDesc = deviceList.itemAt(0); |
| |
| // For offloaded A2DP, Hw modules may have the capability to |
| // configure codecs. |
| // Handle two specific cases by sending a set parameter to |
| // configure A2DP codecs. No need to toggle device state. |
| // Case 1: A2DP active device switches from primary to primary |
| // module |
| // Case 2: A2DP device config changes on primary module. |
| if (device & AUDIO_DEVICE_OUT_ALL_A2DP) { |
| sp<HwModule> module = mHwModules.getModuleForDeviceTypes(device, encodedFormat); |
| audio_module_handle_t primaryHandle = mPrimaryOutput->getModuleHandle(); |
| if (availablePrimaryOutputDevices().contains(devDesc) && |
| (module != 0 && module->getHandle() == primaryHandle)) { |
| reply = mpClientInterface->getParameters( |
| AUDIO_IO_HANDLE_NONE, |
| String8(AudioParameter::keyReconfigA2dpSupported)); |
| AudioParameter repliedParameters(reply); |
| repliedParameters.getInt( |
| String8(AudioParameter::keyReconfigA2dpSupported), isReconfigA2dpSupported); |
| if (isReconfigA2dpSupported) { |
| const String8 key(AudioParameter::keyReconfigA2dp); |
| param.add(key, String8("true")); |
| mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString()); |
| devDesc->setEncodedFormat(encodedFormat); |
| return NO_ERROR; |
| } |
| } |
| } |
| |
| // Toggle the device state: UNAVAILABLE -> AVAILABLE |
| // This will force the device configuration to be read again |
| status = setDeviceConnectionState(device, |
| AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, |
| device_address, device_name, |
| devDesc->getEncodedFormat()); |
| if (status != NO_ERROR) { |
| ALOGW("handleDeviceConfigChange() error disabling connection state: %d", |
| status); |
| return status; |
| } |
| |
| status = setDeviceConnectionState(device, |
| AUDIO_POLICY_DEVICE_STATE_AVAILABLE, |
| device_address, device_name, encodedFormat); |
| if (status != NO_ERROR) { |
| ALOGW("handleDeviceConfigChange() error enabling connection state: %d", |
| status); |
| return status; |
| } |
| |
| return NO_ERROR; |
| } |
| |
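| // Collect the encoded formats declared for A2DP devices on the primary HW module, i.e. the |
| // formats whose encoding can be offloaded to the audio HAL. |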
| status_t AudioPolicyManager::getHwOffloadEncodingFormatsSupportedForA2DP( |
| std::vector<audio_format_t> *formats) |
| { |
| ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()"); |
| status_t status = NO_ERROR; |
| std::unordered_set<audio_format_t> formatSet; |
| sp<HwModule> primaryModule = |
| mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY); |
| DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask( |
| AUDIO_DEVICE_OUT_ALL_A2DP); |
| for (const auto& device : declaredDevices) { |
| formatSet.insert(device->encodedFormats().begin(), device->encodedFormats().end()); |
| } |
| formats->assign(formatSet.begin(), formatSet.end()); |
| return status; |
| } |
| |
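| // Re-evaluate voice call routing: release any existing call RX/TX patches, then either rely on |
| // legacy routing via setOutputDevices() on the primary output or create explicit RX/TX audio |
| // patches, depending on the telephony devices and HAL capabilities. Returns the mute wait |
| // time in ms. |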
| uint32_t AudioPolicyManager::updateCallRouting(const DeviceVector &rxDevices, uint32_t delayMs) |
| { |
| bool createTxPatch = false; |
| bool createRxPatch = false; |
| uint32_t muteWaitMs = 0; |
| |
| if (!hasPrimaryOutput() || mPrimaryOutput->devices().types() == AUDIO_DEVICE_OUT_STUB) { |
| return muteWaitMs; |
| } |
| ALOG_ASSERT(!rxDevices.isEmpty(), "updateCallRouting() no selected output device"); |
| |
| audio_attributes_t attr = { .source = AUDIO_SOURCE_VOICE_COMMUNICATION }; |
| auto txSourceDevice = mEngine->getInputDeviceForAttributes(attr); |
| ALOG_ASSERT(txSourceDevice != 0, "updateCallRouting() input selected device not available"); |
| |
| ALOGV("updateCallRouting device rxDevice %s txDevice %s", |
| rxDevices.itemAt(0)->toString().c_str(), txSourceDevice->toString().c_str()); |
| |
| // release existing RX patch if any |
| if (mCallRxPatch != 0) { |
| mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0); |
| mCallRxPatch.clear(); |
| } |
| // release TX patch if any |
| if (mCallTxPatch != 0) { |
| mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0); |
| mCallTxPatch.clear(); |
| } |
| |
| auto telephonyRxModule = |
| mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_IN_TELEPHONY_RX, AUDIO_FORMAT_DEFAULT); |
| auto telephonyTxModule = |
| mHwModules.getModuleForDeviceTypes(AUDIO_DEVICE_OUT_TELEPHONY_TX, AUDIO_FORMAT_DEFAULT); |
| // retrieve Rx Source and Tx Sink device descriptors |
| sp<DeviceDescriptor> rxSourceDevice = |
| mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_TELEPHONY_RX, |
| String8(), |
| AUDIO_FORMAT_DEFAULT); |
| sp<DeviceDescriptor> txSinkDevice = |
| mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, |
| String8(), |
| AUDIO_FORMAT_DEFAULT); |
| |
| // RX and TX Telephony devices are declared by the Primary Audio HAL |
| if (isPrimaryModule(telephonyRxModule) && isPrimaryModule(telephonyTxModule) && |
| (telephonyRxModule->getHalVersionMajor() >= 3)) { |
| if (rxSourceDevice == 0 || txSinkDevice == 0) { |
| // RX / TX Telephony device(s) is(are) not currently available |
| ALOGE("updateCallRouting() no telephony Tx and/or RX device"); |
| return muteWaitMs; |
| } |
| // do not create a patch (aka SW bridging) if the Primary HW module has declared support for a |
| // route from telephony RX to the sink device and from the source device to telephony TX |
| const auto &primaryModule = telephonyRxModule; |
| createRxPatch = !primaryModule->supportsPatch(rxSourceDevice, rxDevices.itemAt(0)); |
| createTxPatch = !primaryModule->supportsPatch(txSourceDevice, txSinkDevice); |
| } else { |
| // If the RX device is on the primary HW module, then use legacy routing method for |
| // voice calls via setOutputDevice() on primary output. |
| // Otherwise, create two audio patches for TX and RX path. |
| createRxPatch = !(availablePrimaryOutputDevices().contains(rxDevices.itemAt(0))) && |
| (rxSourceDevice != 0); |
| // If the TX device is also on the primary HW module, setOutputDevice() will take care |
| // of it due to legacy implementation. If not, create a patch. |
| createTxPatch = !(availablePrimaryModuleInputDevices().contains(txSourceDevice)) && |
| (txSinkDevice != 0); |
| } |
| // Use legacy routing method for voice calls via setOutputDevice() on primary output. |
| // Otherwise, create two audio patches for TX and RX path. |
| if (!createRxPatch) { |
| muteWaitMs = setOutputDevices(mPrimaryOutput, rxDevices, true, delayMs); |
| } else { // create RX path audio patch |
| mCallRxPatch = createTelephonyPatch(true /*isRx*/, rxDevices.itemAt(0), delayMs); |
| |
| // If the TX device is on the primary HW module but RX device is |
| // on other HW module, SinkMetaData of telephony input should handle it |
| // assuming the device uses audio HAL V5.0 and above |
| } |
| if (createTxPatch) { // create TX path audio patch |
| mCallTxPatch = createTelephonyPatch(false /*isRx*/, txSourceDevice, delayMs); |
| } |
| |
| return muteWaitMs; |
| } |
| |
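| // Create an audio patch between the telephony RX device and the given sink (isRx == true) or |
| // between the given source and the telephony TX device (isRx == false). |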
| sp<AudioPatch> AudioPolicyManager::createTelephonyPatch( |
| bool isRx, const sp<DeviceDescriptor> &device, uint32_t delayMs) { |
| PatchBuilder patchBuilder; |
| |
| if (device == nullptr) { |
| return nullptr; |
| } |
| if (isRx) { |
| patchBuilder.addSink(device). |
| addSource(mAvailableInputDevices.getDevice( |
| AUDIO_DEVICE_IN_TELEPHONY_RX, String8(), AUDIO_FORMAT_DEFAULT)); |
| } else { |
| patchBuilder.addSource(device). |
| addSink(mAvailableOutputDevices.getDevice( |
| AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT)); |
| } |
| |
| // @TODO: still ignoring the address; platforms with multiple telephony devices are not handled |
| const sp<DeviceDescriptor> outputDevice = isRx ? |
| device : mAvailableOutputDevices.getDevice( |
| AUDIO_DEVICE_OUT_TELEPHONY_TX, String8(), AUDIO_FORMAT_DEFAULT); |
| SortedVector<audio_io_handle_t> outputs = |
| getOutputsForDevices(DeviceVector(outputDevice), mOutputs); |
| const audio_io_handle_t output = selectOutput(outputs); |
| // request to reuse existing output stream if one is already opened to reach the target device |
| if (output != AUDIO_IO_HANDLE_NONE) { |
| sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); |
| ALOG_ASSERT(!outputDesc->isDuplicated(), "%s() %s device output %d is duplicated", __func__, |
| outputDevice->toString().c_str(), output); |
| patchBuilder.addSource(outputDesc, { .stream = AUDIO_STREAM_PATCH }); |
| } |
| |
| if (!isRx) { |
| // terminate active capture if on the same HW module as the call TX source device |
| // FIXME: would be better to refine to only inputs whose profile connects to the |
| // call TX device but this information is not in the audio patch and logic here must be |
| // symmetric to the one in startInput() |
| for (const auto& activeDesc : mInputs.getActiveInputs()) { |
| if (activeDesc->hasSameHwModuleAs(device)) { |
| closeActiveClients(activeDesc); |
| } |
| } |
| } |
| |
| audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE; |
| status_t status = mpClientInterface->createAudioPatch( |
| patchBuilder.patch(), &afPatchHandle, delayMs); |
| ALOGW_IF(status != NO_ERROR, |
| "%s() error %d creating %s audio patch", __func__, status, isRx ? "RX" : "TX"); |
| sp<AudioPatch> audioPatch; |
| if (status == NO_ERROR) { |
| audioPatch = new AudioPatch(patchBuilder.patch(), mUidCached); |
| audioPatch->mAfPatchHandle = afPatchHandle; |
| audioPatch->mUid = mUidCached; |
| } |
| return audioPatch; |
| } |
| |
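| // Return the first device of the given type found in the supplied device vector. |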
| sp<DeviceDescriptor> AudioPolicyManager::findDevice( |
| const DeviceVector& devices, audio_devices_t device) const { |
| DeviceVector deviceList = devices.getDevicesFromTypeMask(device); |
| ALOG_ASSERT(!deviceList.isEmpty(), |
| "%s() selected device type %#x is not in devices list", __func__, device); |
| return deviceList.itemAt(0); |
| } |
| |
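| // Return the types of the devices in the supplied vector that are attached to the HW module |
| // identified by name, or AUDIO_DEVICE_NONE if the module is not found. |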
| audio_devices_t AudioPolicyManager::getModuleDeviceTypes( |
| const DeviceVector& devices, const char *moduleId) const { |
| sp<HwModule> mod = mHwModules.getModuleFromName(moduleId); |
| return mod != 0 ? devices.getDeviceTypesFromHwModule(mod->getHandle()) : AUDIO_DEVICE_NONE; |
| } |
| |
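| // Return true if the given device descriptor is among the available input or output devices |
| // of the HW module identified by name. |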
| bool AudioPolicyManager::isDeviceOfModule( |
| const sp<DeviceDescriptor>& devDesc, const char *moduleId) const { |
| sp<HwModule> module = mHwModules.getModuleFromName(moduleId); |
| if (module != 0) { |
| return mAvailableOutputDevices.getDevicesFromHwModule(module->getHandle()) |
| .indexOf(devDesc) != NAME_NOT_FOUND |
| || mAvailableInputDevices.getDevicesFromHwModule(module->getHandle()) |
| .indexOf(devDesc) != NAME_NOT_FOUND; |
| } |
| return false; |
| } |
| |
| void AudioPolicyManager::setPhoneState(audio_mode_t state) |
| { |
| ALOGV("setPhoneState() state %d", state); |
| // store previous phone state for management of sonification strategy below |
| int oldState = mEngine->getPhoneState(); |
| |
| if (mEngine->setPhoneState(state) != NO_ERROR) { |
| ALOGW("setPhoneState() invalid or same state %d", state); |
| return; |
| } |
| /// Open question: can these lines be executed after the switch of volume curves? |
| if (isStateInCall(oldState)) { |
| ALOGV("setPhoneState() in call state management: new state is %d", state); |
| // force reevaluating accessibility routing when call stops |
| mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY); |
| } |
| |
| /** |
| * Switching to or from the in-call state or switching between telephony and VoIP leads to a |
| * forced routing command. |
| */ |
| bool force = ((is_state_in_call(oldState) != is_state_in_call(state)) |
| || (is_state_in_call(state) && (state != oldState))); |
| |
| // check for device and output changes triggered by new phone state |
| checkForDeviceAndOutputChanges(); |
| |
| int delayMs = 0; |
| if (isStateInCall(state)) { |
| nsecs_t sysTime = systemTime(); |
| auto musicStrategy = streamToStrategy(AUDIO_STREAM_MUSIC); |
| auto sonificationStrategy = streamToStrategy(AUDIO_STREAM_ALARM); |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| // mute media and sonification strategies and delay device switch by the largest |
| // latency of any output where either strategy is active. |
| // This avoids sending the ring tone or music tail into the earpiece or headset. |
| if ((desc->isStrategyActive(musicStrategy, SONIFICATION_HEADSET_MUSIC_DELAY, sysTime) || |
| desc->isStrategyActive(sonificationStrategy, SONIFICATION_HEADSET_MUSIC_DELAY, |
| sysTime)) && |
| (delayMs < (int)desc->latency()*2)) { |
| delayMs = desc->latency()*2; |
| } |
| setStrategyMute(musicStrategy, true, desc); |
| setStrategyMute(musicStrategy, false, desc, MUTE_TIME_MS, |
| mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA), |
| nullptr, true /*fromCache*/).types()); |
| setStrategyMute(sonificationStrategy, true, desc); |
| setStrategyMute(sonificationStrategy, false, desc, MUTE_TIME_MS, |
| mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_ALARM), |
| nullptr, true /*fromCache*/).types()); |
| } |
| } |
| |
| if (hasPrimaryOutput()) { |
| // Note that despite the fact that getNewOutputDevices() is called on the primary output, |
| // the device returned is not necessarily reachable via this output |
| DeviceVector rxDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/); |
| // force routing command to audio hardware when ending call |
| // even if no device change is needed |
| if (isStateInCall(oldState) && rxDevices.isEmpty()) { |
| rxDevices = mPrimaryOutput->devices(); |
| } |
| |
| if (state == AUDIO_MODE_IN_CALL) { |
| updateCallRouting(rxDevices, delayMs); |
| } else if (oldState == AUDIO_MODE_IN_CALL) { |
| if (mCallRxPatch != 0) { |
| mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0); |
| mCallRxPatch.clear(); |
| } |
| if (mCallTxPatch != 0) { |
| mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0); |
| mCallTxPatch.clear(); |
| } |
| setOutputDevices(mPrimaryOutput, rxDevices, force, 0); |
| } else { |
| setOutputDevices(mPrimaryOutput, rxDevices, force, 0); |
| } |
| } |
| |
| // reevaluate routing on all outputs in case tracks have been started during the call |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| DeviceVector newDevices = getNewOutputDevices(desc, true /*fromCache*/); |
| if (state != AUDIO_MODE_IN_CALL || desc != mPrimaryOutput) { |
| setOutputDevices(desc, newDevices, !newDevices.isEmpty(), 0 /*delayMs*/); |
| } |
| } |
| |
| if (isStateInCall(state)) { |
| ALOGV("setPhoneState() in call state management: new state is %d", state); |
| // force reevaluating accessibility routing when call starts |
| mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY); |
| } |
| |
| // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE |
| mLimitRingtoneVolume = (state == AUDIO_MODE_RINGTONE && |
| isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)); |
| } |
| |
| audio_mode_t AudioPolicyManager::getPhoneState() { |
| return mEngine->getPhoneState(); |
| } |
| |
| void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage, |
| audio_policy_forced_cfg_t config) |
| { |
| ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState()); |
| if (config == mEngine->getForceUse(usage)) { |
| return; |
| } |
| |
| if (mEngine->setForceUse(usage, config) != NO_ERROR) { |
| ALOGW("setForceUse() could not set force cfg %d for usage %d", config, usage); |
| return; |
| } |
| bool forceVolumeReeval = (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) || |
| (usage == AUDIO_POLICY_FORCE_FOR_DOCK) || |
| (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM); |
| |
| // check for device and output changes triggered by new force usage |
| checkForDeviceAndOutputChanges(); |
| |
| // force client reconnection to reevaluate flag AUDIO_FLAG_AUDIBILITY_ENFORCED |
| if (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM) { |
| mpClientInterface->invalidateStream(AUDIO_STREAM_SYSTEM); |
| mpClientInterface->invalidateStream(AUDIO_STREAM_ENFORCED_AUDIBLE); |
| } |
| |
| //FIXME: workaround for truncated touch sounds |
| // to be removed when the problem is handled by system UI |
| uint32_t delayMs = 0; |
| uint32_t waitMs = 0; |
| if (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) { |
| delayMs = TOUCH_SOUND_FIXED_DELAY_MS; |
| } |
| if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) { |
| DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, true /*fromCache*/); |
| waitMs = updateCallRouting(newDevices, delayMs); |
| } |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i); |
| DeviceVector newDevices = getNewOutputDevices(outputDesc, true /*fromCache*/); |
| if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) { |
| // As done in setDeviceConnectionState, we could also fix default device issue by |
| // preventing the force re-routing in case of default dev that distinguishes on address. |
| // Let's give back to engine full device choice decision however. |
| waitMs = setOutputDevices(outputDesc, newDevices, !newDevices.isEmpty(), delayMs); |
| } |
| if (forceVolumeReeval && !newDevices.isEmpty()) { |
| applyStreamVolumes(outputDesc, newDevices.types(), waitMs, true); |
| } |
| } |
| |
| for (const auto& activeDesc : mInputs.getActiveInputs()) { |
| auto newDevice = getNewInputDevice(activeDesc); |
| // Force new input selection if the new device can not be reached via current input |
| if (activeDesc->mProfile->getSupportedDevices().contains(newDevice)) { |
| setInputDevice(activeDesc->mIoHandle, newDevice); |
| } else { |
| closeInput(activeDesc->mIoHandle); |
| } |
| } |
| } |
| |
| void AudioPolicyManager::setSystemProperty(const char* property, const char* value) |
| { |
| ALOGV("setSystemProperty() property %s, value %s", property, value); |
| } |
| |
| // Find an output profile compatible with the parameters passed. When "directOnly" is set, restrict |
| // search to profiles for direct outputs. |
| sp<IOProfile> AudioPolicyManager::getProfileForOutput( |
| const DeviceVector& devices, |
| uint32_t samplingRate, |
| audio_format_t format, |
| audio_channel_mask_t channelMask, |
| audio_output_flags_t flags, |
| bool directOnly) |
| { |
| if (directOnly) { |
| // only retain flags that will drive the direct output profile selection |
| // if explicitly requested |
| static const uint32_t kRelevantFlags = |
| (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | |
| AUDIO_OUTPUT_FLAG_VOIP_RX); |
| flags = |
| (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT); |
| } |
| |
| sp<IOProfile> profile; |
| |
| for (const auto& hwModule : mHwModules) { |
| for (const auto& curProfile : hwModule->getOutputProfiles()) { |
| if (!curProfile->isCompatibleProfile(devices, |
| samplingRate, NULL /*updatedSamplingRate*/, |
| format, NULL /*updatedFormat*/, |
| channelMask, NULL /*updatedChannelMask*/, |
| flags)) { |
| continue; |
| } |
| // reject profiles not corresponding to a device currently available |
| if (!mAvailableOutputDevices.containsAtLeastOne(curProfile->getSupportedDevices())) { |
| continue; |
| } |
| // reject profiles if connected device does not support codec |
| if (!curProfile->deviceSupportsEncodedFormats(devices.types())) { |
| continue; |
| } |
| if (!directOnly) return curProfile; |
| // when searching for direct outputs, if several profiles are compatible, give priority |
| // to one with offload capability |
| if (profile != 0 && ((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) { |
| continue; |
| } |
| profile = curProfile; |
| if ((profile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { |
| break; |
| } |
| } |
| } |
| return profile; |
| } |
| |
| audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream) |
| { |
| DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/); |
| |
| // Note that related method getOutputForAttr() uses getOutputForDevice() not selectOutput(). |
| // We use selectOutput() here since we don't have the desired AudioTrack sample rate, |
| // format, flags, etc. This may result in some discrepancy for functions that utilize |
| // getOutput() solely on audio_stream_type such as AudioSystem::getOutputFrameCount() |
| // and AudioSystem::getOutputSamplingRate(). |
| |
| SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs); |
| const audio_io_handle_t output = selectOutput(outputs); |
| |
| ALOGV("getOutput() stream %d selected devices %s, output %d", stream, |
| devices.toString().c_str(), output); |
| return output; |
| } |
| |
| status_t AudioPolicyManager::getAudioAttributes(audio_attributes_t *dstAttr, |
| const audio_attributes_t *srcAttr, |
| audio_stream_type_t srcStream) |
| { |
| if (srcAttr != NULL) { |
| if (!isValidAttributes(srcAttr)) { |
| ALOGE("%s invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]", |
| __func__, |
| srcAttr->usage, srcAttr->content_type, srcAttr->flags, |
| srcAttr->tags); |
| return BAD_VALUE; |
| } |
| *dstAttr = *srcAttr; |
| } else { |
| if (srcStream < AUDIO_STREAM_MIN || srcStream >= AUDIO_STREAM_PUBLIC_CNT) { |
| ALOGE("%s: invalid stream type", __func__); |
| return BAD_VALUE; |
| } |
| *dstAttr = mEngine->getAttributesForStreamType(srcStream); |
| } |
| |
| // Only honor audibility enforced when required. The client will be |
| // forced to reconnect if the forced usage changes. |
| if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) { |
| dstAttr->flags &= ~AUDIO_FLAG_AUDIBILITY_ENFORCED; |
| } |
| |
| return NO_ERROR; |
| } |
| |
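| // Core output selection logic shared by getOutputForAttr(): resolve the audio attributes and |
| // stream type, honor explicit routing and dynamic policy mixes, then pick an output (possibly |
| // on the MSD module) for the selected devices. |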
| status_t AudioPolicyManager::getOutputForAttrInt( |
| audio_attributes_t *resultAttr, |
| audio_io_handle_t *output, |
| audio_session_t session, |
| const audio_attributes_t *attr, |
| audio_stream_type_t *stream, |
| uid_t uid, |
| const audio_config_t *config, |
| audio_output_flags_t *flags, |
| audio_port_handle_t *selectedDeviceId, |
| bool *isRequestedDeviceForExclusiveUse, |
| std::vector<sp<SwAudioOutputDescriptor>> *secondaryDescs) |
| { |
| DeviceVector outputDevices; |
| const audio_port_handle_t requestedPortId = *selectedDeviceId; |
| DeviceVector msdDevices = getMsdAudioOutDevices(); |
| const sp<DeviceDescriptor> requestedDevice = |
| mAvailableOutputDevices.getDeviceFromId(requestedPortId); |
| |
| status_t status = getAudioAttributes(resultAttr, attr, *stream); |
| if (status != NO_ERROR) { |
| return status; |
| } |
| if (auto it = mAllowedCapturePolicies.find(uid); it != end(mAllowedCapturePolicies)) { |
| resultAttr->flags |= it->second; |
| } |
| *stream = mEngine->getStreamTypeForAttributes(*resultAttr); |
| |
| ALOGV("%s() attributes=%s stream=%s session %d selectedDeviceId %d", __func__, |
| toString(*resultAttr).c_str(), toString(*stream).c_str(), session, requestedPortId); |
| |
| // The primary output is the explicit routing (e.g. setPreferredDevice) if specified; |
| // otherwise, fall back to the dynamic policies; if none match, query the engine. |
| // Secondary outputs are always found by dynamic policies as the engine does not support them |
| sp<SwAudioOutputDescriptor> policyDesc; |
| status = mPolicyMixes.getOutputForAttr(*resultAttr, uid, *flags, policyDesc, secondaryDescs); |
| if (status != OK) { |
| return status; |
| } |
| |
| // Explicit routing has higher priority than any dynamic policy primary output |
| bool usePrimaryOutputFromPolicyMixes = requestedDevice == nullptr && policyDesc != nullptr; |
| |
| // FIXME: in case of RENDER policy, the output capabilities should be checked |
| if ((usePrimaryOutputFromPolicyMixes || !secondaryDescs->empty()) |
| && !audio_is_linear_pcm(config->format)) { |
| ALOGD("%s: rejecting request as dynamic audio policy only supports PCM", __func__); |
| return BAD_VALUE; |
| } |
| if (usePrimaryOutputFromPolicyMixes) { |
| *output = policyDesc->mIoHandle; |
| sp<AudioPolicyMix> mix = policyDesc->mPolicyMix.promote(); |
| sp<DeviceDescriptor> deviceDesc = |
| mAvailableOutputDevices.getDevice(mix->mDeviceType, |
| mix->mDeviceAddress, |
| AUDIO_FORMAT_DEFAULT); |
| *selectedDeviceId = deviceDesc != 0 ? deviceDesc->getId() : AUDIO_PORT_HANDLE_NONE; |
| ALOGV("getOutputForAttr() returns output %d", *output); |
| return NO_ERROR; |
| } |
| // Virtual sources must always be dynamically or explicitly routed |
| if (resultAttr->usage == AUDIO_USAGE_VIRTUAL_SOURCE) { |
| ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE"); |
| return BAD_VALUE; |
| } |
| // explicit routing, formerly managed by getDeviceForStrategy in APM, is now handled by the |
| // engine in order to leave the ordering choice to future vendor engines |
| outputDevices = mEngine->getOutputDevicesForAttributes(*resultAttr, requestedDevice, false); |
| |
| if ((resultAttr->flags & AUDIO_FLAG_HW_AV_SYNC) != 0) { |
| *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC); |
| } |
| |
| // Set incall music only if the device was explicitly set, and fall back to the device chosen |
| // by the engine if not. |
| // FIXME: provide a more generic approach which is not device specific and move this back |
| // to getOutputForDevice. |
| // TODO: Remove check of AUDIO_STREAM_MUSIC once migration is completed on the app side. |
| if (outputDevices.types() == AUDIO_DEVICE_OUT_TELEPHONY_TX && |
| (*stream == AUDIO_STREAM_MUSIC || resultAttr->usage == AUDIO_USAGE_VOICE_COMMUNICATION) && |
| audio_is_linear_pcm(config->format) && |
| isInCall()) { |
| if (requestedPortId != AUDIO_PORT_HANDLE_NONE) { |
| *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_INCALL_MUSIC; |
| *isRequestedDeviceForExclusiveUse = true; |
| } |
| } |
| |
| ALOGV("%s() device %s, sampling rate %d, format %#x, channel mask %#x, flags %#x stream %s", |
| __func__, outputDevices.toString().c_str(), config->sample_rate, config->format, |
| config->channel_mask, *flags, toString(*stream).c_str()); |
| |
| *output = AUDIO_IO_HANDLE_NONE; |
| if (!msdDevices.isEmpty()) { |
| *output = getOutputForDevices(msdDevices, session, *stream, config, flags); |
| sp<DeviceDescriptor> device = outputDevices.isEmpty() ? nullptr : outputDevices.itemAt(0); |
| if (*output != AUDIO_IO_HANDLE_NONE && setMsdPatch(device) == NO_ERROR) { |
| ALOGV("%s() Using MSD devices %s instead of devices %s", |
| __func__, msdDevices.toString().c_str(), outputDevices.toString().c_str()); |
| outputDevices = msdDevices; |
| } else { |
| *output = AUDIO_IO_HANDLE_NONE; |
| } |
| } |
| if (*output == AUDIO_IO_HANDLE_NONE) { |
| *output = getOutputForDevices(outputDevices, session, *stream, config, |
| flags, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC); |
| } |
| if (*output == AUDIO_IO_HANDLE_NONE) { |
| return INVALID_OPERATION; |
| } |
| |
| *selectedDeviceId = getFirstDeviceId(outputDevices); |
| |
| ALOGV("%s returns output %d selectedDeviceId %d", __func__, *output, *selectedDeviceId); |
| |
| return NO_ERROR; |
| } |
| |
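| // Public entry point: validate the requested port id, run getOutputForAttrInt() and register |
| // a TrackClientDescriptor on the selected output. |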
| status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr, |
| audio_io_handle_t *output, |
| audio_session_t session, |
| audio_stream_type_t *stream, |
| uid_t uid, |
| const audio_config_t *config, |
| audio_output_flags_t *flags, |
| audio_port_handle_t *selectedDeviceId, |
| audio_port_handle_t *portId, |
| std::vector<audio_io_handle_t> *secondaryOutputs) |
| { |
| // The supplied portId must be AUDIO_PORT_HANDLE_NONE |
| if (*portId != AUDIO_PORT_HANDLE_NONE) { |
| return INVALID_OPERATION; |
| } |
| const audio_port_handle_t requestedPortId = *selectedDeviceId; |
| audio_attributes_t resultAttr; |
| bool isRequestedDeviceForExclusiveUse = false; |
| std::vector<sp<SwAudioOutputDescriptor>> secondaryOutputDescs; |
| const sp<DeviceDescriptor> requestedDevice = |
| mAvailableOutputDevices.getDeviceFromId(requestedPortId); |
| |
| // Prevent storing an invalid requested device id in clients |
| const audio_port_handle_t sanitizedRequestedPortId = |
| requestedDevice != nullptr ? requestedPortId : AUDIO_PORT_HANDLE_NONE; |
| *selectedDeviceId = sanitizedRequestedPortId; |
| |
| status_t status = getOutputForAttrInt(&resultAttr, output, session, attr, stream, uid, |
| config, flags, selectedDeviceId, &isRequestedDeviceForExclusiveUse, |
| &secondaryOutputDescs); |
| if (status != NO_ERROR) { |
| return status; |
| } |
| std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryOutputDescs; |
| for (auto& secondaryDesc : secondaryOutputDescs) { |
| secondaryOutputs->push_back(secondaryDesc->mIoHandle); |
| weakSecondaryOutputDescs.push_back(secondaryDesc); |
| } |
| |
| audio_config_base_t clientConfig = {.sample_rate = config->sample_rate, |
| .format = config->format, |
| .channel_mask = config->channel_mask }; |
| *portId = AudioPort::getNextUniqueId(); |
| |
| sp<TrackClientDescriptor> clientDesc = |
| new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig, |
| sanitizedRequestedPortId, *stream, |
| mEngine->getProductStrategyForAttributes(resultAttr), |
| toVolumeSource(resultAttr), |
| *flags, isRequestedDeviceForExclusiveUse, |
| std::move(weakSecondaryOutputDescs)); |
| sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output); |
| outputDesc->addClient(clientDesc); |
| |
| ALOGV("%s() returns output %d requestedPortId %d selectedDeviceId %d for port ID %d", __func__, |
| *output, requestedPortId, *selectedDeviceId, *portId); |
| |
| return NO_ERROR; |
| } |
| |
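| // Select or open an output for the given devices: open a direct output when the flags or |
| // format require it, otherwise fall back to an existing mixed (non-direct) output. |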
| audio_io_handle_t AudioPolicyManager::getOutputForDevices( |
| const DeviceVector &devices, |
| audio_session_t session, |
| audio_stream_type_t stream, |
| const audio_config_t *config, |
| audio_output_flags_t *flags, |
| bool forceMutingHaptic) |
| { |
| audio_io_handle_t output = AUDIO_IO_HANDLE_NONE; |
| status_t status; |
| |
| // Discard haptic channel mask when forcing muting haptic channels. |
| audio_channel_mask_t channelMask = forceMutingHaptic |
| ? (config->channel_mask & ~AUDIO_CHANNEL_HAPTIC_ALL) : config->channel_mask; |
| |
| // open a direct output if required by specified parameters |
| // force direct flag if offload flag is set: offloading implies a direct output stream, |
| // and all common behaviors are driven by checking only the direct flag. |
| // This should normally be set appropriately in the policy configuration file. |
| if ((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) { |
| *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT); |
| } |
| if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) { |
| *flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT); |
| } |
| // only allow deep buffering for music stream type |
| if (stream != AUDIO_STREAM_MUSIC) { |
| *flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER); |
| } else if (/* stream == AUDIO_STREAM_MUSIC && */ |
| *flags == AUDIO_OUTPUT_FLAG_NONE && |
| property_get_bool("audio.deep_buffer.media", false /* default_value */)) { |
| // use DEEP_BUFFER as default output for music stream type |
| *flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER; |
| } |
| if (stream == AUDIO_STREAM_TTS) { |
| *flags = AUDIO_OUTPUT_FLAG_TTS; |
| } else if (stream == AUDIO_STREAM_VOICE_CALL && |
| audio_is_linear_pcm(config->format) && |
| (*flags & AUDIO_OUTPUT_FLAG_INCALL_MUSIC) == 0) { |
| *flags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_VOIP_RX | |
| AUDIO_OUTPUT_FLAG_DIRECT); |
| ALOGV("Set VoIP and Direct output flags for PCM format"); |
| } |
| |
| |
| sp<IOProfile> profile; |
| |
| // skip direct output selection if the request can obviously be attached to a mixed output |
| // and not explicitly requested |
| if (((*flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) && |
| audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX && |
| audio_channel_count_from_out_mask(channelMask) <= 2) { |
| goto non_direct_output; |
| } |
| |
| // Do not allow offloading if a non-offloadable effect is enabled or MasterMono is enabled. |
| // This prevents creating an offloaded track and tearing it down immediately after start |
| // when audioflinger detects there is an active non offloadable effect. |
| // FIXME: We should check the audio session here but we do not have it in this context. |
| // This may prevent offloading in rare situations where effects are left active by apps |
| // in the background. |
| |
| if (((*flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) || |
| !(mEffects.isNonOffloadableEffectEnabled() || mMasterMono)) { |
| profile = getProfileForOutput(devices, |
| config->sample_rate, |
| config->format, |
| channelMask, |
| (audio_output_flags_t)*flags, |
| true /* directOnly */); |
| } |
| |
| if (profile != 0) { |
| // exclusive outputs for MMAP and Offload are enforced by different session ids. |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| if (!desc->isDuplicated() && (profile == desc->mProfile)) { |
| // reuse direct output if currently open by the same client |
| // and configured with same parameters |
| if ((config->sample_rate == desc->mSamplingRate) && |
| (config->format == desc->mFormat) && |
| (channelMask == desc->mChannelMask) && |
| (session == desc->mDirectClientSession)) { |
| desc->mDirectOpenCount++; |
| ALOGI("%s reusing direct output %d for session %d", __func__, |
| mOutputs.keyAt(i), session); |
| return mOutputs.keyAt(i); |
| } |
| } |
| } |
| |
| if (!profile->canOpenNewIo()) { |
| goto non_direct_output; |
| } |
| |
| sp<SwAudioOutputDescriptor> outputDesc = |
| new SwAudioOutputDescriptor(profile, mpClientInterface); |
| |
| String8 address = getFirstDeviceAddress(devices); |
| |
| // MSD patch may be using the only output stream that can service this request. Release |
| // MSD patch to prioritize this request over any active output on MSD. |
| AudioPatchCollection msdPatches = getMsdPatches(); |
| for (size_t i = 0; i < msdPatches.size(); i++) { |
| const auto& patch = msdPatches[i]; |
| for (size_t j = 0; j < patch->mPatch.num_sinks; ++j) { |
| const struct audio_port_config *sink = &patch->mPatch.sinks[j]; |
| if (sink->type == AUDIO_PORT_TYPE_DEVICE && |
| (sink->ext.device.type & devices.types()) != AUDIO_DEVICE_NONE && |
| (address.isEmpty() || strncmp(sink->ext.device.address, address.string(), |
| AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) { |
| releaseAudioPatch(patch->mHandle, mUidCached); |
| break; |
| } |
| } |
| } |
| |
| status = outputDesc->open(config, devices, stream, *flags, &output); |
| |
| // only accept an output with the requested parameters |
| if (status != NO_ERROR || |
| (config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) || |
| (config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) || |
| (channelMask != 0 && channelMask != outputDesc->mChannelMask)) { |
| ALOGV("%s failed opening direct output: output %d sample rate %d %d, " |
| "format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate, |
| outputDesc->mSamplingRate, config->format, outputDesc->mFormat, |
| channelMask, outputDesc->mChannelMask); |
| if (output != AUDIO_IO_HANDLE_NONE) { |
| outputDesc->close(); |
| } |
| // fall back to mixer output if possible when the direct output could not be opened |
| if (audio_is_linear_pcm(config->format) && |
| config->sample_rate <= SAMPLE_RATE_HZ_MAX) { |
| goto non_direct_output; |
| } |
| return AUDIO_IO_HANDLE_NONE; |
| } |
| outputDesc->mDirectOpenCount = 1; |
| outputDesc->mDirectClientSession = session; |
| |
| addOutput(output, outputDesc); |
| mPreviousOutputs = mOutputs; |
| ALOGV("%s returns new direct output %d", __func__, output); |
| mpClientInterface->onAudioPortListUpdate(); |
| return output; |
| } |
| |
| non_direct_output: |
| |
| // A request for HW A/V sync cannot fall back to a mixed output because timestamps |
| // are embedded in the audio data |
| if ((*flags & (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ)) != 0) { |
| return AUDIO_IO_HANDLE_NONE; |
| } |
| |
| // ignoring channel mask due to downmix capability in mixer |
| |
| // open a non direct output |
| |
| // for non direct outputs, only PCM is supported |
| if (audio_is_linear_pcm(config->format)) { |
| // get which output is suitable for the specified stream. The actual |
| // routing change will happen when startOutput() will be called |
| SortedVector<audio_io_handle_t> outputs = getOutputsForDevices(devices, mOutputs); |
| |
| // at this stage we should ignore the DIRECT flag as no direct output could be found earlier |
| *flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT); |
| output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate); |
| } |
| ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, " |
| "sampling rate %d, format %#x, channels %#x, flags %#x", |
| stream, config->sample_rate, config->format, channelMask, *flags); |
| |
| return output; |
| } |
| |
| sp<DeviceDescriptor> AudioPolicyManager::getMsdAudioInDevice() const { |
| auto msdInDevices = mHwModules.getAvailableDevicesFromModuleName(AUDIO_HARDWARE_MODULE_ID_MSD, |
| mAvailableInputDevices); |
| return msdInDevices.isEmpty()? nullptr : msdInDevices.itemAt(0); |
| } |
| |
| DeviceVector AudioPolicyManager::getMsdAudioOutDevices() const { |
| return mHwModules.getAvailableDevicesFromModuleName(AUDIO_HARDWARE_MODULE_ID_MSD, |
| mAvailableOutputDevices); |
| } |
| |
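| // Return all audio patches whose source device belongs to the MSD HW module. |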
| const AudioPatchCollection AudioPolicyManager::getMsdPatches() const { |
| AudioPatchCollection msdPatches; |
| sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD); |
| if (msdModule != 0) { |
| for (size_t i = 0; i < mAudioPatches.size(); ++i) { |
| sp<AudioPatch> patch = mAudioPatches.valueAt(i); |
| for (size_t j = 0; j < patch->mPatch.num_sources; ++j) { |
| const struct audio_port_config *source = &patch->mPatch.sources[j]; |
| if (source->type == AUDIO_PORT_TYPE_DEVICE && |
| source->ext.device.hw_module == msdModule->getHandle()) { |
| msdPatches.addAudioPatch(patch->mHandle, patch); |
| } |
| } |
| } |
| } |
| return msdPatches; |
| } |
| |
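| // Find the best matching audio configuration between the MSD module input profiles and the |
| // output profiles of the module owning the given device, and fill the corresponding source |
| // and sink port configurations. |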
| status_t AudioPolicyManager::getBestMsdAudioProfileFor(const sp<DeviceDescriptor> &outputDevice, |
| bool hwAvSync, audio_port_config *sourceConfig, audio_port_config *sinkConfig) const |
| { |
| sp<HwModule> msdModule = mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_MSD); |
| if (msdModule == nullptr) { |
| ALOGE("%s() unable to get MSD module", __func__); |
| return NO_INIT; |
| } |
| sp<HwModule> deviceModule = mHwModules.getModuleForDevice(outputDevice, AUDIO_FORMAT_DEFAULT); |
| if (deviceModule == nullptr) { |
| ALOGE("%s() unable to get module for %s", __func__, outputDevice->toString().c_str()); |
| return NO_INIT; |
| } |
| const InputProfileCollection &inputProfiles = msdModule->getInputProfiles(); |
| if (inputProfiles.isEmpty()) { |
| ALOGE("%s() no input profiles for MSD module", __func__); |
| return NO_INIT; |
| } |
| const OutputProfileCollection &outputProfiles = deviceModule->getOutputProfiles(); |
| if (outputProfiles.isEmpty()) { |
| ALOGE("%s() no output profiles for device %s", __func__, outputDevice->toString().c_str()); |
| return NO_INIT; |
| } |
| AudioProfileVector msdProfiles; |
| // Each IOProfile represents a MixPort from audio_policy_configuration.xml |
| for (const auto &inProfile : inputProfiles) { |
| if (hwAvSync == ((inProfile->getFlags() & AUDIO_INPUT_FLAG_HW_AV_SYNC) != 0)) { |
| msdProfiles.appendVector(inProfile->getAudioProfiles()); |
| } |
| } |
| AudioProfileVector deviceProfiles; |
| for (const auto &outProfile : outputProfiles) { |
| if (hwAvSync == ((outProfile->getFlags() & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0)) { |
| deviceProfiles.appendVector(outProfile->getAudioProfiles()); |
| } |
| } |
| struct audio_config_base bestSinkConfig; |
| status_t result = msdProfiles.findBestMatchingOutputConfig(deviceProfiles, |
| compressedFormatsOrder, surroundChannelMasksOrder, true /*preferHigherSamplingRates*/, |
| &bestSinkConfig); |
| if (result != NO_ERROR) { |
| ALOGD("%s() no matching profiles found for device: %s, hwAvSync: %d", |
| __func__, outputDevice->toString().c_str(), hwAvSync); |
| return result; |
| } |
| sinkConfig->sample_rate = bestSinkConfig.sample_rate; |
| sinkConfig->channel_mask = bestSinkConfig.channel_mask; |
| sinkConfig->format = bestSinkConfig.format; |
| // For encoded streams force direct flag to prevent downstream mixing. |
| sinkConfig->flags.output = static_cast<audio_output_flags_t>( |
| sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_DIRECT); |
| sourceConfig->sample_rate = bestSinkConfig.sample_rate; |
| // Specify exact channel mask to prevent guessing by bit count in PatchPanel. |
| sourceConfig->channel_mask = audio_channel_mask_out_to_in(bestSinkConfig.channel_mask); |
| sourceConfig->format = bestSinkConfig.format; |
| // Copy input stream directly without any processing (e.g. resampling). |
| sourceConfig->flags.input = static_cast<audio_input_flags_t>( |
| sourceConfig->flags.input | AUDIO_INPUT_FLAG_DIRECT); |
| if (hwAvSync) { |
| sinkConfig->flags.output = static_cast<audio_output_flags_t>( |
| sinkConfig->flags.output | AUDIO_OUTPUT_FLAG_HW_AV_SYNC); |
| sourceConfig->flags.input = static_cast<audio_input_flags_t>( |
| sourceConfig->flags.input | AUDIO_INPUT_FLAG_HW_AV_SYNC); |
| } |
| const unsigned int config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE | |
| AUDIO_PORT_CONFIG_CHANNEL_MASK | AUDIO_PORT_CONFIG_FORMAT | AUDIO_PORT_CONFIG_FLAGS; |
| sinkConfig->config_mask |= config_mask; |
| sourceConfig->config_mask |= config_mask; |
| return NO_ERROR; |
| } |
| |
| PatchBuilder AudioPolicyManager::buildMsdPatch(const sp<DeviceDescriptor> &outputDevice) const |
| { |
| PatchBuilder patchBuilder; |
| patchBuilder.addSource(getMsdAudioInDevice()).addSink(outputDevice); |
| audio_port_config sourceConfig = patchBuilder.patch()->sources[0]; |
| audio_port_config sinkConfig = patchBuilder.patch()->sinks[0]; |
| // TODO: Figure out whether MSD module has HW_AV_SYNC flag set in the AP config file. |
| // For now, we just forcefully try with HwAvSync first. |
| status_t res = getBestMsdAudioProfileFor(outputDevice, true /*hwAvSync*/, |
| &sourceConfig, &sinkConfig) == NO_ERROR ? NO_ERROR : |
| getBestMsdAudioProfileFor( |
| outputDevice, false /*hwAvSync*/, &sourceConfig, &sinkConfig); |
| if (res == NO_ERROR) { |
| // Found a matching profile for encoded audio. Re-create PatchBuilder with this config. |
| return (PatchBuilder()).addSource(sourceConfig).addSink(sinkConfig); |
| } |
| ALOGV("%s() no matching profile found. Fall through to default PCM patch" |
| " supporting PCM format conversion.", __func__); |
| return patchBuilder; |
| } |
| |
| status_t AudioPolicyManager::setMsdPatch(const sp<DeviceDescriptor> &outputDevice) { |
| sp<DeviceDescriptor> device = outputDevice; |
| if (device == nullptr) { |
| // Use the media strategy for an unspecified output device. This should only |
| // occur when called from checkForDeviceAndOutputChanges(). Device connection |
| // events may therefore invalidate explicit routing requests. |
| DeviceVector devices = mEngine->getOutputDevicesForAttributes( |
| attributes_initializer(AUDIO_USAGE_MEDIA), nullptr, false /*fromCache*/); |
| LOG_ALWAYS_FATAL_IF(devices.isEmpty(), "no output device to set MSD patch"); |
| device = devices.itemAt(0); |
| } |
| ALOGV("%s() for device %s", __func__, device->toString().c_str()); |
| PatchBuilder patchBuilder = buildMsdPatch(device); |
| const struct audio_patch* patch = patchBuilder.patch(); |
| const AudioPatchCollection msdPatches = getMsdPatches(); |
| if (!msdPatches.isEmpty()) { |
| LOG_ALWAYS_FATAL_IF(msdPatches.size() > 1, |
| "The current MSD prototype only supports one output patch"); |
| sp<AudioPatch> currentPatch = msdPatches.valueAt(0); |
| if (audio_patches_are_equal(&currentPatch->mPatch, patch)) { |
| return NO_ERROR; |
| } |
| releaseAudioPatch(currentPatch->mHandle, mUidCached); |
| } |
| status_t status = installPatch(__func__, -1 /*index*/, nullptr /*patchHandle*/, |
| patch, 0 /*delayMs*/, mUidCached, nullptr /*patchDescPtr*/); |
| ALOGE_IF(status != NO_ERROR, "%s() error %d creating MSD audio patch", __func__, status); |
| ALOGI_IF(status == NO_ERROR, "%s() Patch created from MSD_IN to " |
| "device:%s (format:%#x channels:%#x samplerate:%d)", __func__, |
| device->toString().c_str(), patch->sources[0].format, |
| patch->sources[0].channel_mask, patch->sources[0].sample_rate); |
| return status; |
| } |
| |
| audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs, |
| audio_output_flags_t flags, |
| audio_format_t format, |
| audio_channel_mask_t channelMask, |
| uint32_t samplingRate) |
| { |
| LOG_ALWAYS_FATAL_IF(!(format == AUDIO_FORMAT_INVALID || audio_is_linear_pcm(format)), |
| "%s called with format %#x", __func__, format); |
| |
| // Flags disqualifying an output: the match must happen before calling selectOutput() |
| static const audio_output_flags_t kExcludedFlags = (audio_output_flags_t) |
| (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_MMAP_NOIRQ | AUDIO_OUTPUT_FLAG_DIRECT); |
| |
| // Flags expressing a functional request: they must take priority over |
| // all other criteria |
| static const audio_output_flags_t kFunctionalFlags = (audio_output_flags_t) |
| (AUDIO_OUTPUT_FLAG_VOIP_RX | AUDIO_OUTPUT_FLAG_INCALL_MUSIC | |
| AUDIO_OUTPUT_FLAG_TTS | AUDIO_OUTPUT_FLAG_DIRECT_PCM); |
| // Flags expressing a performance request: have lower priority than serving |
| // requested sampling rate or channel mask |
| static const audio_output_flags_t kPerformanceFlags = (audio_output_flags_t) |
| (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER | |
| AUDIO_OUTPUT_FLAG_RAW | AUDIO_OUTPUT_FLAG_SYNC); |
| |
| const audio_output_flags_t functionalFlags = |
| (audio_output_flags_t)(flags & kFunctionalFlags); |
| const audio_output_flags_t performanceFlags = |
| (audio_output_flags_t)(flags & kPerformanceFlags); |
| |
| audio_io_handle_t bestOutput = (outputs.size() == 0) ? AUDIO_IO_HANDLE_NONE : outputs[0]; |
| |
| // select one output among several that provide a path to a particular device or set of |
| // devices (the list was previously built by getOutputsForDevices()). |
| // The priority is as follows: |
| // 1: the output supporting haptic playback when requesting haptic playback |
| // 2: the output with the highest number of requested functional flags |
| // 3: the output supporting the exact channel mask |
| // 4: the output with a higher channel count than requested |
| // 5: the output with a higher sampling rate than requested |
| // 6: the output with the highest number of requested performance flags |
| // 7: the output with the bit depth the closest to the requested one |
| // 8: the primary output |
| // 9: the first output in the list |
| |
| // matching criteria values in priority order for best matching output so far |
| std::vector<uint32_t> bestMatchCriteria(8, 0); |
| |
| const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask); |
| const uint32_t hapticChannelCount = audio_channel_count_from_out_mask( |
| channelMask & AUDIO_CHANNEL_HAPTIC_ALL); |
| |
| for (audio_io_handle_t output : outputs) { |
| sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output); |
| // matching criteria values in priority order for current output |
| std::vector<uint32_t> currentMatchCriteria(8, 0); |
| |
| if (outputDesc->isDuplicated()) { |
| continue; |
| } |
| if ((kExcludedFlags & outputDesc->mFlags) != 0) { |
| continue; |
| } |
| |
| // If haptic channel is specified, use the haptic output if present. |
| // When using a haptic output, the same audio format and sample rate are required. |
| const uint32_t outputHapticChannelCount = audio_channel_count_from_out_mask( |
| outputDesc->mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL); |
| if ((hapticChannelCount == 0) != (outputHapticChannelCount == 0)) { |
| continue; |
| } |
| if (outputHapticChannelCount >= hapticChannelCount |
| && format == outputDesc->mFormat |
| && samplingRate == outputDesc->mSamplingRate) { |
| currentMatchCriteria[0] = outputHapticChannelCount; |
| } |
| |
| // functional flags match |
| currentMatchCriteria[1] = popcount(outputDesc->mFlags & functionalFlags); |
| |
| // channel mask and channel count match |
| uint32_t outputChannelCount = audio_channel_count_from_out_mask(outputDesc->mChannelMask); |
| if (channelMask != AUDIO_CHANNEL_NONE && channelCount > 2 && |
| channelCount <= outputChannelCount) { |
| if ((audio_channel_mask_get_representation(channelMask) == |
| audio_channel_mask_get_representation(outputDesc->mChannelMask)) && |
| ((channelMask & outputDesc->mChannelMask) == channelMask)) { |
| currentMatchCriteria[2] = outputChannelCount; |
| } |
| currentMatchCriteria[3] = outputChannelCount; |
| } |
| |
| // sampling rate match |
| if (samplingRate > SAMPLE_RATE_HZ_DEFAULT && |
| samplingRate <= outputDesc->mSamplingRate) { |
| currentMatchCriteria[4] = outputDesc->mSamplingRate; |
| } |
| |
| // performance flags match |
| currentMatchCriteria[5] = popcount(outputDesc->mFlags & performanceFlags); |
| |
| // format match |
| if (format != AUDIO_FORMAT_INVALID) { |
| currentMatchCriteria[6] = |
| AudioPort::kFormatDistanceMax - |
| AudioPort::formatDistance(format, outputDesc->mFormat); |
| } |
| |
| // primary output match |
| currentMatchCriteria[7] = outputDesc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY; |
| |
| // compare match criteria by priority then value |
| if (std::lexicographical_compare(bestMatchCriteria.begin(), bestMatchCriteria.end(), |
| currentMatchCriteria.begin(), currentMatchCriteria.end())) { |
| bestMatchCriteria = currentMatchCriteria; |
| bestOutput = output; |
| |
| std::stringstream result; |
| std::copy(bestMatchCriteria.begin(), bestMatchCriteria.end(), |
| std::ostream_iterator<int>(result, " ")); |
| ALOGV("%s new bestOutput %d criteria %s", |
| __func__, bestOutput, result.str().c_str()); |
| } |
| } |
| |
| return bestOutput; |
| } |
| |
| status_t AudioPolicyManager::startOutput(audio_port_handle_t portId) |
| { |
| ALOGV("%s portId %d", __FUNCTION__, portId); |
| |
| sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId); |
| if (outputDesc == 0) { |
| ALOGW("startOutput() no output for client %d", portId); |
| return BAD_VALUE; |
| } |
| sp<TrackClientDescriptor> client = outputDesc->getClient(portId); |
| |
| ALOGV("startOutput() output %d, stream %d, session %d", |
| outputDesc->mIoHandle, client->stream(), client->session()); |
| |
| status_t status = outputDesc->start(); |
| if (status != NO_ERROR) { |
| return status; |
| } |
| |
| uint32_t delayMs; |
| status = startSource(outputDesc, client, &delayMs); |
| |
| if (status != NO_ERROR) { |
| outputDesc->stop(); |
| return status; |
| } |
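| // startSource() may return a delay to let other active outputs be muted or drained |
| // before this one actually starts; wait for it here before returning. |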
| if (delayMs != 0) { |
| usleep(delayMs * 1000); |
| } |
| |
| return status; |
| } |
| |
| status_t AudioPolicyManager::startSource(const sp<SwAudioOutputDescriptor>& outputDesc, |
| const sp<TrackClientDescriptor>& client, |
| uint32_t *delayMs) |
| { |
| // cannot start playback of STREAM_TTS if any other output is being used |
| uint32_t beaconMuteLatency = 0; |
| |
| *delayMs = 0; |
| audio_stream_type_t stream = client->stream(); |
| auto clientVolSrc = client->volumeSource(); |
| auto clientStrategy = client->strategy(); |
| auto clientAttr = client->attributes(); |
| if (stream == AUDIO_STREAM_TTS) { |
| ALOGV("\t found BEACON stream"); |
| if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive( |
| toVolumeSource(AUDIO_STREAM_TTS) /*sourceToIgnore*/)) { |
| return INVALID_OPERATION; |
| } else { |
| beaconMuteLatency = handleEventForBeacon(STARTING_BEACON); |
| } |
| } else { |
| // some playback other than beacon starts |
| beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT); |
| } |
| |
| // force device change if the output is inactive and no audio patch is already present. |
| // check active before incrementing usage count |
| bool force = !outputDesc->isActive() && |
| (outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE); |
| |
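| // If the client is attached to a dynamic policy mix, route it to the device registered |
| // for that mix: the remote submix device for loopback mixes, the mix device type otherwise. |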
| DeviceVector devices; |
| sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote(); |
| const char *address = NULL; |
| if (policyMix != NULL) { |
| audio_devices_t newDeviceType; |
| address = policyMix->mDeviceAddress.string(); |
| if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) { |
| newDeviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX; |
| } else { |
| newDeviceType = policyMix->mDeviceType; |
| } |
| sp<DeviceDescriptor> device = mAvailableOutputDevices.getDevice(newDeviceType, String8(address), |
| AUDIO_FORMAT_DEFAULT); |
| ALOG_ASSERT(device, "%s: no device found t=%u, a=%s", __func__, newDeviceType, address); |
| devices.add(device); |
| } |
| |
| // requiresMuteCheck is false when we can bypass mute strategy. |
| // It covers a common case when there is no materially active audio |
| // and muting would result in unnecessary delay and dropped audio. |
| const uint32_t outputLatencyMs = outputDesc->latency(); |
| bool requiresMuteCheck = outputDesc->isActive(outputLatencyMs * 2); // account for drain |
| |
| // increment usage count for this stream on the requested output: |
| // NOTE that the usage count is the same for duplicated output and hardware output which is |
| // necessary for a correct control of hardware output routing by startOutput() and stopOutput() |
| outputDesc->setClientActive(client, true); |
| |
| if (client->hasPreferredDevice(true)) { |
| if (outputDesc->clientsList(true /*activeOnly*/).size() == 1 && |
| client->isPreferredDeviceForExclusiveUse()) { |
| // Preferred device may be exclusive, use only if no other active clients on this output |
| devices = DeviceVector( |
| mAvailableOutputDevices.getDeviceFromId(client->preferredDeviceId())); |
| } else { |
| devices = getNewOutputDevices(outputDesc, false /*fromCache*/); |
| } |
| if (devices != outputDesc->devices()) { |
| checkStrategyRoute(clientStrategy, outputDesc->mIoHandle); |
| } |
| } |
| |
| if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_MEDIA))) { |
| selectOutputForMusicEffects(); |
| } |
| |
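| // Apply routing, muting and volume rules when this client is the first active one for its |
| // volume source on this output, or when a device was already selected above. |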
| if (outputDesc->getActivityCount(clientVolSrc) == 1 || !devices.isEmpty()) { |
| // starting an output being rerouted? |
| if (devices.isEmpty()) { |
| devices = getNewOutputDevices(outputDesc, false /*fromCache*/); |
| } |
| bool shouldWait = |
| (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM)) || |
| followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_NOTIFICATION)) || |
| (beaconMuteLatency > 0)); |
| uint32_t waitMs = beaconMuteLatency; |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| if (desc != outputDesc) { |
| // Another output shares a device with this one if it: |
| // - is managed by the same hw module |
| // - supports the currently selected device |
| const bool sharedDevice = outputDesc->sharesHwModuleWith(desc) |
| && (!desc->filterSupportedDevices(devices).isEmpty()); |
| |
| // force a device change if any other output is: |
| // - managed by the same hw module |
| // - supports currently selected device |
| // - has a current device selection that differs from selected device. |
| // - has an active audio patch |
| // In this case, the audio HAL must receive the new device selection so that it can |
| // change the device currently selected by the other output. |
| if (sharedDevice && |
| desc->devices() != devices && |
| desc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) { |
| force = true; |
| } |
| // wait for audio on other active outputs to be presented when starting |
| // a notification so that the audio focus effect can propagate, or when a |
| // mute/unmute event occurred for a beacon |
| const uint32_t latencyMs = desc->latency(); |
| const bool isActive = desc->isActive(latencyMs * 2); // account for drain |
| |
| if (shouldWait && isActive && (waitMs < latencyMs)) { |
| waitMs = latencyMs; |
| } |
| |
| // Require mute check if another output is on a shared device |
| // and currently active to have proper drain and avoid pops. |
| // Note restoring AudioTracks onto this output needs to invoke |
| // a volume ramp if there is no mute. |
| requiresMuteCheck |= sharedDevice && isActive; |
| } |
| } |
| |
| const uint32_t muteWaitMs = |
| setOutputDevices(outputDesc, devices, force, 0, NULL, requiresMuteCheck); |
| |
| // apply volume rules for current stream and device if necessary |
| auto &curves = getVolumeCurves(client->attributes()); |
| checkAndSetVolume(curves, client->volumeSource(), |
| curves.getVolumeIndex(outputDesc->devices().types()), |
| outputDesc, |
| outputDesc->devices().types()); |
| |
| // update the outputs if starting an output with a stream that can affect notification |
| // routing |
| handleNotificationRoutingForStream(stream); |
| |
| // force reevaluating accessibility routing when ringtone or alarm starts |
| if (followsSameRouting(clientAttr, attributes_initializer(AUDIO_USAGE_ALARM))) { |
| mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY); |
| } |
| |
| if (waitMs > muteWaitMs) { |
| *delayMs = waitMs - muteWaitMs; |
| } |
| |
| // FIXME: A device change (muteWaitMs > 0) likely introduces a volume change. |
| // A volume change enacted by APM with 0 delay is not synchronous, as it goes |
| // via AudioCommandThread to AudioFlinger. Hence it is possible that the volume |
| // change occurs after the MixerThread starts and causes a stream volume |
| // glitch. |
| // |
| // We do not introduce additional delay here. |
| } |
| |
| if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE && |
| mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) { |
| setStrategyMute(streamToStrategy(AUDIO_STREAM_ALARM), true, outputDesc); |
| } |
| |
| // Automatically enable the remote submix input when output is started on a re-routing mix |
| // of type MIX_TYPE_RECORDERS |
| if (audio_is_remote_submix_device(devices.types()) && policyMix != NULL && |
| policyMix->mMixType == MIX_TYPE_RECORDERS) { |
| setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX, |
| AUDIO_POLICY_DEVICE_STATE_AVAILABLE, |
| address, |
| "remote-submix", |
| AUDIO_FORMAT_DEFAULT); |
| } |
| |
| return NO_ERROR; |
| } |
| |
| status_t AudioPolicyManager::stopOutput(audio_port_handle_t portId) |
| { |
| ALOGV("%s portId %d", __FUNCTION__, portId); |
| |
| sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId); |
| if (outputDesc == 0) { |
| ALOGW("stopOutput() no output for client %d", portId); |
| return BAD_VALUE; |
| } |
| sp<TrackClientDescriptor> client = outputDesc->getClient(portId); |
| |
| ALOGV("stopOutput() output %d, stream %d, session %d", |
| outputDesc->mIoHandle, client->stream(), client->session()); |
| |
| status_t status = stopSource(outputDesc, client); |
| |
| if (status == NO_ERROR ) { |
| outputDesc->stop(); |
| } |
| return status; |
| } |
| |
| status_t AudioPolicyManager::stopSource(const sp<SwAudioOutputDescriptor>& outputDesc, |
| const sp<TrackClientDescriptor>& client) |
| { |
| // always handle stream stop, check which stream type is stopping |
| audio_stream_type_t stream = client->stream(); |
| auto clientVolSrc = client->volumeSource(); |
| |
| handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT); |
| |
| if (outputDesc->getActivityCount(clientVolSrc) > 0) { |
| if (outputDesc->getActivityCount(clientVolSrc) == 1) { |
| // Automatically disable the remote submix input when output is stopped on a |
| // re-routing mix of type MIX_TYPE_RECORDERS |
| sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote(); |
| if (audio_is_remote_submix_device(outputDesc->devices().types()) && |
| policyMix != NULL && |
| policyMix->mMixType == MIX_TYPE_RECORDERS) { |
| setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX, |
| AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, |
| policyMix->mDeviceAddress, |
| "remote-submix", AUDIO_FORMAT_DEFAULT); |
| } |
| } |
| bool forceDeviceUpdate = false; |
| if (client->hasPreferredDevice(true)) { |
| checkStrategyRoute(client->strategy(), AUDIO_IO_HANDLE_NONE); |
| forceDeviceUpdate = true; |
| } |
| |
| // decrement usage count of this stream on the output |
| outputDesc->setClientActive(client, false); |
| |
| // store time at which the stream was stopped - see isStreamActive() |
| if (outputDesc->getActivityCount(clientVolSrc) == 0 || forceDeviceUpdate) { |
| outputDesc->setStopTime(client, systemTime()); |
| DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/); |
| // delay the device switch by twice the latency because stopOutput() is executed when |
| // the track stop() command is received and at that time the audio track buffer can |
| // still contain data that needs to be drained. The latency only covers the audio HAL |
| // and kernel buffers. Also the latency does not always include additional delay in the |
| // audio path (audio DSP, CODEC ...) |
| setOutputDevices(outputDesc, newDevices, false, outputDesc->latency()*2); |
| |
| // force restoring the device selection on other active outputs if it differs from the |
| // one being selected for this output |
| uint32_t delayMs = outputDesc->latency()*2; |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| if (desc != outputDesc && |
| desc->isActive() && |
| outputDesc->sharesHwModuleWith(desc) && |
| (newDevices != desc->devices())) { |
| DeviceVector newDevices2 = getNewOutputDevices(desc, false /*fromCache*/); |
| bool force = desc->devices() != newDevices2; |
| |
| setOutputDevices(desc, newDevices2, force, delayMs); |
| |
| // re-apply device specific volume if not done by setOutputDevices() |
| if (!force) { |
| applyStreamVolumes(desc, newDevices2.types(), delayMs); |
| } |
| } |
| } |
| // update the outputs if stopping one with a stream that can affect notification routing |
| handleNotificationRoutingForStream(stream); |
| } |
| |
| if (stream == AUDIO_STREAM_ENFORCED_AUDIBLE && |
| mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) { |
| setStrategyMute(streamToStrategy(AUDIO_STREAM_ALARM), false, outputDesc); |
| } |
| |
| if (followsSameRouting(client->attributes(), attributes_initializer(AUDIO_USAGE_MEDIA))) { |
| selectOutputForMusicEffects(); |
| } |
| return NO_ERROR; |
| } else { |
| ALOGW("stopOutput() refcount is already 0"); |
| return INVALID_OPERATION; |
| } |
| } |
| |
| void AudioPolicyManager::releaseOutput(audio_port_handle_t portId) |
| { |
| ALOGV("%s portId %d", __FUNCTION__, portId); |
| |
| sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputForClient(portId); |
| if (outputDesc == 0) { |
| // If an output descriptor is closed due to a device routing change, |
| // then there are race conditions with releaseOutput from tracks |
| // that may be destroyed (with no PlaybackThread) or a PlaybackThread |
| // destroyed shortly thereafter. |
| // |
| // Here we just log a warning, instead of a fatal error. |
| ALOGW("releaseOutput() no output for client %d", portId); |
| return; |
| } |
| |
| ALOGV("releaseOutput() %d", outputDesc->mIoHandle); |
| |
| if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) { |
| if (outputDesc->mDirectOpenCount <= 0) { |
| ALOGW("releaseOutput() invalid open count %d for output %d", |
| outputDesc->mDirectOpenCount, outputDesc->mIoHandle); |
| return; |
| } |
| if (--outputDesc->mDirectOpenCount == 0) { |
| closeOutput(outputDesc->mIoHandle); |
| mpClientInterface->onAudioPortListUpdate(); |
| } |
| } |
| // stopOutput() needs to be successfully called before releaseOutput() |
| // otherwise there may be inaccurate stream reference counts. |
| // This is checked in outputDesc->removeClient below. |
| outputDesc->removeClient(portId); |
| } |
| |
| status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr, |
| audio_io_handle_t *input, |
| audio_unique_id_t riid, |
| audio_session_t session, |
| uid_t uid, |
| const audio_config_base_t *config, |
| audio_input_flags_t flags, |
| audio_port_handle_t *selectedDeviceId, |
| input_type_t *inputType, |
| audio_port_handle_t *portId) |
| { |
| ALOGV("%s() source %d, sampling rate %d, format %#x, channel mask %#x, session %d, " |
| "flags %#x attributes=%s", __func__, attr->source, config->sample_rate, |
| config->format, config->channel_mask, session, flags, toString(*attr).c_str()); |
| |
| status_t status = NO_ERROR; |
| audio_source_t halInputSource; |
| audio_attributes_t attributes = *attr; |
| sp<AudioPolicyMix> policyMix; |
| sp<DeviceDescriptor> device; |
| sp<AudioInputDescriptor> inputDesc; |
| sp<RecordClientDescriptor> clientDesc; |
| audio_port_handle_t requestedDeviceId = *selectedDeviceId; |
| bool isSoundTrigger; |
| |
| // The supplied portId must be AUDIO_PORT_HANDLE_NONE |
| if (*portId != AUDIO_PORT_HANDLE_NONE) { |
| return INVALID_OPERATION; |
| } |
| |
| if (attr->source == AUDIO_SOURCE_DEFAULT) { |
| attributes.source = AUDIO_SOURCE_MIC; |
| } |
| |
| // Explicit routing? |
| sp<DeviceDescriptor> explicitRoutingDevice = |
| mAvailableInputDevices.getDeviceFromId(*selectedDeviceId); |
| |
| // special case for mmap capture: if an input IO handle is specified, we reuse this input if |
| // possible |
| if ((flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) == AUDIO_INPUT_FLAG_MMAP_NOIRQ && |
| *input != AUDIO_IO_HANDLE_NONE) { |
| ssize_t index = mInputs.indexOfKey(*input); |
| if (index < 0) { |
| ALOGW("getInputForAttr() unknown MMAP input %d", *input); |
| status = BAD_VALUE; |
| goto error; |
| } |
| sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index); |
| RecordClientVector clients = inputDesc->getClientsForSession(session); |
| if (clients.size() == 0) { |
| ALOGW("getInputForAttr() unknown session %d on input %d", session, *input); |
| status = BAD_VALUE; |
| goto error; |
| } |
| // For MMAP mode, the first call to getInputForAttr() is made on behalf of audioflinger. |
| // The second call is for the first active client and sets the UID. Any further call |
| // corresponds to a new client and is only permitted from the same UID. |
| // If the first UID is silenced, allow a new UID connection and replace it with the new UID. |
| if (clients.size() > 1) { |
| for (const auto& client : clients) { |
| // The client map is ordered by key values (portId) and portIds are allocated |
| // incrementally. So the first client in this list is the one opened by audioflinger |
| // when the mmap stream is created; it should be ignored as it does not correspond |
| // to an actual client. |
| if (client == *clients.cbegin()) { |
| continue; |
| } |
| if (uid != client->uid() && !client->isSilenced()) { |
| ALOGW("getInputForAttr() bad uid %d for client %d uid %d", |
| uid, client->portId(), client->uid()); |
| status = INVALID_OPERATION; |
| goto error; |
| } |
| } |
| } |
| *inputType = API_INPUT_LEGACY; |
| device = inputDesc->getDevice(); |
| |
| ALOGI("%s reusing MMAP input %d for session %d", __FUNCTION__, *input, session); |
| goto exit; |
| } |
| |
| *input = AUDIO_IO_HANDLE_NONE; |
| *inputType = API_INPUT_INVALID; |
| |
| halInputSource = attributes.source; |
| |
| if (attributes.source == AUDIO_SOURCE_REMOTE_SUBMIX && |
| strncmp(attributes.tags, "addr=", strlen("addr=")) == 0) { |
| status = mPolicyMixes.getInputMixForAttr(attributes, &policyMix); |
| if (status != NO_ERROR) { |
| ALOGW("%s could not find input mix for attr %s", |
| __func__, toString(attributes).c_str()); |
| goto error; |
| } |
| device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX, |
| String8(attr->tags + strlen("addr=")), |
| AUDIO_FORMAT_DEFAULT); |
| if (device == nullptr) { |
| ALOGW("%s could not find in Remote Submix device for source %d, tags %s", |
| __func__, attributes.source, attributes.tags); |
| status = BAD_VALUE; |
| goto error; |
| } |
| |
| if (is_mix_loopback_render(policyMix->mRouteFlags)) { |
| *inputType = API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK; |
| } else { |
| *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE; |
| } |
| } else { |
| if (explicitRoutingDevice != nullptr) { |
| device = explicitRoutingDevice; |
| } else { |
| // Prevent storing an invalid requested device id in clients |
| requestedDeviceId = AUDIO_PORT_HANDLE_NONE; |
| device = mEngine->getInputDeviceForAttributes(attributes, &policyMix); |
| } |
| if (device == nullptr) { |
| ALOGW("getInputForAttr() could not find device for source %d", attributes.source); |
| status = BAD_VALUE; |
| goto error; |
| } |
| if (policyMix) { |
| ALOG_ASSERT(policyMix->mMixType == MIX_TYPE_RECORDERS, "Invalid Mix Type"); |
| // there is an external policy, but this input is attached to a mix of recorders, |
| // meaning it receives audio injected into the framework, so the recorder doesn't |
| // know about it and is therefore considered "legacy" |
| *inputType = API_INPUT_LEGACY; |
| } else if (audio_is_remote_submix_device(device->type())) { |
| *inputType = API_INPUT_MIX_CAPTURE; |
| } else if (device->type() == AUDIO_DEVICE_IN_TELEPHONY_RX) { |
| *inputType = API_INPUT_TELEPHONY_RX; |
| } else { |
| *inputType = API_INPUT_LEGACY; |
| } |
| |
| } |
| |
| *input = getInputForDevice(device, session, attributes, config, flags, policyMix); |
| if (*input == AUDIO_IO_HANDLE_NONE) { |
| status = INVALID_OPERATION; |
| goto error; |
| } |
| |
| exit: |
| |
| *selectedDeviceId = mAvailableInputDevices.contains(device) ? |
| device->getId() : AUDIO_PORT_HANDLE_NONE; |
| |
| isSoundTrigger = attributes.source == AUDIO_SOURCE_HOTWORD && |
| mSoundTriggerSessions.indexOfKey(session) >= 0; |
| *portId = AudioPort::getNextUniqueId(); |
| |
| clientDesc = new RecordClientDescriptor(*portId, riid, uid, session, attributes, *config, |
| requestedDeviceId, attributes.source, flags, |
| isSoundTrigger); |
| inputDesc = mInputs.valueFor(*input); |
| inputDesc->addClient(clientDesc); |
| |
| ALOGV("getInputForAttr() returns input %d type %d selectedDeviceId %d for port ID %d", |
| *input, *inputType, *selectedDeviceId, *portId); |
| |
| return NO_ERROR; |
| |
| error: |
| return status; |
| } |
| |
| |
| audio_io_handle_t AudioPolicyManager::getInputForDevice(const sp<DeviceDescriptor> &device, |
| audio_session_t session, |
| const audio_attributes_t &attributes, |
| const audio_config_base_t *config, |
| audio_input_flags_t flags, |
| const sp<AudioPolicyMix> &policyMix) |
| { |
| audio_io_handle_t input = AUDIO_IO_HANDLE_NONE; |
| audio_source_t halInputSource = attributes.source; |
| bool isSoundTrigger = false; |
| |
| if (attributes.source == AUDIO_SOURCE_HOTWORD) { |
| ssize_t index = mSoundTriggerSessions.indexOfKey(session); |
| if (index >= 0) { |
| input = mSoundTriggerSessions.valueFor(session); |
| isSoundTrigger = true; |
| flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD); |
| ALOGV("SoundTrigger capture on session %d input %d", session, input); |
| } else { |
| halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION; |
| } |
| } else if (attributes.source == AUDIO_SOURCE_VOICE_COMMUNICATION && |
| audio_is_linear_pcm(config->format)) { |
| flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_VOIP_TX); |
| } |
| |
| // find a compatible input profile (not necessarily identical in parameters) |
| sp<IOProfile> profile; |
| // sampling rate and flags may be updated by getInputProfile |
| uint32_t profileSamplingRate = (config->sample_rate == 0) ? |
| SAMPLE_RATE_HZ_DEFAULT : config->sample_rate; |
| audio_format_t profileFormat; |
| audio_channel_mask_t profileChannelMask = config->channel_mask; |
| audio_input_flags_t profileFlags = flags; |
| for (;;) { |
| profileFormat = config->format; // reset each time through loop, in case it is updated |
| profile = getInputProfile(device, profileSamplingRate, profileFormat, profileChannelMask, |
| profileFlags); |
| if (profile != 0) { |
| break; // success |
| } else if (profileFlags & AUDIO_INPUT_FLAG_RAW) { |
| profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry |
| } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) { |
| profileFlags = AUDIO_INPUT_FLAG_NONE; // retry |
| } else { // fail |
| ALOGW("%s could not find profile for device %s, sampling rate %u, format %#x, " |
| "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(), |
| config->sample_rate, config->format, config->channel_mask, flags); |
| return input; |
| } |
| } |
| // Pick input sampling rate if not specified by client |
| uint32_t samplingRate = config->sample_rate; |
| if (samplingRate == 0) { |
| samplingRate = profileSamplingRate; |
| } |
| |
| if (profile->getModuleHandle() == 0) { |
| ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName()); |
| return input; |
| } |
| |
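| // The profile has reached its maximum number of open streams: reuse an existing input |
| // attached to this profile when possible, or close inactive ones to free a slot. |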
| if (!profile->canOpenNewIo()) { |
| for (size_t i = 0; i < mInputs.size(); ) { |
| sp <AudioInputDescriptor> desc = mInputs.valueAt(i); |
| if (desc->mProfile != profile) { |
| i++; |
| continue; |
| } |
| // If this is a sound trigger capture, reuse the input if it is already used by another |
| // sound trigger client on the same session; otherwise, reuse the input if it has an |
| // active client app that is not in the IDLE state. |
| RecordClientVector clients = desc->clientsList(); |
| bool doClose = false; |
| for (const auto& client : clients) { |
| if (isSoundTrigger != client->isSoundTrigger()) { |
| continue; |
| } |
| if (client->isSoundTrigger()) { |
| if (session == client->session()) { |
| return desc->mIoHandle; |
| } |
| continue; |
| } |
| if (client->active() && client->appState() != APP_STATE_IDLE) { |
| return desc->mIoHandle; |
| } |
| doClose = true; |
| } |
| if (doClose) { |
| closeInput(desc->mIoHandle); |
| } else { |
| i++; |
| } |
| } |
| } |
| |
| sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile, mpClientInterface); |
| |
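| // Open the input stream with the parameters selected from the matching profile; the |
| // result is verified below and the input is closed if the HAL did not honor them exactly. |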
| audio_config_t lConfig = AUDIO_CONFIG_INITIALIZER; |
| lConfig.sample_rate = profileSamplingRate; |
| lConfig.channel_mask = profileChannelMask; |
| lConfig.format = profileFormat; |
| |
| status_t status = inputDesc->open(&lConfig, device, halInputSource, profileFlags, &input); |
| |
| // only accept input with the exact requested set of parameters |
| if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE || |
| (profileSamplingRate != lConfig.sample_rate) || |
| !audio_formats_match(profileFormat, lConfig.format) || |
| (profileChannelMask != lConfig.channel_mask)) { |
| ALOGW("getInputForAttr() failed opening input: sampling rate %d" |
| ", format %#x, channel mask %#x", |
| profileSamplingRate, profileFormat, profileChannelMask); |
| if (input != AUDIO_IO_HANDLE_NONE) { |
| inputDesc->close(); |
| } |
| return AUDIO_IO_HANDLE_NONE; |
| } |
| |
| inputDesc->mPolicyMix = policyMix; |
| |
| addInput(input, inputDesc); |
| mpClientInterface->onAudioPortListUpdate(); |
| |
| return input; |
| } |
| |
| status_t AudioPolicyManager::startInput(audio_port_handle_t portId) |
| { |
| ALOGV("%s portId %d", __FUNCTION__, portId); |
| |
| sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId); |
| if (inputDesc == 0) { |
| ALOGW("%s no input for client %d", __FUNCTION__, portId); |
| return BAD_VALUE; |
| } |
| audio_io_handle_t input = inputDesc->mIoHandle; |
| sp<RecordClientDescriptor> client = inputDesc->getClient(portId); |
| if (client->active()) { |
| ALOGW("%s input %d client %d already started", __FUNCTION__, input, client->portId()); |
| return INVALID_OPERATION; |
| } |
| |
| audio_session_t session = client->session(); |
| |
| ALOGV("%s input:%d, session:%d)", __FUNCTION__, input, session); |
| |
| Vector<sp<AudioInputDescriptor>> activeInputs = mInputs.getActiveInputs(); |
| |
| status_t status = inputDesc->start(); |
| if (status != NO_ERROR) { |
| return status; |
| } |
| |
| // increment activity count before calling getNewInputDevice() below as only active sessions |
| // are considered for device selection |
| inputDesc->setClientActive(client, true); |
| |
| // reevaluate and force the input device now that the session is active; active capture |
| // from a mic on the primary HW module is reported to the sound trigger service below |
| sp<DeviceDescriptor> device = getNewInputDevice(inputDesc); |
| setInputDevice(input, device, true /* force */); |
| |
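| // The actions below only apply when the input transitions from inactive to active: |
| // notify dynamic policy mix listeners, update the sound trigger capture state and |
| // enable the remote submix output if needed. |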
| if (inputDesc->activeCount() == 1) { |
| sp<AudioPolicyMix> policyMix = inputDesc->mPolicyMix.promote(); |
| // if input maps to a dynamic policy with an activity listener, notify of state change |
| if ((policyMix != NULL) |
| && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) { |
| mpClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress, |
| MIX_STATE_MIXING); |
| } |
| |
| DeviceVector primaryInputDevices = availablePrimaryModuleInputDevices(); |
| if (primaryInputDevices.contains(device) && |
| mInputs.activeInputsCountOnDevices(primaryInputDevices) == 1) { |
| SoundTrigger::setCaptureState(true); |
| } |
| |
| // automatically enable the remote submix output when input is started if not |
| // used by a policy mix of type MIX_TYPE_RECORDERS |
| // For remote submix (a virtual device), we open only one input per capture request. |
| if (audio_is_remote_submix_device(inputDesc->getDeviceType())) { |
| String8 address = String8(""); |
| if (policyMix == NULL) { |
| address = String8("0"); |
| } else if (policyMix->mMixType == MIX_TYPE_PLAYERS) { |
| address = policyMix->mDeviceAddress; |
| } |
| if (address != "") { |
| setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, |
| AUDIO_POLICY_DEVICE_STATE_AVAILABLE, |
| address, "remote-submix", AUDIO_FORMAT_DEFAULT); |
| } |
| } |
| } |
| |
| ALOGV("%s input %d source = %d exit", __FUNCTION__, input, client->source()); |
| |
| return NO_ERROR; |
| } |
| |
| status_t AudioPolicyManager::stopInput(audio_port_handle_t portId) |
| { |
| ALOGV("%s portId %d", __FUNCTION__, portId); |
| |
| sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId); |
| if (inputDesc == 0) { |
| ALOGW("%s no input for client %d", __FUNCTION__, portId); |
| return BAD_VALUE; |
| } |
| audio_io_handle_t input = inputDesc->mIoHandle; |
| sp<RecordClientDescriptor> client = inputDesc->getClient(portId); |
| if (!client->active()) { |
| ALOGW("%s input %d client %d already stopped", __FUNCTION__, input, client->portId()); |
| return INVALID_OPERATION; |
| } |
| |
| inputDesc->setClientActive(client, false); |
| |
| inputDesc->stop(); |
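| // If other clients keep the input active, only reevaluate the input device; otherwise |
| // notify listeners, disable the remote submix output if needed and reset the input device. |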
| if (inputDesc->isActive()) { |
| setInputDevice(input, getNewInputDevice(inputDesc), false /* force */); |
| } else { |
| sp<AudioPolicyMix> policyMix = inputDesc->mPolicyMix.promote(); |
| // if input maps to a dynamic policy with an activity listener, notify of state change |
| if ((policyMix != NULL) |
| && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) { |
| mpClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress, |
| MIX_STATE_IDLE); |
| } |
| |
| // automatically disable the remote submix output when input is stopped if not |
| // used by a policy mix of type MIX_TYPE_RECORDERS |
| if (audio_is_remote_submix_device(inputDesc->getDeviceType())) { |
| String8 address = String8(""); |
| if (policyMix == NULL) { |
| address = String8("0"); |
| } else if (policyMix->mMixType == MIX_TYPE_PLAYERS) { |
| address = policyMix->mDeviceAddress; |
| } |
| if (address != "") { |
| setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, |
| AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE, |
| address, "remote-submix", AUDIO_FORMAT_DEFAULT); |
| } |
| } |
| resetInputDevice(input); |
| |
| // indicate inactive capture to sound trigger service if stopping capture from a mic on |
| // primary HW module |
| DeviceVector primaryInputDevices = availablePrimaryModuleInputDevices(); |
| if (primaryInputDevices.contains(inputDesc->getDevice()) && |
| mInputs.activeInputsCountOnDevices(primaryInputDevices) == 0) { |
| SoundTrigger::setCaptureState(false); |
| } |
| inputDesc->clearPreemptedSessions(); |
| } |
| return NO_ERROR; |
| } |
| |
| void AudioPolicyManager::releaseInput(audio_port_handle_t portId) |
| { |
| ALOGV("%s portId %d", __FUNCTION__, portId); |
| |
| sp<AudioInputDescriptor> inputDesc = mInputs.getInputForClient(portId); |
| if (inputDesc == 0) { |
| ALOGW("%s no input for client %d", __FUNCTION__, portId); |
| return; |
| } |
| sp<RecordClientDescriptor> client = inputDesc->getClient(portId); |
| audio_io_handle_t input = inputDesc->mIoHandle; |
| |
| ALOGV("%s %d", __FUNCTION__, input); |
| |
| inputDesc->removeClient(portId); |
| |
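| // Close the input only once its last client has been released. |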
| if (inputDesc->getClientCount() > 0) { |
| ALOGV("%s(%d) %zu clients remaining", __func__, portId, inputDesc->getClientCount()); |
| return; |
| } |
| |
| closeInput(input); |
| mpClientInterface->onAudioPortListUpdate(); |
| ALOGV("%s exit", __FUNCTION__); |
| } |
| |
| void AudioPolicyManager::closeActiveClients(const sp<AudioInputDescriptor>& input) |
| { |
| RecordClientVector clients = input->clientsList(true); |
| |
| for (const auto& client : clients) { |
| closeClient(client->portId()); |
| } |
| } |
| |
| void AudioPolicyManager::closeClient(audio_port_handle_t portId) |
| { |
| stopInput(portId); |
| releaseInput(portId); |
| } |
| |
| void AudioPolicyManager::checkCloseInputs() { |
| // After connecting or disconnecting an input device, close an input if: |
| // - it has no client (it was just opened to check a profile), OR |
| // - none of its supported devices are connected anymore, OR |
| // - one of its clients can no longer be routed to one of its supported devices. |
| // Otherwise, update its device selection. |
| std::vector<audio_io_handle_t> inputsToClose; |
| for (size_t i = 0; i < mInputs.size(); i++) { |
| const sp<AudioInputDescriptor> input = mInputs.valueAt(i); |
| if (input->clientsList().size() == 0 |
| || !mAvailableInputDevices.containsAtLeastOne(input->supportedDevices())) { |
| inputsToClose.push_back(mInputs.keyAt(i)); |
| } else { |
| bool close = false; |
| for (const auto& client : input->clientsList()) { |
| sp<DeviceDescriptor> device = |
| mEngine->getInputDeviceForAttributes(client->attributes()); |
| if (!input->supportedDevices().contains(device)) { |
| close = true; |
| break; |
| } |
| } |
| if (close) { |
| inputsToClose.push_back(mInputs.keyAt(i)); |
| } else { |
| setInputDevice(input->mIoHandle, getNewInputDevice(input)); |
| } |
| } |
| } |
| |
| for (const audio_io_handle_t handle : inputsToClose) { |
| ALOGV("%s closing input %d", __func__, handle); |
| closeInput(handle); |
| } |
| } |
| |
| void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) |
| { |
| ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax); |
| if (indexMin < 0 || indexMax < 0) { |
| ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax); |
| return; |
| } |
| getVolumeCurves(stream).initVolume(indexMin, indexMax); |
| |
| // initialize other private stream volumes which follow this one |
| for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) { |
| if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) { |
| continue; |
| } |
| getVolumeCurves((audio_stream_type_t)curStream).initVolume(indexMin, indexMax); |
| } |
| } |
| |
| status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream, |
| int index, |
| audio_devices_t device) |
| { |
| auto attributes = mEngine->getAttributesForStreamType(stream); |
| ALOGV("%s: stream %s attributes=%s", __func__, |
| toString(stream).c_str(), toString(attributes).c_str()); |
| return setVolumeIndexForAttributes(attributes, index, device); |
| } |
| |
| status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream, |
| int *index, |
| audio_devices_t device) |
| { |
| // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this |
| // stream by the engine. |
| if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) { |
| device = mEngine->getOutputDevicesForStream(stream, true /*fromCache*/).types(); |
| } |
| return getVolumeIndex(getVolumeCurves(stream), *index, device); |
| } |
| |
| status_t AudioPolicyManager::setVolumeIndexForAttributes(const audio_attributes_t &attributes, |
| int index, |
| audio_devices_t device) |
| { |
| // Get Volume group matching the Audio Attributes |
| auto group = mEngine->getVolumeGroupForAttributes(attributes); |
| if (group == VOLUME_GROUP_NONE) { |
| ALOGD("%s: no group matching with %s", __FUNCTION__, toString(attributes).c_str()); |
| return BAD_VALUE; |
| } |
| ALOGV("%s: group %d matching with %s", __FUNCTION__, group, toString(attributes).c_str()); |
| status_t status = NO_ERROR; |
| IVolumeCurves &curves = getVolumeCurves(attributes); |
| VolumeSource vs = toVolumeSource(group); |
| product_strategy_t strategy = mEngine->getProductStrategyForAttributes(attributes); |
| |
| status = setVolumeCurveIndex(index, device, curves); |
| if (status != NO_ERROR) { |
| ALOGE("%s failed to set curve index for group %d device 0x%X", __func__, group, device); |
| return status; |
| } |
| |
| audio_devices_t curSrcDevice; |
| auto curCurvAttrs = curves.getAttributes(); |
| if (!curCurvAttrs.empty() && curCurvAttrs.front() != defaultAttr) { |
| auto attr = curCurvAttrs.front(); |
| curSrcDevice = mEngine->getOutputDevicesForAttributes(attr, nullptr, false).types(); |
| } else if (!curves.getStreamTypes().empty()) { |
| auto stream = curves.getStreamTypes().front(); |
| curSrcDevice = mEngine->getOutputDevicesForStream(stream, false).types(); |
| } else { |
| ALOGE("%s: Invalid src %d: no valid attributes nor stream",__func__, vs); |
| return BAD_VALUE; |
| } |
| curSrcDevice = Volume::getDeviceForVolume(curSrcDevice); |
| |
| // update volume on all outputs and streams matching the following: |
| // - The requested stream (or a stream matching for volume control) is active on the output |
| // - The device (or devices) selected by the engine for this stream includes |
| // the requested device |
| // - For non default requested device, currently selected device on the output is either the |
| // requested device or one of the devices selected by the engine for this stream |
| // - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if |
| // no specific device volume value exists for currently selected device. |
| for (size_t i = 0; i < mOutputs.size(); i++) { |
| sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i); |
| audio_devices_t curDevice = desc->devices().types(); |
| |
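| // SPEAKER_SAFE is treated as an alias of SPEAKER for the purpose of volume management. |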
| if (curDevice & AUDIO_DEVICE_OUT_SPEAKER_SAFE) { |
| curDevice |= AUDIO_DEVICE_OUT_SPEAKER; |
| curDevice &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE; |
| } |
| |
|