am f67f894f: (-s ours) am ff9a97f6: am 92aa0532: am 5e47eb95: am 35a7b54e: am f9a51893: (-s ours) am 9a2be5d4: Merge "DO NOT MERGE - Fix software video decoder buffer size calculation" into klp-dev

* commit 'f67f894fead9065dcffda03a127cee7d746c9226':
  DO NOT MERGE - Fix software video decoder buffer size calculation
diff --git a/camera/Android.mk b/camera/Android.mk
index da5ac59..471cb0d 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -28,14 +28,13 @@
 	ICameraClient.cpp \
 	ICameraService.cpp \
 	ICameraServiceListener.cpp \
+	ICameraServiceProxy.cpp \
 	ICameraRecordingProxy.cpp \
 	ICameraRecordingProxyListener.cpp \
-	IProCameraUser.cpp \
-	IProCameraCallbacks.cpp \
 	camera2/ICameraDeviceUser.cpp \
 	camera2/ICameraDeviceCallbacks.cpp \
 	camera2/CaptureRequest.cpp \
-	ProCamera.cpp \
+	camera2/OutputConfiguration.cpp \
 	CameraBase.cpp \
 	CameraUtils.cpp \
 	VendorTagDescriptor.cpp
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 3a9fb4c..9bf3134 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -97,7 +97,8 @@
         c->mStatus = NO_ERROR;
         camera = c;
     } else {
-        ALOGW("An error occurred while connecting to camera: %d", cameraId);
+        ALOGW("An error occurred while connecting to camera %d: %d (%s)",
+                cameraId, status, strerror(-status));
         c.clear();
     }
     return status;
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 65a1a47..5d50aa8 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -29,7 +29,6 @@
 #include <camera/ICameraService.h>
 
 // needed to instantiate
-#include <camera/ProCamera.h>
 #include <camera/Camera.h>
 
 #include <system/camera_metadata.h>
@@ -217,7 +216,6 @@
     return cs->removeListener(listener);
 }
 
-template class CameraBase<ProCamera>;
 template class CameraBase<Camera>;
 
 } // namespace android
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 043437f..46bcc1d 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -74,7 +74,7 @@
     clear();
 }
 
-const camera_metadata_t* CameraMetadata::getAndLock() {
+const camera_metadata_t* CameraMetadata::getAndLock() const {
     mLocked = true;
     return mBuffer;
 }
@@ -289,6 +289,17 @@
         ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
         return BAD_VALUE;
     }
+    // Safety check - ensure that data isn't pointing to this metadata, since
+    // that would get invalidated if a resize is needed
+    size_t bufferSize = get_camera_metadata_size(mBuffer);
+    uintptr_t bufAddr = reinterpret_cast<uintptr_t>(mBuffer);
+    uintptr_t dataAddr = reinterpret_cast<uintptr_t>(data);
+    if (dataAddr > bufAddr && dataAddr < (bufAddr + bufferSize)) {
+        ALOGE("%s: Update attempted with data from the same metadata buffer!",
+                __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+
     size_t data_size = calculate_camera_metadata_entry_data_size(type,
             data_count);
 
@@ -583,7 +594,7 @@
      */
     WritableBlob blob;
     do {
-        res = data.writeBlob(blobSize, &blob);
+        res = data.writeBlob(blobSize, false, &blob);
         if (res != OK) {
             break;
         }
diff --git a/camera/CameraParameters.cpp b/camera/CameraParameters.cpp
index e5e4e90..68969cf 100644
--- a/camera/CameraParameters.cpp
+++ b/camera/CameraParameters.cpp
@@ -526,8 +526,12 @@
         !strcmp(format, PIXEL_FORMAT_RGBA8888) ?
             HAL_PIXEL_FORMAT_RGBA_8888 :    // RGB8888
         !strcmp(format, PIXEL_FORMAT_BAYER_RGGB) ?
-            HAL_PIXEL_FORMAT_RAW_SENSOR :   // Raw sensor data
+            HAL_PIXEL_FORMAT_RAW16 :   // Raw sensor data
         -1;
 }
 
+bool CameraParameters::isEmpty() const {
+    return mMap.isEmpty();
+}
+
 }; // namespace android
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
index fc3e437..b359f57 100644
--- a/camera/ICameraService.cpp
+++ b/camera/ICameraService.cpp
@@ -2,16 +2,16 @@
 **
 ** Copyright 2008, The Android Open Source Project
 **
-** Licensed under the Apache License, Version 2.0 (the "License"); 
-** you may not use this file except in compliance with the License. 
-** You may obtain a copy of the License at 
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
 **
-**     http://www.apache.org/licenses/LICENSE-2.0 
+**     http://www.apache.org/licenses/LICENSE-2.0
 **
-** Unless required by applicable law or agreed to in writing, software 
-** distributed under the License is distributed on an "AS IS" BASIS, 
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-** See the License for the specific language governing permissions and 
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
 ** limitations under the License.
 */
 
@@ -20,6 +20,7 @@
 #include <utils/Errors.h>
 #include <utils/String16.h>
 
+#include <inttypes.h>
 #include <stdint.h>
 #include <sys/types.h>
 
@@ -29,8 +30,6 @@
 
 #include <camera/ICameraService.h>
 #include <camera/ICameraServiceListener.h>
-#include <camera/IProCameraUser.h>
-#include <camera/IProCameraCallbacks.h>
 #include <camera/ICamera.h>
 #include <camera/ICameraClient.h>
 #include <camera/camera2/ICameraDeviceUser.h>
@@ -95,11 +94,18 @@
     {
     }
 
-    // get number of cameras available
+    // get number of cameras available that support standard camera operations
     virtual int32_t getNumberOfCameras()
     {
+        return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
+    }
+
+    // get number of cameras available of a given type
+    virtual int32_t getNumberOfCameras(int type)
+    {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        data.writeInt32(type);
         remote()->transact(BnCameraService::GET_NUMBER_OF_CAMERAS, data, &reply);
 
         if (readExceptionCode(reply)) return 0;
@@ -176,10 +182,13 @@
         data.writeInt32(cameraId);
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
-        remote()->transact(BnCameraService::CONNECT, data, &reply);
+
+        status_t status;
+        status = remote()->transact(BnCameraService::CONNECT, data, &reply);
+        if (status != OK) return status;
 
         if (readExceptionCode(reply)) return -EPROTO;
-        status_t status = reply.readInt32();
+        status = reply.readInt32();
         if (reply.readInt32() != 0) {
             device = interface_cast<ICamera>(reply.readStrongBinder());
         }
@@ -199,36 +208,31 @@
         data.writeInt32(halVersion);
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
-        remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
+
+        status_t status;
+        status = remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
+        if (status != OK) return status;
 
         if (readExceptionCode(reply)) return -EPROTO;
-        status_t status = reply.readInt32();
+        status = reply.readInt32();
         if (reply.readInt32() != 0) {
             device = interface_cast<ICamera>(reply.readStrongBinder());
         }
         return status;
     }
 
-    // connect to camera service (pro client)
-    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb, int cameraId,
-                                const String16 &clientPackageName, int clientUid,
-                                /*out*/
-                                sp<IProCameraUser>& device)
+    virtual status_t setTorchMode(const String16& cameraId, bool enabled,
+            const sp<IBinder>& clientBinder)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraCb));
-        data.writeInt32(cameraId);
-        data.writeString16(clientPackageName);
-        data.writeInt32(clientUid);
-        remote()->transact(BnCameraService::CONNECT_PRO, data, &reply);
+        data.writeString16(cameraId);
+        data.writeInt32(enabled ? 1 : 0);
+        data.writeStrongBinder(clientBinder);
+        remote()->transact(BnCameraService::SET_TORCH_MODE, data, &reply);
 
         if (readExceptionCode(reply)) return -EPROTO;
-        status_t status = reply.readInt32();
-        if (reply.readInt32() != 0) {
-            device = interface_cast<IProCameraUser>(reply.readStrongBinder());
-        }
-        return status;
+        return reply.readInt32();
     }
 
     // connect to camera service (android.hardware.camera2.CameraDevice)
@@ -246,10 +250,13 @@
         data.writeInt32(cameraId);
         data.writeString16(clientPackageName);
         data.writeInt32(clientUid);
-        remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
+
+        status_t status;
+        status = remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
+        if (status != OK) return status;
 
         if (readExceptionCode(reply)) return -EPROTO;
-        status_t status = reply.readInt32();
+        status = reply.readInt32();
         if (reply.readInt32() != 0) {
             device = interface_cast<ICameraDeviceUser>(reply.readStrongBinder());
         }
@@ -285,6 +292,7 @@
         }
 
         Parcel data, reply;
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
 
         data.writeInt32(cameraId);
         remote()->transact(BnCameraService::GET_LEGACY_PARAMETERS, data, &reply);
@@ -304,6 +312,7 @@
     virtual status_t supportsCameraApi(int cameraId, int apiVersion) {
         Parcel data, reply;
 
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
         data.writeInt32(cameraId);
         data.writeInt32(apiVersion);
         remote()->transact(BnCameraService::SUPPORTS_CAMERA_API, data, &reply);
@@ -312,6 +321,16 @@
         status_t res = data.readInt32();
         return res;
     }
+
+    virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t len) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
+        data.writeInt32(eventId);
+        data.writeInt32Array(len, args);
+        remote()->transact(BnCameraService::NOTIFY_SYSTEM_EVENT, data, &reply,
+                IBinder::FLAG_ONEWAY);
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService");
@@ -325,7 +344,7 @@
         case GET_NUMBER_OF_CAMERAS: {
             CHECK_INTERFACE(ICameraService, data, reply);
             reply->writeNoException();
-            reply->writeInt32(getNumberOfCameras());
+            reply->writeInt32(getNumberOfCameras(data.readInt32()));
             return NO_ERROR;
         } break;
         case GET_CAMERA_INFO: {
@@ -390,26 +409,6 @@
             }
             return NO_ERROR;
         } break;
-        case CONNECT_PRO: {
-            CHECK_INTERFACE(ICameraService, data, reply);
-            sp<IProCameraCallbacks> cameraClient =
-                interface_cast<IProCameraCallbacks>(data.readStrongBinder());
-            int32_t cameraId = data.readInt32();
-            const String16 clientName = data.readString16();
-            int32_t clientUid = data.readInt32();
-            sp<IProCameraUser> camera;
-            status_t status = connectPro(cameraClient, cameraId,
-                    clientName, clientUid, /*out*/camera);
-            reply->writeNoException();
-            reply->writeInt32(status);
-            if (camera != NULL) {
-                reply->writeInt32(1);
-                reply->writeStrongBinder(IInterface::asBinder(camera));
-            } else {
-                reply->writeInt32(0);
-            }
-            return NO_ERROR;
-        } break;
         case CONNECT_DEVICE: {
             CHECK_INTERFACE(ICameraService, data, reply);
             sp<ICameraDeviceCallbacks> cameraClient =
@@ -490,6 +489,41 @@
             }
             return NO_ERROR;
         } break;
+        case SET_TORCH_MODE: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            String16 cameraId = data.readString16();
+            bool enabled = data.readInt32() != 0 ? true : false;
+            const sp<IBinder> clientBinder = data.readStrongBinder();
+            status_t status = setTorchMode(cameraId, enabled, clientBinder);
+            reply->writeNoException();
+            reply->writeInt32(status);
+            return NO_ERROR;
+        } break;
+        case NOTIFY_SYSTEM_EVENT: {
+            CHECK_INTERFACE(ICameraService, data, reply);
+            int32_t eventId = data.readInt32();
+            int32_t len = data.readInt32();
+            if (len < 0) {
+                ALOGE("%s: Received poorly formatted length in binder request: notifySystemEvent.",
+                        __FUNCTION__);
+                return FAILED_TRANSACTION;
+            }
+            if (len > 512) {
+                ALOGE("%s: Length %" PRIi32 " too long in binder request: notifySystemEvent.",
+                        __FUNCTION__, len);
+                return FAILED_TRANSACTION;
+            }
+            int32_t events[len];
+            memset(events, 0, sizeof(int32_t) * len);
+            status_t status = data.read(events, sizeof(int32_t) * len);
+            if (status != NO_ERROR) {
+                ALOGE("%s: Received poorly formatted binder request: notifySystemEvent.",
+                        __FUNCTION__);
+                return FAILED_TRANSACTION;
+            }
+            notifySystemEvent(eventId, events, len);
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/ICameraServiceListener.cpp b/camera/ICameraServiceListener.cpp
index b2f1729..0010325 100644
--- a/camera/ICameraServiceListener.cpp
+++ b/camera/ICameraServiceListener.cpp
@@ -29,6 +29,7 @@
 namespace {
     enum {
         STATUS_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
+        TORCH_STATUS_CHANGED,
     };
 }; // namespace anonymous
 
@@ -44,8 +45,7 @@
     virtual void onStatusChanged(Status status, int32_t cameraId)
     {
         Parcel data, reply;
-        data.writeInterfaceToken(
-                              ICameraServiceListener::getInterfaceDescriptor());
+        data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
 
         data.writeInt32(static_cast<int32_t>(status));
         data.writeInt32(cameraId);
@@ -54,19 +54,29 @@
                            data,
                            &reply,
                            IBinder::FLAG_ONEWAY);
+    }
 
-        reply.readExceptionCode();
+    virtual void onTorchStatusChanged(TorchStatus status, const String16 &cameraId)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
+
+        data.writeInt32(static_cast<int32_t>(status));
+        data.writeString16(cameraId);
+
+        remote()->transact(TORCH_STATUS_CHANGED,
+                           data,
+                           &reply,
+                           IBinder::FLAG_ONEWAY);
     }
 };
 
-IMPLEMENT_META_INTERFACE(CameraServiceListener,
-                         "android.hardware.ICameraServiceListener");
+IMPLEMENT_META_INTERFACE(CameraServiceListener, "android.hardware.ICameraServiceListener");
 
 // ----------------------------------------------------------------------
 
-status_t BnCameraServiceListener::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
+status_t BnCameraServiceListener::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags) {
     switch(code) {
         case STATUS_CHANGED: {
             CHECK_INTERFACE(ICameraServiceListener, data, reply);
@@ -75,7 +85,16 @@
             int32_t cameraId = data.readInt32();
 
             onStatusChanged(status, cameraId);
-            reply->writeNoException();
+
+            return NO_ERROR;
+        } break;
+        case TORCH_STATUS_CHANGED: {
+            CHECK_INTERFACE(ICameraServiceListener, data, reply);
+
+            TorchStatus status = static_cast<TorchStatus>(data.readInt32());
+            String16 cameraId = data.readString16();
+
+            onTorchStatusChanged(status, cameraId);
 
             return NO_ERROR;
         } break;
diff --git a/camera/ICameraServiceProxy.cpp b/camera/ICameraServiceProxy.cpp
new file mode 100644
index 0000000..06a5afb
--- /dev/null
+++ b/camera/ICameraServiceProxy.cpp
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BpCameraServiceProxy"
+
+#include <stdint.h>
+
+#include <binder/Parcel.h>
+
+#include <camera/ICameraServiceProxy.h>
+
+namespace android {
+
+class BpCameraServiceProxy: public BpInterface<ICameraServiceProxy> {
+public:
+    BpCameraServiceProxy(const sp<IBinder>& impl) : BpInterface<ICameraServiceProxy>(impl) {}
+
+    virtual void pingForUserUpdate() {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraServiceProxy::getInterfaceDescriptor());
+        remote()->transact(BnCameraServiceProxy::PING_FOR_USER_UPDATE, data, &reply,
+                IBinder::FLAG_ONEWAY);
+    }
+};
+
+
+IMPLEMENT_META_INTERFACE(CameraServiceProxy, "android.hardware.ICameraServiceProxy");
+
+status_t BnCameraServiceProxy::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags) {
+    switch(code) {
+        case PING_FOR_USER_UPDATE: {
+            CHECK_INTERFACE(ICameraServiceProxy, data, reply);
+            pingForUserUpdate();
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+}; // namespace android
+
diff --git a/camera/IProCameraCallbacks.cpp b/camera/IProCameraCallbacks.cpp
deleted file mode 100644
index bd3d420..0000000
--- a/camera/IProCameraCallbacks.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IProCameraCallbacks"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include <utils/Mutex.h>
-
-#include <camera/IProCameraCallbacks.h>
-
-#include "camera/CameraMetadata.h"
-
-namespace android {
-
-enum {
-    NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION,
-    LOCK_STATUS_CHANGED,
-    RESULT_RECEIVED,
-};
-
-class BpProCameraCallbacks: public BpInterface<IProCameraCallbacks>
-{
-public:
-    BpProCameraCallbacks(const sp<IBinder>& impl)
-        : BpInterface<IProCameraCallbacks>(impl)
-    {
-    }
-
-    // generic callback from camera service to app
-    void notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
-    {
-        ALOGV("notifyCallback");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
-        data.writeInt32(msgType);
-        data.writeInt32(ext1);
-        data.writeInt32(ext2);
-        remote()->transact(NOTIFY_CALLBACK, data, &reply, IBinder::FLAG_ONEWAY);
-    }
-
-    void onLockStatusChanged(LockStatus newLockStatus) {
-        ALOGV("onLockStatusChanged");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
-        data.writeInt32(newLockStatus);
-        remote()->transact(LOCK_STATUS_CHANGED, data, &reply,
-                           IBinder::FLAG_ONEWAY);
-    }
-
-    void onResultReceived(int32_t requestId, camera_metadata* result) {
-        ALOGV("onResultReceived");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraCallbacks::getInterfaceDescriptor());
-        data.writeInt32(requestId);
-        CameraMetadata::writeToParcel(data, result);
-        remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
-    }
-};
-
-IMPLEMENT_META_INTERFACE(ProCameraCallbacks,
-                                        "android.hardware.IProCameraCallbacks");
-
-// ----------------------------------------------------------------------
-
-status_t BnProCameraCallbacks::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    ALOGV("onTransact - code = %d", code);
-    switch(code) {
-        case NOTIFY_CALLBACK: {
-            ALOGV("NOTIFY_CALLBACK");
-            CHECK_INTERFACE(IProCameraCallbacks, data, reply);
-            int32_t msgType = data.readInt32();
-            int32_t ext1 = data.readInt32();
-            int32_t ext2 = data.readInt32();
-            notifyCallback(msgType, ext1, ext2);
-            return NO_ERROR;
-        } break;
-        case LOCK_STATUS_CHANGED: {
-            ALOGV("LOCK_STATUS_CHANGED");
-            CHECK_INTERFACE(IProCameraCallbacks, data, reply);
-            LockStatus newLockStatus
-                                 = static_cast<LockStatus>(data.readInt32());
-            onLockStatusChanged(newLockStatus);
-            return NO_ERROR;
-        } break;
-        case RESULT_RECEIVED: {
-            ALOGV("RESULT_RECEIVED");
-            CHECK_INTERFACE(IProCameraCallbacks, data, reply);
-            int32_t requestId = data.readInt32();
-            camera_metadata_t *result = NULL;
-            CameraMetadata::readFromParcel(data, &result);
-            onResultReceived(requestId, result);
-            return NO_ERROR;
-            break;
-        }
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
-
diff --git a/camera/IProCameraUser.cpp b/camera/IProCameraUser.cpp
deleted file mode 100644
index 9bd7597..0000000
--- a/camera/IProCameraUser.cpp
+++ /dev/null
@@ -1,324 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "IProCameraUser"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <binder/Parcel.h>
-#include <camera/IProCameraUser.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include "camera/CameraMetadata.h"
-
-namespace android {
-
-enum {
-    DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
-    CONNECT,
-    EXCLUSIVE_TRY_LOCK,
-    EXCLUSIVE_LOCK,
-    EXCLUSIVE_UNLOCK,
-    HAS_EXCLUSIVE_LOCK,
-    SUBMIT_REQUEST,
-    CANCEL_REQUEST,
-    DELETE_STREAM,
-    CREATE_STREAM,
-    CREATE_DEFAULT_REQUEST,
-    GET_CAMERA_INFO,
-};
-
-class BpProCameraUser: public BpInterface<IProCameraUser>
-{
-public:
-    BpProCameraUser(const sp<IBinder>& impl)
-        : BpInterface<IProCameraUser>(impl)
-    {
-    }
-
-    // disconnect from camera service
-    void disconnect()
-    {
-        ALOGV("disconnect");
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(DISCONNECT, data, &reply);
-        reply.readExceptionCode();
-    }
-
-    virtual status_t connect(const sp<IProCameraCallbacks>& cameraClient)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeStrongBinder(IInterface::asBinder(cameraClient));
-        remote()->transact(CONNECT, data, &reply);
-        return reply.readInt32();
-    }
-
-    /* Shared ProCameraUser */
-
-    virtual status_t exclusiveTryLock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(EXCLUSIVE_TRY_LOCK, data, &reply);
-        return reply.readInt32();
-    }
-    virtual status_t exclusiveLock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(EXCLUSIVE_LOCK, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t exclusiveUnlock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(EXCLUSIVE_UNLOCK, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual bool hasExclusiveLock()
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        remote()->transact(HAS_EXCLUSIVE_LOCK, data, &reply);
-        return !!reply.readInt32();
-    }
-
-    virtual int submitRequest(camera_metadata_t* metadata, bool streaming)
-    {
-
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-
-        // arg0+arg1
-        CameraMetadata::writeToParcel(data, metadata);
-
-        // arg2 = streaming (bool)
-        data.writeInt32(streaming);
-
-        remote()->transact(SUBMIT_REQUEST, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t cancelRequest(int requestId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(requestId);
-
-        remote()->transact(CANCEL_REQUEST, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t deleteStream(int streamId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(streamId);
-
-        remote()->transact(DELETE_STREAM, data, &reply);
-        return reply.readInt32();
-    }
-
-    virtual status_t createStream(int width, int height, int format,
-                          const sp<IGraphicBufferProducer>& bufferProducer,
-                          /*out*/
-                          int* streamId)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(width);
-        data.writeInt32(height);
-        data.writeInt32(format);
-
-        sp<IBinder> b(IInterface::asBinder(bufferProducer));
-        data.writeStrongBinder(b);
-
-        remote()->transact(CREATE_STREAM, data, &reply);
-
-        int sId = reply.readInt32();
-        if (streamId) {
-            *streamId = sId;
-        }
-        return reply.readInt32();
-    }
-
-    // Create a request object from a template.
-    virtual status_t createDefaultRequest(int templateId,
-                                 /*out*/
-                                  camera_metadata** request)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(templateId);
-        remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
-        CameraMetadata::readFromParcel(reply, /*out*/request);
-        return reply.readInt32();
-    }
-
-
-    virtual status_t getCameraInfo(int cameraId, camera_metadata** info)
-    {
-        Parcel data, reply;
-        data.writeInterfaceToken(IProCameraUser::getInterfaceDescriptor());
-        data.writeInt32(cameraId);
-        remote()->transact(GET_CAMERA_INFO, data, &reply);
-        CameraMetadata::readFromParcel(reply, /*out*/info);
-        return reply.readInt32();
-    }
-
-
-private:
-
-
-};
-
-IMPLEMENT_META_INTERFACE(ProCameraUser, "android.hardware.IProCameraUser");
-
-// ----------------------------------------------------------------------
-
-status_t BnProCameraUser::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
-    switch(code) {
-        case DISCONNECT: {
-            ALOGV("DISCONNECT");
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            disconnect();
-            reply->writeNoException();
-            return NO_ERROR;
-        } break;
-        case CONNECT: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            sp<IProCameraCallbacks> cameraClient =
-                   interface_cast<IProCameraCallbacks>(data.readStrongBinder());
-            reply->writeInt32(connect(cameraClient));
-            return NO_ERROR;
-        } break;
-
-        /* Shared ProCameraUser */
-        case EXCLUSIVE_TRY_LOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(exclusiveTryLock());
-            return NO_ERROR;
-        } break;
-        case EXCLUSIVE_LOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(exclusiveLock());
-            return NO_ERROR;
-        } break;
-        case EXCLUSIVE_UNLOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(exclusiveUnlock());
-            return NO_ERROR;
-        } break;
-        case HAS_EXCLUSIVE_LOCK: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            reply->writeInt32(hasExclusiveLock());
-            return NO_ERROR;
-        } break;
-        case SUBMIT_REQUEST: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            camera_metadata_t* metadata;
-            CameraMetadata::readFromParcel(data, /*out*/&metadata);
-
-            // arg2 = streaming (bool)
-            bool streaming = data.readInt32();
-
-            // return code: requestId (int32)
-            reply->writeInt32(submitRequest(metadata, streaming));
-
-            return NO_ERROR;
-        } break;
-        case CANCEL_REQUEST: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            int requestId = data.readInt32();
-            reply->writeInt32(cancelRequest(requestId));
-            return NO_ERROR;
-        } break;
-        case DELETE_STREAM: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            int streamId = data.readInt32();
-            reply->writeInt32(deleteStream(streamId));
-            return NO_ERROR;
-        } break;
-        case CREATE_STREAM: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-            int width, height, format;
-
-            width = data.readInt32();
-            height = data.readInt32();
-            format = data.readInt32();
-
-            sp<IGraphicBufferProducer> bp =
-               interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
-
-            int streamId = -1;
-            status_t ret;
-            ret = createStream(width, height, format, bp, &streamId);
-
-            reply->writeInt32(streamId);
-            reply->writeInt32(ret);
-
-            return NO_ERROR;
-        } break;
-
-        case CREATE_DEFAULT_REQUEST: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-
-            int templateId = data.readInt32();
-
-            camera_metadata_t* request = NULL;
-            status_t ret;
-            ret = createDefaultRequest(templateId, &request);
-
-            CameraMetadata::writeToParcel(*reply, request);
-            reply->writeInt32(ret);
-
-            free_camera_metadata(request);
-
-            return NO_ERROR;
-        } break;
-        case GET_CAMERA_INFO: {
-            CHECK_INTERFACE(IProCameraUser, data, reply);
-
-            int cameraId = data.readInt32();
-
-            camera_metadata_t* info = NULL;
-            status_t ret;
-            ret = getCameraInfo(cameraId, &info);
-
-            CameraMetadata::writeToParcel(*reply, info);
-            reply->writeInt32(ret);
-
-            free_camera_metadata(info);
-
-            return NO_ERROR;
-        } break;
-        default:
-            return BBinder::onTransact(code, data, reply, flags);
-    }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/ProCamera.cpp b/camera/ProCamera.cpp
deleted file mode 100644
index 48f8e8e..0000000
--- a/camera/ProCamera.cpp
+++ /dev/null
@@ -1,436 +0,0 @@
-/*
-**
-** Copyright (C) 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-**     http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ProCamera"
-#include <utils/Log.h>
-#include <utils/threads.h>
-#include <utils/Mutex.h>
-
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-#include <binder/IMemory.h>
-
-#include <camera/ProCamera.h>
-#include <camera/IProCameraUser.h>
-#include <camera/IProCameraCallbacks.h>
-
-#include <gui/IGraphicBufferProducer.h>
-
-#include <system/camera_metadata.h>
-
-namespace android {
-
-sp<ProCamera> ProCamera::connect(int cameraId)
-{
-    return CameraBaseT::connect(cameraId, String16(),
-                                 ICameraService::USE_CALLING_UID);
-}
-
-ProCamera::ProCamera(int cameraId)
-    : CameraBase(cameraId)
-{
-}
-
-CameraTraits<ProCamera>::TCamConnectService CameraTraits<ProCamera>::fnConnectService =
-        &ICameraService::connectPro;
-
-ProCamera::~ProCamera()
-{
-
-}
-
-/* IProCameraUser's implementation */
-
-// callback from camera service
-void ProCamera::notifyCallback(int32_t msgType, int32_t ext1, int32_t ext2)
-{
-    return CameraBaseT::notifyCallback(msgType, ext1, ext2);
-}
-
-void ProCamera::onLockStatusChanged(
-                                 IProCameraCallbacks::LockStatus newLockStatus)
-{
-    ALOGV("%s: newLockStatus = %d", __FUNCTION__, newLockStatus);
-
-    sp<ProCameraListener> listener;
-    {
-        Mutex::Autolock _l(mLock);
-        listener = mListener;
-    }
-    if (listener != NULL) {
-        switch (newLockStatus) {
-            case IProCameraCallbacks::LOCK_ACQUIRED:
-                listener->onLockAcquired();
-                break;
-            case IProCameraCallbacks::LOCK_RELEASED:
-                listener->onLockReleased();
-                break;
-            case IProCameraCallbacks::LOCK_STOLEN:
-                listener->onLockStolen();
-                break;
-            default:
-                ALOGE("%s: Unknown lock status: %d",
-                      __FUNCTION__, newLockStatus);
-        }
-    }
-}
-
-void ProCamera::onResultReceived(int32_t requestId, camera_metadata* result) {
-    ALOGV("%s: requestId = %d, result = %p", __FUNCTION__, requestId, result);
-
-    sp<ProCameraListener> listener;
-    {
-        Mutex::Autolock _l(mLock);
-        listener = mListener;
-    }
-
-    CameraMetadata tmp(result);
-
-    // Unblock waitForFrame(id) callers
-    {
-        Mutex::Autolock al(mWaitMutex);
-        mMetadataReady = true;
-        mLatestMetadata = tmp; // make copy
-        mWaitCondition.broadcast();
-    }
-
-    result = tmp.release();
-
-    if (listener != NULL) {
-        listener->onResultReceived(requestId, result);
-    } else {
-        free_camera_metadata(result);
-    }
-
-}
-
-status_t ProCamera::exclusiveTryLock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->exclusiveTryLock();
-}
-status_t ProCamera::exclusiveLock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->exclusiveLock();
-}
-status_t ProCamera::exclusiveUnlock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->exclusiveUnlock();
-}
-bool ProCamera::hasExclusiveLock()
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->hasExclusiveLock();
-}
-
-// Note that the callee gets a copy of the metadata.
-int ProCamera::submitRequest(const struct camera_metadata* metadata,
-                             bool streaming)
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->submitRequest(const_cast<struct camera_metadata*>(metadata),
-                            streaming);
-}
-
-status_t ProCamera::cancelRequest(int requestId)
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->cancelRequest(requestId);
-}
-
-status_t ProCamera::deleteStream(int streamId)
-{
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    status_t s = c->deleteStream(streamId);
-
-    mStreams.removeItem(streamId);
-
-    return s;
-}
-
-status_t ProCamera::createStream(int width, int height, int format,
-                                 const sp<Surface>& surface,
-                                 /*out*/
-                                 int* streamId)
-{
-    *streamId = -1;
-
-    ALOGV("%s: createStreamW %dx%d (fmt=0x%x)", __FUNCTION__, width, height,
-                                                                       format);
-
-    if (surface == 0) {
-        return BAD_VALUE;
-    }
-
-    return createStream(width, height, format,
-                        surface->getIGraphicBufferProducer(),
-                        streamId);
-}
-
-status_t ProCamera::createStream(int width, int height, int format,
-                                 const sp<IGraphicBufferProducer>& bufferProducer,
-                                 /*out*/
-                                 int* streamId) {
-    *streamId = -1;
-
-    ALOGV("%s: createStreamT %dx%d (fmt=0x%x)", __FUNCTION__, width, height,
-                                                                       format);
-
-    if (bufferProducer == 0) {
-        return BAD_VALUE;
-    }
-
-    sp <IProCameraUser> c = mCamera;
-    status_t stat = c->createStream(width, height, format, bufferProducer,
-                                    streamId);
-
-    if (stat == OK) {
-        StreamInfo s(*streamId);
-
-        mStreams.add(*streamId, s);
-    }
-
-    return stat;
-}
-
-status_t ProCamera::createStreamCpu(int width, int height, int format,
-                                    int heapCount,
-                                    /*out*/
-                                    sp<CpuConsumer>* cpuConsumer,
-                                    int* streamId) {
-    return createStreamCpu(width, height, format, heapCount,
-                           /*synchronousMode*/true,
-                           cpuConsumer, streamId);
-}
-
-status_t ProCamera::createStreamCpu(int width, int height, int format,
-                                    int heapCount,
-                                    bool synchronousMode,
-                                    /*out*/
-                                    sp<CpuConsumer>* cpuConsumer,
-                                    int* streamId)
-{
-    ALOGV("%s: createStreamW %dx%d (fmt=0x%x)", __FUNCTION__, width, height,
-                                                                        format);
-
-    *cpuConsumer = NULL;
-
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    sp<IGraphicBufferProducer> producer;
-    sp<IGraphicBufferConsumer> consumer;
-    BufferQueue::createBufferQueue(&producer, &consumer);
-    sp<CpuConsumer> cc = new CpuConsumer(consumer, heapCount
-            /*, synchronousMode*/);
-    cc->setName(String8("ProCamera::mCpuConsumer"));
-
-    sp<Surface> stc = new Surface(producer);
-
-    status_t s = createStream(width, height, format,
-                              stc->getIGraphicBufferProducer(),
-                              streamId);
-
-    if (s != OK) {
-        ALOGE("%s: Failure to create stream %dx%d (fmt=0x%x)", __FUNCTION__,
-                    width, height, format);
-        return s;
-    }
-
-    sp<ProFrameListener> frameAvailableListener =
-        new ProFrameListener(this, *streamId);
-
-    getStreamInfo(*streamId).cpuStream = true;
-    getStreamInfo(*streamId).cpuConsumer = cc;
-    getStreamInfo(*streamId).synchronousMode = synchronousMode;
-    getStreamInfo(*streamId).stc = stc;
-    // for lifetime management
-    getStreamInfo(*streamId).frameAvailableListener = frameAvailableListener;
-
-    cc->setFrameAvailableListener(frameAvailableListener);
-
-    *cpuConsumer = cc;
-
-    return s;
-}
-
-camera_metadata* ProCamera::getCameraInfo(int cameraId) {
-    ALOGV("%s: cameraId = %d", __FUNCTION__, cameraId);
-
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NULL;
-
-    camera_metadata* ptr = NULL;
-    status_t status = c->getCameraInfo(cameraId, &ptr);
-
-    if (status != OK) {
-        ALOGE("%s: Failed to get camera info, error = %d", __FUNCTION__, status);
-    }
-
-    return ptr;
-}
-
-status_t ProCamera::createDefaultRequest(int templateId,
-                                             camera_metadata** request) const {
-    ALOGV("%s: templateId = %d", __FUNCTION__, templateId);
-
-    sp <IProCameraUser> c = mCamera;
-    if (c == 0) return NO_INIT;
-
-    return c->createDefaultRequest(templateId, request);
-}
-
-void ProCamera::onFrameAvailable(int streamId) {
-    ALOGV("%s: streamId = %d", __FUNCTION__, streamId);
-
-    sp<ProCameraListener> listener = mListener;
-    StreamInfo& stream = getStreamInfo(streamId);
-
-    if (listener.get() != NULL) {
-        listener->onFrameAvailable(streamId, stream.cpuConsumer);
-    }
-
-    // Unblock waitForFrame(id) callers
-    {
-        Mutex::Autolock al(mWaitMutex);
-        getStreamInfo(streamId).frameReady++;
-        mWaitCondition.broadcast();
-    }
-}
-
-int ProCamera::waitForFrameBuffer(int streamId) {
-    status_t stat = BAD_VALUE;
-    Mutex::Autolock al(mWaitMutex);
-
-    StreamInfo& si = getStreamInfo(streamId);
-
-    if (si.frameReady > 0) {
-        int numFrames = si.frameReady;
-        si.frameReady = 0;
-        return numFrames;
-    } else {
-        while (true) {
-            stat = mWaitCondition.waitRelative(mWaitMutex,
-                                                mWaitTimeout);
-            if (stat != OK) {
-                ALOGE("%s: Error while waiting for frame buffer: %d",
-                    __FUNCTION__, stat);
-                return stat;
-            }
-
-            if (si.frameReady > 0) {
-                int numFrames = si.frameReady;
-                si.frameReady = 0;
-                return numFrames;
-            }
-            // else it was some other stream that got unblocked
-        }
-    }
-
-    return stat;
-}
-
-int ProCamera::dropFrameBuffer(int streamId, int count) {
-    StreamInfo& si = getStreamInfo(streamId);
-
-    if (!si.cpuStream) {
-        return BAD_VALUE;
-    } else if (count < 0) {
-        return BAD_VALUE;
-    }
-
-    if (!si.synchronousMode) {
-        ALOGW("%s: No need to drop frames on asynchronous streams,"
-              " as asynchronous mode only keeps 1 latest frame around.",
-              __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    int numDropped = 0;
-    for (int i = 0; i < count; ++i) {
-        CpuConsumer::LockedBuffer buffer;
-        if (si.cpuConsumer->lockNextBuffer(&buffer) != OK) {
-            break;
-        }
-
-        si.cpuConsumer->unlockBuffer(buffer);
-        numDropped++;
-    }
-
-    return numDropped;
-}
-
-status_t ProCamera::waitForFrameMetadata() {
-    status_t stat = BAD_VALUE;
-    Mutex::Autolock al(mWaitMutex);
-
-    if (mMetadataReady) {
-        return OK;
-    } else {
-        while (true) {
-            stat = mWaitCondition.waitRelative(mWaitMutex,
-                                               mWaitTimeout);
-
-            if (stat != OK) {
-                ALOGE("%s: Error while waiting for metadata: %d",
-                        __FUNCTION__, stat);
-                return stat;
-            }
-
-            if (mMetadataReady) {
-                mMetadataReady = false;
-                return OK;
-            }
-            // else it was some other stream or metadata
-        }
-    }
-
-    return stat;
-}
-
-CameraMetadata ProCamera::consumeFrameMetadata() {
-    Mutex::Autolock al(mWaitMutex);
-
-    // Destructive: Subsequent calls return empty metadatas
-    CameraMetadata tmp = mLatestMetadata;
-    mLatestMetadata.clear();
-
-    return tmp;
-}
-
-ProCamera::StreamInfo& ProCamera::getStreamInfo(int streamId) {
-    return mStreams.editValueFor(streamId);
-}
-
-}; // namespace android
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 66d6913..4217bc6 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -81,6 +81,13 @@
         mSurfaceList.push_back(surface);
     }
 
+    int isReprocess = 0;
+    if ((err = parcel->readInt32(&isReprocess)) != OK) {
+        ALOGE("%s: Failed to read reprocessing from parcel", __FUNCTION__);
+        return err;
+    }
+    mIsReprocess = (isReprocess != 0);
+
     return OK;
 }
 
@@ -118,6 +125,8 @@
         parcel->writeStrongBinder(binder);
     }
 
+    parcel->writeInt32(mIsReprocess ? 1 : 0);
+
     return OK;
 }
 
diff --git a/camera/camera2/ICameraDeviceCallbacks.cpp b/camera/camera2/ICameraDeviceCallbacks.cpp
index 4cc7b5d..f599879 100644
--- a/camera/camera2/ICameraDeviceCallbacks.cpp
+++ b/camera/camera2/ICameraDeviceCallbacks.cpp
@@ -37,6 +37,7 @@
     CAMERA_IDLE,
     CAPTURE_STARTED,
     RESULT_RECEIVED,
+    PREPARED
 };
 
 class BpCameraDeviceCallbacks: public BpInterface<ICameraDeviceCallbacks>
@@ -80,7 +81,6 @@
         data.writeNoException();
     }
 
-
     void onResultReceived(const CameraMetadata& metadata,
             const CaptureResultExtras& resultExtras) {
         ALOGV("onResultReceived");
@@ -93,6 +93,17 @@
         remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
         data.writeNoException();
     }
+
+    void onPrepared(int streamId)
+    {
+        ALOGV("onPrepared");
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
+        data.writeInt32(streamId);
+        remote()->transact(PREPARED, data, &reply, IBinder::FLAG_ONEWAY);
+        data.writeNoException();
+    }
+
 };
 
 IMPLEMENT_META_INTERFACE(CameraDeviceCallbacks,
@@ -160,6 +171,15 @@
             data.readExceptionCode();
             return NO_ERROR;
         } break;
+        case PREPARED: {
+            ALOGV("onPrepared");
+            CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
+            CaptureResultExtras result;
+            int streamId = data.readInt32();
+            onPrepared(streamId);
+            data.readExceptionCode();
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
index 277b5db..d2dc200 100644
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ b/camera/camera2/ICameraDeviceUser.cpp
@@ -26,6 +26,7 @@
 #include <gui/Surface.h>
 #include <camera/CameraMetadata.h>
 #include <camera/camera2/CaptureRequest.h>
+#include <camera/camera2/OutputConfiguration.h>
 
 namespace android {
 
@@ -41,10 +42,14 @@
     END_CONFIGURE,
     DELETE_STREAM,
     CREATE_STREAM,
+    CREATE_INPUT_STREAM,
+    GET_INPUT_SURFACE,
     CREATE_DEFAULT_REQUEST,
     GET_CAMERA_INFO,
     WAIT_UNTIL_IDLE,
-    FLUSH
+    FLUSH,
+    PREPARE,
+    TEAR_DOWN
 };
 
 namespace {
@@ -78,7 +83,7 @@
         reply.readExceptionCode();
     }
 
-    virtual status_t submitRequest(sp<CaptureRequest> request, bool repeating,
+    virtual int submitRequest(sp<CaptureRequest> request, bool repeating,
                               int64_t *lastFrameNumber)
     {
         Parcel data, reply;
@@ -107,13 +112,13 @@
             }
         }
 
-	if ((res < NO_ERROR) || (resFrameNumber != NO_ERROR)) {
+        if (res < 0 || (resFrameNumber != NO_ERROR)) {
             res = FAILED_TRANSACTION;
         }
         return res;
     }
 
-    virtual status_t submitRequestList(List<sp<CaptureRequest> > requestList, bool repeating,
+    virtual int submitRequestList(List<sp<CaptureRequest> > requestList, bool repeating,
                                   int64_t *lastFrameNumber)
     {
         Parcel data, reply;
@@ -147,7 +152,7 @@
                 resFrameNumber = reply.readInt64(lastFrameNumber);
             }
         }
-        if ((res < NO_ERROR) || (resFrameNumber != NO_ERROR)) {
+        if (res < 0 || (resFrameNumber != NO_ERROR)) {
             res = FAILED_TRANSACTION;
         }
         return res;
@@ -186,11 +191,13 @@
         return reply.readInt32();
     }
 
-    virtual status_t endConfigure()
+    virtual status_t endConfigure(bool isConstrainedHighSpeed)
     {
         ALOGV("endConfigure");
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(isConstrainedHighSpeed);
+
         remote()->transact(END_CONFIGURE, data, &reply);
         reply.readExceptionCode();
         return reply.readInt32();
@@ -208,8 +215,23 @@
         return reply.readInt32();
     }
 
-    virtual status_t createStream(int width, int height, int format,
-                          const sp<IGraphicBufferProducer>& bufferProducer)
+    virtual status_t createStream(const OutputConfiguration& outputConfiguration)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        if (outputConfiguration.getGraphicBufferProducer() != NULL) {
+            data.writeInt32(1); // marker that OutputConfiguration is not null. Mimic aidl behavior
+            outputConfiguration.writeToParcel(data);
+        } else {
+            data.writeInt32(0);
+        }
+        remote()->transact(CREATE_STREAM, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    virtual status_t createInputStream(int width, int height, int format)
     {
         Parcel data, reply;
         data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
@@ -217,17 +239,42 @@
         data.writeInt32(height);
         data.writeInt32(format);
 
-        data.writeInt32(1); // marker that bufferProducer is not null
-        data.writeString16(String16("unknown_name")); // name of surface
-        sp<IBinder> b(IInterface::asBinder(bufferProducer));
-        data.writeStrongBinder(b);
-
-        remote()->transact(CREATE_STREAM, data, &reply);
+        remote()->transact(CREATE_INPUT_STREAM, data, &reply);
 
         reply.readExceptionCode();
         return reply.readInt32();
     }
 
+    // get the buffer producer of the input stream
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) {
+        if (producer == NULL) {
+            return BAD_VALUE;
+        }
+
+        Parcel data, reply;
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+
+        remote()->transact(GET_INPUT_SURFACE, data, &reply);
+
+        reply.readExceptionCode();
+        status_t result = reply.readInt32();
+        if (result != OK) {
+            return result;
+        }
+
+        sp<IGraphicBufferProducer> bp = NULL;
+        if (reply.readInt32() != 0) {
+            String16 name = readMaybeEmptyString16(reply);
+            bp = interface_cast<IGraphicBufferProducer>(
+                    reply.readStrongBinder());
+        }
+
+        *producer = bp;
+
+        return *producer == NULL ? INVALID_OPERATION : OK;
+    }
+
     // Create a request object from a template.
     virtual status_t createDefaultRequest(int templateId,
                                           /*out*/
@@ -305,6 +352,34 @@
         return res;
     }
 
+    virtual status_t prepare(int streamId)
+    {
+        ALOGV("prepare");
+        Parcel data, reply;
+
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(streamId);
+
+        remote()->transact(PREPARE, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
+    virtual status_t tearDown(int streamId)
+    {
+        ALOGV("tearDown");
+        Parcel data, reply;
+
+        data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
+        data.writeInt32(streamId);
+
+        remote()->transact(TEAR_DOWN, data, &reply);
+
+        reply.readExceptionCode();
+        return reply.readInt32();
+    }
+
 private:
 
 
@@ -396,31 +471,15 @@
         } break;
         case CREATE_STREAM: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-            int width, height, format;
 
-            width = data.readInt32();
-            ALOGV("%s: CREATE_STREAM: width = %d", __FUNCTION__, width);
-            height = data.readInt32();
-            ALOGV("%s: CREATE_STREAM: height = %d", __FUNCTION__, height);
-            format = data.readInt32();
-            ALOGV("%s: CREATE_STREAM: format = %d", __FUNCTION__, format);
-
-            sp<IGraphicBufferProducer> bp;
+            status_t ret = BAD_VALUE;
             if (data.readInt32() != 0) {
-                String16 name = readMaybeEmptyString16(data);
-                bp = interface_cast<IGraphicBufferProducer>(
-                        data.readStrongBinder());
-
-                ALOGV("%s: CREATE_STREAM: bp = %p, name = %s", __FUNCTION__,
-                      bp.get(), String8(name).string());
+                OutputConfiguration outputConfiguration(data);
+                ret = createStream(outputConfiguration);
             } else {
-                ALOGV("%s: CREATE_STREAM: bp = unset, name = unset",
-                      __FUNCTION__);
+                ALOGE("%s: cannot take an empty OutputConfiguration", __FUNCTION__);
             }
 
-            status_t ret;
-            ret = createStream(width, height, format, bp);
-
             reply->writeNoException();
             ALOGV("%s: CREATE_STREAM: write noException", __FUNCTION__);
             reply->writeInt32(ret);
@@ -428,7 +487,35 @@
 
             return NO_ERROR;
         } break;
+        case CREATE_INPUT_STREAM: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int width, height, format;
 
+            width = data.readInt32();
+            height = data.readInt32();
+            format = data.readInt32();
+            status_t ret = createInputStream(width, height, format);
+
+            reply->writeNoException();
+            reply->writeInt32(ret);
+            return NO_ERROR;
+
+        } break;
+        case GET_INPUT_SURFACE: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+
+            sp<IGraphicBufferProducer> bp;
+            status_t ret = getInputBufferProducer(&bp);
+            sp<IBinder> b(IInterface::asBinder(ret == OK ? bp : NULL));
+
+            reply->writeNoException();
+            reply->writeInt32(ret);
+            reply->writeInt32(1);
+            reply->writeString16(String16("camera input")); // name of surface
+            reply->writeStrongBinder(b);
+
+            return NO_ERROR;
+        } break;
         case CREATE_DEFAULT_REQUEST: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
 
@@ -486,10 +573,26 @@
         } break;
         case END_CONFIGURE: {
             CHECK_INTERFACE(ICameraDeviceUser, data, reply);
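+            // A nonzero value requests a constrained high-speed capture session configuration.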
+            bool isConstrainedHighSpeed = data.readInt32();
             reply->writeNoException();
-            reply->writeInt32(endConfigure());
+            reply->writeInt32(endConfigure(isConstrainedHighSpeed));
             return NO_ERROR;
         } break;
+        case PREPARE: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int streamId = data.readInt32();
+            reply->writeNoException();
+            reply->writeInt32(prepare(streamId));
+            return NO_ERROR;
+        } break;
+        case TEAR_DOWN: {
+            CHECK_INTERFACE(ICameraDeviceUser, data, reply);
+            int streamId = data.readInt32();
+            reply->writeNoException();
+            reply->writeInt32(tearDown(streamId));
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
new file mode 100644
index 0000000..20a23e0
--- /dev/null
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -0,0 +1,84 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "OutputConfiguration"
+#include <utils/Log.h>
+
+#include <camera/camera2/OutputConfiguration.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+
+const int OutputConfiguration::INVALID_ROTATION = -1;
+
+// Read empty strings without printing a false error message.
+String16 OutputConfiguration::readMaybeEmptyString16(const Parcel& parcel) {
+    size_t len;
+    const char16_t* str = parcel.readString16Inplace(&len);
+    if (str != NULL) {
+        return String16(str, len);
+    } else {
+        return String16();
+    }
+}
+
+sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
+    return mGbp;
+}
+
+int OutputConfiguration::getRotation() const {
+    return mRotation;
+}
+
+OutputConfiguration::OutputConfiguration(const Parcel& parcel) {
+    status_t err;
+    int rotation = 0;
+    if ((err = parcel.readInt32(&rotation)) != OK) {
+        ALOGE("%s: Failed to read rotation from parcel", __FUNCTION__);
+        mGbp = NULL;
+        mRotation = INVALID_ROTATION;
+        return;
+    }
+
+    String16 name = readMaybeEmptyString16(parcel);
+    const sp<IGraphicBufferProducer>& gbp =
+            interface_cast<IGraphicBufferProducer>(parcel.readStrongBinder());
+    mGbp = gbp;
+    mRotation = rotation;
+
+    ALOGV("%s: OutputConfiguration: bp = %p, name = %s", __FUNCTION__,
+          gbp.get(), String8(name).string());
+}
+
+OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation) {
+    mGbp = gbp;
+    mRotation = rotation;
+}
+
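+// The write order below (rotation, surface name, producer binder) must stay in
+// sync with the read order in the Parcel constructor above.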
+status_t OutputConfiguration::writeToParcel(Parcel& parcel) const {
+
+    parcel.writeInt32(mRotation);
+    parcel.writeString16(String16("unknown_name")); // name of surface
+    sp<IBinder> b(IInterface::asBinder(mGbp));
+    parcel.writeStrongBinder(b);
+
+    return OK;
+}
+
+}; // namespace android
+
diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk
index 2db4c14..3777d94 100644
--- a/camera/tests/Android.mk
+++ b/camera/tests/Android.mk
@@ -17,8 +17,8 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 
 LOCAL_SRC_FILES:= \
-	ProCameraTests.cpp \
-	VendorTagDescriptorTests.cpp
+	VendorTagDescriptorTests.cpp \
+	CameraBinderTests.cpp
 
 LOCAL_SHARED_LIBRARIES := \
 	libutils \
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
new file mode 100644
index 0000000..572fb72
--- /dev/null
+++ b/camera/tests/CameraBinderTests.cpp
@@ -0,0 +1,484 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_NDEBUG 0
+#define LOG_TAG "CameraBinderTests"
+
+#include <binder/IInterface.h>
+#include <binder/IServiceManager.h>
+#include <binder/Parcel.h>
+#include <binder/ProcessState.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <utils/List.h>
+#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <system/graphics.h>
+#include <hardware/gralloc.h>
+
+#include <camera/CameraMetadata.h>
+#include <camera/ICameraService.h>
+#include <camera/ICameraServiceListener.h>
+#include <camera/camera2/CaptureRequest.h>
+#include <camera/camera2/ICameraDeviceUser.h>
+#include <camera/camera2/ICameraDeviceCallbacks.h>
+#include <camera/camera2/OutputConfiguration.h>
+
+#include <gui/BufferItemConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
+
+#include <gtest/gtest.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <utility>
+#include <vector>
+#include <map>
+#include <algorithm>
+
+using namespace android;
+
+#define ASSERT_NOT_NULL(x) \
+    ASSERT_TRUE((x) != nullptr)
+
+#define SETUP_TIMEOUT 2000000000 // ns
+#define IDLE_TIMEOUT 2000000000 // ns
+
+// Stub listener implementation
+class TestCameraServiceListener : public BnCameraServiceListener {
+    std::map<String16, TorchStatus> mCameraTorchStatuses;
+    std::map<int32_t, Status> mCameraStatuses;
+    mutable Mutex mLock;
+    mutable Condition mCondition;
+    mutable Condition mTorchCondition;
+public:
+    virtual ~TestCameraServiceListener() {};
+
+    virtual void onStatusChanged(Status status, int32_t cameraId) {
+        Mutex::Autolock l(mLock);
+        mCameraStatuses[cameraId] = status;
+        mCondition.broadcast();
+    };
+
+    virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) {
+        Mutex::Autolock l(mLock);
+        mCameraTorchStatuses[cameraId] = status;
+        mTorchCondition.broadcast();
+    };
+
+    bool waitForNumCameras(size_t num) const {
+        Mutex::Autolock l(mLock);
+
+        if (mCameraStatuses.size() == num) {
+            return true;
+        }
+
+        while (mCameraStatuses.size() < num) {
+            if (mCondition.waitRelative(mLock, SETUP_TIMEOUT) != OK) {
+                return false;
+            }
+        }
+        return true;
+    };
+
+    bool waitForTorchState(TorchStatus status, int32_t cameraId) const {
+        Mutex::Autolock l(mLock);
+
+        const auto& iter = mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
+        if (iter != mCameraTorchStatuses.end() && iter->second == status) {
+            return true;
+        }
+
+        bool foundStatus = false;
+        while (!foundStatus) {
+            if (mTorchCondition.waitRelative(mLock, SETUP_TIMEOUT) != OK) {
+                return false;
+            }
+            const auto& iter =
+                    mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
+            foundStatus = (iter != mCameraTorchStatuses.end() && iter->second == status);
+        }
+        return true;
+    };
+
+    TorchStatus getTorchStatus(int32_t cameraId) const {
+        Mutex::Autolock l(mLock);
+        const auto& iter = mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
+        if (iter == mCameraTorchStatuses.end()) {
+            return ICameraServiceListener::TORCH_STATUS_UNKNOWN;
+        }
+        return iter->second;
+    };
+
+    Status getStatus(int32_t cameraId) const {
+        Mutex::Autolock l(mLock);
+        const auto& iter = mCameraStatuses.find(cameraId);
+        if (iter == mCameraStatuses.end()) {
+            return ICameraServiceListener::STATUS_UNKNOWN;
+        }
+        return iter->second;
+    };
+};
+
+// Callback implementation
+class TestCameraDeviceCallbacks : public BnCameraDeviceCallbacks {
+public:
+    enum Status {
+        IDLE,
+        ERROR,
+        PREPARED,
+        RUNNING,
+        SENT_RESULT,
+        UNINITIALIZED
+    };
+
+protected:
+    bool mError;
+    Status mLastStatus;
+    mutable std::vector<Status> mStatusesHit;
+    mutable Mutex mLock;
+    mutable Condition mStatusCondition;
+public:
+    TestCameraDeviceCallbacks() : mError(false), mLastStatus(UNINITIALIZED) {}
+
+    virtual ~TestCameraDeviceCallbacks() {}
+
+    virtual void onDeviceError(CameraErrorCode errorCode,
+            const CaptureResultExtras& resultExtras) {
+        ALOGE("%s: onDeviceError occurred with: %d", __FUNCTION__, static_cast<int>(errorCode));
+        Mutex::Autolock l(mLock);
+        mError = true;
+        mLastStatus = ERROR;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+    }
+
+    virtual void onDeviceIdle() {
+        Mutex::Autolock l(mLock);
+        mLastStatus = IDLE;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+    }
+
+    virtual void onCaptureStarted(const CaptureResultExtras& resultExtras,
+            int64_t timestamp) {
+        Mutex::Autolock l(mLock);
+        mLastStatus = RUNNING;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+    }
+
+
+    virtual void onResultReceived(const CameraMetadata& metadata,
+            const CaptureResultExtras& resultExtras) {
+        Mutex::Autolock l(mLock);
+        mLastStatus = SENT_RESULT;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+    }
+
+    virtual void onPrepared(int streamId) {
+        Mutex::Autolock l(mLock);
+        mLastStatus = PREPARED;
+        mStatusesHit.push_back(mLastStatus);
+        mStatusCondition.broadcast();
+    }
+
+    // Test helper functions:
+
+    bool hadError() const {
+        Mutex::Autolock l(mLock);
+        return mError;
+    }
+
+    bool waitForStatus(Status status) const {
+        Mutex::Autolock l(mLock);
+        if (mLastStatus == status) {
+            return true;
+        }
+
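+        // Also search the accumulated status history, so a status reached
+        // before this call (since the last clear) is not missed.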
+        while (std::find(mStatusesHit.begin(), mStatusesHit.end(), status)
+                == mStatusesHit.end()) {
+
+            if (mStatusCondition.waitRelative(mLock, IDLE_TIMEOUT) != OK) {
+                mStatusesHit.clear();
+                return false;
+            }
+        }
+        mStatusesHit.clear();
+
+        return true;
+
+    }
+
+    void clearStatus() const {
+        Mutex::Autolock l(mLock);
+        mStatusesHit.clear();
+    }
+
+    bool waitForIdle() const {
+        return waitForStatus(IDLE);
+    }
+
+};
+
+// Exercise basic binder calls for the camera service
+TEST(CameraServiceBinderTest, CheckBinderCameraService) {
+    ProcessState::self()->startThreadPool();
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.camera"));
+    ASSERT_NOT_NULL(binder);
+    sp<ICameraService> service = interface_cast<ICameraService>(binder);
+
+
+    int32_t numCameras = service->getNumberOfCameras();
+    EXPECT_LE(0, numCameras);
+
+    // Check listener binder calls
+    sp<TestCameraServiceListener> listener(new TestCameraServiceListener());
+    EXPECT_EQ(OK, service->addListener(listener));
+
+    EXPECT_TRUE(listener->waitForNumCameras(numCameras));
+
+    for (int32_t i = 0; i < numCameras; i++) {
+        // We only care about binder calls for the Camera2 API.  Camera1 is deprecated.
+        status_t camera2Support = service->supportsCameraApi(i, ICameraService::API_VERSION_2);
+        if (camera2Support != OK) {
+            EXPECT_EQ(-EOPNOTSUPP, camera2Support);
+            continue;
+        }
+
+        // Check metadata binder call
+        CameraMetadata metadata;
+        EXPECT_EQ(OK, service->getCameraCharacteristics(i, &metadata));
+        EXPECT_FALSE(metadata.isEmpty());
+
+        // Make sure we're available, or skip device tests otherwise
+        ICameraServiceListener::Status s = listener->getStatus(i);
+        EXPECT_EQ(ICameraServiceListener::STATUS_AVAILABLE, s);
+        if (s != ICameraServiceListener::STATUS_AVAILABLE) {
+            continue;
+        }
+
+        // Check connect binder calls
+        sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
+        sp<ICameraDeviceUser> device;
+        EXPECT_EQ(OK, service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
+                ICameraService::USE_CALLING_UID, /*out*/device));
+        ASSERT_NE(nullptr, device.get());
+        device->disconnect();
+        EXPECT_FALSE(callbacks->hadError());
+
+        ICameraServiceListener::TorchStatus torchStatus = listener->getTorchStatus(i);
+        if (torchStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
+            // Check torch calls
+            EXPECT_EQ(OK, service->setTorchMode(String16(String8::format("%d", i)),
+                    /*enabled*/true, callbacks));
+            EXPECT_TRUE(listener->waitForTorchState(
+                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
+            EXPECT_EQ(OK, service->setTorchMode(String16(String8::format("%d", i)),
+                    /*enabled*/false, callbacks));
+            EXPECT_TRUE(listener->waitForTorchState(
+                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF, i));
+        }
+    }
+
+    EXPECT_EQ(OK, service->removeListener(listener));
+}
+
+// Test fixture for client focused binder tests
+class CameraClientBinderTest : public testing::Test {
+protected:
+    sp<ICameraService> service;
+    int32_t numCameras;
+    std::vector<std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>>> openDeviceList;
+    sp<TestCameraServiceListener> serviceListener;
+
+    std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>> openNewDevice(int deviceId) {
+
+        sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
+        sp<ICameraDeviceUser> device;
+        {
+            SCOPED_TRACE("openNewDevice");
+            EXPECT_EQ(OK, service->connectDevice(callbacks, deviceId, String16("meeeeeeeee!"),
+                    ICameraService::USE_CALLING_UID, /*out*/device));
+        }
+        auto p = std::make_pair(callbacks, device);
+        openDeviceList.push_back(p);
+        return p;
+    }
+
+    void closeDevice(std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>>& p) {
+        if (p.second.get() != nullptr) {
+            p.second->disconnect();
+            {
+                SCOPED_TRACE("closeDevice");
+                EXPECT_FALSE(p.first->hadError());
+            }
+        }
+        auto iter = std::find(openDeviceList.begin(), openDeviceList.end(), p);
+        if (iter != openDeviceList.end()) {
+            openDeviceList.erase(iter);
+        }
+    }
+
+    virtual void SetUp() {
+        ProcessState::self()->startThreadPool();
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder = sm->getService(String16("media.camera"));
+        service = interface_cast<ICameraService>(binder);
+        serviceListener = new TestCameraServiceListener();
+        service->addListener(serviceListener);
+        numCameras = service->getNumberOfCameras();
+    }
+
+    virtual void TearDown() {
+        service = nullptr;
+        numCameras = 0;
+        // Close devices from a copy of the list: closeDevice() erases entries
+        // from openDeviceList, which would invalidate iterators of a direct loop.
+        auto devices = openDeviceList;
+        for (auto& p : devices) {
+            closeDevice(p);
+        }
+    }
+
+};
+
+TEST_F(CameraClientBinderTest, CheckBinderCameraDeviceUser) {
+    ASSERT_NOT_NULL(service);
+
+    EXPECT_TRUE(serviceListener->waitForNumCameras(numCameras));
+    for (int32_t i = 0; i < numCameras; i++) {
+        // Make sure we're available, or skip device tests otherwise
+        ICameraServiceListener::Status s = serviceListener->getStatus(i);
+        EXPECT_EQ(ICameraServiceListener::STATUS_AVAILABLE, s);
+        if (s != ICameraServiceListener::STATUS_AVAILABLE) {
+            continue;
+        }
+
+        auto p = openNewDevice(i);
+        sp<TestCameraDeviceCallbacks> callbacks = p.first;
+        sp<ICameraDeviceUser> device = p.second;
+
+        // Set up a buffer queue; the vendor opaque (implementation-defined) format is
+        // used here because it is guaranteed to be present.
+        sp<IGraphicBufferProducer> gbProducer;
+        sp<IGraphicBufferConsumer> gbConsumer;
+        BufferQueue::createBufferQueue(&gbProducer, &gbConsumer);
+        sp<BufferItemConsumer> opaqueConsumer = new BufferItemConsumer(gbConsumer,
+                GRALLOC_USAGE_SW_READ_NEVER, /*maxImages*/2, /*controlledByApp*/true);
+        EXPECT_TRUE(opaqueConsumer.get() != nullptr);
+        opaqueConsumer->setName(String8("nom nom nom"));
+
+        // Set to VGA dimensions by default, as that size is guaranteed to be present.
+        EXPECT_EQ(OK, gbConsumer->setDefaultBufferSize(640, 480));
+        EXPECT_EQ(OK, gbConsumer->setDefaultBufferFormat(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED));
+
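+        // Wrap the producer in a Surface so it can be attached to capture requests later in the test.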
+        sp<Surface> surface(new Surface(gbProducer, /*controlledByApp*/false));
+
+        OutputConfiguration output(gbProducer, /*rotation*/0);
+
+        // Can we configure?
+        EXPECT_EQ(OK, device->beginConfigure());
+        status_t streamId = device->createStream(output);
+        EXPECT_LE(0, streamId);
+        EXPECT_EQ(OK, device->endConfigure());
+        EXPECT_FALSE(callbacks->hadError());
+
+        // Can we make requests?
+        CameraMetadata requestTemplate;
+        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
+                /*out*/&requestTemplate));
+        sp<CaptureRequest> request(new CaptureRequest());
+        request->mMetadata = requestTemplate;
+        request->mSurfaceList.add(surface);
+        request->mIsReprocess = false;
+        int64_t lastFrameNumber = 0;
+        int64_t lastFrameNumberPrev = 0;
+        callbacks->clearStatus();
+        int requestId = device->submitRequest(request, /*streaming*/true, /*out*/&lastFrameNumber);
+        EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
+        EXPECT_LE(0, requestId);
+
+        // Can we stop requests?
+        EXPECT_EQ(OK, device->cancelRequest(requestId, /*out*/&lastFrameNumber));
+        EXPECT_TRUE(callbacks->waitForIdle());
+        EXPECT_FALSE(callbacks->hadError());
+
+        // Can we do it again?
+        lastFrameNumberPrev = lastFrameNumber;
+        lastFrameNumber = 0;
+        requestTemplate.clear();
+        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
+                /*out*/&requestTemplate));
+        sp<CaptureRequest> request2(new CaptureRequest());
+        request2->mMetadata = requestTemplate;
+        request2->mSurfaceList.add(surface);
+        request2->mIsReprocess = false;
+        callbacks->clearStatus();
+        int requestId2 = device->submitRequest(request2, /*streaming*/true,
+                /*out*/&lastFrameNumber);
+        EXPECT_EQ(-1, lastFrameNumber);
+        lastFrameNumber = 0;
+        EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
+        EXPECT_LE(0, requestId2);
+        EXPECT_EQ(OK, device->cancelRequest(requestId2, /*out*/&lastFrameNumber));
+        EXPECT_TRUE(callbacks->waitForIdle());
+        EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
+        sleep(/*second*/1); // allow some time for errors to show up, if any
+        EXPECT_FALSE(callbacks->hadError());
+
+        // Can we do it with a request list?
+        lastFrameNumberPrev = lastFrameNumber;
+        lastFrameNumber = 0;
+        requestTemplate.clear();
+        CameraMetadata requestTemplate2;
+        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
+                /*out*/&requestTemplate));
+        EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
+                /*out*/&requestTemplate2));
+        sp<CaptureRequest> request3(new CaptureRequest());
+        sp<CaptureRequest> request4(new CaptureRequest());
+        request3->mMetadata = requestTemplate;
+        request3->mSurfaceList.add(surface);
+        request3->mIsReprocess = false;
+        request4->mMetadata = requestTemplate2;
+        request4->mSurfaceList.add(surface);
+        request4->mIsReprocess = false;
+        List<sp<CaptureRequest>> requestList;
+        requestList.push_back(request3);
+        requestList.push_back(request4);
+
+        callbacks->clearStatus();
+        int requestId3 = device->submitRequestList(requestList, /*streaming*/false,
+                /*out*/&lastFrameNumber);
+        EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
+        EXPECT_TRUE(callbacks->waitForIdle());
+        EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
+        sleep(/*second*/1); // allow some time for errors to show up, if any
+        EXPECT_FALSE(callbacks->hadError());
+
+        // Can we unconfigure?
+        EXPECT_EQ(OK, device->beginConfigure());
+        EXPECT_EQ(OK, device->deleteStream(streamId));
+        EXPECT_EQ(OK, device->endConfigure());
+        sleep(/*second*/1); // allow some time for errors to show up, if any
+        EXPECT_FALSE(callbacks->hadError());
+
+        closeDevice(p);
+    }
+
+};
diff --git a/camera/tests/ProCameraTests.cpp b/camera/tests/ProCameraTests.cpp
deleted file mode 100644
index 1f5867a..0000000
--- a/camera/tests/ProCameraTests.cpp
+++ /dev/null
@@ -1,1278 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <gtest/gtest.h>
-#include <iostream>
-
-#include <binder/IPCThreadState.h>
-#include <utils/Thread.h>
-
-#include "Camera.h"
-#include "ProCamera.h"
-#include <utils/Vector.h>
-#include <utils/Mutex.h>
-#include <utils/Condition.h>
-
-#include <gui/SurfaceComposerClient.h>
-#include <gui/Surface.h>
-
-#include <system/camera_metadata.h>
-#include <hardware/camera2.h> // for CAMERA2_TEMPLATE_PREVIEW only
-#include <camera/CameraMetadata.h>
-
-#include <camera/ICameraServiceListener.h>
-
-namespace android {
-namespace camera2 {
-namespace tests {
-namespace client {
-
-#define CAMERA_ID 0
-#define TEST_DEBUGGING 0
-
-#define TEST_LISTENER_TIMEOUT 1000000000 // 1 second listener timeout
-#define TEST_FORMAT HAL_PIXEL_FORMAT_Y16 //TODO: YUY2 instead
-
-#define TEST_FORMAT_MAIN HAL_PIXEL_FORMAT_Y8
-#define TEST_FORMAT_DEPTH HAL_PIXEL_FORMAT_Y16
-
-// defaults for display "test"
-#define TEST_DISPLAY_FORMAT HAL_PIXEL_FORMAT_Y8
-#define TEST_DISPLAY_WIDTH 320
-#define TEST_DISPLAY_HEIGHT 240
-
-#define TEST_CPU_FRAME_COUNT 2
-#define TEST_CPU_HEAP_COUNT 5
-
-#define TEST_FRAME_PROCESSING_DELAY_US 200000 // 200 ms
-
-#if TEST_DEBUGGING
-#define dout std::cerr
-#else
-#define dout if (0) std::cerr
-#endif
-
-#define EXPECT_OK(x) EXPECT_EQ(OK, (x))
-#define ASSERT_OK(x) ASSERT_EQ(OK, (x))
-
-class ProCameraTest;
-
-struct ServiceListener : public BnCameraServiceListener {
-
-    ServiceListener() :
-        mLatestStatus(STATUS_UNKNOWN),
-        mPrevStatus(STATUS_UNKNOWN)
-    {
-    }
-
-    void onStatusChanged(Status status, int32_t cameraId) {
-        dout << "On status changed: 0x" << std::hex
-             << (unsigned int) status << " cameraId " << cameraId
-             << std::endl;
-
-        Mutex::Autolock al(mMutex);
-
-        mLatestStatus = status;
-        mCondition.broadcast();
-    }
-
-    status_t waitForStatusChange(Status& newStatus) {
-        Mutex::Autolock al(mMutex);
-
-        if (mLatestStatus != mPrevStatus) {
-            newStatus = mLatestStatus;
-            mPrevStatus = mLatestStatus;
-            return OK;
-        }
-
-        status_t stat = mCondition.waitRelative(mMutex,
-                                               TEST_LISTENER_TIMEOUT);
-
-        if (stat == OK) {
-            newStatus = mLatestStatus;
-            mPrevStatus = mLatestStatus;
-        }
-
-        return stat;
-    }
-
-    Condition mCondition;
-    Mutex mMutex;
-
-    Status mLatestStatus;
-    Status mPrevStatus;
-};
-
-enum ProEvent {
-    UNKNOWN,
-    ACQUIRED,
-    RELEASED,
-    STOLEN,
-    FRAME_RECEIVED,
-    RESULT_RECEIVED,
-};
-
-inline int ProEvent_Mask(ProEvent e) {
-    return (1 << static_cast<int>(e));
-}
-
-typedef Vector<ProEvent> EventList;
-
-class ProCameraTestThread : public Thread
-{
-public:
-    ProCameraTestThread() {
-    }
-
-    virtual bool threadLoop() {
-        mProc = ProcessState::self();
-        mProc->startThreadPool();
-
-        IPCThreadState *ptr = IPCThreadState::self();
-
-        ptr->joinThreadPool();
-
-        return false;
-    }
-
-    sp<ProcessState> mProc;
-};
-
-class ProCameraTestListener : public ProCameraListener {
-
-public:
-    static const int EVENT_MASK_ALL = 0xFFFFFFFF;
-
-    ProCameraTestListener() {
-        mEventMask = EVENT_MASK_ALL;
-        mDropFrames = false;
-    }
-
-    status_t WaitForEvent() {
-        Mutex::Autolock cal(mConditionMutex);
-
-        {
-            Mutex::Autolock al(mListenerMutex);
-
-            if (mProEventList.size() > 0) {
-                return OK;
-            }
-        }
-
-        return mListenerCondition.waitRelative(mConditionMutex,
-                                               TEST_LISTENER_TIMEOUT);
-    }
-
-    /* Read events into out. Existing queue is flushed */
-    void ReadEvents(EventList& out) {
-        Mutex::Autolock al(mListenerMutex);
-
-        for (size_t i = 0; i < mProEventList.size(); ++i) {
-            out.push(mProEventList[i]);
-        }
-
-        mProEventList.clear();
-    }
-
-    /**
-      * Dequeue 1 event from the event queue.
-      * Returns UNKNOWN if queue is empty
-      */
-    ProEvent ReadEvent() {
-        Mutex::Autolock al(mListenerMutex);
-
-        if (mProEventList.size() == 0) {
-            return UNKNOWN;
-        }
-
-        ProEvent ev = mProEventList[0];
-        mProEventList.removeAt(0);
-
-        return ev;
-    }
-
-    void SetEventMask(int eventMask) {
-        Mutex::Autolock al(mListenerMutex);
-        mEventMask = eventMask;
-    }
-
-    // Automatically acquire/release frames as they are available
-    void SetDropFrames(bool dropFrames) {
-        Mutex::Autolock al(mListenerMutex);
-        mDropFrames = dropFrames;
-    }
-
-private:
-    void QueueEvent(ProEvent ev) {
-        bool eventAdded = false;
-        {
-            Mutex::Autolock al(mListenerMutex);
-
-            // Drop events not part of mask
-            if (ProEvent_Mask(ev) & mEventMask) {
-                mProEventList.push(ev);
-                eventAdded = true;
-            }
-        }
-
-        if (eventAdded) {
-            mListenerCondition.broadcast();
-        }
-    }
-
-protected:
-
-    //////////////////////////////////////////////////
-    ///////// ProCameraListener //////////////////////
-    //////////////////////////////////////////////////
-
-
-    // Lock has been acquired. Write operations now available.
-    virtual void onLockAcquired() {
-        QueueEvent(ACQUIRED);
-    }
-    // Lock has been released with exclusiveUnlock
-    virtual void onLockReleased() {
-        QueueEvent(RELEASED);
-    }
-
-    // Lock has been stolen by another client.
-    virtual void onLockStolen() {
-        QueueEvent(STOLEN);
-    }
-
-    // Lock free.
-    virtual void onTriggerNotify(int32_t ext1, int32_t ext2, int32_t ext3) {
-
-        dout << "Trigger notify: " << ext1 << " " << ext2
-             << " " << ext3 << std::endl;
-    }
-
-    virtual void onFrameAvailable(int streamId,
-                                  const sp<CpuConsumer>& consumer) {
-
-        QueueEvent(FRAME_RECEIVED);
-
-        Mutex::Autolock al(mListenerMutex);
-        if (mDropFrames) {
-            CpuConsumer::LockedBuffer buf;
-            status_t ret;
-
-            if (OK == (ret = consumer->lockNextBuffer(&buf))) {
-
-                dout << "Frame received on streamId = " << streamId <<
-                        ", dataPtr = " << (void*)buf.data <<
-                        ", timestamp = " << buf.timestamp << std::endl;
-
-                EXPECT_OK(consumer->unlockBuffer(buf));
-            }
-        } else {
-            dout << "Frame received on streamId = " << streamId << std::endl;
-        }
-    }
-
-    virtual void onResultReceived(int32_t requestId,
-                                  camera_metadata* request) {
-        dout << "Result received requestId = " << requestId
-             << ", requestPtr = " << (void*)request << std::endl;
-        QueueEvent(RESULT_RECEIVED);
-        free_camera_metadata(request);
-    }
-
-    virtual void notify(int32_t msg, int32_t ext1, int32_t ext2) {
-        dout << "Notify received: msg " << std::hex << msg
-             << ", ext1: " << std::hex << ext1 << ", ext2: " << std::hex << ext2
-             << std::endl;
-    }
-
-    Vector<ProEvent> mProEventList;
-    Mutex             mListenerMutex;
-    Mutex             mConditionMutex;
-    Condition         mListenerCondition;
-    int               mEventMask;
-    bool              mDropFrames;
-};
-
-class ProCameraTest : public ::testing::Test {
-
-public:
-    ProCameraTest() {
-        char* displaySecsEnv = getenv("TEST_DISPLAY_SECS");
-        if (displaySecsEnv != NULL) {
-            mDisplaySecs = atoi(displaySecsEnv);
-            if (mDisplaySecs < 0) {
-                mDisplaySecs = 0;
-            }
-        } else {
-            mDisplaySecs = 0;
-        }
-
-        char* displayFmtEnv = getenv("TEST_DISPLAY_FORMAT");
-        if (displayFmtEnv != NULL) {
-            mDisplayFmt = FormatFromString(displayFmtEnv);
-        } else {
-            mDisplayFmt = TEST_DISPLAY_FORMAT;
-        }
-
-        char* displayWidthEnv = getenv("TEST_DISPLAY_WIDTH");
-        if (displayWidthEnv != NULL) {
-            mDisplayW = atoi(displayWidthEnv);
-            if (mDisplayW < 0) {
-                mDisplayW = 0;
-            }
-        } else {
-            mDisplayW = TEST_DISPLAY_WIDTH;
-        }
-
-        char* displayHeightEnv = getenv("TEST_DISPLAY_HEIGHT");
-        if (displayHeightEnv != NULL) {
-            mDisplayH = atoi(displayHeightEnv);
-            if (mDisplayH < 0) {
-                mDisplayH = 0;
-            }
-        } else {
-            mDisplayH = TEST_DISPLAY_HEIGHT;
-        }
-    }
-
-    static void SetUpTestCase() {
-        // Binder Thread Pool Initialization
-        mTestThread = new ProCameraTestThread();
-        mTestThread->run("ProCameraTestThread");
-    }
-
-    virtual void SetUp() {
-        mCamera = ProCamera::connect(CAMERA_ID);
-        ASSERT_NE((void*)NULL, mCamera.get());
-
-        mListener = new ProCameraTestListener();
-        mCamera->setListener(mListener);
-    }
-
-    virtual void TearDown() {
-        ASSERT_NE((void*)NULL, mCamera.get());
-        mCamera->disconnect();
-    }
-
-protected:
-    sp<ProCamera> mCamera;
-    sp<ProCameraTestListener> mListener;
-
-    static sp<Thread> mTestThread;
-
-    int mDisplaySecs;
-    int mDisplayFmt;
-    int mDisplayW;
-    int mDisplayH;
-
-    sp<SurfaceComposerClient> mComposerClient;
-    sp<SurfaceControl> mSurfaceControl;
-
-    sp<SurfaceComposerClient> mDepthComposerClient;
-    sp<SurfaceControl> mDepthSurfaceControl;
-
-    int getSurfaceWidth() {
-        return 512;
-    }
-    int getSurfaceHeight() {
-        return 512;
-    }
-
-    void createOnScreenSurface(sp<Surface>& surface) {
-        mComposerClient = new SurfaceComposerClient;
-        ASSERT_EQ(NO_ERROR, mComposerClient->initCheck());
-
-        mSurfaceControl = mComposerClient->createSurface(
-                String8("ProCameraTest StreamingImage Surface"),
-                getSurfaceWidth(), getSurfaceHeight(),
-                PIXEL_FORMAT_RGB_888, 0);
-
-        mSurfaceControl->setPosition(0, 0);
-
-        ASSERT_TRUE(mSurfaceControl != NULL);
-        ASSERT_TRUE(mSurfaceControl->isValid());
-
-        SurfaceComposerClient::openGlobalTransaction();
-        ASSERT_EQ(NO_ERROR, mSurfaceControl->setLayer(0x7FFFFFFF));
-        ASSERT_EQ(NO_ERROR, mSurfaceControl->show());
-        SurfaceComposerClient::closeGlobalTransaction();
-
-        sp<ANativeWindow> window = mSurfaceControl->getSurface();
-        surface = mSurfaceControl->getSurface();
-
-        ASSERT_NE((void*)NULL, surface.get());
-    }
-
-    void createDepthOnScreenSurface(sp<Surface>& surface) {
-        mDepthComposerClient = new SurfaceComposerClient;
-        ASSERT_EQ(NO_ERROR, mDepthComposerClient->initCheck());
-
-        mDepthSurfaceControl = mDepthComposerClient->createSurface(
-                String8("ProCameraTest StreamingImage Surface"),
-                getSurfaceWidth(), getSurfaceHeight(),
-                PIXEL_FORMAT_RGB_888, 0);
-
-        mDepthSurfaceControl->setPosition(640, 0);
-
-        ASSERT_TRUE(mDepthSurfaceControl != NULL);
-        ASSERT_TRUE(mDepthSurfaceControl->isValid());
-
-        SurfaceComposerClient::openGlobalTransaction();
-        ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->setLayer(0x7FFFFFFF));
-        ASSERT_EQ(NO_ERROR, mDepthSurfaceControl->show());
-        SurfaceComposerClient::closeGlobalTransaction();
-
-        sp<ANativeWindow> window = mDepthSurfaceControl->getSurface();
-        surface = mDepthSurfaceControl->getSurface();
-
-        ASSERT_NE((void*)NULL, surface.get());
-    }
-
-    template <typename T>
-    static bool ExistsItem(T needle, T* array, size_t count) {
-        if (!array) {
-            return false;
-        }
-
-        for (size_t i = 0; i < count; ++i) {
-            if (array[i] == needle) {
-                return true;
-            }
-        }
-        return false;
-    }
-
-
-    static int FormatFromString(const char* str) {
-        std::string s(str);
-
-#define CMP_STR(x, y)                               \
-        if (s == #x) return HAL_PIXEL_FORMAT_ ## y;
-#define CMP_STR_SAME(x) CMP_STR(x, x)
-
-        CMP_STR_SAME( Y16);
-        CMP_STR_SAME( Y8);
-        CMP_STR_SAME( YV12);
-        CMP_STR(NV16, YCbCr_422_SP);
-        CMP_STR(NV21, YCrCb_420_SP);
-        CMP_STR(YUY2, YCbCr_422_I);
-        CMP_STR(RAW,  RAW_SENSOR);
-        CMP_STR(RGBA, RGBA_8888);
-
-        std::cerr << "Unknown format string " << str << std::endl;
-        return -1;
-
-    }
-
-    /**
-     * Creating a streaming request for these output streams from a template,
-     *  and submit it
-     */
-    void createSubmitRequestForStreams(int32_t* streamIds, size_t count, int requestCount=-1) {
-
-        ASSERT_NE((void*)NULL, streamIds);
-        ASSERT_LT(0u, count);
-
-        camera_metadata_t *requestTmp = NULL;
-        EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                                                /*out*/&requestTmp));
-        ASSERT_NE((void*)NULL, requestTmp);
-        CameraMetadata request(requestTmp);
-
-        // set the output streams. default is empty
-
-        uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-        request.update(tag, streamIds, count);
-
-        requestTmp = request.release();
-
-        if (requestCount < 0) {
-            EXPECT_OK(mCamera->submitRequest(requestTmp, /*streaming*/true));
-        } else {
-            for (int i = 0; i < requestCount; ++i) {
-                EXPECT_OK(mCamera->submitRequest(requestTmp,
-                                                 /*streaming*/false));
-            }
-        }
-        request.acquire(requestTmp);
-    }
-};
-
-sp<Thread> ProCameraTest::mTestThread;
-
-TEST_F(ProCameraTest, AvailableFormats) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    CameraMetadata staticInfo = mCamera->getCameraInfo(CAMERA_ID);
-    ASSERT_FALSE(staticInfo.isEmpty());
-
-    uint32_t tag = static_cast<uint32_t>(ANDROID_SCALER_AVAILABLE_FORMATS);
-    EXPECT_TRUE(staticInfo.exists(tag));
-    camera_metadata_entry_t entry = staticInfo.find(tag);
-
-    EXPECT_TRUE(ExistsItem<int32_t>(HAL_PIXEL_FORMAT_YV12,
-                                                  entry.data.i32, entry.count));
-    EXPECT_TRUE(ExistsItem<int32_t>(HAL_PIXEL_FORMAT_YCrCb_420_SP,
-                                                  entry.data.i32, entry.count));
-}
-
-// test around exclusiveTryLock (immediate locking)
-TEST_F(ProCameraTest, LockingImmediate) {
-
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(ACQUIRED) |
-                            ProEvent_Mask(STOLEN)   |
-                            ProEvent_Mask(RELEASED));
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveTryLock());
-    // at this point we definitely have the lock
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(ACQUIRED, mListener->ReadEvent());
-
-    EXPECT_TRUE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveUnlock());
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(RELEASED, mListener->ReadEvent());
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-}
-
-// test around exclusiveLock (locking at some future point in time)
-TEST_F(ProCameraTest, LockingAsynchronous) {
-
-    if (HasFatalFailure()) {
-        return;
-    }
-
-
-    mListener->SetEventMask(ProEvent_Mask(ACQUIRED) |
-                            ProEvent_Mask(STOLEN)   |
-                            ProEvent_Mask(RELEASED));
-
-    // TODO: Add another procamera that has a lock here.
-    // then we can be test that the lock wont immediately be acquired
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveTryLock());
-    // at this point we definitely have the lock
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(ACQUIRED, mListener->ReadEvent());
-
-    EXPECT_TRUE(mCamera->hasExclusiveLock());
-    EXPECT_EQ(OK, mCamera->exclusiveUnlock());
-
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(RELEASED, mListener->ReadEvent());
-
-    EXPECT_FALSE(mCamera->hasExclusiveLock());
-}
-
-// Stream directly to the screen.
-TEST_F(ProCameraTest, DISABLED_StreamingImageSingle) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    sp<Surface> surface;
-    if (mDisplaySecs > 0) {
-        createOnScreenSurface(/*out*/surface);
-    }
-    else {
-        dout << "Skipping, will not render to screen" << std::endl;
-        return;
-    }
-
-    int depthStreamId = -1;
-
-    sp<ServiceListener> listener = new ServiceListener();
-    EXPECT_OK(ProCamera::addServiceListener(listener));
-
-    ServiceListener::Status currentStatus;
-
-    // when subscribing a new listener,
-    // we immediately get a callback to the current status
-    while (listener->waitForStatusChange(/*out*/currentStatus) != OK);
-    EXPECT_EQ(ServiceListener::STATUS_PRESENT, currentStatus);
-
-    dout << "Will now stream and resume infinitely..." << std::endl;
-    while (true) {
-
-        if (currentStatus == ServiceListener::STATUS_PRESENT) {
-
-            ASSERT_OK(mCamera->createStream(mDisplayW, mDisplayH, mDisplayFmt,
-                                            surface,
-                                            &depthStreamId));
-            EXPECT_NE(-1, depthStreamId);
-
-            EXPECT_OK(mCamera->exclusiveTryLock());
-
-            int32_t streams[] = { depthStreamId };
-            ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(
-                                                 streams,
-                                                 /*count*/1));
-        }
-
-        ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN;
-
-        // TODO: maybe check for getch every once in a while?
-        while (listener->waitForStatusChange(/*out*/stat) != OK);
-
-        if (currentStatus != stat) {
-            if (stat == ServiceListener::STATUS_PRESENT) {
-                dout << "Reconnecting to camera" << std::endl;
-                mCamera = ProCamera::connect(CAMERA_ID);
-            } else if (stat == ServiceListener::STATUS_NOT_AVAILABLE) {
-                dout << "Disconnecting from camera" << std::endl;
-                mCamera->disconnect();
-            } else if (stat == ServiceListener::STATUS_NOT_PRESENT) {
-                dout << "Camera unplugged" << std::endl;
-                mCamera = NULL;
-            } else {
-                dout << "Unknown status change "
-                     << std::hex << stat << std::endl;
-            }
-
-            currentStatus = stat;
-        }
-    }
-
-    EXPECT_OK(ProCamera::removeServiceListener(listener));
-    EXPECT_OK(mCamera->deleteStream(depthStreamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-// Stream directly to the screen.
-TEST_F(ProCameraTest, DISABLED_StreamingImageDual) {
-    if (HasFatalFailure()) {
-        return;
-    }
-    sp<Surface> surface;
-    sp<Surface> depthSurface;
-    if (mDisplaySecs > 0) {
-        createOnScreenSurface(/*out*/surface);
-        createDepthOnScreenSurface(/*out*/depthSurface);
-    }
-
-    int streamId = -1;
-    EXPECT_OK(mCamera->createStream(/*width*/1280, /*height*/960,
-              TEST_FORMAT_MAIN, surface, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    int depthStreamId = -1;
-    EXPECT_OK(mCamera->createStream(/*width*/320, /*height*/240,
-              TEST_FORMAT_DEPTH, depthSurface, &depthStreamId));
-    EXPECT_NE(-1, depthStreamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    /*
-    */
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    // it would probably be better to use CameraMetadata from camera service.
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-              /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    /*FIXME: dont need this later, at which point the above should become an
-             ASSERT_NE*/
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    // wow what a verbose API.
-    int32_t allStreams[] = { streamId, depthStreamId };
-    // IMPORTANT. bad things will happen if its not a uint8.
-    size_t streamCount = sizeof(allStreams) / sizeof(allStreams[0]);
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                                      /*data_count*/streamCount) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                                                /*data_count*/streamCount));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-                  &allStreams, /*data_count*/streamCount, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    dout << "will sleep now for " << mDisplaySecs << std::endl;
-    sleep(mDisplaySecs);
-
-    free_camera_metadata(request);
-
-    for (size_t i = 0; i < streamCount; ++i) {
-        EXPECT_OK(mCamera->deleteStream(allStreams[i]));
-    }
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, CpuConsumerSingle) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(ACQUIRED) |
-                            ProEvent_Mask(STOLEN)   |
-                            ProEvent_Mask(RELEASED) |
-                            ProEvent_Mask(FRAME_RECEIVED));
-    mListener->SetDropFrames(true);
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240,
-                TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    EXPECT_EQ(OK, mListener->WaitForEvent());
-    EXPECT_EQ(ACQUIRED, mListener->ReadEvent());
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    // it would probably be better to use CameraMetadata from camera service.
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-        /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    /*FIXME: dont need this later, at which point the above should become an
-      ASSERT_NE*/
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    int32_t allStreams[] = { streamId };
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                /*data_count*/1) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                /*data_count*/1));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-            &allStreams, /*data_count*/1, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    // Consume a couple of frames
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent());
-    }
-
-    // Done: clean up
-    free_camera_metadata(request);
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, CpuConsumerDual) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(FRAME_RECEIVED));
-    mListener->SetDropFrames(true);
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    int depthStreamId = -1;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240,
-            TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &consumer, &depthStreamId));
-    EXPECT_NE(-1, depthStreamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    /*
-    */
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    // it would probably be better to use CameraMetadata from camera service.
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                                            /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    // wow what a verbose API.
-    int32_t allStreams[] = { streamId, depthStreamId };
-    size_t streamCount = 2;
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                                      /*data_count*/streamCount) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                                                   /*data_count*/streamCount));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-                              &allStreams, /*data_count*/streamCount, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    // Consume a couple of frames
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        // stream id 1
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent());
-
-        // stream id 2
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(FRAME_RECEIVED, mListener->ReadEvent());
-
-        //TODO: events should be a struct with some data like the stream id
-    }
-
-    // Done: clean up
-    free_camera_metadata(request);
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, ResultReceiver) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetEventMask(ProEvent_Mask(RESULT_RECEIVED));
-    mListener->SetDropFrames(true);
-    //FIXME: if this is run right after the previous test we get FRAME_RECEIVED
-    // need to filter out events at read time
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-    /*
-    */
-    /* iterate in a loop submitting requests every frame.
-     *  what kind of requests doesnt really matter, just whatever.
-     */
-
-    camera_metadata_t *request = NULL;
-    EXPECT_OK(mCamera->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW,
-                                            /*out*/&request));
-    EXPECT_NE((void*)NULL, request);
-
-    /*FIXME*/
-    if(request == NULL) request = allocate_camera_metadata(10, 100);
-
-    // set the output streams to just this stream ID
-
-    int32_t allStreams[] = { streamId };
-    size_t streamCount = 1;
-    camera_metadata_entry_t entry;
-    uint32_t tag = static_cast<uint32_t>(ANDROID_REQUEST_OUTPUT_STREAMS);
-    int find = find_camera_metadata_entry(request, tag, &entry);
-    if (find == -ENOENT) {
-        if (add_camera_metadata_entry(request, tag, &allStreams,
-                                      /*data_count*/streamCount) != OK) {
-            camera_metadata_t *tmp = allocate_camera_metadata(1000, 10000);
-            ASSERT_OK(append_camera_metadata(tmp, request));
-            free_camera_metadata(request);
-            request = tmp;
-
-            ASSERT_OK(add_camera_metadata_entry(request, tag, &allStreams,
-                                                /*data_count*/streamCount));
-        }
-    } else {
-        ASSERT_OK(update_camera_metadata_entry(request, entry.index,
-                               &allStreams, /*data_count*/streamCount, &entry));
-    }
-
-    EXPECT_OK(mCamera->submitRequest(request, /*streaming*/true));
-
-    // Consume a couple of results
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_EQ(OK, mListener->WaitForEvent());
-        EXPECT_EQ(RESULT_RECEIVED, mListener->ReadEvent());
-    }
-
-    // Done: clean up
-    free_camera_metadata(request);
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-// FIXME: This is racy and sometimes fails on waitForFrameMetadata
-TEST_F(ProCameraTest, DISABLED_WaitForResult) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    mListener->SetDropFrames(true);
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                 TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1));
-
-    // Consume a couple of results
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_OK(mCamera->waitForFrameMetadata());
-        CameraMetadata meta = mCamera->consumeFrameMetadata();
-        EXPECT_FALSE(meta.isEmpty());
-    }
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, WaitForSingleStreamBuffer) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                  TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
-                                            /*requests*/TEST_CPU_FRAME_COUNT));
-
-    // Consume a couple of results
-    for (int i = 0; i < TEST_CPU_FRAME_COUNT; ++i) {
-        EXPECT_EQ(1, mCamera->waitForFrameBuffer(streamId));
-
-        CpuConsumer::LockedBuffer buf;
-        EXPECT_OK(consumer->lockNextBuffer(&buf));
-
-        dout << "Buffer synchronously received on streamId = " << streamId <<
-                ", dataPtr = " << (void*)buf.data <<
-                ", timestamp = " << buf.timestamp << std::endl;
-
-        EXPECT_OK(consumer->unlockBuffer(buf));
-    }
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-// FIXME: This is racy and sometimes fails on waitForFrameMetadata
-TEST_F(ProCameraTest, DISABLED_WaitForDualStreamBuffer) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    const int REQUEST_COUNT = TEST_CPU_FRAME_COUNT * 10;
-
-    // 15 fps
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                 TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    // 30 fps
-    int depthStreamId = -1;
-    sp<CpuConsumer> depthConsumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/320, /*height*/240,
-       TEST_FORMAT_DEPTH, TEST_CPU_HEAP_COUNT, &depthConsumer, &depthStreamId));
-    EXPECT_NE(-1, depthStreamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId, depthStreamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/2,
-                                                    /*requests*/REQUEST_COUNT));
-
-    int depthFrames = 0;
-    int greyFrames = 0;
-
-    // Consume two frames simultaneously. Unsynchronized by timestamps.
-    for (int i = 0; i < REQUEST_COUNT; ++i) {
-
-        // Exhaust event queue so it doesn't keep growing
-        while (mListener->ReadEvent() != UNKNOWN);
-
-        // Get the metadata
-        EXPECT_OK(mCamera->waitForFrameMetadata());
-        CameraMetadata meta = mCamera->consumeFrameMetadata();
-        EXPECT_FALSE(meta.isEmpty());
-
-        // Get the buffers
-
-        EXPECT_EQ(1, mCamera->waitForFrameBuffer(depthStreamId));
-
-        /**
-          * Guaranteed to be able to consume the depth frame,
-          * since we waited on it.
-          */
-        CpuConsumer::LockedBuffer depthBuffer;
-        EXPECT_OK(depthConsumer->lockNextBuffer(&depthBuffer));
-
-        dout << "Depth Buffer synchronously received on streamId = " <<
-                streamId <<
-                ", dataPtr = " << (void*)depthBuffer.data <<
-                ", timestamp = " << depthBuffer.timestamp << std::endl;
-
-        EXPECT_OK(depthConsumer->unlockBuffer(depthBuffer));
-
-        depthFrames++;
-
-
-        /** Consume Greyscale frames if there are any.
-          * There may not be since it runs at half FPS */
-        CpuConsumer::LockedBuffer greyBuffer;
-        while (consumer->lockNextBuffer(&greyBuffer) == OK) {
-
-            dout << "GRAY Buffer synchronously received on streamId = " <<
-                streamId <<
-                ", dataPtr = " << (void*)greyBuffer.data <<
-                ", timestamp = " << greyBuffer.timestamp << std::endl;
-
-            EXPECT_OK(consumer->unlockBuffer(greyBuffer));
-
-            greyFrames++;
-        }
-    }
-
-    dout << "Done, summary: depth frames " << std::dec << depthFrames
-         << ", grey frames " << std::dec << greyFrames << std::endl;
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesSync) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT;
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                  TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT,
-                  /*synchronousMode*/true, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
-                                                     /*requests*/NUM_REQUESTS));
-
-    // Consume a couple of results
-    for (int i = 0; i < NUM_REQUESTS; ++i) {
-        int numFrames;
-        EXPECT_TRUE((numFrames = mCamera->waitForFrameBuffer(streamId)) > 0);
-
-        // Drop all but the newest framebuffer
-        EXPECT_EQ(numFrames-1, mCamera->dropFrameBuffer(streamId, numFrames-1));
-
-        dout << "Dropped " << (numFrames - 1) << " frames" << std::endl;
-
-        // Skip the counter ahead, don't try to consume these frames again
-        i += numFrames-1;
-
-        // "Consume" the buffer
-        CpuConsumer::LockedBuffer buf;
-        EXPECT_OK(consumer->lockNextBuffer(&buf));
-
-        dout << "Buffer synchronously received on streamId = " << streamId <<
-                ", dataPtr = " << (void*)buf.data <<
-                ", timestamp = " << buf.timestamp << std::endl;
-
-        // Process at 10fps, stream is at 15fps.
-        // This means we will definitely fill up the buffer queue with
-        // extra buffers and need to drop them.
-        usleep(TEST_FRAME_PROCESSING_DELAY_US);
-
-        EXPECT_OK(consumer->unlockBuffer(buf));
-    }
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-TEST_F(ProCameraTest, WaitForSingleStreamBufferAndDropFramesAsync) {
-    if (HasFatalFailure()) {
-        return;
-    }
-
-    const int NUM_REQUESTS = 20 * TEST_CPU_FRAME_COUNT;
-
-    int streamId = -1;
-    sp<CpuConsumer> consumer;
-    EXPECT_OK(mCamera->createStreamCpu(/*width*/1280, /*height*/960,
-                  TEST_FORMAT_MAIN, TEST_CPU_HEAP_COUNT,
-                  /*synchronousMode*/false, &consumer, &streamId));
-    EXPECT_NE(-1, streamId);
-
-    EXPECT_OK(mCamera->exclusiveTryLock());
-
-    int32_t streams[] = { streamId };
-    ASSERT_NO_FATAL_FAILURE(createSubmitRequestForStreams(streams, /*count*/1,
-                                                     /*requests*/NUM_REQUESTS));
-
-    uint64_t lastFrameNumber = 0;
-    int numFrames;
-
-    // Consume a couple of results
-    int i;
-    for (i = 0; i < NUM_REQUESTS && lastFrameNumber < NUM_REQUESTS; ++i) {
-        EXPECT_LT(0, (numFrames = mCamera->waitForFrameBuffer(streamId)));
-
-        dout << "Dropped " << (numFrames - 1) << " frames" << std::endl;
-
-        // Skip the counter ahead, don't try to consume these frames again
-        i += numFrames-1;
-
-        // "Consume" the buffer
-        CpuConsumer::LockedBuffer buf;
-
-        EXPECT_EQ(OK, consumer->lockNextBuffer(&buf));
-
-        lastFrameNumber = buf.frameNumber;
-
-        dout << "Buffer asynchronously received on streamId = " << streamId <<
-                ", dataPtr = " << (void*)buf.data <<
-                ", timestamp = " << buf.timestamp <<
-                ", framenumber = " << buf.frameNumber << std::endl;
-
-        // Process at 10fps, stream is at 15fps.
-        // This means we will definitely fill up the buffer queue with
-        // extra buffers and need to drop them.
-        usleep(TEST_FRAME_PROCESSING_DELAY_US);
-
-        EXPECT_OK(consumer->unlockBuffer(buf));
-    }
-
-    dout << "Done after " << i << " iterations " << std::endl;
-
-    // Done: clean up
-    EXPECT_OK(mCamera->deleteStream(streamId));
-    EXPECT_OK(mCamera->exclusiveUnlock());
-}
-
-
-
-//TODO: refactor into separate file
-TEST_F(ProCameraTest, ServiceListenersSubscribe) {
-
-    ASSERT_EQ(4u, sizeof(ServiceListener::Status));
-
-    sp<ServiceListener> listener = new ServiceListener();
-
-    EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener));
-    EXPECT_OK(ProCamera::addServiceListener(listener));
-
-    EXPECT_EQ(ALREADY_EXISTS, ProCamera::addServiceListener(listener));
-    EXPECT_OK(ProCamera::removeServiceListener(listener));
-
-    EXPECT_EQ(BAD_VALUE, ProCamera::removeServiceListener(listener));
-}
-
-//TODO: refactor into separate file
-TEST_F(ProCameraTest, ServiceListenersFunctional) {
-
-    sp<ServiceListener> listener = new ServiceListener();
-
-    EXPECT_OK(ProCamera::addServiceListener(listener));
-
-    sp<Camera> cam = Camera::connect(CAMERA_ID,
-                                     /*clientPackageName*/String16(),
-                                     -1);
-    EXPECT_NE((void*)NULL, cam.get());
-
-    ServiceListener::Status stat = ServiceListener::STATUS_UNKNOWN;
-    EXPECT_OK(listener->waitForStatusChange(/*out*/stat));
-
-    EXPECT_EQ(ServiceListener::STATUS_NOT_AVAILABLE, stat);
-
-    if (cam.get()) {
-        cam->disconnect();
-    }
-
-    EXPECT_OK(listener->waitForStatusChange(/*out*/stat));
-    EXPECT_EQ(ServiceListener::STATUS_PRESENT, stat);
-
-    EXPECT_OK(ProCamera::removeServiceListener(listener));
-}
-
-
-
-}
-}
-}
-}
diff --git a/camera/tests/VendorTagDescriptorTests.cpp b/camera/tests/VendorTagDescriptorTests.cpp
index 6624e79..9082dbf 100644
--- a/camera/tests/VendorTagDescriptorTests.cpp
+++ b/camera/tests/VendorTagDescriptorTests.cpp
@@ -53,6 +53,10 @@
 
 extern "C" {
 
+static int zero_get_tag_count(const vendor_tag_ops_t* vOps) {
+    return 0;
+}
+
 static int default_get_tag_count(const vendor_tag_ops_t* vOps) {
     return VENDOR_TAG_COUNT_ERR;
 }
@@ -173,10 +177,13 @@
     vendor_tag_ops_t vOps;
     FillWithDefaults(&vOps);
 
+    // Make empty tag count
+    vOps.get_tag_count = zero_get_tag_count;
+
     // Ensure create fails when using null vOps
     EXPECT_EQ(BAD_VALUE, VendorTagDescriptor::createDescriptorFromOps(/*vOps*/NULL, vDesc));
 
-    // Ensure create works when there are no vtags defined in a well-formed vOps
+    // Ensure create succeeds for empty vendor tag ops
     ASSERT_EQ(OK, VendorTagDescriptor::createDescriptorFromOps(&vOps, vDesc));
 
     // Ensure defaults are returned when no vtags are defined, or tag is unknown
diff --git a/cmds/screenrecord/screenrecord.cpp b/cmds/screenrecord/screenrecord.cpp
index 02df1d2..36a7e73 100644
--- a/cmds/screenrecord/screenrecord.cpp
+++ b/cmds/screenrecord/screenrecord.cpp
@@ -23,7 +23,10 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 #include <sys/wait.h>
+
 #include <termios.h>
 #include <unistd.h>
 
@@ -637,7 +640,13 @@
         case FORMAT_MP4: {
             // Configure muxer.  We have to wait for the CSD blob from the encoder
             // before we can start it.
-            muxer = new MediaMuxer(fileName, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+            int fd = open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+            if (fd < 0) {
+                fprintf(stderr, "ERROR: couldn't open file\n");
+                abort();
+            }
+            muxer = new MediaMuxer(fd, MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+            close(fd);
             if (gRotate) {
                 muxer->setOrientationHint(90);  // TODO: does this do anything?
             }
diff --git a/cmds/stagefright/Android.mk b/cmds/stagefright/Android.mk
index 561ce02..20c0094 100644
--- a/cmds/stagefright/Android.mk
+++ b/cmds/stagefright/Android.mk
@@ -17,7 +17,8 @@
 	$(TOP)/frameworks/native/include/media/openmax \
 	external/jpeg \
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -40,7 +41,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -63,7 +65,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -87,7 +90,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -110,7 +114,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -133,7 +138,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -157,7 +163,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
@@ -169,6 +176,49 @@
 
 include $(CLEAR_VARS)
 
+LOCAL_SRC_FILES:= \
+	filters/argbtorgba.rs \
+	filters/nightvision.rs \
+	filters/saturation.rs \
+	mediafilter.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright \
+	liblog \
+	libutils \
+	libbinder \
+	libstagefright_foundation \
+	libmedia \
+	libgui \
+	libcutils \
+	libui \
+	libRScpp \
+
+LOCAL_C_INCLUDES:= \
+	$(TOP)/frameworks/av/media/libstagefright \
+	$(TOP)/frameworks/native/include/media/openmax \
+	$(TOP)/frameworks/rs/cpp \
+	$(TOP)/frameworks/rs \
+
+intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
+LOCAL_C_INCLUDES += $(intermediates)
+
+LOCAL_STATIC_LIBRARIES:= \
+	libstagefright_mediafilter
+
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
+
+LOCAL_MODULE_TAGS := optional
+
+LOCAL_MODULE:= mediafilter
+
+include $(BUILD_EXECUTABLE)
+
+################################################################################
+
+include $(CLEAR_VARS)
+
 LOCAL_SRC_FILES:=               \
         muxer.cpp            \
 
@@ -180,7 +230,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
diff --git a/cmds/stagefright/SimplePlayer.cpp b/cmds/stagefright/SimplePlayer.cpp
index 4b2f980..50913cd 100644
--- a/cmds/stagefright/SimplePlayer.cpp
+++ b/cmds/stagefright/SimplePlayer.cpp
@@ -21,6 +21,7 @@
 #include "SimplePlayer.h"
 
 #include <gui/Surface.h>
+
 #include <media/AudioTrack.h>
 #include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
@@ -29,7 +30,6 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/NuMediaExtractor.h>
 
 namespace android {
@@ -59,47 +59,46 @@
     return err;
 }
 status_t SimplePlayer::setDataSource(const char *path) {
-    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
+    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
     msg->setString("path", path);
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t SimplePlayer::setSurface(const sp<IGraphicBufferProducer> &bufferProducer) {
-    sp<AMessage> msg = new AMessage(kWhatSetSurface, id());
+    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);
 
     sp<Surface> surface;
     if (bufferProducer != NULL) {
         surface = new Surface(bufferProducer);
     }
 
-    msg->setObject(
-            "native-window", new NativeWindowWrapper(surface));
+    msg->setObject("surface", surface);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t SimplePlayer::prepare() {
-    sp<AMessage> msg = new AMessage(kWhatPrepare, id());
+    sp<AMessage> msg = new AMessage(kWhatPrepare, this);
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t SimplePlayer::start() {
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    sp<AMessage> msg = new AMessage(kWhatStart, this);
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t SimplePlayer::stop() {
-    sp<AMessage> msg = new AMessage(kWhatStop, id());
+    sp<AMessage> msg = new AMessage(kWhatStop, this);
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t SimplePlayer::reset() {
-    sp<AMessage> msg = new AMessage(kWhatReset, id());
+    sp<AMessage> msg = new AMessage(kWhatReset, this);
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
@@ -116,7 +115,7 @@
                 mState = UNPREPARED;
             }
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> response = new AMessage;
@@ -132,14 +131,12 @@
                 err = INVALID_OPERATION;
             } else {
                 sp<RefBase> obj;
-                CHECK(msg->findObject("native-window", &obj));
-
-                mNativeWindow = static_cast<NativeWindowWrapper *>(obj.get());
-
+                CHECK(msg->findObject("surface", &obj));
+                mSurface = static_cast<Surface *>(obj.get());
                 err = OK;
             }
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> response = new AMessage;
@@ -161,7 +158,7 @@
                 }
             }
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> response = new AMessage;
@@ -194,7 +191,7 @@
                 }
             }
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> response = new AMessage;
@@ -217,7 +214,7 @@
                 }
             }
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> response = new AMessage;
@@ -240,7 +237,7 @@
                 mState = UNINITIALIZED;
             }
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> response = new AMessage;
@@ -324,7 +321,7 @@
 
         err = state->mCodec->configure(
                 format,
-                isVideo ? mNativeWindow->getSurfaceTextureClient() : NULL,
+                isVideo ? mSurface : NULL,
                 NULL /* crypto */,
                 0 /* flags */);
 
@@ -382,7 +379,7 @@
 
     mStartTimeRealUs = -1ll;
 
-    sp<AMessage> msg = new AMessage(kWhatDoMoreStuff, id());
+    sp<AMessage> msg = new AMessage(kWhatDoMoreStuff, this);
     msg->setInt32("generation", ++mDoMoreStuffGeneration);
     msg->post();
 
@@ -411,7 +408,7 @@
     mStateByTrackIndex.clear();
     mCodecLooper.clear();
     mExtractor.clear();
-    mNativeWindow.clear();
+    mSurface.clear();
     mPath.clear();
 
     return OK;
@@ -428,12 +425,12 @@
             err = state->mCodec->dequeueInputBuffer(&index);
 
             if (err == OK) {
-                ALOGV("dequeued input buffer on track %d",
+                ALOGV("dequeued input buffer on track %zu",
                       mStateByTrackIndex.keyAt(i));
 
                 state->mAvailInputBufferIndices.push_back(index);
             } else {
-                ALOGV("dequeueInputBuffer on track %d returned %d",
+                ALOGV("dequeueInputBuffer on track %zu returned %d",
                       mStateByTrackIndex.keyAt(i), err);
             }
         } while (err == OK);
@@ -448,7 +445,7 @@
                     &info.mFlags);
 
             if (err == OK) {
-                ALOGV("dequeued output buffer on track %d",
+                ALOGV("dequeued output buffer on track %zu",
                       mStateByTrackIndex.keyAt(i));
 
                 state->mAvailOutputBufferInfos.push_back(info);
@@ -459,7 +456,7 @@
                 err = state->mCodec->getOutputBuffers(&state->mBuffers[1]);
                 CHECK_EQ(err, (status_t)OK);
             } else {
-                ALOGV("dequeueOutputBuffer on track %d returned %d",
+                ALOGV("dequeueOutputBuffer on track %zu returned %d",
                       mStateByTrackIndex.keyAt(i), err);
             }
         } while (err == OK
@@ -502,7 +499,7 @@
                     0);
             CHECK_EQ(err, (status_t)OK);
 
-            ALOGV("enqueued input data on track %d", trackIndex);
+            ALOGV("enqueued input data on track %zu", trackIndex);
 
             err = mExtractor->advance();
             CHECK_EQ(err, (status_t)OK);
@@ -528,8 +525,8 @@
                 bool release = true;
 
                 if (lateByUs > 30000ll) {
-                    ALOGI("track %d buffer late by %lld us, dropping.",
-                          mStateByTrackIndex.keyAt(i), lateByUs);
+                    ALOGI("track %zu buffer late by %lld us, dropping.",
+                          mStateByTrackIndex.keyAt(i), (long long)lateByUs);
                     state->mCodec->releaseOutputBuffer(info->mIndex);
                 } else {
                     if (state->mAudioTrack != NULL) {
@@ -558,8 +555,8 @@
                     break;
                 }
             } else {
-                ALOGV("track %d buffer early by %lld us.",
-                      mStateByTrackIndex.keyAt(i), -lateByUs);
+                ALOGV("track %zu buffer early by %lld us.",
+                      mStateByTrackIndex.keyAt(i), (long long)-lateByUs);
                 break;
             }
         }
@@ -569,7 +566,7 @@
 }
 
 status_t SimplePlayer::onOutputFormatChanged(
-        size_t trackIndex, CodecState *state) {
+        size_t trackIndex __unused, CodecState *state) {
     sp<AMessage> format;
     status_t err = state->mCodec->getOutputFormat(&format);
 
@@ -640,7 +637,7 @@
     if (delayUs > 2000ll) {
         ALOGW("AudioTrack::write took %lld us, numFramesAvailableToWrite=%u, "
               "numFramesWritten=%u",
-              delayUs, numFramesAvailableToWrite, numFramesWritten);
+              (long long)delayUs, numFramesAvailableToWrite, numFramesWritten);
     }
 
     info->mOffset += nbytes;
diff --git a/cmds/stagefright/SimplePlayer.h b/cmds/stagefright/SimplePlayer.h
index 0a06059..ae9dfd2 100644
--- a/cmds/stagefright/SimplePlayer.h
+++ b/cmds/stagefright/SimplePlayer.h
@@ -23,10 +23,10 @@
 struct ABuffer;
 struct ALooper;
 struct AudioTrack;
-struct IGraphicBufferProducer;
+class IGraphicBufferProducer;
 struct MediaCodec;
-struct NativeWindowWrapper;
 struct NuMediaExtractor;
+class Surface;
 
 struct SimplePlayer : public AHandler {
     SimplePlayer();
@@ -84,7 +84,7 @@
 
     State mState;
     AString mPath;
-    sp<NativeWindowWrapper> mNativeWindow;
+    sp<Surface> mSurface;
 
     sp<NuMediaExtractor> mExtractor;
     sp<ALooper> mCodecLooper;
diff --git a/cmds/stagefright/audioloop.cpp b/cmds/stagefright/audioloop.cpp
index 96073f1..6e9e6ec 100644
--- a/cmds/stagefright/audioloop.cpp
+++ b/cmds/stagefright/audioloop.cpp
@@ -14,6 +14,12 @@
  * limitations under the License.
  */
 
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <utils/String16.h>
+
 #include <binder/ProcessState.h>
 #include <media/mediarecorder.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -30,7 +36,7 @@
 
 static void usage(const char* name)
 {
-    fprintf(stderr, "Usage: %s [-d duration] [-m] [-w] [<output-file>]\n", name);
+    fprintf(stderr, "Usage: %s [-d du.ration] [-m] [-w] [<output-file>]\n", name);
     fprintf(stderr, "Encodes either a sine wave or microphone input to AMR format\n");
     fprintf(stderr, "    -d    duration in seconds, default 5 seconds\n");
     fprintf(stderr, "    -m    use microphone for input, default sine source\n");
@@ -81,6 +87,7 @@
         // talk into the appropriate microphone for the duration
         source = new AudioSource(
                 AUDIO_SOURCE_MIC,
+                String16(),
                 kSampleRate,
                 channels);
     } else {
@@ -109,7 +116,12 @@
 
     if (fileOut != NULL) {
         // target file specified, write encoded AMR output
-        sp<AMRWriter> writer = new AMRWriter(fileOut);
+        int fd = open(fileOut, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+        if (fd < 0) {
+            return 1;
+        }
+        sp<AMRWriter> writer = new AMRWriter(fd);
+        close(fd);
         writer->addSource(encoder);
         writer->start();
         sleep(duration);
diff --git a/cmds/stagefright/codec.cpp b/cmds/stagefright/codec.cpp
index fd02bcc..dae9bbe 100644
--- a/cmds/stagefright/codec.cpp
+++ b/cmds/stagefright/codec.cpp
@@ -45,9 +45,10 @@
     fprintf(stderr, "usage: %s [-a] use audio\n"
                     "\t\t[-v] use video\n"
                     "\t\t[-p] playback\n"
-                    "\t\t[-S] allocate buffers from a surface\n",
+                    "\t\t[-S] allocate buffers from a surface\n"
+                    "\t\t[-R] render output to surface (enables -S)\n"
+                    "\t\t[-T] use render timestamps (enables -R)\n",
                     me);
-
     exit(1);
 }
 
@@ -71,7 +72,9 @@
         const char *path,
         bool useAudio,
         bool useVideo,
-        const android::sp<android::Surface> &surface) {
+        const android::sp<android::Surface> &surface,
+        bool renderSurface,
+        bool useTimestamp) {
     using namespace android;
 
     static int64_t kTimeout = 500ll;
@@ -105,7 +108,7 @@
             continue;
         }
 
-        ALOGV("selecting track %d", i);
+        ALOGV("selecting track %zu", i);
 
         err = extractor->selectTrack(i);
         CHECK_EQ(err, (status_t)OK);
@@ -136,6 +139,7 @@
     CHECK(!stateByTrack.isEmpty());
 
     int64_t startTimeUs = ALooper::GetNowUs();
+    int64_t startTimeRender = -1;
 
     for (size_t i = 0; i < stateByTrack.size(); ++i) {
         CodecState *state = &stateByTrack.editValueAt(i);
@@ -147,7 +151,7 @@
         CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
         CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));
 
-        ALOGV("got %d input and %d output buffers",
+        ALOGV("got %zu input and %zu output buffers",
               state->mInBuffers.size(), state->mOutBuffers.size());
     }
 
@@ -168,7 +172,7 @@
                 err = state->mCodec->dequeueInputBuffer(&index, kTimeout);
 
                 if (err == OK) {
-                    ALOGV("filling input buffer %d", index);
+                    ALOGV("filling input buffer %zu", index);
 
                     const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
 
@@ -205,7 +209,7 @@
                         state->mCodec->dequeueInputBuffer(&index, kTimeout);
 
                     if (err == OK) {
-                        ALOGV("signalling input EOS on track %d", i);
+                        ALOGV("signalling input EOS on track %zu", i);
 
                         err = state->mCodec->queueInputBuffer(
                                 index,
@@ -254,13 +258,29 @@
                     kTimeout);
 
             if (err == OK) {
-                ALOGV("draining output buffer %d, time = %lld us",
-                      index, presentationTimeUs);
+                ALOGV("draining output buffer %zu, time = %lld us",
+                      index, (long long)presentationTimeUs);
 
                 ++state->mNumBuffersDecoded;
                 state->mNumBytesDecoded += size;
 
-                err = state->mCodec->releaseOutputBuffer(index);
+                if (surface == NULL || !renderSurface) {
+                    err = state->mCodec->releaseOutputBuffer(index);
+                } else if (useTimestamp) {
+                    if (startTimeRender == -1) {
+                        // begin rendering 2 vsyncs (~33ms) after first decode
+                        startTimeRender =
+                                systemTime(SYSTEM_TIME_MONOTONIC) + 33000000
+                                - (presentationTimeUs * 1000);
+                    }
+                    presentationTimeUs =
+                            (presentationTimeUs * 1000) + startTimeRender;
+                    err = state->mCodec->renderOutputBufferAndRelease(
+                            index, presentationTimeUs);
+                } else {
+                    err = state->mCodec->renderOutputBufferAndRelease(index);
+                }
+
                 CHECK_EQ(err, (status_t)OK);
 
                 if (flags & MediaCodec::BUFFER_FLAG_EOS) {
@@ -273,7 +293,7 @@
                 CHECK_EQ((status_t)OK,
                          state->mCodec->getOutputBuffers(&state->mOutBuffers));
 
-                ALOGV("got %d output buffers", state->mOutBuffers.size());
+                ALOGV("got %zu output buffers", state->mOutBuffers.size());
             } else if (err == INFO_FORMAT_CHANGED) {
                 sp<AMessage> format;
                 CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));
@@ -293,17 +313,17 @@
         CHECK_EQ((status_t)OK, state->mCodec->release());
 
         if (state->mIsAudio) {
-            printf("track %zu: %" PRId64 " bytes received. %.2f KB/sec\n",
+            printf("track %zu: %lld bytes received. %.2f KB/sec\n",
                    i,
-                   state->mNumBytesDecoded,
+                   (long long)state->mNumBytesDecoded,
                    state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
         } else {
-            printf("track %zu: %" PRId64 " frames decoded, %.2f fps. %" PRId64
+            printf("track %zu: %lld frames decoded, %.2f fps. %lld"
                     " bytes received. %.2f KB/sec\n",
                    i,
-                   state->mNumBuffersDecoded,
+                   (long long)state->mNumBuffersDecoded,
                    state->mNumBuffersDecoded * 1E6 / elapsedTimeUs,
-                   state->mNumBytesDecoded,
+                   (long long)state->mNumBytesDecoded,
                    state->mNumBytesDecoded * 1E6 / 1024 / elapsedTimeUs);
         }
     }
@@ -320,34 +340,42 @@
     bool useVideo = false;
     bool playback = false;
     bool useSurface = false;
+    bool renderSurface = false;
+    bool useTimestamp = false;
 
     int res;
-    while ((res = getopt(argc, argv, "havpSD")) >= 0) {
+    while ((res = getopt(argc, argv, "havpSDRT")) >= 0) {
         switch (res) {
             case 'a':
             {
                 useAudio = true;
                 break;
             }
-
             case 'v':
             {
                 useVideo = true;
                 break;
             }
-
             case 'p':
             {
                 playback = true;
                 break;
             }
-
+            case 'T':
+            {
+                useTimestamp = true;
+            }
+            // fall through
+            case 'R':
+            {
+                renderSurface = true;
+            }
+            // fall through
             case 'S':
             {
                 useSurface = true;
                 break;
             }
-
             case '?':
             case 'h':
             default:
@@ -390,7 +418,7 @@
         ssize_t displayWidth = info.w;
         ssize_t displayHeight = info.h;
 
-        ALOGV("display is %ld x %ld\n", displayWidth, displayHeight);
+        ALOGV("display is %zd x %zd\n", displayWidth, displayHeight);
 
         control = composerClient->createSurface(
                 String8("A Surface"),
@@ -422,7 +450,8 @@
         player->stop();
         player->reset();
     } else {
-        decode(looper, argv[0], useAudio, useVideo, surface);
+        decode(looper, argv[0], useAudio, useVideo, surface, renderSurface,
+                useTimestamp);
     }
 
     if (playback || (useSurface && useVideo)) {
diff --git a/include/media/nbaio/roundup.h b/cmds/stagefright/filters/argbtorgba.rs
similarity index 65%
copy from include/media/nbaio/roundup.h
copy to cmds/stagefright/filters/argbtorgba.rs
index 4c3cc25..229ff8c 100644
--- a/include/media/nbaio/roundup.h
+++ b/cmds/stagefright/filters/argbtorgba.rs
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2014 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,18 +14,13 @@
  * limitations under the License.
  */
 
-#ifndef ROUNDUP_H
-#define ROUNDUP_H
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
 
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Round up to the next highest power of 2
-unsigned roundup(unsigned v);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // ROUNDUP_H
+void root(const uchar4 *v_in, uchar4 *v_out) {
+    v_out->x = v_in->y;
+    v_out->y = v_in->z;
+    v_out->z = v_in->w;
+    v_out->w = v_in->x;
+}
\ No newline at end of file
diff --git a/cmds/stagefright/filters/nightvision.rs b/cmds/stagefright/filters/nightvision.rs
new file mode 100644
index 0000000..f61413c
--- /dev/null
+++ b/cmds/stagefright/filters/nightvision.rs
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+const static float3 gNightVisionMult = {0.5f, 1.f, 0.5f};
+
+// calculates luminance of pixel, then biases color balance toward green
+void root(const uchar4 *v_in, uchar4 *v_out) {
+    v_out->x = v_in->x; // don't modify A
+
+    // get RGB, scale 0-255 uchar to 0-1.0 float
+    float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
+            v_in->w * 0.003921569f};
+
+    // apply filter
+    float3 result = dot(rgb, gMonoMult) * gNightVisionMult;
+
+    v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+    v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+    v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
diff --git a/cmds/stagefright/filters/saturation.rs b/cmds/stagefright/filters/saturation.rs
new file mode 100644
index 0000000..1de9dd8
--- /dev/null
+++ b/cmds/stagefright/filters/saturation.rs
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+
+// global variables (parameters accessible to application code)
+float gSaturation = 1.0f;
+
+void root(const uchar4 *v_in, uchar4 *v_out) {
+    v_out->x = v_in->x; // don't modify A
+
+    // get RGB, scale 0-255 uchar to 0-1.0 float
+    float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
+            v_in->w * 0.003921569f};
+
+    // apply saturation filter
+    float3 result = dot(rgb, gMonoMult);
+    result = mix(result, rgb, gSaturation);
+
+    v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+    v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+    v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
new file mode 100644
index 0000000..1183112
--- /dev/null
+++ b/cmds/stagefright/mediafilter.cpp
@@ -0,0 +1,785 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "mediafilterTest"
+
+#include <inttypes.h>
+
+#include <binder/ProcessState.h>
+#include <filters/ColorConvert.h>
+#include <gui/ISurfaceComposer.h>
+#include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/NuMediaExtractor.h>
+#include <media/stagefright/RenderScriptWrapper.h>
+#include <OMX_IVCommon.h>
+#include <ui/DisplayInfo.h>
+
+#include "RenderScript.h"
+#include "ScriptC_argbtorgba.h"
+#include "ScriptC_nightvision.h"
+#include "ScriptC_saturation.h"
+
+// test parameters
+static const bool kTestFlush = true;        // Note: true will drop 1 out of
+static const int kFlushAfterFrames = 25;    // kFlushAfterFrames output frames
+static const int64_t kTimeout = 500ll;
+
+// built-in filter parameters
+static const int32_t kInvert = false;   // ZeroFilter param
+static const float kBlurRadius = 15.0f; // IntrinsicBlurFilter param
+static const float kSaturation = 0.0f;  // SaturationFilter param
+
+static void usage(const char *me) {
+    fprintf(stderr, "usage: [flags] %s\n"
+                    "\t[-b] use IntrinsicBlurFilter\n"
+                    "\t[-c] use argb to rgba conversion RSFilter\n"
+                    "\t[-n] use night vision RSFilter\n"
+                    "\t[-r] use saturation RSFilter\n"
+                    "\t[-s] use SaturationFilter\n"
+                    "\t[-z] use ZeroFilter (copy filter)\n"
+                    "\t[-R] render output to surface (enables -S)\n"
+                    "\t[-S] allocate buffers from a surface\n"
+                    "\t[-T] use render timestamps (enables -R)\n",
+                    me);
+    exit(1);
+}
+
+namespace android {
+
+struct SaturationRSFilter : RenderScriptWrapper::RSFilterCallback {
+    void init(RSC::sp<RSC::RS> context) {
+        mScript = new ScriptC_saturation(context);
+        mScript->set_gSaturation(3.f);
+    }
+
+    virtual status_t processBuffers(
+            RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
+        mScript->forEach_root(inBuffer, outBuffer);
+
+        return OK;
+    }
+
+    status_t handleSetParameters(const sp<AMessage> &msg __unused) {
+        return OK;
+    }
+
+private:
+    RSC::sp<ScriptC_saturation> mScript;
+};
+
+struct NightVisionRSFilter : RenderScriptWrapper::RSFilterCallback {
+    void init(RSC::sp<RSC::RS> context) {
+        mScript = new ScriptC_nightvision(context);
+    }
+
+    virtual status_t processBuffers(
+            RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
+        mScript->forEach_root(inBuffer, outBuffer);
+
+        return OK;
+    }
+
+    status_t handleSetParameters(const sp<AMessage> &msg __unused) {
+        return OK;
+    }
+
+private:
+    RSC::sp<ScriptC_nightvision> mScript;
+};
+
+struct ARGBToRGBARSFilter : RenderScriptWrapper::RSFilterCallback {
+    void init(RSC::sp<RSC::RS> context) {
+        mScript = new ScriptC_argbtorgba(context);
+    }
+
+    virtual status_t processBuffers(
+            RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
+        mScript->forEach_root(inBuffer, outBuffer);
+
+        return OK;
+    }
+
+    status_t handleSetParameters(const sp<AMessage> &msg __unused) {
+        return OK;
+    }
+
+private:
+    RSC::sp<ScriptC_argbtorgba> mScript;
+};
+
+struct CodecState {
+    sp<MediaCodec> mCodec;
+    Vector<sp<ABuffer> > mInBuffers;
+    Vector<sp<ABuffer> > mOutBuffers;
+    bool mSignalledInputEOS;
+    bool mSawOutputEOS;
+    int64_t mNumBuffersDecoded;
+};
+
+struct DecodedFrame {
+    size_t index;
+    size_t offset;
+    size_t size;
+    int64_t presentationTimeUs;
+    uint32_t flags;
+};
+
+enum FilterType {
+    FILTERTYPE_ZERO,
+    FILTERTYPE_INTRINSIC_BLUR,
+    FILTERTYPE_SATURATION,
+    FILTERTYPE_RS_SATURATION,
+    FILTERTYPE_RS_NIGHT_VISION,
+    FILTERTYPE_RS_ARGB_TO_RGBA,
+};
+
+size_t inputFramesSinceFlush = 0;
+void tryCopyDecodedBuffer(
+        List<DecodedFrame> *decodedFrameIndices,
+        CodecState *filterState,
+        CodecState *vidState) {
+    if (decodedFrameIndices->empty()) {
+        return;
+    }
+
+    size_t filterIndex;
+    status_t err = filterState->mCodec->dequeueInputBuffer(
+            &filterIndex, kTimeout);
+    if (err != OK) {
+        return;
+    }
+
+    ++inputFramesSinceFlush;
+
+    DecodedFrame frame = *decodedFrameIndices->begin();
+
+    // only consume a buffer if we are not going to flush, since we expect
+    // the dequeue -> flush -> queue operation to cause an error and
+    // not produce an output frame
+    if (!kTestFlush || inputFramesSinceFlush < kFlushAfterFrames) {
+        decodedFrameIndices->erase(decodedFrameIndices->begin());
+    }
+    size_t outIndex = frame.index;
+
+    const sp<ABuffer> &srcBuffer =
+        vidState->mOutBuffers.itemAt(outIndex);
+    const sp<ABuffer> &destBuffer =
+        filterState->mInBuffers.itemAt(filterIndex);
+
+    sp<AMessage> srcFormat, destFormat;
+    vidState->mCodec->getOutputFormat(&srcFormat);
+    filterState->mCodec->getInputFormat(&destFormat);
+
+    int32_t srcWidth, srcHeight, srcStride, srcSliceHeight;
+    int32_t srcColorFormat, destColorFormat;
+    int32_t destWidth, destHeight, destStride, destSliceHeight;
+    CHECK(srcFormat->findInt32("stride", &srcStride)
+            && srcFormat->findInt32("slice-height", &srcSliceHeight)
+            && srcFormat->findInt32("width", &srcWidth)
+            && srcFormat->findInt32("height", & srcHeight)
+            && srcFormat->findInt32("color-format", &srcColorFormat));
+    CHECK(destFormat->findInt32("stride", &destStride)
+            && destFormat->findInt32("slice-height", &destSliceHeight)
+            && destFormat->findInt32("width", &destWidth)
+            && destFormat->findInt32("height", & destHeight)
+            && destFormat->findInt32("color-format", &destColorFormat));
+
+    CHECK(srcWidth <= destStride && srcHeight <= destSliceHeight);
+
+    convertYUV420spToARGB(
+            srcBuffer->data(),
+            srcBuffer->data() + srcStride * srcSliceHeight,
+            srcWidth,
+            srcHeight,
+            destBuffer->data());
+
+    // copy timestamp
+    int64_t timeUs;
+    CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
+    destBuffer->meta()->setInt64("timeUs", timeUs);
+
+    if (kTestFlush && inputFramesSinceFlush >= kFlushAfterFrames) {
+        inputFramesSinceFlush = 0;
+
+        // check that queueing a buffer that was dequeued before flush
+        // fails with expected error EACCES
+        filterState->mCodec->flush();
+
+        err = filterState->mCodec->queueInputBuffer(
+                filterIndex, 0 /* offset */, destBuffer->size(),
+                timeUs, frame.flags);
+
+        if (err == OK) {
+            ALOGE("FAIL: queue after flush returned OK");
+        } else if (err != -EACCES) {
+            ALOGE("queueInputBuffer after flush returned %d, "
+                    "expected -EACCES (-13)", err);
+        }
+    } else {
+        err = filterState->mCodec->queueInputBuffer(
+                filterIndex, 0 /* offset */, destBuffer->size(),
+                timeUs, frame.flags);
+        CHECK(err == OK);
+
+        err = vidState->mCodec->releaseOutputBuffer(outIndex);
+        CHECK(err == OK);
+    }
+}
+
+size_t outputFramesSinceFlush = 0;
+void tryDrainOutputBuffer(
+        CodecState *filterState,
+        const sp<Surface> &surface, bool renderSurface,
+        bool useTimestamp, int64_t *startTimeRender) {
+    size_t index;
+    size_t offset;
+    size_t size;
+    int64_t presentationTimeUs;
+    uint32_t flags;
+    status_t err = filterState->mCodec->dequeueOutputBuffer(
+            &index, &offset, &size, &presentationTimeUs, &flags,
+            kTimeout);
+
+    if (err != OK) {
+        return;
+    }
+
+    ++outputFramesSinceFlush;
+
+    if (kTestFlush && outputFramesSinceFlush >= kFlushAfterFrames) {
+        filterState->mCodec->flush();
+    }
+
+    if (surface == NULL || !renderSurface) {
+        err = filterState->mCodec->releaseOutputBuffer(index);
+    } else if (useTimestamp) {
+        if (*startTimeRender == -1) {
+            // begin rendering 2 vsyncs after first decode
+            *startTimeRender = systemTime(SYSTEM_TIME_MONOTONIC)
+                    + 33000000 - (presentationTimeUs * 1000);
+        }
+        presentationTimeUs =
+                (presentationTimeUs * 1000) + *startTimeRender;
+        err = filterState->mCodec->renderOutputBufferAndRelease(
+                index, presentationTimeUs);
+    } else {
+        err = filterState->mCodec->renderOutputBufferAndRelease(index);
+    }
+
+    if (kTestFlush && outputFramesSinceFlush >= kFlushAfterFrames) {
+        outputFramesSinceFlush = 0;
+
+        // releasing the buffer dequeued before flush should cause an error
+        // if so, the frame will also be skipped in output stream
+        if (err == OK) {
+            ALOGE("FAIL: release after flush returned OK");
+        } else if (err != -EACCES) {
+            ALOGE("releaseOutputBuffer after flush returned %d, "
+                    "expected -EACCES (-13)", err);
+        }
+    } else {
+        CHECK(err == OK);
+    }
+
+    if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+        ALOGV("reached EOS on output.");
+        filterState->mSawOutputEOS = true;
+    }
+}
+
+static int decode(
+        const sp<ALooper> &looper,
+        const char *path,
+        const sp<Surface> &surface,
+        bool renderSurface,
+        bool useTimestamp,
+        FilterType filterType) {
+
+    static int64_t kTimeout = 500ll;
+
+    sp<NuMediaExtractor> extractor = new NuMediaExtractor;
+    if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
+        fprintf(stderr, "unable to instantiate extractor.\n");
+        return 1;
+    }
+
+    KeyedVector<size_t, CodecState> stateByTrack;
+
+    CodecState *vidState = NULL;
+    for (size_t i = 0; i < extractor->countTracks(); ++i) {
+        sp<AMessage> format;
+        status_t err = extractor->getTrackFormat(i, &format);
+        CHECK(err == OK);
+
+        AString mime;
+        CHECK(format->findString("mime", &mime));
+        bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
+        if (!isVideo) {
+            continue;
+        }
+
+        ALOGV("selecting track %zu", i);
+
+        err = extractor->selectTrack(i);
+        CHECK(err == OK);
+
+        CodecState *state =
+            &stateByTrack.editValueAt(stateByTrack.add(i, CodecState()));
+
+        vidState = state;
+
+        state->mNumBuffersDecoded = 0;
+
+        state->mCodec = MediaCodec::CreateByType(
+                looper, mime.c_str(), false /* encoder */);
+
+        CHECK(state->mCodec != NULL);
+
+        err = state->mCodec->configure(
+                format, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+
+        CHECK(err == OK);
+
+        state->mSignalledInputEOS = false;
+        state->mSawOutputEOS = false;
+
+        break;
+    }
+    CHECK(!stateByTrack.isEmpty());
+    CHECK(vidState != NULL);
+    sp<AMessage> vidFormat;
+    vidState->mCodec->getOutputFormat(&vidFormat);
+
+    // set filter to use ARGB8888
+    vidFormat->setInt32("color-format", OMX_COLOR_Format32bitARGB8888);
+    // set app cache directory path
+    vidFormat->setString("cacheDir", "/system/bin");
+
+    // create RenderScript context for RSFilters
+    RSC::sp<RSC::RS> context = new RSC::RS();
+    context->init("/system/bin");
+
+    sp<RenderScriptWrapper::RSFilterCallback> rsFilter;
+
+    // create renderscript wrapper for RSFilters
+    sp<RenderScriptWrapper> rsWrapper = new RenderScriptWrapper;
+    rsWrapper->mContext = context.get();
+
+    CodecState *filterState = new CodecState();
+    filterState->mNumBuffersDecoded = 0;
+
+    sp<AMessage> params = new AMessage();
+
+    switch (filterType) {
+        case FILTERTYPE_ZERO:
+        {
+            filterState->mCodec = MediaCodec::CreateByComponentName(
+                    looper, "android.filter.zerofilter");
+            params->setInt32("invert", kInvert);
+            break;
+        }
+        case FILTERTYPE_INTRINSIC_BLUR:
+        {
+            filterState->mCodec = MediaCodec::CreateByComponentName(
+                    looper, "android.filter.intrinsicblur");
+            params->setFloat("blur-radius", kBlurRadius);
+            break;
+        }
+        case FILTERTYPE_SATURATION:
+        {
+            filterState->mCodec = MediaCodec::CreateByComponentName(
+                    looper, "android.filter.saturation");
+            params->setFloat("saturation", kSaturation);
+            break;
+        }
+        case FILTERTYPE_RS_SATURATION:
+        {
+            SaturationRSFilter *satFilter = new SaturationRSFilter;
+            satFilter->init(context);
+            rsFilter = satFilter;
+            rsWrapper->mCallback = rsFilter;
+            vidFormat->setObject("rs-wrapper", rsWrapper);
+
+            filterState->mCodec = MediaCodec::CreateByComponentName(
+                    looper, "android.filter.RenderScript");
+            break;
+        }
+        case FILTERTYPE_RS_NIGHT_VISION:
+        {
+            NightVisionRSFilter *nightVisionFilter = new NightVisionRSFilter;
+            nightVisionFilter->init(context);
+            rsFilter = nightVisionFilter;
+            rsWrapper->mCallback = rsFilter;
+            vidFormat->setObject("rs-wrapper", rsWrapper);
+
+            filterState->mCodec = MediaCodec::CreateByComponentName(
+                    looper, "android.filter.RenderScript");
+            break;
+        }
+        case FILTERTYPE_RS_ARGB_TO_RGBA:
+        {
+            ARGBToRGBARSFilter *argbToRgbaFilter = new ARGBToRGBARSFilter;
+            argbToRgbaFilter->init(context);
+            rsFilter = argbToRgbaFilter;
+            rsWrapper->mCallback = rsFilter;
+            vidFormat->setObject("rs-wrapper", rsWrapper);
+
+            filterState->mCodec = MediaCodec::CreateByComponentName(
+                    looper, "android.filter.RenderScript");
+            break;
+        }
+        default:
+        {
+            LOG_ALWAYS_FATAL("mediafilter.cpp error: unrecognized FilterType");
+            break;
+        }
+    }
+    CHECK(filterState->mCodec != NULL);
+
+    status_t err = filterState->mCodec->configure(
+            vidFormat /* format */, surface, NULL /* crypto */, 0 /* flags */);
+    CHECK(err == OK);
+
+    filterState->mSignalledInputEOS = false;
+    filterState->mSawOutputEOS = false;
+
+    int64_t startTimeUs = ALooper::GetNowUs();
+    int64_t startTimeRender = -1;
+
+    for (size_t i = 0; i < stateByTrack.size(); ++i) {
+        CodecState *state = &stateByTrack.editValueAt(i);
+
+        sp<MediaCodec> codec = state->mCodec;
+
+        CHECK_EQ((status_t)OK, codec->start());
+
+        CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
+        CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));
+
+        ALOGV("got %zu input and %zu output buffers",
+                state->mInBuffers.size(), state->mOutBuffers.size());
+    }
+
+    CHECK_EQ((status_t)OK, filterState->mCodec->setParameters(params));
+
+    if (kTestFlush) {
+        status_t flushErr = filterState->mCodec->flush();
+        if (flushErr == OK) {
+            ALOGE("FAIL: Flush before start returned OK");
+        } else {
+            ALOGV("Flush before start returned status %d, usually ENOSYS (-38)",
+                    flushErr);
+        }
+    }
+
+    CHECK_EQ((status_t)OK, filterState->mCodec->start());
+    CHECK_EQ((status_t)OK, filterState->mCodec->getInputBuffers(
+            &filterState->mInBuffers));
+    CHECK_EQ((status_t)OK, filterState->mCodec->getOutputBuffers(
+            &filterState->mOutBuffers));
+
+    if (kTestFlush) {
+        status_t flushErr = filterState->mCodec->flush();
+        if (flushErr != OK) {
+            ALOGE("FAIL: Flush after start returned %d, expect OK (0)",
+                    flushErr);
+        } else {
+            ALOGV("Flush immediately after start OK");
+        }
+    }
+
+    List<DecodedFrame> decodedFrameIndices;
+
+    // loop until decoder reaches EOS
+    bool sawInputEOS = false;
+    bool sawOutputEOSOnAllTracks = false;
+    while (!sawOutputEOSOnAllTracks) {
+        if (!sawInputEOS) {
+            size_t trackIndex;
+            status_t err = extractor->getSampleTrackIndex(&trackIndex);
+
+            if (err != OK) {
+                ALOGV("saw input eos");
+                sawInputEOS = true;
+            } else {
+                CodecState *state = &stateByTrack.editValueFor(trackIndex);
+
+                size_t index;
+                err = state->mCodec->dequeueInputBuffer(&index, kTimeout);
+
+                if (err == OK) {
+                    ALOGV("filling input buffer %zu", index);
+
+                    const sp<ABuffer> &buffer = state->mInBuffers.itemAt(index);
+
+                    err = extractor->readSampleData(buffer);
+                    CHECK(err == OK);
+
+                    int64_t timeUs;
+                    err = extractor->getSampleTime(&timeUs);
+                    CHECK(err == OK);
+
+                    uint32_t bufferFlags = 0;
+
+                    err = state->mCodec->queueInputBuffer(
+                            index, 0 /* offset */, buffer->size(),
+                            timeUs, bufferFlags);
+
+                    CHECK(err == OK);
+
+                    extractor->advance();
+                } else {
+                    CHECK_EQ(err, -EAGAIN);
+                }
+            }
+        } else {
+            for (size_t i = 0; i < stateByTrack.size(); ++i) {
+                CodecState *state = &stateByTrack.editValueAt(i);
+
+                if (!state->mSignalledInputEOS) {
+                    size_t index;
+                    status_t err =
+                        state->mCodec->dequeueInputBuffer(&index, kTimeout);
+
+                    if (err == OK) {
+                        ALOGV("signalling input EOS on track %zu", i);
+
+                        err = state->mCodec->queueInputBuffer(
+                                index, 0 /* offset */, 0 /* size */,
+                                0ll /* timeUs */, MediaCodec::BUFFER_FLAG_EOS);
+
+                        CHECK(err == OK);
+
+                        state->mSignalledInputEOS = true;
+                    } else {
+                        CHECK_EQ(err, -EAGAIN);
+                    }
+                }
+            }
+        }
+
+        sawOutputEOSOnAllTracks = true;
+        for (size_t i = 0; i < stateByTrack.size(); ++i) {
+            CodecState *state = &stateByTrack.editValueAt(i);
+
+            if (state->mSawOutputEOS) {
+                continue;
+            } else {
+                sawOutputEOSOnAllTracks = false;
+            }
+
+            DecodedFrame frame;
+            status_t err = state->mCodec->dequeueOutputBuffer(
+                    &frame.index, &frame.offset, &frame.size,
+                    &frame.presentationTimeUs, &frame.flags, kTimeout);
+
+            if (err == OK) {
+                ALOGV("draining decoded buffer %zu, time = %lld us",
+                        frame.index, (long long)frame.presentationTimeUs);
+
+                ++(state->mNumBuffersDecoded);
+
+                decodedFrameIndices.push_back(frame);
+
+                if (frame.flags & MediaCodec::BUFFER_FLAG_EOS) {
+                    ALOGV("reached EOS on decoder output.");
+                    state->mSawOutputEOS = true;
+                }
+
+            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+                ALOGV("INFO_OUTPUT_BUFFERS_CHANGED");
+                CHECK_EQ((status_t)OK, state->mCodec->getOutputBuffers(
+                        &state->mOutBuffers));
+
+                ALOGV("got %zu output buffers", state->mOutBuffers.size());
+            } else if (err == INFO_FORMAT_CHANGED) {
+                sp<AMessage> format;
+                CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));
+
+                ALOGV("INFO_FORMAT_CHANGED: %s",
+                        format->debugString().c_str());
+            } else {
+                CHECK_EQ(err, -EAGAIN);
+            }
+
+            tryCopyDecodedBuffer(&decodedFrameIndices, filterState, vidState);
+
+            tryDrainOutputBuffer(
+                    filterState, surface, renderSurface,
+                    useTimestamp, &startTimeRender);
+        }
+    }
+
+    // after EOS on decoder, let filter reach EOS
+    while (!filterState->mSawOutputEOS) {
+        tryCopyDecodedBuffer(&decodedFrameIndices, filterState, vidState);
+
+        tryDrainOutputBuffer(
+                filterState, surface, renderSurface,
+                useTimestamp, &startTimeRender);
+    }
+
+    int64_t elapsedTimeUs = ALooper::GetNowUs() - startTimeUs;
+
+    for (size_t i = 0; i < stateByTrack.size(); ++i) {
+        CodecState *state = &stateByTrack.editValueAt(i);
+
+        CHECK_EQ((status_t)OK, state->mCodec->release());
+
+        printf("track %zu: %" PRId64 " frames decoded and filtered, "
+                "%.2f fps.\n", i, state->mNumBuffersDecoded,
+                state->mNumBuffersDecoded * 1E6 / elapsedTimeUs);
+    }
+
+    return 0;
+}
+
+}  // namespace android
+
+int main(int argc, char **argv) {
+    using namespace android;
+
+    const char *me = argv[0];
+
+    bool useSurface = false;
+    bool renderSurface = false;
+    bool useTimestamp = false;
+    FilterType filterType = FILTERTYPE_ZERO;
+
+    int res;
+    while ((res = getopt(argc, argv, "bcnrszTRSh")) >= 0) {
+        switch (res) {
+            case 'b':
+            {
+                filterType = FILTERTYPE_INTRINSIC_BLUR;
+                break;
+            }
+            case 'c':
+            {
+                filterType = FILTERTYPE_RS_ARGB_TO_RGBA;
+                break;
+            }
+            case 'n':
+            {
+                filterType = FILTERTYPE_RS_NIGHT_VISION;
+                break;
+            }
+            case 'r':
+            {
+                filterType = FILTERTYPE_RS_SATURATION;
+                break;
+            }
+            case 's':
+            {
+                filterType = FILTERTYPE_SATURATION;
+                break;
+            }
+            case 'z':
+            {
+                filterType = FILTERTYPE_ZERO;
+                break;
+            }
+            case 'T':
+            {
+                useTimestamp = true;
+            }
+            // fall through
+            case 'R':
+            {
+                renderSurface = true;
+            }
+            // fall through
+            case 'S':
+            {
+                useSurface = true;
+                break;
+            }
+            case '?':
+            case 'h':
+            default:
+            {
+                usage(me);
+                break;
+            }
+        }
+    }
+
+    argc -= optind;
+    argv += optind;
+
+    if (argc != 1) {
+        usage(me);
+    }
+
+    ProcessState::self()->startThreadPool();
+
+    DataSource::RegisterDefaultSniffers();
+
+    android::sp<ALooper> looper = new ALooper;
+    looper->start();
+
+    android::sp<SurfaceComposerClient> composerClient;
+    android::sp<SurfaceControl> control;
+    android::sp<Surface> surface;
+
+    if (useSurface) {
+        composerClient = new SurfaceComposerClient;
+        CHECK_EQ((status_t)OK, composerClient->initCheck());
+
+        android::sp<IBinder> display(SurfaceComposerClient::getBuiltInDisplay(
+                ISurfaceComposer::eDisplayIdMain));
+        DisplayInfo info;
+        SurfaceComposerClient::getDisplayInfo(display, &info);
+        ssize_t displayWidth = info.w;
+        ssize_t displayHeight = info.h;
+
+        ALOGV("display is %zd x %zd", displayWidth, displayHeight);
+
+        control = composerClient->createSurface(
+                String8("A Surface"), displayWidth, displayHeight,
+                PIXEL_FORMAT_RGBA_8888, 0);
+
+        CHECK(control != NULL);
+        CHECK(control->isValid());
+
+        SurfaceComposerClient::openGlobalTransaction();
+        CHECK_EQ((status_t)OK, control->setLayer(INT_MAX));
+        CHECK_EQ((status_t)OK, control->show());
+        SurfaceComposerClient::closeGlobalTransaction();
+
+        surface = control->getSurface();
+        CHECK(surface != NULL);
+    }
+
+    decode(looper, argv[0], surface, renderSurface, useTimestamp, filterType);
+
+    if (useSurface) {
+        composerClient->dispose();
+    }
+
+    looper->stop();
+
+    return 0;
+}
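
A note on the option handling above: the missing "break" statements in the 'T' and 'R' cases are deliberate, so -T implies -R and -R implies -S (timestamped output requires rendering, and rendering requires a surface). A minimal standalone sketch of the same fall-through idiom, with illustrative flag names rather than the tool's full option set:

    #include <unistd.h>
    #include <cstdio>

    int main(int argc, char **argv) {
        bool useTimestamp = false, renderSurface = false, useSurface = false;
        int res;
        while ((res = getopt(argc, argv, "TRS")) >= 0) {
            switch (res) {
                case 'T':
                    useTimestamp = true;
                    // fall through: timestamps only matter when rendering
                case 'R':
                    renderSurface = true;
                    // fall through: rendering needs a surface
                case 'S':
                    useSurface = true;
                    break;
            }
        }
        printf("T=%d R=%d S=%d\n", useTimestamp, renderSurface, useSurface);
        return 0;
    }
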
diff --git a/cmds/stagefright/muxer.cpp b/cmds/stagefright/muxer.cpp
index f4a33e8..36fa3b5 100644
--- a/cmds/stagefright/muxer.cpp
+++ b/cmds/stagefright/muxer.cpp
@@ -17,6 +17,9 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "muxer"
 #include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
 #include <utils/Log.h>
 
 #include <binder/ProcessState.h>
@@ -50,7 +53,6 @@
 using namespace android;
 
 static int muxing(
-        const android::sp<android::ALooper> &looper,
         const char *path,
         bool useAudio,
         bool useVideo,
@@ -72,8 +74,15 @@
     ALOGV("input file %s, output file %s", path, outputFileName);
     ALOGV("useAudio %d, useVideo %d", useAudio, useVideo);
 
-    sp<MediaMuxer> muxer = new MediaMuxer(outputFileName,
+    int fd = open(outputFileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+
+    if (fd < 0) {
+        ALOGE("couldn't open file");
+        return fd;
+    }
+    sp<MediaMuxer> muxer = new MediaMuxer(fd,
                                           MediaMuxer::OUTPUT_FORMAT_MPEG_4);
+    close(fd);
 
     size_t trackCount = extractor->countTracks();
     // Map the extractor's track index to the muxer's track index.
@@ -127,14 +136,19 @@
             }
         }
 
-        ALOGV("selecting track %d", i);
+        ALOGV("selecting track %zu", i);
 
         err = extractor->selectTrack(i);
         CHECK_EQ(err, (status_t)OK);
 
         ssize_t newTrackIndex = muxer->addTrack(format);
-        CHECK_GE(newTrackIndex, 0);
-        trackIndexMap.add(i, newTrackIndex);
+        if (newTrackIndex < 0) {
+            fprintf(stderr, "%s track (%zu) unsupported by muxer\n",
+                    isAudio ? "audio" : "video",
+                    i);
+        } else {
+            trackIndexMap.add(i, newTrackIndex);
+        }
     }
 
     int64_t muxerStartTimeUs = ALooper::GetNowUs();
@@ -153,7 +167,12 @@
             ALOGV("saw input eos, err %d", err);
             sawInputEOS = true;
             break;
+        } else if (trackIndexMap.indexOfKey(trackIndex) < 0) {
+            // ALOGV("skipping input from unsupported track %zu", trackIndex);
+            extractor->advance();
+            continue;
         } else {
+            // ALOGV("reading sample from track index %zu\n", trackIndex);
             err = extractor->readSampleData(newBuffer);
             CHECK_EQ(err, (status_t)OK);
 
@@ -298,7 +317,7 @@
     sp<ALooper> looper = new ALooper;
     looper->start();
 
-    int result = muxing(looper, argv[0], useAudio, useVideo, outputFileName,
+    int result = muxing(argv[0], useAudio, useVideo, outputFileName,
                         enableTrim, trimStartTimeMs, trimEndTimeMs, rotationDegrees);
 
     looper->stop();
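
The muxer change above replaces the path-based MediaMuxer constructor with one that takes an already-open file descriptor; the tool opens the output with O_LARGEFILE and owner-only permissions and closes its copy of the descriptor right after constructing the muxer, which suggests the writer keeps its own duplicate. A minimal sketch of just the descriptor handling, independent of the muxer (the output path is illustrative):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <sys/types.h>
    #include <unistd.h>
    #include <cstdio>

    #ifndef O_LARGEFILE
    #define O_LARGEFILE 0   // some libcs only define this with 64-bit offsets enabled
    #endif

    // Open an output file the way the updated tools do: create/truncate,
    // read-write, large-file aware, readable and writable by the owner only.
    static int openOutputFile(const char *path) {
        int fd = open(path, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR,
                      S_IRUSR | S_IWUSR);
        if (fd < 0) {
            perror("open");
        }
        return fd;
    }

    int main() {
        int fd = openOutputFile("/data/local/tmp/out.mp4");   // illustrative path
        if (fd < 0) {
            return 1;
        }
        // ... hand fd to a writer that keeps its own reference to the file,
        // then drop this copy, as muxer.cpp does after constructing MediaMuxer ...
        close(fd);
        return 0;
    }
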
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index fdc352e..594c933 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -32,13 +32,13 @@
 
 using namespace android;
 
+static const int32_t kAudioBitRate = 12200;
+#if 0
 static const int32_t kFramerate = 24;  // fps
 static const int32_t kIFramesIntervalSec = 1;
 static const int32_t kVideoBitRate = 512 * 1024;
-static const int32_t kAudioBitRate = 12200;
 static const int64_t kDurationUs = 10000000LL;  // 10 seconds
 
-#if 0
 class DummySource : public MediaSource {
 
 public:
diff --git a/cmds/stagefright/recordvideo.cpp b/cmds/stagefright/recordvideo.cpp
index 9f547c7..2ad40bd 100644
--- a/cmds/stagefright/recordvideo.cpp
+++ b/cmds/stagefright/recordvideo.cpp
@@ -17,6 +17,10 @@
 #include "SineSource.h"
 
 #include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
 #include <binder/ProcessState.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/AudioPlayer.h>
@@ -300,7 +304,13 @@
                 client.interface(), enc_meta, true /* createEncoder */, source,
                 0, preferSoftwareCodec ? OMXCodec::kPreferSoftwareCodecs : 0);
 
-    sp<MPEG4Writer> writer = new MPEG4Writer(fileName);
+    int fd = open(fileName, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+    if (fd < 0) {
+        fprintf(stderr, "couldn't open file %s\n", fileName);
+        return 1;
+    }
+    sp<MPEG4Writer> writer = new MPEG4Writer(fd);
+    close(fd);
     writer->addSource(encoder);
     int64_t start = systemTime();
     CHECK_EQ((status_t)OK, writer->start());
diff --git a/cmds/stagefright/sf2.cpp b/cmds/stagefright/sf2.cpp
index 0f729a3..0d64d2f 100644
--- a/cmds/stagefright/sf2.cpp
+++ b/cmds/stagefright/sf2.cpp
@@ -38,10 +38,10 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/Utils.h>
 
 #include <gui/SurfaceComposerClient.h>
+#include <gui/Surface.h>
 
 #include "include/ESDS.h"
 
@@ -72,7 +72,7 @@
     }
 
     void startAsync() {
-        (new AMessage(kWhatStart, id()))->post();
+        (new AMessage(kWhatStart, this))->post();
     }
 
 protected:
@@ -100,7 +100,7 @@
         if (ctrlc) {
             printf("\n");
             printStatistics();
-            (new AMessage(kWhatStop, id()))->post();
+            (new AMessage(kWhatStop, this))->post();
             ctrlc = false;
         }
         switch (msg->what()) {
@@ -149,13 +149,12 @@
                 mDecodeLooper->registerHandler(mCodec);
 
                 mCodec->setNotificationMessage(
-                        new AMessage(kWhatCodecNotify, id()));
+                        new AMessage(kWhatCodecNotify, this));
 
                 sp<AMessage> format = makeFormat(mSource->getFormat());
 
                 if (mSurface != NULL) {
-                    format->setObject(
-                            "native-window", new NativeWindowWrapper(mSurface));
+                    format->setObject("surface", mSurface);
                 }
 
                 mCodec->initiateSetup(format);
@@ -168,7 +167,7 @@
                 mFinalResult = OK;
                 mSeekState = SEEK_NONE;
 
-                // (new AMessage(kWhatSeek, id()))->post(5000000ll);
+                // (new AMessage(kWhatSeek, this))->post(5000000ll);
                 break;
             }
 
@@ -225,12 +224,12 @@
                     printf((what == CodecBase::kWhatEOS) ? "$\n" : "E\n");
 
                     printStatistics();
-                    (new AMessage(kWhatStop, id()))->post();
+                    (new AMessage(kWhatStop, this))->post();
                 } else if (what == CodecBase::kWhatFlushCompleted) {
                     mSeekState = SEEK_FLUSH_COMPLETED;
                     mCodec->signalResume();
 
-                    (new AMessage(kWhatSeek, id()))->post(5000000ll);
+                    (new AMessage(kWhatSeek, this))->post(5000000ll);
                 } else if (what == CodecBase::kWhatOutputFormatChanged) {
                 } else if (what == CodecBase::kWhatShutdownCompleted) {
                     mDecodeLooper->unregisterHandler(mCodec->id());
@@ -328,14 +327,14 @@
 
             CHECK(size >= 7);
             CHECK_EQ((unsigned)ptr[0], 1u);  // configurationVersion == 1
-            uint8_t profile = ptr[1];
-            uint8_t level = ptr[3];
+            uint8_t profile __unused = ptr[1];
+            uint8_t level __unused = ptr[3];
 
             // There is decodable content out there that fails the following
             // assertion, let's be lenient for now...
             // CHECK((ptr[4] >> 2) == 0x3f);  // reserved
 
-            size_t lengthSize = 1 + (ptr[4] & 3);
+            size_t lengthSize __unused = 1 + (ptr[4] & 3);
 
             // commented out check below as H264_QVGA_500_NO_AUDIO.3gp
             // violates it...
@@ -491,7 +490,7 @@
 
                 if (sizeNeeded > sizeLeft) {
                     if (outBuffer->size() == 0) {
-                        ALOGE("Unable to fit even a single input buffer of size %d.",
+                        ALOGE("Unable to fit even a single input buffer of size %zu.",
                              sizeNeeded);
                     }
                     CHECK_GT(outBuffer->size(), 0u);
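
The sf2 changes track two foundation API updates: AMessage now targets the handler object itself instead of a looper-assigned id, and the decoder is handed the Surface directly under the "surface" key rather than wrapped in a NativeWindowWrapper. A minimal sketch of the handler-pointer messaging style, assuming the stagefright foundation headers are available (the handler and message names are illustrative):

    #include <media/stagefright/foundation/AHandler.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    struct PingHandler : public AHandler {
        enum { kWhatPing = 'ping' };

        void ping() {
            // The message is constructed with the handler pointer ("this"),
            // not with an id obtained from the looper.
            (new AMessage(kWhatPing, this))->post();
        }

    protected:
        virtual void onMessageReceived(const sp<AMessage> &msg) {
            if (msg->what() == kWhatPing) {
                // handle the ping
            }
        }
    };

    // Usage: the handler still has to be registered with a running looper
    // before anything is posted to it.
    //     sp<ALooper> looper = new ALooper;
    //     looper->start();
    //     sp<PingHandler> handler = new PingHandler;
    //     looper->registerHandler(handler);
    //     handler->ping();
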
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 81edcb4..a9c6eda 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -19,6 +19,8 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/time.h>
+#include <sys/types.h>
+#include <sys/stat.h>
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "stagefright"
@@ -506,8 +508,13 @@
     sp<MPEG4Writer> writer =
         new MPEG4Writer(gWriteMP4Filename.string());
 #else
+    int fd = open(gWriteMP4Filename.string(), O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
+    if (fd < 0) {
+        fprintf(stderr, "couldn't open file %s\n", gWriteMP4Filename.string());
+        return;
+    }
     sp<MPEG2TSWriter> writer =
-        new MPEG2TSWriter(gWriteMP4Filename.string());
+        new MPEG2TSWriter(fd);
 #endif
 
     // at most one minute.
@@ -958,7 +965,7 @@
     OMXClient client;
     status_t err = client.connect();
 
-    for (int k = 0; k < argc; ++k) {
+    for (int k = 0; k < argc && err == OK; ++k) {
         bool syncInfoPresent = true;
 
         const char *filename = argv[k];
diff --git a/cmds/stagefright/stream.cpp b/cmds/stagefright/stream.cpp
index 0566d14..1a40e53 100644
--- a/cmds/stagefright/stream.cpp
+++ b/cmds/stagefright/stream.cpp
@@ -269,7 +269,7 @@
         : mEOS(false) {
     }
 
-    virtual void notify(int msg, int ext1, int ext2, const Parcel *obj) {
+    virtual void notify(int msg, int ext1 __unused, int ext2 __unused, const Parcel *obj __unused) {
         Mutex::Autolock autoLock(mLock);
 
         if (msg == MEDIA_ERROR || msg == MEDIA_PLAYBACK_COMPLETE) {
@@ -318,7 +318,7 @@
     ssize_t displayWidth = info.w;
     ssize_t displayHeight = info.h;
 
-    ALOGV("display is %d x %d\n", displayWidth, displayHeight);
+    ALOGV("display is %zd x %zd\n", displayWidth, displayHeight);
 
     sp<SurfaceControl> control =
         composerClient->createSurface(
diff --git a/drm/common/IDrmManagerService.cpp b/drm/common/IDrmManagerService.cpp
index 3f62ed7..b90da1b 100644
--- a/drm/common/IDrmManagerService.cpp
+++ b/drm/common/IDrmManagerService.cpp
@@ -34,6 +34,7 @@
 #include "IDrmManagerService.h"
 
 #define INVALID_BUFFER_LENGTH -1
+#define MAX_BINDER_TRANSACTION_SIZE ((1*1024*1024)-(4096*2))
 
 using namespace android;
 
@@ -933,7 +934,12 @@
 
         //Filling DRM info
         const int infoType = data.readInt32();
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+
+        if (bufferSize > data.dataAvail()) {
+            return BAD_VALUE;
+        }
+
         char* buffer = NULL;
         if (0 < bufferSize) {
             buffer = (char *)data.readInplace(bufferSize);
@@ -986,6 +992,9 @@
 
         const int size = data.readInt32();
         for (int index = 0; index < size; ++index) {
+            if (!data.dataAvail()) {
+                break;
+            }
             const String8 key(data.readString8());
             if (key == String8("FileDescriptorKey")) {
                 char buffer[16];
@@ -1035,7 +1044,12 @@
         const int uniqueId = data.readInt32();
 
         //Filling DRM Rights
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+        if (bufferSize > data.dataAvail()) {
+            reply->writeInt32(BAD_VALUE);
+            return DRM_NO_ERROR;
+        }
+
         const DrmBuffer drmBuffer((char *)data.readInplace(bufferSize), bufferSize);
 
         const String8 mimeType(data.readString8());
@@ -1206,10 +1220,13 @@
         const int convertId = data.readInt32();
 
         //Filling input data
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+        if (bufferSize > data.dataAvail()) {
+            return BAD_VALUE;
+        }
         DrmBuffer* inputData = new DrmBuffer((char *)data.readInplace(bufferSize), bufferSize);
 
-        DrmConvertedStatus*    drmConvertedStatus = convertData(uniqueId, convertId, inputData);
+        DrmConvertedStatus* drmConvertedStatus = convertData(uniqueId, convertId, inputData);
 
         if (NULL != drmConvertedStatus) {
             //Filling Drm Converted Status
@@ -1393,7 +1410,12 @@
         const int decryptUnitId = data.readInt32();
 
         //Filling Header info
-        const int bufferSize = data.readInt32();
+        const uint32_t bufferSize = data.readInt32();
+        if (bufferSize > data.dataAvail()) {
+            reply->writeInt32(BAD_VALUE);
+            clearDecryptHandle(&handle);
+            return DRM_NO_ERROR;
+        }
         DrmBuffer* headerInfo = NULL;
         headerInfo = new DrmBuffer((char *)data.readInplace(bufferSize), bufferSize);
 
@@ -1417,9 +1439,17 @@
         readDecryptHandleFromParcelData(&handle, data);
 
         const int decryptUnitId = data.readInt32();
-        const int decBufferSize = data.readInt32();
+        const uint32_t decBufferSize = data.readInt32();
+        const uint32_t encBufferSize = data.readInt32();
 
-        const int encBufferSize = data.readInt32();
+        if (encBufferSize > data.dataAvail() ||
+            decBufferSize > MAX_BINDER_TRANSACTION_SIZE) {
+            reply->writeInt32(BAD_VALUE);
+            reply->writeInt32(0);
+            clearDecryptHandle(&handle);
+            return DRM_NO_ERROR;
+        }
+
         DrmBuffer* encBuffer
             = new DrmBuffer((char *)data.readInplace(encBufferSize), encBufferSize);
 
@@ -1429,8 +1459,10 @@
 
         DrmBuffer* IV = NULL;
         if (0 != data.dataAvail()) {
-            const int ivBufferlength = data.readInt32();
-            IV = new DrmBuffer((char *)data.readInplace(ivBufferlength), ivBufferlength);
+            const uint32_t ivBufferlength = data.readInt32();
+            if (ivBufferlength <= data.dataAvail()) {
+                IV = new DrmBuffer((char *)data.readInplace(ivBufferlength), ivBufferlength);
+            }
         }
 
         const status_t status
@@ -1477,7 +1509,11 @@
         DecryptHandle handle;
         readDecryptHandleFromParcelData(&handle, data);
 
-        const int numBytes = data.readInt32();
+        const uint32_t numBytes = data.readInt32();
+        if (numBytes > MAX_BINDER_TRANSACTION_SIZE) {
+            reply->writeInt32(BAD_VALUE);
+            return DRM_NO_ERROR;
+        }
         char* buffer = new char[numBytes];
 
         const off64_t offset = data.readInt64();
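
Every IDrmManagerService hunk above applies the same hardening: a length read from the Parcel is treated as unsigned and checked against the bytes actually available in the transaction (or a binder size cap) before readInplace() or an allocation is attempted. A minimal sketch of that validation step, assuming the standard binder Parcel API (the helper name is illustrative):

    #include <binder/Parcel.h>
    #include <utils/Errors.h>

    using namespace android;

    // Read a caller-supplied buffer length and its payload, refusing lengths
    // larger than what the transaction actually carries.
    static status_t readSizedBlob(const Parcel &data,
                                  const char **outBuffer, uint32_t *outSize) {
        const uint32_t bufferSize = data.readInt32();
        if (bufferSize > data.dataAvail()) {
            // A hostile caller claimed more bytes than it actually sent.
            return BAD_VALUE;
        }
        *outBuffer = (bufferSize > 0)
                ? static_cast<const char *>(data.readInplace(bufferSize))
                : NULL;
        *outSize = bufferSize;
        return (bufferSize == 0 || *outBuffer != NULL) ? OK : BAD_VALUE;
    }

    // For sizes that drive an allocation instead of readInplace(), the hunks
    // above additionally cap the value at MAX_BINDER_TRANSACTION_SIZE.
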
diff --git a/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c b/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c
index 9d15835..6a0b3c0 100644
--- a/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c
+++ b/drm/libdrmframework/plugins/forward-lock/internal-format/converter/FwdLockConv.c
@@ -19,6 +19,7 @@
 #include <fcntl.h>
 #include <limits.h>
 #include <pthread.h>
+#include <stdlib.h>
 #include <string.h>
 #include <sys/stat.h>
 #include <unistd.h>
diff --git a/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp b/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp
index ee3189b..eeb64c3 100644
--- a/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp
+++ b/drm/mediadrm/plugins/clearkey/CryptoFactory.cpp
@@ -43,10 +43,18 @@
         return android::BAD_VALUE;
     }
 
-    android::sp<Session> session = SessionLibrary::get()->findSession(
-            data, size);
-    *plugin = new CryptoPlugin(session);
-    return android::OK;
+    android::Vector<uint8_t> sessionId;
+    sessionId.appendArray(reinterpret_cast<const uint8_t*>(data), size);
+
+    CryptoPlugin *clearKeyPlugin = new CryptoPlugin(sessionId);
+    android::status_t result = clearKeyPlugin->getInitStatus();
+    if (result == android::OK) {
+        *plugin = clearKeyPlugin;
+    } else {
+        delete clearKeyPlugin;
+        *plugin = NULL;
+    }
+    return result;
 }
 
 } // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
index adad136..53cbf80 100644
--- a/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/CryptoPlugin.cpp
@@ -19,9 +19,9 @@
 #include <utils/Log.h>
 
 #include <media/stagefright/MediaErrors.h>
-#include <utils/Errors.h>
 
 #include "CryptoPlugin.h"
+#include "SessionLibrary.h"
 
 namespace clearkeydrm {
 
@@ -80,4 +80,18 @@
     }
 }
 
+android::status_t CryptoPlugin::setMediaDrmSession(
+        const android::Vector<uint8_t>& sessionId) {
+    if (!sessionId.size()) {
+        mSession.clear();
+    } else {
+        mSession = SessionLibrary::get()->findSession(sessionId);
+        if (!mSession.get()) {
+            return android::ERROR_DRM_SESSION_NOT_OPENED;
+        }
+    }
+    return android::OK;
+}
+
+
 }  // namespace clearkeydrm
diff --git a/drm/mediadrm/plugins/clearkey/CryptoPlugin.h b/drm/mediadrm/plugins/clearkey/CryptoPlugin.h
index 002d9e0..fd38f28 100644
--- a/drm/mediadrm/plugins/clearkey/CryptoPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/CryptoPlugin.h
@@ -31,7 +31,10 @@
 
 class CryptoPlugin : public android::CryptoPlugin {
 public:
-    CryptoPlugin(const android::sp<Session>& session) : mSession(session) {}
+    CryptoPlugin(const android::Vector<uint8_t>& sessionId) {
+        mInitStatus = setMediaDrmSession(sessionId);
+    }
+
     virtual ~CryptoPlugin() {}
 
     virtual bool requiresSecureDecoderComponent(const char* mime) const {
@@ -45,10 +48,16 @@
             const SubSample* subSamples, size_t numSubSamples,
             void* dstPtr, android::AString* errorDetailMsg);
 
+    virtual android::status_t setMediaDrmSession(
+            const android::Vector<uint8_t>& sessionId);
+
+    android::status_t getInitStatus() const {return mInitStatus;}
+
 private:
     DISALLOW_EVIL_CONSTRUCTORS(CryptoPlugin);
 
     android::sp<Session> mSession;
+    android::status_t mInitStatus;
 };
 
 } // namespace clearkeydrm
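
Taken together with the CryptoFactory change above, CryptoPlugin now follows a construct-then-validate shape: the constructor records the result of setMediaDrmSession() in mInitStatus, and the factory only hands the plugin to the caller when getInitStatus() is OK, deleting it otherwise. The same pattern reduced to a generic, self-contained sketch (the types here are illustrative, not the clearkey classes):

    #include <memory>
    #include <iostream>

    // Generic construct-then-validate: the constructor stores an init status,
    // the factory only releases the object when that status is OK.
    enum Status { OK = 0, ERROR_SESSION_NOT_OPENED = -1 };

    class Plugin {
    public:
        explicit Plugin(bool sessionExists)
            : mInitStatus(sessionExists ? OK : ERROR_SESSION_NOT_OPENED) {}
        Status getInitStatus() const { return mInitStatus; }
    private:
        Status mInitStatus;
    };

    static Status createPlugin(bool sessionExists, Plugin **out) {
        std::unique_ptr<Plugin> plugin(new Plugin(sessionExists));
        Status result = plugin->getInitStatus();
        *out = (result == OK) ? plugin.release() : nullptr;
        return result;
    }

    int main() {
        Plugin *p = nullptr;
        std::cout << "missing session -> " << createPlugin(false, &p) << "\n"; // -1, p stays null
        std::cout << "valid session   -> " << createPlugin(true, &p) << "\n";  // 0
        delete p;
        return 0;
    }
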
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index 96fca94..e5ee403 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -37,7 +37,9 @@
 
 status_t DrmPlugin::closeSession(const Vector<uint8_t>& sessionId) {
     sp<Session> session = mSessionLibrary->findSession(sessionId);
-    mSessionLibrary->destroySession(session);
+    if (session.get()) {
+        mSessionLibrary->destroySession(session);
+    }
     return android::OK;
 }
 
@@ -48,14 +50,18 @@
         KeyType keyType,
         const KeyedVector<String8, String8>& optionalParameters,
         Vector<uint8_t>& request,
-        String8& defaultUrl) {
+        String8& defaultUrl,
+        DrmPlugin::KeyRequestType *keyRequestType) {
     UNUSED(optionalParameters);
     if (keyType != kKeyType_Streaming) {
         return android::ERROR_DRM_CANNOT_HANDLE;
     }
-
-    sp<Session> session = mSessionLibrary->findSession(scope);
+    *keyRequestType = DrmPlugin::kKeyRequestType_Initial;
     defaultUrl.clear();
+    sp<Session> session = mSessionLibrary->findSession(scope);
+    if (!session.get()) {
+        return android::ERROR_DRM_SESSION_NOT_OPENED;
+    }
     return session->getKeyRequest(initData, initDataType, &request);
 }
 
@@ -64,6 +70,9 @@
         const Vector<uint8_t>& response,
         Vector<uint8_t>& keySetId) {
     sp<Session> session = mSessionLibrary->findSession(scope);
+    if (!session.get()) {
+        return android::ERROR_DRM_SESSION_NOT_OPENED;
+    }
     status_t res = session->provideKeyResponse(response);
     if (res == android::OK) {
         keySetId.clear();
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index 6139f1f..ba4aefe 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -54,7 +54,8 @@
             KeyType keyType,
             const KeyedVector<String8, String8>& optionalParameters,
             Vector<uint8_t>& request,
-            String8& defaultUrl);
+            String8& defaultUrl,
+            DrmPlugin::KeyRequestType *keyRequestType);
 
     virtual status_t provideKeyResponse(
             const Vector<uint8_t>& scope,
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
index d047c53..46d7f77 100644
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
+++ b/drm/mediadrm/plugins/clearkey/SessionLibrary.cpp
@@ -63,13 +63,6 @@
     return mSessions.valueFor(sessionId);
 }
 
-const sp<Session>& SessionLibrary::findSession(
-        const void* data, size_t size) {
-    Vector<uint8_t> sessionId;
-    sessionId.appendArray(reinterpret_cast<const uint8_t*>(data), size);
-    return findSession(sessionId);
-}
-
 void SessionLibrary::destroySession(const sp<Session>& session) {
     Mutex::Autolock lock(mSessionsLock);
     mSessions.removeItem(session->sessionId());
diff --git a/drm/mediadrm/plugins/clearkey/SessionLibrary.h b/drm/mediadrm/plugins/clearkey/SessionLibrary.h
index 56c8828..199ad64 100644
--- a/drm/mediadrm/plugins/clearkey/SessionLibrary.h
+++ b/drm/mediadrm/plugins/clearkey/SessionLibrary.h
@@ -36,8 +36,6 @@
     const android::sp<Session>& findSession(
             const android::Vector<uint8_t>& sessionId);
 
-    const android::sp<Session>& findSession(const void* data, size_t size);
-
     void destroySession(const android::sp<Session>& session);
 
 private:
@@ -50,7 +48,7 @@
 
     android::Mutex mSessionsLock;
     uint32_t mNextSessionId;
-    android::KeyedVector<android::Vector<uint8_t>, android::sp<Session> >
+    android::DefaultKeyedVector<android::Vector<uint8_t>, android::sp<Session> >
             mSessions;
 };
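
The switch from KeyedVector to DefaultKeyedVector matters because findSession() returns mSessions.valueFor(sessionId) directly: with a plain KeyedVector a missing key indexes past the end of the vector, whereas DefaultKeyedVector returns its default value, a NULL sp<Session>, which the new NULL checks in DrmPlugin and CryptoPlugin rely on. A small sketch of that behaviour, assuming libutils:

    #include <utils/KeyedVector.h>
    #include <utils/RefBase.h>
    #include <utils/String8.h>

    using namespace android;

    struct Session : public RefBase {};

    int main() {
        // The default value handed back on a miss is a NULL strong pointer.
        DefaultKeyedVector<String8, sp<Session> > sessions;
        sessions.add(String8("session-1"), sp<Session>(new Session));

        sp<Session> hit  = sessions.valueFor(String8("session-1"));
        sp<Session> miss = sessions.valueFor(String8("session-2"));

        // hit.get() != NULL, miss.get() == NULL: callers can test the result
        // instead of reading past the end of the vector.
        return (hit.get() != NULL && miss.get() == NULL) ? 0 : 1;
    }
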
 
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
index 7eac0a1..851ad2c 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.cpp
@@ -56,7 +56,7 @@
         return true;
     }
 
-    status_t MockDrmFactory::createDrmPlugin(const uint8_t uuid[16], DrmPlugin **plugin)
+    status_t MockDrmFactory::createDrmPlugin(const uint8_t /* uuid */[16], DrmPlugin **plugin)
     {
         *plugin = new MockDrmPlugin();
         return OK;
@@ -68,8 +68,9 @@
         return (!memcmp(uuid, mock_uuid, sizeof(mock_uuid)));
     }
 
-    status_t MockCryptoFactory::createPlugin(const uint8_t uuid[16], const void *data,
-                                             size_t size, CryptoPlugin **plugin)
+    status_t MockCryptoFactory::createPlugin(const uint8_t /* uuid */[16],
+                                             const void * /* data */,
+                                             size_t /* size */, CryptoPlugin **plugin)
     {
         *plugin = new MockCryptoPlugin();
         return OK;
@@ -111,7 +112,8 @@
                                           Vector<uint8_t> const &initData,
                                           String8 const &mimeType, KeyType keyType,
                                           KeyedVector<String8, String8> const &optionalParameters,
-                                          Vector<uint8_t> &request, String8 &defaultUrl)
+                                          Vector<uint8_t> &request, String8 &defaultUrl,
+                                          KeyRequestType *keyRequestType)
     {
         Mutex::Autolock lock(mLock);
         ALOGD("MockDrmPlugin::getKeyRequest(sessionId=%s, initData=%s, mimeType=%s"
@@ -149,6 +151,7 @@
         // Properties used in mock test, set by cts test app returned from mock plugin
         //   byte[] mock-request       -> request
         //   string mock-default-url   -> defaultUrl
+        //   string mock-keyRequestType -> keyRequestType
 
         index = mByteArrayProperties.indexOfKey(String8("mock-request"));
         if (index < 0) {
@@ -165,6 +168,16 @@
         } else {
             defaultUrl = mStringProperties.valueAt(index);
         }
+
+        index = mStringProperties.indexOfKey(String8("mock-keyRequestType"));
+        if (index < 0) {
+            ALOGD("Missing 'mock-keyRequestType' parameter for mock");
+            return BAD_VALUE;
+        } else {
+            *keyRequestType = static_cast<KeyRequestType>(
+                atoi(mStringProperties.valueAt(index).string()));
+        }
+
         return OK;
     }
 
@@ -254,8 +267,8 @@
         return OK;
     }
 
-    status_t MockDrmPlugin::getProvisionRequest(String8 const &certType,
-                                                String8 const &certAuthority,
+    status_t MockDrmPlugin::getProvisionRequest(String8 const & /* certType */,
+                                                String8 const & /* certAuthority */,
                                                 Vector<uint8_t> &request,
                                                 String8 &defaultUrl)
     {
@@ -285,8 +298,8 @@
     }
 
     status_t MockDrmPlugin::provideProvisionResponse(Vector<uint8_t> const &response,
-                                                     Vector<uint8_t> &certificate,
-                                                     Vector<uint8_t> &wrappedKey)
+                                                     Vector<uint8_t> & /* certificate */,
+                                                     Vector<uint8_t> & /* wrappedKey */)
     {
         Mutex::Autolock lock(mLock);
         ALOGD("MockDrmPlugin::provideProvisionResponse(%s)",
@@ -305,7 +318,8 @@
         return OK;
     }
 
-    status_t MockDrmPlugin::getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop)
+    status_t MockDrmPlugin::getSecureStop(Vector<uint8_t> const & /* ssid */,
+                                          Vector<uint8_t> & secureStop)
     {
         Mutex::Autolock lock(mLock);
         ALOGD("MockDrmPlugin::getSecureStop()");
@@ -427,6 +441,63 @@
                   pData ? vectorToString(*pData) : "{}");
 
             sendEvent(eventType, extra, pSessionId, pData);
+        } else if (name == "mock-send-expiration-update") {
+            int64_t expiryTimeMS;
+            sscanf(value.string(), "%jd", &expiryTimeMS);
+
+            Vector<uint8_t> const *pSessionId = NULL;
+            ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-event-session-id"));
+            if (index >= 0) {
+                pSessionId = &mByteArrayProperties[index];
+            }
+
+            ALOGD("sending expiration-update from mock drm plugin: %jd %s",
+                  expiryTimeMS, pSessionId ? vectorToString(*pSessionId) : "{}");
+
+            sendExpirationUpdate(pSessionId, expiryTimeMS);
+        } else if (name == "mock-send-keys-change") {
+            Vector<uint8_t> const *pSessionId = NULL;
+            ssize_t index = mByteArrayProperties.indexOfKey(String8("mock-event-session-id"));
+            if (index >= 0) {
+                pSessionId = &mByteArrayProperties[index];
+            }
+
+            ALOGD("sending keys-change from mock drm plugin: %s",
+                  pSessionId ? vectorToString(*pSessionId) : "{}");
+
+            Vector<DrmPlugin::KeyStatus> keyStatusList;
+            DrmPlugin::KeyStatus keyStatus;
+            uint8_t keyId1[] = {'k', 'e', 'y', '1'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId1, sizeof(keyId1));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_Usable;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId2[] = {'k', 'e', 'y', '2'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId2, sizeof(keyId2));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_Expired;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId3[] = {'k', 'e', 'y', '3'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId3, sizeof(keyId3));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_OutputNotAllowed;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId4[] = {'k', 'e', 'y', '4'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId4, sizeof(keyId4));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_StatusPending;
+            keyStatusList.add(keyStatus);
+
+            uint8_t keyId5[] = {'k', 'e', 'y', '5'};
+            keyStatus.mKeyId.clear();
+            keyStatus.mKeyId.appendArray(keyId5, sizeof(keyId5));
+            keyStatus.mType = DrmPlugin::kKeyStatusType_InternalError;
+            keyStatusList.add(keyStatus);
+
+            sendKeysChange(pSessionId, &keyStatusList, true);
         } else {
             mStringProperties.add(name, value);
         }
@@ -728,7 +799,7 @@
     ssize_t
     MockCryptoPlugin::decrypt(bool secure, const uint8_t key[16], const uint8_t iv[16],
                               Mode mode, const void *srcPtr, const SubSample *subSamples,
-                              size_t numSubSamples, void *dstPtr, AString *errorDetailMsg)
+                              size_t numSubSamples, void *dstPtr, AString * /* errorDetailMsg */)
     {
         ALOGD("MockCryptoPlugin::decrypt(secure=%d, key=%s, iv=%s, mode=%d, src=%p, "
               "subSamples=%s, dst=%p)",
@@ -757,7 +828,7 @@
     {
         String8 result;
         for (size_t i = 0; i < numSubSamples; i++) {
-            result.appendFormat("[%zu] {clear:%zu, encrypted:%zu} ", i,
+            result.appendFormat("[%zu] {clear:%u, encrypted:%u} ", i,
                                 subSamples[i].mNumBytesOfClearData,
                                 subSamples[i].mNumBytesOfEncryptedData);
         }
diff --git a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
index d1d8058..d0f2ddb 100644
--- a/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
+++ b/drm/mediadrm/plugins/mock/MockDrmCryptoPlugin.h
@@ -62,7 +62,8 @@
                                Vector<uint8_t> const &initData,
                                String8 const &mimeType, KeyType keyType,
                                KeyedVector<String8, String8> const &optionalParameters,
-                               Vector<uint8_t> &request, String8 &defaultUrl);
+                               Vector<uint8_t> &request, String8 &defaultUrl,
+                               KeyRequestType *keyRequestType);
 
         status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                     Vector<uint8_t> const &response,
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index 1254d3c..953d711 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -56,7 +56,7 @@
      * thread-safety, it simply prevents the camera_metadata_t pointer returned
      * here from being accidentally invalidated by CameraMetadata operations.
      */
-    const camera_metadata_t* getAndLock();
+    const camera_metadata_t* getAndLock() const;
 
     /**
      * Unlock the CameraMetadata for use again. After this unlock, the pointer
@@ -208,7 +208,7 @@
 
   private:
     camera_metadata_t *mBuffer;
-    bool               mLocked;
+    mutable bool       mLocked;
 
     /**
      * Check if tag has a given type
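
getAndLock() can be const while still tracking the lock because mLocked is now mutable: locking for read access doesn't change the metadata's observable contents, so the flag is treated as internal bookkeeping. A minimal sketch of the mutable-flag pattern, detached from CameraMetadata:

    #include <cassert>

    // Logical constness: locking for read access does not change the
    // observable value, so the flag that tracks it can be mutable and
    // set from a const method.
    class LockableBlob {
    public:
        LockableBlob() : mValue(42), mLocked(false) {}

        const int *getAndLock() const {
            mLocked = true;          // allowed because mLocked is mutable
            return &mValue;
        }

        void unlock(const int *ptr) const {
            assert(ptr == &mValue && mLocked);
            mLocked = false;
        }

    private:
        int          mValue;
        mutable bool mLocked;
    };

    int main() {
        const LockableBlob blob;     // even a const instance can be locked
        const int *p = blob.getAndLock();
        int v = *p;
        blob.unlock(p);
        return v == 42 ? 0 : 1;
    }
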
diff --git a/include/camera/CameraParameters.h b/include/camera/CameraParameters.h
index c6074fc..ba33ffe 100644
--- a/include/camera/CameraParameters.h
+++ b/include/camera/CameraParameters.h
@@ -108,6 +108,9 @@
      */
     void getSupportedPreviewFormats(Vector<int>& formats) const;
 
+    // Returns true if no keys are present
+    bool isEmpty() const;
+
     // Parameter keys to communicate between camera application and driver.
     // The access (read/write, read only, or write only) is viewed from the
     // perspective of applications, not driver.
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
index f7f06bb..1b68b5f 100644
--- a/include/camera/ICameraService.h
+++ b/include/camera/ICameraService.h
@@ -25,8 +25,6 @@
 
 class ICamera;
 class ICameraClient;
-class IProCameraUser;
-class IProCameraCallbacks;
 class ICameraServiceListener;
 class ICameraDeviceUser;
 class ICameraDeviceCallbacks;
@@ -44,7 +42,6 @@
         GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
         GET_CAMERA_INFO,
         CONNECT,
-        CONNECT_PRO,
         CONNECT_DEVICE,
         ADD_LISTENER,
         REMOVE_LISTENER,
@@ -53,6 +50,8 @@
         GET_LEGACY_PARAMETERS,
         SUPPORTS_CAMERA_API,
         CONNECT_LEGACY,
+        SET_TORCH_MODE,
+        NOTIFY_SYSTEM_EVENT,
     };
 
     enum {
@@ -65,13 +64,34 @@
     };
 
     enum {
+        CAMERA_TYPE_BACKWARD_COMPATIBLE = 0,
+        CAMERA_TYPE_ALL = 1,
+    };
+
+    enum {
         CAMERA_HAL_API_VERSION_UNSPECIFIED = -1
-      };
+    };
+
+    /**
+     * Keep up-to-date with declarations in
+     * frameworks/base/services/core/java/com/android/server/camera/CameraService.java
+     *
+     * These event codes are intended to be used with the notifySystemEvent call.
+     */
+    enum {
+        NO_EVENT = 0,
+        USER_SWITCHED,
+    };
 
 public:
     DECLARE_META_INTERFACE(CameraService);
 
+    // Get the number of cameras that support basic color camera operation
+    // (type CAMERA_TYPE_BACKWARD_COMPATIBLE)
     virtual int32_t  getNumberOfCameras() = 0;
+    // Get the number of cameras of the specified type, one of CAMERA_TYPE_*
+    // enums
+    virtual int32_t  getNumberOfCameras(int cameraType) = 0;
     virtual status_t getCameraInfo(int cameraId,
             /*out*/
             struct CameraInfo* cameraInfo) = 0;
@@ -104,13 +124,6 @@
             /*out*/
             sp<ICamera>& device) = 0;
 
-    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            /*out*/
-            sp<IProCameraUser>& device) = 0;
-
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
@@ -142,6 +155,26 @@
             int clientUid,
             /*out*/
             sp<ICamera>& device) = 0;
+
+    /**
+     * Turn on or off a camera's torch mode. Torch mode will be turned off by
+     * camera service if the lastest client binder that turns it on dies.
+     *
+     * return values:
+     * 0:       on a successful operation.
+     * -ENOSYS: the camera device doesn't support this operation. It it returned
+     *          if and only if android.flash.into.available is false.
+     * -EBUSY:  the camera device is opened.
+     * -EINVAL: camera_id is invalid or clientBinder is NULL when enabling a
+     *          torch mode.
+     */
+    virtual status_t setTorchMode(const String16& cameraId, bool enabled,
+            const sp<IBinder>& clientBinder) = 0;
+
+    /**
+     * Notify the camera service of a system event.  Should only be called from system_server.
+     */
+    virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) = 0;
 };
 
 // ----------------------------------------------------------------------------
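
For reference, a caller-side sketch of the new setTorchMode() entry point; it assumes the usual binder lookup, that the camera service is registered under the name "media.camera", and an illustrative camera id of "0":

    #include <binder/Binder.h>
    #include <binder/IInterface.h>
    #include <binder/IServiceManager.h>
    #include <camera/ICameraService.h>
    #include <utils/Errors.h>
    #include <utils/String16.h>

    using namespace android;

    // Turn the torch of camera "0" on; the client-side BBinder lets the
    // service turn the torch back off again if this client dies.
    static status_t torchOn() {
        sp<IBinder> binder =
                defaultServiceManager()->getService(String16("media.camera"));
        if (binder == NULL) {
            return NAME_NOT_FOUND;
        }
        sp<ICameraService> service = interface_cast<ICameraService>(binder);
        sp<IBinder> clientBinder = new BBinder();
        return service->setTorchMode(String16("0"), true /* enabled */,
                                     clientBinder);
    }
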
diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h
index 0a0e43a..709ff31 100644
--- a/include/camera/ICameraServiceListener.h
+++ b/include/camera/ICameraServiceListener.h
@@ -66,9 +66,35 @@
         STATUS_UNKNOWN          = 0xFFFFFFFF,
     };
 
+    /**
+     * The torch mode status of a camera.
+     *
+     * Initial status will be transmitted with onTorchStatusChanged immediately
+     * after this listener is added to the service listener list.
+     *
+     * The enums should be set to values matching
+     * include/hardware/camera_common.h
+     */
+    enum TorchStatus {
+        // The camera's torch mode has become not available to use via
+        // setTorchMode().
+        TORCH_STATUS_NOT_AVAILABLE  = TORCH_MODE_STATUS_NOT_AVAILABLE,
+        // The camera's torch mode is off and available to be turned on via
+        // setTorchMode().
+        TORCH_STATUS_AVAILABLE_OFF  = TORCH_MODE_STATUS_AVAILABLE_OFF,
+        // The camera's torch mode is on and available to be turned off via
+        // setTorchMode().
+        TORCH_STATUS_AVAILABLE_ON   = TORCH_MODE_STATUS_AVAILABLE_ON,
+
+        // Use to initialize variables only
+        TORCH_STATUS_UNKNOWN        = 0xFFFFFFFF,
+    };
+
     DECLARE_META_INTERFACE(CameraServiceListener);
 
     virtual void onStatusChanged(Status status, int32_t cameraId) = 0;
+
+    virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) = 0;
 };
 
 // ----------------------------------------------------------------------------
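
Existing listener implementations have to pick up the new pure virtual as well. A minimal sketch of a listener covering both callbacks, assuming the BnCameraServiceListener stub declared in this header (the class name and log tag are illustrative):

    #define LOG_TAG "CameraListenerSketch"
    #include <utils/Log.h>
    #include <utils/String8.h>
    #include <utils/String16.h>
    #include <camera/ICameraServiceListener.h>

    namespace android {

    // Logs camera availability changes and the new torch availability changes.
    class LoggingCameraServiceListener : public BnCameraServiceListener {
    public:
        virtual void onStatusChanged(Status status, int32_t cameraId) {
            ALOGI("camera %d status -> %d", cameraId, static_cast<int>(status));
        }

        virtual void onTorchStatusChanged(TorchStatus status,
                                          const String16& cameraId) {
            ALOGI("camera %s torch status -> %d",
                    String8(cameraId).string(), static_cast<int>(status));
        }
    };

    }  // namespace android
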
diff --git a/include/camera/ICameraServiceProxy.h b/include/camera/ICameraServiceProxy.h
new file mode 100644
index 0000000..12a555f
--- /dev/null
+++ b/include/camera/ICameraServiceProxy.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+#define ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class ICameraServiceProxy : public IInterface {
+public:
+    enum {
+        PING_FOR_USER_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
+    };
+
+    DECLARE_META_INTERFACE(CameraServiceProxy);
+
+    virtual void pingForUserUpdate() = 0;
+};
+
+class BnCameraServiceProxy: public BnInterface<ICameraServiceProxy>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+
+
+}; // namespace android
+
+#endif // ANDROID_HARDWARE_ICAMERASERVICEPROXY_H
+
+
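
On the receiving side the proxy is a one-method binder interface. A minimal native sketch of an implementation, purely illustrative; whether the real proxy lives in Java or native code is outside this patch:

    #define LOG_TAG "CameraProxySketch"
    #include <utils/Log.h>
    #include <camera/ICameraServiceProxy.h>

    namespace android {

    // The camera service pings this interface so the proxy side can re-query
    // the current user; onTransact() is provided by the Bn class.
    class CameraServiceProxyStub : public BnCameraServiceProxy {
    public:
        virtual void pingForUserUpdate() {
            ALOGI("pingForUserUpdate received");
        }
    };

    }  // namespace android
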
diff --git a/include/camera/IProCameraCallbacks.h b/include/camera/IProCameraCallbacks.h
deleted file mode 100644
index e8abb89..0000000
--- a/include/camera/IProCameraCallbacks.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_IPROCAMERA_CALLBACKS_H
-#define ANDROID_HARDWARE_IPROCAMERA_CALLBACKS_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <utils/Timers.h>
-#include <system/camera.h>
-
-struct camera_metadata;
-
-namespace android {
-
-class IProCameraCallbacks : public IInterface
-{
-    /**
-     * Keep up-to-date with IProCameraCallbacks.aidl in frameworks/base
-     */
-public:
-    DECLARE_META_INTERFACE(ProCameraCallbacks);
-
-    virtual void            notifyCallback(int32_t msgType,
-                                           int32_t ext1,
-                                           int32_t ext2) = 0;
-
-    enum LockStatus {
-        LOCK_ACQUIRED,
-        LOCK_RELEASED,
-        LOCK_STOLEN,
-    };
-
-    virtual void            onLockStatusChanged(LockStatus newLockStatus) = 0;
-
-    /** Missing by design: implementation is client-side in ProCamera.cpp **/
-    // virtual void onBufferReceived(int streamId,
-    //                               const CpuConsumer::LockedBufer& buf);
-    virtual void            onResultReceived(int32_t requestId,
-                                             camera_metadata* result) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnProCameraCallbacks : public BnInterface<IProCameraCallbacks>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/IProCameraUser.h b/include/camera/IProCameraUser.h
deleted file mode 100644
index 2ccc4d2..0000000
--- a/include/camera/IProCameraUser.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_IPROCAMERAUSER_H
-#define ANDROID_HARDWARE_IPROCAMERAUSER_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <utils/String8.h>
-#include <camera/IProCameraCallbacks.h>
-
-struct camera_metadata;
-
-namespace android {
-
-class IProCameraUserClient;
-class IGraphicBufferProducer;
-class Surface;
-
-class IProCameraUser: public IInterface
-{
-    /**
-     * Keep up-to-date with IProCameraUser.aidl in frameworks/base
-     */
-public:
-    DECLARE_META_INTERFACE(ProCameraUser);
-
-    virtual void            disconnect() = 0;
-
-    // connect to the service, given a callbacks listener
-    virtual status_t        connect(const sp<IProCameraCallbacks>& callbacks)
-                                                                            = 0;
-
-    /**
-     * Locking
-     **/
-    virtual status_t        exclusiveTryLock() = 0;
-    virtual status_t        exclusiveLock() = 0;
-    virtual status_t        exclusiveUnlock() = 0;
-
-    virtual bool            hasExclusiveLock() = 0;
-
-    /**
-     * Request Handling
-     **/
-
-    // Note that the callee gets a copy of the metadata.
-    virtual int             submitRequest(struct camera_metadata* metadata,
-                                          bool streaming = false) = 0;
-    virtual status_t        cancelRequest(int requestId) = 0;
-
-    virtual status_t        deleteStream(int streamId) = 0;
-    virtual status_t        createStream(
-                                      int width, int height, int format,
-                                      const sp<IGraphicBufferProducer>& bufferProducer,
-                                      /*out*/
-                                      int* streamId) = 0;
-
-    // Create a request object from a template.
-    virtual status_t        createDefaultRequest(int templateId,
-                                                 /*out*/
-                                                 camera_metadata** request)
-                                                                           = 0;
-
-    // Get static camera metadata
-    virtual status_t        getCameraInfo(int cameraId,
-                                          /*out*/
-                                          camera_metadata** info) = 0;
-
-};
-
-// ----------------------------------------------------------------------------
-
-class BnProCameraUser: public BnInterface<IProCameraUser>
-{
-public:
-    virtual status_t    onTransact( uint32_t code,
-                                    const Parcel& data,
-                                    Parcel* reply,
-                                    uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/ProCamera.h b/include/camera/ProCamera.h
deleted file mode 100644
index e9b687a..0000000
--- a/include/camera/ProCamera.h
+++ /dev/null
@@ -1,319 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_PRO_CAMERA_H
-#define ANDROID_HARDWARE_PRO_CAMERA_H
-
-#include <utils/Timers.h>
-#include <utils/KeyedVector.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <system/camera.h>
-#include <camera/IProCameraCallbacks.h>
-#include <camera/IProCameraUser.h>
-#include <camera/Camera.h>
-#include <camera/CameraMetadata.h>
-#include <camera/ICameraService.h>
-#include <gui/CpuConsumer.h>
-
-#include <gui/Surface.h>
-
-#include <utils/Condition.h>
-#include <utils/Mutex.h>
-
-#include <camera/CameraBase.h>
-
-struct camera_metadata;
-
-namespace android {
-
-// All callbacks on this class are concurrent
-// (they come from separate threads)
-class ProCameraListener : virtual public RefBase
-{
-public:
-    virtual void notify(int32_t msgType, int32_t ext1, int32_t ext2) = 0;
-
-    // Lock has been acquired. Write operations now available.
-    virtual void onLockAcquired() = 0;
-    // Lock has been released with exclusiveUnlock.
-    virtual void onLockReleased() = 0;
-    // Lock has been stolen by another client.
-    virtual void onLockStolen() = 0;
-
-    // Lock free.
-    virtual void onTriggerNotify(int32_t msgType, int32_t ext1, int32_t ext2)
-                                                                            = 0;
-    // onFrameAvailable and OnResultReceived can come in with any order,
-    // use android.sensor.timestamp and LockedBuffer.timestamp to correlate them
-
-    /**
-      * A new metadata buffer has been received.
-      * -- Ownership of request passes on to the callee, free with
-      *    free_camera_metadata.
-      */
-    virtual void onResultReceived(int32_t frameId, camera_metadata* result) = 0;
-
-    // TODO: make onFrameAvailable pure virtual
-
-    // A new frame buffer has been received for this stream.
-    // -- This callback only fires for createStreamCpu streams
-    // -- A buffer may be obtained by calling cpuConsumer->lockNextBuffer
-    // -- Use buf.timestamp to correlate with result's android.sensor.timestamp
-    // -- The buffer should be accessed with CpuConsumer::lockNextBuffer
-    //      and CpuConsumer::unlockBuffer
-    virtual void onFrameAvailable(int /*streamId*/,
-                                  const sp<CpuConsumer>& /*cpuConsumer*/) {
-    }
-
-};
-
-class ProCamera;
-
-template <>
-struct CameraTraits<ProCamera>
-{
-    typedef ProCameraListener     TCamListener;
-    typedef IProCameraUser        TCamUser;
-    typedef IProCameraCallbacks   TCamCallbacks;
-    typedef status_t (ICameraService::*TCamConnectService)(const sp<IProCameraCallbacks>&,
-                                                           int, const String16&, int,
-                                                           /*out*/
-                                                           sp<IProCameraUser>&);
-    static TCamConnectService     fnConnectService;
-};
-
-
-class ProCamera :
-    public CameraBase<ProCamera>,
-    public BnProCameraCallbacks
-{
-public:
-    /**
-     * Connect a shared camera. By default access is restricted to read only
-     * (Lock free) operations. To be able to submit custom requests a lock needs
-     * to be acquired with exclusive[Try]Lock.
-     */
-    static sp<ProCamera> connect(int cameraId);
-    virtual ~ProCamera();
-
-    /**
-     * Exclusive Locks:
-     * - We may request exclusive access to a camera if no other
-     *   clients are using the camera. This works as a traditional
-     *   client, writing/reading any camera state.
-     * - An application opening the camera (a regular 'Camera') will
-     *   always steal away the exclusive lock from a ProCamera,
-     *   this will call onLockReleased.
-     * - onLockAcquired will be called again once it is possible
-     *   to again exclusively lock the camera.
-     *
-     */
-
-    /**
-     * All exclusiveLock/unlock functions are asynchronous. The remote endpoint
-     * shall not block while waiting to acquire the lock. Instead the lock
-     * notifications will come in asynchronously on the listener.
-     */
-
-    /**
-      * Attempt to acquire the lock instantly (non-blocking)
-      * - If this succeeds, you do not need to wait for onLockAcquired
-      *   but the event will still be fired
-      *
-      * Returns -EBUSY if already locked. 0 on success.
-      */
-    status_t exclusiveTryLock();
-    // always returns 0. wait for onLockAcquired before lock is acquired.
-    status_t exclusiveLock();
-    // release a lock if we have one, or cancel the lock request.
-    status_t exclusiveUnlock();
-
-    // exclusive lock = do whatever we want. no lock = read only.
-    bool hasExclusiveLock();
-
-    /**
-     * < 0 error, >= 0 the request ID. streaming to have the request repeat
-     *    until cancelled.
-     * The request queue is flushed when a lock is released or stolen
-     *    if not locked will return PERMISSION_DENIED
-     */
-    int submitRequest(const struct camera_metadata* metadata,
-                                                        bool streaming = false);
-    // if not locked will return PERMISSION_DENIED, BAD_VALUE if requestId bad
-    status_t cancelRequest(int requestId);
-
-    /**
-     * Ask for a stream to be enabled.
-     * Lock free. Service maintains counter of streams.
-     */
-    status_t requestStream(int streamId);
-// TODO: remove requestStream, its useless.
-
-    /**
-      * Delete a stream.
-      * Lock free.
-      *
-      * NOTE: As a side effect this cancels ALL streaming requests.
-      *
-      * Errors: BAD_VALUE if unknown stream ID.
-      *         PERMISSION_DENIED if the stream wasn't yours
-      */
-    status_t deleteStream(int streamId);
-
-    /**
-      * Create a new HW stream, whose sink will be the window.
-      * Lock free. Service maintains counter of streams.
-      * Errors: -EBUSY if too many streams created
-      */
-    status_t createStream(int width, int height, int format,
-                          const sp<Surface>& surface,
-                          /*out*/
-                          int* streamId);
-
-    /**
-      * Create a new HW stream, whose sink will be the SurfaceTexture.
-      * Lock free. Service maintains counter of streams.
-      * Errors: -EBUSY if too many streams created
-      */
-    status_t createStream(int width, int height, int format,
-                          const sp<IGraphicBufferProducer>& bufferProducer,
-                          /*out*/
-                          int* streamId);
-    status_t createStreamCpu(int width, int height, int format,
-                          int heapCount,
-                          /*out*/
-                          sp<CpuConsumer>* cpuConsumer,
-                          int* streamId);
-    status_t createStreamCpu(int width, int height, int format,
-                          int heapCount,
-                          bool synchronousMode,
-                          /*out*/
-                          sp<CpuConsumer>* cpuConsumer,
-                          int* streamId);
-
-    // Create a request object from a template.
-    status_t createDefaultRequest(int templateId,
-                                 /*out*/
-                                  camera_metadata** request) const;
-
-    // Get static camera metadata
-    camera_metadata* getCameraInfo(int cameraId);
-
-    // Blocks until a frame is available (CPU streams only)
-    // - Obtain the frame data by calling CpuConsumer::lockNextBuffer
-    // - Release the frame data after use with CpuConsumer::unlockBuffer
-    // Return value:
-    // - >0 - number of frames available to be locked
-    // - <0 - error (refer to error codes)
-    // Error codes:
-    // -ETIMEDOUT if it took too long to get a frame
-    int waitForFrameBuffer(int streamId);
-
-    // Blocks until a metadata result is available
-    // - Obtain the metadata by calling consumeFrameMetadata()
-    // Error codes:
-    // -ETIMEDOUT if it took too long to get a frame
-    status_t waitForFrameMetadata();
-
-    // Get the latest metadata. This is destructive.
-    // - Calling this repeatedly will produce empty metadata objects.
-    // - Use waitForFrameMetadata to sync until new data is available.
-    CameraMetadata consumeFrameMetadata();
-
-    // Convenience method to drop frame buffers (CPU streams only)
-    // Return values:
-    //  >=0 - number of frames dropped (up to count)
-    //  <0  - error code
-    // Error codes:
-    //   BAD_VALUE - invalid streamId or count passed
-    int dropFrameBuffer(int streamId, int count);
-
-protected:
-    ////////////////////////////////////////////////////////
-    // IProCameraCallbacks implementation
-    ////////////////////////////////////////////////////////
-    virtual void        notifyCallback(int32_t msgType,
-                                       int32_t ext,
-                                       int32_t ext2);
-
-    virtual void        onLockStatusChanged(
-                                IProCameraCallbacks::LockStatus newLockStatus);
-
-    virtual void        onResultReceived(int32_t requestId,
-                                         camera_metadata* result);
-private:
-    ProCamera(int cameraId);
-
-    class ProFrameListener : public CpuConsumer::FrameAvailableListener {
-    public:
-        ProFrameListener(wp<ProCamera> camera, int streamID) {
-            mCamera = camera;
-            mStreamId = streamID;
-        }
-
-    protected:
-        virtual void onFrameAvailable(const BufferItem& /* item */) {
-            sp<ProCamera> c = mCamera.promote();
-            if (c.get() != NULL) {
-                c->onFrameAvailable(mStreamId);
-            }
-        }
-
-    private:
-        wp<ProCamera> mCamera;
-        int mStreamId;
-    };
-    friend class ProFrameListener;
-
-    struct StreamInfo
-    {
-        StreamInfo(int streamId) {
-            this->streamID = streamId;
-            cpuStream = false;
-            frameReady = 0;
-        }
-
-        StreamInfo() {
-            streamID = -1;
-            cpuStream = false;
-        }
-
-        int  streamID;
-        bool cpuStream;
-        sp<CpuConsumer> cpuConsumer;
-        bool synchronousMode;
-        sp<ProFrameListener> frameAvailableListener;
-        sp<Surface> stc;
-        int frameReady;
-    };
-
-    Condition mWaitCondition;
-    Mutex     mWaitMutex;
-    static const nsecs_t mWaitTimeout = 1000000000; // 1sec
-    KeyedVector<int, StreamInfo> mStreams;
-    bool mMetadataReady;
-    CameraMetadata mLatestMetadata;
-
-    void onFrameAvailable(int streamId);
-
-    StreamInfo& getStreamInfo(int streamId);
-
-    friend class CameraBase;
-};
-
-}; // namespace android
-
-#endif
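
For reference, the header removed above documents the old ProCamera request/result flow (exclusive locks, CPU streams, streaming requests). The sketch below is a hypothetical illustration of that deleted API only; ProCamera::connect() and CAMERA2_TEMPLATE_PREVIEW are assumptions, and this does not build after this change.

    // Hypothetical sketch of the request/result flow of the removed ProCamera API.
    // ProCamera::connect() and CAMERA2_TEMPLATE_PREVIEW are assumptions; error
    // handling is collapsed.
    static void runPreviewOnce(int cameraId, int width, int height, int format) {
        sp<ProCamera> cam = ProCamera::connect(cameraId);      // assumed factory
        if (cam == NULL) return;

        if (cam->exclusiveTryLock() != 0) {                    // -EBUSY if already locked
            cam->exclusiveLock();                              // returns 0; wait for onLockAcquired
        }

        int streamId = -1;
        sp<CpuConsumer> consumer;
        cam->createStreamCpu(width, height, format, /*heapCount*/ 2,
                             /*out*/ &consumer, &streamId);

        camera_metadata* request = NULL;
        cam->createDefaultRequest(CAMERA2_TEMPLATE_PREVIEW, /*out*/ &request);
        int requestId = cam->submitRequest(request, /*streaming*/ true);

        if (cam->waitForFrameBuffer(streamId) > 0) {
            // Lock, consume and unlock the frame via the CpuConsumer, then drop
            // any backlog that will not be processed.
            cam->dropFrameBuffer(streamId, /*count*/ 1);
        }

        cam->cancelRequest(requestId);
        cam->deleteStream(streamId);
        cam->exclusiveUnlock();
    }
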
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
index e56d61f..eeab217 100644
--- a/include/camera/camera2/CaptureRequest.h
+++ b/include/camera/camera2/CaptureRequest.h
@@ -30,6 +30,7 @@
 
     CameraMetadata          mMetadata;
     Vector<sp<Surface> >    mSurfaceList;
+    bool                    mIsReprocess;
 
     /**
      * Keep impl up-to-date with CaptureRequest.java in frameworks/base
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
index 670480b..c57b39f 100644
--- a/include/camera/camera2/ICameraDeviceCallbacks.h
+++ b/include/camera/camera2/ICameraDeviceCallbacks.h
@@ -65,6 +65,9 @@
     // One way
     virtual void            onResultReceived(const CameraMetadata& metadata,
                                              const CaptureResultExtras& resultExtras) = 0;
+
+    // One way
+    virtual void            onPrepared(int streamId) = 0;
 };
 
 // ----------------------------------------------------------------------------
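
The new one-way onPrepared() notification pairs with ICameraDeviceUser::prepare() later in this change. A client typically just records which stream finished preallocating and wakes a waiter; the sketch below is illustrative only (the class name and members are assumptions, BnCameraDeviceCallbacks is the implied binder stub, the other pure-virtual callbacks and includes are omitted).

    // Illustrative receiver for onPrepared(); the interface's other pure-virtual
    // callbacks are omitted for brevity, so this is a sketch, not a compilable class.
    class PreparedTracker : public BnCameraDeviceCallbacks {
    public:
        virtual void onPrepared(int streamId) {
            Mutex::Autolock l(mLock);
            mPrepared.insert(streamId);
            mCondition.broadcast();                    // wake waitUntilPrepared()
        }

        // Block until prepare() has completed for streamId, or the timeout expires.
        bool waitUntilPrepared(int streamId, nsecs_t timeout) {
            Mutex::Autolock l(mLock);
            while (mPrepared.count(streamId) == 0) {
                if (mCondition.waitRelative(mLock, timeout) != OK) {
                    return false;
                }
            }
            return true;
        }

    private:
        Mutex mLock;
        Condition mCondition;
        std::set<int> mPrepared;                       // streams reported via onPrepared()
    };
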
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
index 35488bb..a7bf8ab 100644
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ b/include/camera/camera2/ICameraDeviceUser.h
@@ -27,9 +27,9 @@
 
 class ICameraDeviceUserClient;
 class IGraphicBufferProducer;
-class Surface;
 class CaptureRequest;
 class CameraMetadata;
+class OutputConfiguration;
 
 enum {
     NO_IN_FLIGHT_REPEATING_FRAMES = -1,
@@ -97,12 +97,24 @@
      * must be called before any requests can be submitted.
      * <p>
      */
-    virtual status_t        endConfigure() = 0;
+    virtual status_t        endConfigure(bool isConstrainedHighSpeed = false) = 0;
 
     virtual status_t        deleteStream(int streamId) = 0;
-    virtual status_t        createStream(
-            int width, int height, int format,
-            const sp<IGraphicBufferProducer>& bufferProducer) = 0;
+
+    virtual status_t        createStream(const OutputConfiguration& outputConfiguration) = 0;
+
+    /**
+     * Create an input stream of width, height, and format (one of
+     * HAL_PIXEL_FORMAT_*)
+     *
+     * Returns the new stream ID if the return value is non-negative, or a
+     * status_t error code if the return value is negative.
+     */
+    virtual status_t        createInputStream(int width, int height, int format) = 0;
+
+    // get the buffer producer of the input stream
+    virtual status_t        getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) = 0;
 
     // Create a request object from a template.
     virtual status_t        createDefaultRequest(int templateId,
@@ -121,6 +133,17 @@
      */
     virtual status_t        flush(/*out*/
                                   int64_t* lastFrameNumber = NULL) = 0;
+
+    /**
+     * Preallocate buffers for a given output stream asynchronously.
+     */
+    virtual status_t        prepare(int streamId) = 0;
+
+    /**
+     * Free all unused buffers for a given output stream.
+     */
+    virtual status_t        tearDown(int streamId) = 0;
+
 };
 
 // ----------------------------------------------------------------------------
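
Together with the OutputConfiguration header added next, the reworked ICameraDeviceUser surface suggests a configure/prepare/tearDown sequence along the lines of the hedged sketch below. beginConfigure() is not visible in this hunk and is an assumption, and treating a non-negative createStream() return as the stream ID mirrors the createInputStream() comment above rather than anything stated for createStream() itself.

    // Hedged sketch of stream (re)configuration against the revised interface.
    // beginConfigure() is assumed; error handling is collapsed to early returns.
    static status_t configureOutputs(const sp<ICameraDeviceUser>& device,
                                     sp<IGraphicBufferProducer>& previewGbp) {
        OutputConfiguration outConfig(previewGbp, /*rotation*/ 0);

        status_t res = device->beginConfigure();               // assumed counterpart to endConfigure()
        if (res != OK) return res;

        status_t streamId = device->createStream(outConfig);   // non-negative value taken as stream ID
        if (streamId < 0) return streamId;

        res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
        if (res != OK) return res;

        // Optionally preallocate buffers before the first request; completion is
        // reported asynchronously via ICameraDeviceCallbacks::onPrepared().
        device->prepare(streamId);

        // ... submit capture requests, then reclaim unused buffers when idle ...
        return device->tearDown(streamId);
    }
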
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
new file mode 100644
index 0000000..5bcbe15
--- /dev/null
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
+#define ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
+
+#include <utils/RefBase.h>
+#include <gui/IGraphicBufferProducer.h>
+
+namespace android {
+
+class Surface;
+
+class OutputConfiguration : public virtual RefBase {
+public:
+
+    static const int INVALID_ROTATION;
+    sp<IGraphicBufferProducer> getGraphicBufferProducer() const;
+    int                        getRotation() const;
+
+    /**
+     * Keep impl up-to-date with OutputConfiguration.java in frameworks/base
+     */
+    status_t                   writeToParcel(Parcel& parcel) const;
+    // getGraphicBufferProducer will be NULL if an error occurred
+    // getRotation will be INVALID_ROTATION if an error occurred
+    OutputConfiguration(const Parcel& parcel);
+
+    OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation);
+
+private:
+    sp<IGraphicBufferProducer> mGbp;
+    int                        mRotation;
+
+    // helper function
+    static String16 readMaybeEmptyString16(const Parcel& parcel);
+};
+}; // namespace android
+
+#endif
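
As a usage note for the new class: an OutputConfiguration is built from a Surface's IGraphicBufferProducer plus a rotation, and round-trips through a Parcel so it can stay in sync with OutputConfiguration.java. The snippet below is a hedged illustration using only the constructors and accessors declared above; obtaining the producer and the <binder/Parcel.h> include are assumed to come from elsewhere.

    // Illustration of constructing and parceling an OutputConfiguration. The
    // IGraphicBufferProducer is assumed to come from an existing Surface or
    // BufferQueue created elsewhere.
    static bool parcelRoundTrip(sp<IGraphicBufferProducer>& gbp) {
        OutputConfiguration config(gbp, /*rotation*/ 0);

        Parcel parcel;
        config.writeToParcel(parcel);                 // keep in sync with the Java side

        parcel.setDataPosition(0);
        OutputConfiguration restored(parcel);

        // Per the accessor comments above, both getters report unparceling failures.
        return restored.getRotation() != OutputConfiguration::INVALID_ROTATION &&
               restored.getGraphicBufferProducer() != NULL;
    }
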
diff --git a/include/media/AVSyncSettings.h b/include/media/AVSyncSettings.h
new file mode 100644
index 0000000..10e3bcc
--- /dev/null
+++ b/include/media/AVSyncSettings.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AV_SYNC_SETTINGS_H
+#define ANDROID_AV_SYNC_SETTINGS_H
+
+namespace android {
+
+enum AVSyncSource : unsigned {
+    // let the system decide the best sync source
+    AVSYNC_SOURCE_DEFAULT = 0,
+    // sync to the system clock
+    AVSYNC_SOURCE_SYSTEM_CLOCK = 1,
+    // sync to the audio track
+    AVSYNC_SOURCE_AUDIO = 2,
+    // sync to the display vsync
+    AVSYNC_SOURCE_VSYNC = 3,
+    AVSYNC_SOURCE_MAX,
+};
+
+enum AVSyncAudioAdjustMode : unsigned {
+    // let the system decide the best audio adjust mode
+    AVSYNC_AUDIO_ADJUST_MODE_DEFAULT = 0,
+    // adjust audio by time stretching
+    AVSYNC_AUDIO_ADJUST_MODE_STRETCH = 1,
+    // adjust audio by resampling
+    AVSYNC_AUDIO_ADJUST_MODE_RESAMPLE = 2,
+    AVSYNC_AUDIO_ADJUST_MODE_MAX,
+};
+
+// max tolerance when adjusting playback speed to desired playback speed
+#define AVSYNC_TOLERANCE_MAX 1.0f
+
+struct AVSyncSettings {
+    AVSyncSource mSource;
+    AVSyncAudioAdjustMode mAudioAdjustMode;
+    float mTolerance;
+    AVSyncSettings()
+        : mSource(AVSYNC_SOURCE_DEFAULT),
+          mAudioAdjustMode(AVSYNC_AUDIO_ADJUST_MODE_DEFAULT),
+          mTolerance(.044f) { }
+};
+
+} // namespace android
+
+// ---------------------------------------------------------------------------
+
+#endif // ANDROID_AV_SYNC_SETTINGS_H
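
The struct defaults leave the sync source and audio adjust mode to the system with a 0.044 tolerance; overriding them per use case is plain field assignment, as in this small example (handing the settings to a player is outside this header).

    // Example of populating AVSyncSettings; applying them to a player is not
    // shown because that API is not part of this header.
    static AVSyncSettings makeVsyncSettings() {
        AVSyncSettings sync;                         // defaults: DEFAULT source/mode, 0.044 tolerance
        sync.mSource = AVSYNC_SOURCE_VSYNC;          // slave playback to the display vsync
        sync.mAudioAdjustMode = AVSYNC_AUDIO_ADJUST_MODE_RESAMPLE;
        sync.mTolerance = 0.1f;                      // must stay below AVSYNC_TOLERANCE_MAX (1.0f)
        return sync;
    }
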
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 583695d..5af6c10 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -201,8 +201,12 @@
      */
 
     /* Simple Constructor.
+     *
+     * Parameters:
+     *
+     * opPackageName:      The package name used for app op checks.
      */
-    AudioEffect();
+    AudioEffect(const String16& opPackageName);
 
 
     /* Constructor.
@@ -211,6 +215,7 @@
      *
      * type:  type of effect created: can be null if uuid is specified. This corresponds to
      *        the OpenSL ES interface implemented by this effect.
+     * opPackageName:  The package name used for app op checks.
      * uuid:  Uuid of effect created: can be null if type is specified. This uuid corresponds to
      *        a particular implementation of an effect type.
      * priority:    requested priority for effect control: the priority level corresponds to the
@@ -227,6 +232,7 @@
      */
 
     AudioEffect(const effect_uuid_t *type,
+                const String16& opPackageName,
                 const effect_uuid_t *uuid = NULL,
                   int32_t priority = 0,
                   effect_callback_t cbf = NULL,
@@ -239,6 +245,7 @@
      *      Same as above but with type and uuid specified by character strings
      */
     AudioEffect(const char *typeStr,
+                    const String16& opPackageName,
                     const char *uuidStr = NULL,
                     int32_t priority = 0,
                     effect_callback_t cbf = NULL,
@@ -406,7 +413,9 @@
      void*                   mUserData;          // client context for callback function
      effect_descriptor_t     mDescriptor;        // effect descriptor
      int32_t                 mId;                // system wide unique effect engine instance ID
-     Mutex                   mLock;               // Mutex for mEnabled access
+     Mutex                   mLock;              // Mutex for mEnabled access
+
+     String16                mOpPackageName;     // The package name used for app op checks.
 
      // IEffectClient
      virtual void controlStatusChanged(bool controlGranted);
@@ -420,7 +429,8 @@
 private:
 
      // Implements the IEffectClient interface
-    class EffectClient : public android::BnEffectClient,  public android::IBinder::DeathRecipient
+    class EffectClient :
+        public android::BnEffectClient, public android::IBinder::DeathRecipient
     {
     public:
 
@@ -428,24 +438,39 @@
 
         // IEffectClient
         virtual void controlStatusChanged(bool controlGranted) {
-            mEffect->controlStatusChanged(controlGranted);
+            sp<AudioEffect> effect = mEffect.promote();
+            if (effect != 0) {
+                effect->controlStatusChanged(controlGranted);
+            }
         }
         virtual void enableStatusChanged(bool enabled) {
-            mEffect->enableStatusChanged(enabled);
+            sp<AudioEffect> effect = mEffect.promote();
+            if (effect != 0) {
+                effect->enableStatusChanged(enabled);
+            }
         }
         virtual void commandExecuted(uint32_t cmdCode,
                                      uint32_t cmdSize,
                                      void *pCmdData,
                                      uint32_t replySize,
                                      void *pReplyData) {
-            mEffect->commandExecuted(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+            sp<AudioEffect> effect = mEffect.promote();
+            if (effect != 0) {
+                effect->commandExecuted(
+                    cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+            }
         }
 
         // IBinder::DeathRecipient
-        virtual void binderDied(const wp<IBinder>& who) {mEffect->binderDied();}
+        virtual void binderDied(const wp<IBinder>& who) {
+            sp<AudioEffect> effect = mEffect.promote();
+            if (effect != 0) {
+                effect->binderDied();
+            }
+        }
 
     private:
-        AudioEffect *mEffect;
+        wp<AudioEffect> mEffect;
     };
 
     void binderDied();
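
The EffectClient changes above replace a raw AudioEffect* with a wp<AudioEffect>, so callbacks that race with destruction are dropped instead of dereferencing a dangling pointer. The same promote-then-check pattern generalizes as in this sketch; the template and names are illustrative, not part of the patch, and handleEvent() stands in for whatever the owner exposes.

    // Generic form of the weak-pointer guard used in EffectClient above: hold a
    // wp<>, promote to a sp<> for the duration of the call, and bail out if the
    // owner has already been destroyed. Owner must derive from RefBase.
    template <typename Owner>
    class CallbackForwarder : public RefBase {
    public:
        explicit CallbackForwarder(const wp<Owner>& owner) : mOwner(owner) {}

        void deliver(int event) {
            sp<Owner> owner = mOwner.promote();      // fails once the Owner is gone
            if (owner != 0) {
                owner->handleEvent(event);           // safe: strong reference held here
            }                                        // otherwise silently drop the callback
        }

    private:
        const wp<Owner> mOwner;                      // never keeps the owner alive
    };
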
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
new file mode 100644
index 0000000..c94b738
--- /dev/null
+++ b/include/media/AudioIoDescriptor.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_IO_DESCRIPTOR_H
+#define ANDROID_AUDIO_IO_DESCRIPTOR_H
+
+namespace android {
+
+enum audio_io_config_event {
+    AUDIO_OUTPUT_OPENED,
+    AUDIO_OUTPUT_CLOSED,
+    AUDIO_OUTPUT_CONFIG_CHANGED,
+    AUDIO_INPUT_OPENED,
+    AUDIO_INPUT_CLOSED,
+    AUDIO_INPUT_CONFIG_CHANGED,
+};
+
+// audio input/output descriptor used to cache input/output configurations in the client process to avoid
+// frequent calls through IAudioFlinger
+class AudioIoDescriptor : public RefBase {
+public:
+    AudioIoDescriptor() :
+        mIoHandle(AUDIO_IO_HANDLE_NONE),
+        mSamplingRate(0), mFormat(AUDIO_FORMAT_DEFAULT), mChannelMask(AUDIO_CHANNEL_NONE),
+        mFrameCount(0), mLatency(0)
+    {
+        memset(&mPatch, 0, sizeof(struct audio_patch));
+    }
+
+    virtual ~AudioIoDescriptor() {}
+
+    audio_port_handle_t getDeviceId() {
+        if (mPatch.num_sources != 0 && mPatch.num_sinks != 0) {
+            if (mPatch.sources[0].type == AUDIO_PORT_TYPE_MIX) {
+                // this is an output mix
+                // FIXME: the API only returns the first device in case of multiple device selection
+                return mPatch.sinks[0].id;
+            } else {
+                // this is an input mix
+                return mPatch.sources[0].id;
+            }
+        }
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+
+    audio_io_handle_t mIoHandle;
+    struct audio_patch mPatch;
+    uint32_t mSamplingRate;
+    audio_format_t mFormat;
+    audio_channel_mask_t mChannelMask;
+    size_t mFrameCount;
+    uint32_t mLatency;
+};
+
+
+};  // namespace android
+
+#endif  /*ANDROID_AUDIO_IO_DESCRIPTOR_H*/
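
getDeviceId() resolves the routed device from the cached audio_patch by checking whether the patch's first source is a mix (output path) or a device (input path). A hedged example of inspecting a descriptor as delivered through the reworked ioConfigChanged() callback later in this patch; ALOGI assumes <utils/Log.h> and a LOG_TAG.

    // Hedged sketch: log the cached I/O configuration carried by a descriptor.
    static void dumpIoDescriptor(const sp<AudioIoDescriptor>& desc) {
        if (desc == 0) return;
        // AUDIO_PORT_HANDLE_NONE means no patch is installed for this I/O handle.
        audio_port_handle_t deviceId = desc->getDeviceId();
        ALOGI("io %d: rate %u Hz, format %#x, mask %#x, frames %zu, latency %u ms, device %d",
              desc->mIoHandle, desc->mSamplingRate, (unsigned)desc->mFormat,
              (unsigned)desc->mChannelMask, desc->mFrameCount, desc->mLatency, deviceId);
    }
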
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index a755e1e..feed402 100644
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -38,8 +38,17 @@
 #define MIX_TYPE_PLAYERS 0
 #define MIX_TYPE_RECORDERS 1
 
-#define ROUTE_FLAG_RENDER 0x1
-#define ROUTE_FLAG_LOOP_BACK (0x1 << 1)
+// definition of the different events that can be reported on a dynamic policy from
+//   AudioSystem's implementation of the AudioPolicyClient interface
+// keep in sync with AudioSystem.java
+#define DYNAMIC_POLICY_EVENT_MIX_STATE_UPDATE 0
+
+#define MIX_STATE_DISABLED -1
+#define MIX_STATE_IDLE 0
+#define MIX_STATE_MIXING 1
+
+#define MIX_ROUTE_FLAG_RENDER 0x1
+#define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
 
 #define MAX_MIXES_PER_POLICY 10
 #define MAX_CRITERIA_PER_MIX 20
@@ -61,11 +70,15 @@
 
 class AudioMix {
 public:
+    // flag on an AudioMix indicating the activity on this mix (IDLE, MIXING)
+    //   must be reported through the AudioPolicyClient interface
+    static const uint32_t kCbFlagNotifyActivity = 0x1;
+
     AudioMix() {}
     AudioMix(Vector<AttributeMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
-             uint32_t routeFlags, String8 registrationId) :
+             uint32_t routeFlags, String8 registrationId, uint32_t flags) :
         mCriteria(criteria), mMixType(mixType), mFormat(format),
-        mRouteFlags(routeFlags), mRegistrationId(registrationId) {}
+        mRouteFlags(routeFlags), mRegistrationId(registrationId), mCbFlags(flags) {}
 
     status_t readFromParcel(Parcel *parcel);
     status_t writeToParcel(Parcel *parcel) const;
@@ -75,6 +88,7 @@
     audio_config_t  mFormat;
     uint32_t        mRouteFlags;
     String8         mRegistrationId;
+    uint32_t        mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
 };
 
 }; // namespace android
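
With the renamed MIX_ROUTE_FLAG_* constants and the new mCbFlags field, a dynamic policy mix can ask to be notified when it moves between MIX_STATE_IDLE and MIX_STATE_MIXING. A hedged construction example follows; the criteria and format values are placeholders, the registration id is illustrative, and AUDIO_CONFIG_INITIALIZER is assumed from system/audio.h.

    // Hedged sketch: register a loop-back mix that reports activity changes.
    // Criteria are left empty here; a real mix would match on usage/source.
    static status_t registerExampleMix() {
        Vector<AttributeMatchCriterion> criteria;

        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
        config.sample_rate  = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format       = AUDIO_FORMAT_PCM_16_BIT;

        AudioMix mix(criteria,
                     MIX_TYPE_PLAYERS,
                     config,
                     MIX_ROUTE_FLAG_LOOP_BACK,
                     String8("com.example.capture:mix0"),     // illustrative registration id
                     AudioMix::kCbFlagNotifyActivity);         // report IDLE/MIXING transitions

        Vector<AudioMix> mixes;
        mixes.add(mix);
        return AudioSystem::registerPolicyMixes(mixes, /*registration*/ true);
    }
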
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index f70d981..c4c7b0e 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -42,8 +42,7 @@
         EVENT_MORE_DATA = 0,        // Request to read available data from buffer.
                                     // If this event is delivered but the callback handler
                                     // does not want to read the available data, the handler must
-                                    // explicitly
-                                    // ignore the event by setting frameCount to zero.
+                                    // explicitly ignore the event by setting frameCount to zero.
         EVENT_OVERRUN = 1,          // Buffer overrun occurred.
         EVENT_MARKER = 2,           // Record head is at the specified marker position
                                     // (See setMarkerPosition()).
@@ -53,7 +52,7 @@
                                     // voluntary invalidation by mediaserver, or mediaserver crash.
     };
 
-    /* Client should declare Buffer on the stack and pass address to obtainBuffer()
+    /* Client should declare a Buffer and pass address to obtainBuffer()
      * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
      */
 
@@ -62,20 +61,25 @@
     public:
         // FIXME use m prefix
         size_t      frameCount;     // number of sample frames corresponding to size;
-                                    // on input it is the number of frames available,
-                                    // on output is the number of frames actually drained
-                                    // (currently ignored but will make the primary field in future)
+                                    // on input to obtainBuffer() it is the number of frames desired
+                                    // on output from obtainBuffer() it is the number of available
+                                    //    frames to be read
+                                    // on input to releaseBuffer() it is currently ignored
 
         size_t      size;           // input/output in bytes == frameCount * frameSize
-                                    // on output is the number of bytes actually drained
-                                    // FIXME this is redundant with respect to frameCount,
-                                    // and TRANSFER_OBTAIN mode is broken for 8-bit data
-                                    // since we don't define the frame format
+                                    // on input to obtainBuffer() it is ignored
+                                    // on output from obtainBuffer() it is the number of available
+                                    //    bytes to be read, which is frameCount * frameSize
+                                    // on input to releaseBuffer() it is the number of bytes to
+                                    //    release
+                                    // FIXME This is redundant with respect to frameCount.  Consider
+                                    //    removing size and making frameCount the primary field.
 
         union {
             void*       raw;
             short*      i16;        // signed 16-bit
             int8_t*     i8;         // unsigned 8-bit, offset by 0x80
+                                    // input to obtainBuffer(): unused, output: pointer to buffer
         };
     };
 
@@ -88,8 +92,8 @@
      * user:    Pointer to context for use by the callback receiver.
      * info:    Pointer to optional parameter according to event type:
      *          - EVENT_MORE_DATA: pointer to AudioRecord::Buffer struct. The callback must not read
-     *            more bytes than indicated by 'size' field and update 'size' if fewer bytes are
-     *            consumed.
+     *                             more bytes than indicated by 'size' field and update 'size' if
+     *                             fewer bytes are consumed.
      *          - EVENT_OVERRUN: unused.
      *          - EVENT_MARKER: pointer to const uint32_t containing the marker position in frames.
      *          - EVENT_NEW_POS: pointer to const uint32_t containing the new position in frames.
@@ -106,6 +110,7 @@
      *  - BAD_VALUE: unsupported configuration
      * frameCount is guaranteed to be non-zero if status is NO_ERROR,
      * and is undefined otherwise.
+     * FIXME This API assumes a route, and so should be deprecated.
      */
 
      static status_t getMinFrameCount(size_t* frameCount,
@@ -118,14 +123,18 @@
     enum transfer_type {
         TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
         TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
-        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_OBTAIN,    // call obtainBuffer() and releaseBuffer()
         TRANSFER_SYNC,      // synchronous read()
     };
 
     /* Constructs an uninitialized AudioRecord. No connection with
      * AudioFlinger takes place.  Use set() after this.
+     *
+     * Parameters:
+     *
+     * opPackageName:      The package name used for app ops.
      */
-                        AudioRecord();
+                        AudioRecord(const String16& opPackageName);
 
     /* Creates an AudioRecord object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
@@ -138,27 +147,30 @@
      * format:             Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
      *                     16 bits per sample).
      * channelMask:        Channel mask, such that audio_is_input_channel(channelMask) is true.
+     * opPackageName:      The package name used for app ops.
      * frameCount:         Minimum size of track PCM buffer in frames. This defines the
      *                     application's contribution to the
      *                     latency of the track.  The actual size selected by the AudioRecord could
      *                     be larger if the requested size is not compatible with current audio HAL
      *                     latency.  Zero means to use a default value.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to consume new data and inform of marker, position updates, etc.
+     *                     to consume new data in TRANSFER_CALLBACK mode
+     *                     and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames are ready in record track output buffer.
      * sessionId:          Not yet supported.
      * transferType:       How data is transferred from AudioRecord.
      * flags:              See comments on audio_input_flags_t in <system/audio.h>
+     * pAttributes:        If not NULL, supersedes inputSource for use case selection.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
-     * pAttributes:        if not NULL, supersedes inputSource for use case selection
      */
 
                         AudioRecord(audio_source_t inputSource,
                                     uint32_t sampleRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
+                                    const String16& opPackageName,
                                     size_t frameCount = 0,
                                     callback_t cbf = NULL,
                                     void* user = NULL,
@@ -166,6 +178,8 @@
                                     int sessionId = AUDIO_SESSION_ALLOCATE,
                                     transfer_type transferType = TRANSFER_DEFAULT,
                                     audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                                    int uid = -1,
+                                    pid_t pid = -1,
                                     const audio_attributes_t* pAttributes = NULL);
 
     /* Terminates the AudioRecord and unregisters it from AudioFlinger.
@@ -177,6 +191,7 @@
 
     /* Initialize an AudioRecord that was created using the AudioRecord() constructor.
      * Don't call set() more than once, or after an AudioRecord() constructor that takes parameters.
+     * set() is not multi-thread safe.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful initialization
      *  - INVALID_OPERATION: AudioRecord is already initialized or record device is already in use
@@ -201,6 +216,8 @@
                             int sessionId = AUDIO_SESSION_ALLOCATE,
                             transfer_type transferType = TRANSFER_DEFAULT,
                             audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+                            int uid = -1,
+                            pid_t pid = -1,
                             const audio_attributes_t* pAttributes = NULL);
 
     /* Result of constructing the AudioRecord. This must be checked for successful initialization
@@ -211,7 +228,7 @@
             status_t    initCheck() const   { return mStatus; }
 
     /* Returns this track's estimated latency in milliseconds.
-     * This includes the latency due to AudioRecord buffer size,
+     * This includes the latency due to AudioRecord buffer size, resampling if applicable,
      * and audio hardware driver.
      */
             uint32_t    latency() const     { return mLatency; }
@@ -243,11 +260,6 @@
      */
             uint32_t    getSampleRate() const   { return mSampleRate; }
 
-    /* Return the notification frame count.
-     * This is approximately how often the callback is invoked, for transfer type TRANSFER_CALLBACK.
-     */
-            size_t      notificationFrames() const  { return mNotificationFramesAct; }
-
     /* Sets marker position. When record reaches the number of frames specified,
      * a callback with event type EVENT_MARKER is called. Calling setMarkerPosition
      * with marker == 0 cancels marker notification callback.
@@ -309,7 +321,12 @@
      * Returned value:
      *  handle on audio hardware input
      */
-            audio_io_handle_t    getInput() const;
+// FIXME The only known public caller is frameworks/opt/net/voip/src/jni/rtp/AudioGroup.cpp
+            audio_io_handle_t    getInput() const __attribute__((__deprecated__))
+                                                { return getInputPrivate(); }
+private:
+            audio_io_handle_t    getInputPrivate() const;
+public:
 
     /* Returns the audio session ID associated with this AudioRecord.
      *
@@ -323,10 +340,18 @@
      */
             int    getSessionId() const { return mSessionId; }
 
-    /* Obtains a buffer of up to "audioBuffer->frameCount" full frames.
+    /* Public API for TRANSFER_OBTAIN mode.
+     * Obtains a buffer of up to "audioBuffer->frameCount" full frames.
      * After draining these frames of data, the caller should release them with releaseBuffer().
      * If the track buffer is not empty, obtainBuffer() returns as many contiguous
      * full frames as are available immediately.
+     *
+     * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are predicted to be available immediately,
+     * if the client were to release the first frames and then call obtainBuffer() again.
+     * This value is only a prediction, and needs to be confirmed.
+     * It will be set to zero for an error return.
+     *
      * If the track buffer is empty and track is stopped, obtainBuffer() returns WOULD_BLOCK
      * regardless of the value of waitCount.
      * If the track buffer is empty and track is not stopped, obtainBuffer() blocks with a
@@ -336,9 +361,6 @@
      * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
      *
-     * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
-     * which should use read() or callback EVENT_MORE_DATA instead.
-     *
      * Interpretation of waitCount:
      *  +n  limits wait time to n * WAIT_PERIOD_MS,
      *  -1  causes an (almost) infinite wait time,
@@ -347,6 +369,8 @@
      * Buffer fields
      * On entry:
      *  frameCount  number of frames requested
+     *  size        ignored
+     *  raw         ignored
      * After error return:
      *  frameCount  0
      *  size        0
@@ -357,13 +381,58 @@
      *  raw         pointer to the buffer
      */
 
-    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
-                                __attribute__((__deprecated__));
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
+                                size_t *nonContig = NULL);
+
+            // Explicit Routing
+    /**
+     * TODO Document this method.
+     */
+            status_t setInputDevice(audio_port_handle_t deviceId);
+
+    /**
+     * TODO Document this method.
+     */
+            audio_port_handle_t getInputDevice();
+
+     /* Returns the ID of the audio device actually used by the input to which this AudioRecord
+      * is attached.
+      * A value of AUDIO_PORT_HANDLE_NONE indicates the AudioRecord is not attached to any input.
+      *
+      * Parameters:
+      *  none.
+      */
+     audio_port_handle_t getRoutedDeviceId();
+
+    /* Add an AudioDeviceCallback. The caller will be notified when the audio device
+     * to which this AudioRecord is routed is updated.
+     * Replaces any previously installed callback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the same callback is already installed.
+     *         NO_INIT or PERMISSION_DENIED if AudioFlinger service is not reachable
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t addAudioDeviceCallback(
+                    const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+    /* Remove an AudioDeviceCallback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the callback is not installed
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t removeAudioDeviceCallback(
+                    const sp<AudioSystem::AudioDeviceCallback>& callback);
 
 private:
     /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
-     * additional non-contiguous frames that are available immediately.
+     * additional non-contiguous frames that are predicted to be available immediately,
+     * if the client were to release the first frames and then call obtainBuffer() again.
+     * This value is only a prediction, and needs to be confirmed.
+     * It will be set to zero for an error return.
      * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
      * in case the requested amount of frames is in two or more non-contiguous regions.
      * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
@@ -372,9 +441,15 @@
                                      struct timespec *elapsed = NULL, size_t *nonContig = NULL);
 public:
 
-    /* Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill. */
-    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
-            void        releaseBuffer(Buffer* audioBuffer);
+    /* Public API for TRANSFER_OBTAIN mode.
+     * Release an emptied buffer of "audioBuffer->frameCount" frames for AudioFlinger to re-fill.
+     *
+     * Buffer fields:
+     *  frameCount  currently ignored; recommended to set to the actual number of frames consumed
+     *  size        actual number of bytes consumed, must be multiple of frameSize
+     *  raw         ignored
+     */
+            void        releaseBuffer(const Buffer* audioBuffer);
 
     /* As a convenience we provide a read() interface to the audio buffer.
      * Input parameter 'size' is in byte units.
@@ -386,8 +461,11 @@
      *      WOULD_BLOCK         when obtainBuffer() returns same, or
      *                          AudioRecord was stopped during the read
      *      or any other error code returned by IAudioRecord::start() or restoreRecord_l().
+     * Default behavior is to only return when all data has been transferred. Set 'blocking' to
+     * false for the method to return immediately without waiting to try multiple times to read
+     * the full content of the buffer.
      */
-            ssize_t     read(void* buffer, size_t size);
+            ssize_t     read(void* buffer, size_t size, bool blocking = true);
 
     /* Return the number of input frames lost in the audio driver since the last call of this
      * function.  Audio driver is expected to reset the value to 0 and restart counting upon
@@ -416,6 +494,7 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        wake();     // wake to handle changed notification conditions.
 
     private:
                 void        pauseInternal(nsecs_t ns = 0LL);
@@ -430,7 +509,9 @@
         bool                mPaused;    // whether thread is requested to pause at next loop entry
         bool                mPausedInt; // whether thread internally requests pause
         nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
-        bool                mIgnoreNextPausedInt;   // whether to ignore next mPausedInt request
+        bool                mIgnoreNextPausedInt;   // skip any internal pause and go immediately
+                                        // to processAudioBuffer() as state may have changed
+                                        // since pause time calculated.
     };
 
             // body of AudioRecordThread::threadLoop()
@@ -445,7 +526,7 @@
 
             // caller must hold lock on mLock for all _l methods
 
-            status_t openRecord_l(size_t epoch);
+            status_t openRecord_l(size_t epoch, const String16& opPackageName);
 
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreRecord_l(const char *from);
@@ -458,7 +539,7 @@
     bool                    mActive;
 
     // for client callback handler
-    callback_t              mCbf;               // callback handler for events, or NULL
+    callback_t              mCbf;                   // callback handler for events, or NULL
     void*                   mUserData;
 
     // for notification APIs
@@ -475,13 +556,15 @@
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
     uint32_t                mObservedSequence;      // last observed value of mSequence
 
-    uint32_t                mMarkerPosition;    // in wrapping (overflow) frame units
+    uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
-    uint32_t                mNewPosition;       // in frames
-    uint32_t                mUpdatePeriod;      // in frames, zero means no EVENT_NEW_POS
+    uint32_t                mNewPosition;           // in frames
+    uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
 
     status_t                mStatus;
 
+    String16                mOpPackageName;         // The package name used for app ops.
+
     size_t                  mFrameCount;            // corresponds to current IAudioRecord, value is
                                                     // reported back by AudioFlinger to the client
     size_t                  mReqFrameCount;         // frame count to request the first or next time
@@ -531,7 +614,14 @@
 
     sp<DeathNotifier>       mDeathNotifier;
     uint32_t                mSequence;              // incremented for each new IAudioRecord attempt
+    int                     mClientUid;
+    pid_t                   mClientPid;
     audio_attributes_t      mAttributes;
+
+    // For Device Selection API
+    //  a value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+    audio_port_handle_t    mSelectedDeviceId;
+    sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 };
 
 }; // namespace android
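
With obtainBuffer()/releaseBuffer() re-promoted to the public TRANSFER_OBTAIN API and the Buffer field semantics spelled out above, a capture loop looks roughly like this hedged sketch. The constructor arguments follow the documented parameter list; the notificationFrames position is elided in this diff and is an assumption, the package name is illustrative, and start()/stop() are untouched by this patch.

    // Hedged sketch of a TRANSFER_OBTAIN capture loop using the public
    // obtainBuffer()/releaseBuffer(). Sample parameters are illustrative.
    static void captureLoop() {
        AudioRecord record(AUDIO_SOURCE_MIC,
                           48000,
                           AUDIO_FORMAT_PCM_16_BIT,
                           AUDIO_CHANNEL_IN_MONO,
                           String16("com.example.app"),    // opPackageName for app ops
                           /*frameCount*/ 0,               // let the framework choose
                           /*cbf*/ NULL,
                           /*user*/ NULL,
                           /*notificationFrames*/ 0,       // position assumed from the docs above
                           AUDIO_SESSION_ALLOCATE,
                           AudioRecord::TRANSFER_OBTAIN);
        if (record.initCheck() != NO_ERROR) return;

        record.start();
        for (;;) {
            AudioRecord::Buffer buffer;
            buffer.frameCount = 480;                       // request up to 10 ms at 48 kHz
            size_t nonContig = 0;
            status_t err = record.obtainBuffer(&buffer, /*waitCount*/ -1, &nonContig);
            if (err != NO_ERROR) break;                    // e.g. WOULD_BLOCK once stopped

            // ... process buffer.raw, buffer.size bytes (frameCount * frameSize) ...

            record.releaseBuffer(&buffer);                 // 'size' reports bytes consumed
        }
        record.stop();
    }
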
diff --git a/include/media/AudioResamplerPublic.h b/include/media/AudioResamplerPublic.h
index 97847a0..055f724 100644
--- a/include/media/AudioResamplerPublic.h
+++ b/include/media/AudioResamplerPublic.h
@@ -17,6 +17,11 @@
 #ifndef ANDROID_AUDIO_RESAMPLER_PUBLIC_H
 #define ANDROID_AUDIO_RESAMPLER_PUBLIC_H
 
+#include <stdint.h>
+#include <math.h>
+
+namespace android {
+
 // AUDIO_RESAMPLER_DOWN_RATIO_MAX is the maximum ratio between the original
 // audio sample rate and the target rate when downsampling,
 // as permitted in the audio framework, e.g. AudioTrack and AudioFlinger.
@@ -26,4 +31,147 @@
 // TODO: replace with an API
 #define AUDIO_RESAMPLER_DOWN_RATIO_MAX 256
 
+// AUDIO_RESAMPLER_UP_RATIO_MAX is the maximum suggested ratio between the original
+// audio sample rate and the target rate when upsampling.  It is loosely enforced by
+// the system. One issue with large upsampling ratios is the approximation by
+// an int32_t of the phase increments, making the resulting sample rate inexact.
+#define AUDIO_RESAMPLER_UP_RATIO_MAX 65536
+
+// AUDIO_TIMESTRETCH_SPEED_MIN and AUDIO_TIMESTRETCH_SPEED_MAX define the min and max time stretch
+// speeds supported by the system. These are enforced by the system and values outside this range
+// will result in a runtime error.
+// Depending on the AudioPlaybackRate::mStretchMode, the effective limits might be narrower than
+// the ones specified here
+// AUDIO_TIMESTRETCH_SPEED_MIN_DELTA is the minimum absolute speed difference that might trigger a
+// parameter update
+#define AUDIO_TIMESTRETCH_SPEED_MIN    0.01f
+#define AUDIO_TIMESTRETCH_SPEED_MAX    20.0f
+#define AUDIO_TIMESTRETCH_SPEED_NORMAL 1.0f
+#define AUDIO_TIMESTRETCH_SPEED_MIN_DELTA 0.0001f
+
+// AUDIO_TIMESTRETCH_PITCH_MIN and AUDIO_TIMESTRETCH_PITCH_MAX define the min and max time stretch
+// pitch shifting supported by the system. These are not enforced by the system and values
+// outside this range might result in a pitch different than the one requested.
+// Depending on the AudioPlaybackRate::mStretchMode, the effective limits might be narrower than
+// the ones specified here.
+// AUDIO_TIMESTRETCH_PITCH_MIN_DELTA is the minimum absolute pitch difference that might trigger a
+// parameter update
+#define AUDIO_TIMESTRETCH_PITCH_MIN    0.25f
+#define AUDIO_TIMESTRETCH_PITCH_MAX    4.0f
+#define AUDIO_TIMESTRETCH_PITCH_NORMAL 1.0f
+#define AUDIO_TIMESTRETCH_PITCH_MIN_DELTA 0.0001f
+
+
+// Determines the current algorithm used for stretching
+enum AudioTimestretchStretchMode : int32_t {
+    AUDIO_TIMESTRETCH_STRETCH_DEFAULT            = 0,
+    AUDIO_TIMESTRETCH_STRETCH_SPEECH             = 1,
+    //TODO: add more stretch modes/algorithms
+};
+
+// Limits for AUDIO_TIMESTRETCH_STRETCH_SPEECH mode
+#define TIMESTRETCH_SONIC_SPEED_MIN 0.1f
+#define TIMESTRETCH_SONIC_SPEED_MAX 6.0f
+
+// Determines the behavior of the time stretcher if the current algorithm
+// cannot perform with the current parameters.
+// FALLBACK_CUT_REPEAT: (internal only) for speed <1.0 will truncate frames
+//    for speed > 1.0 will repeat frames
+// FALLBACK_MUTE: will set all processed frames to zero
+// FALLBACK_FAIL:  will stop program execution and log a fatal error
+enum AudioTimestretchFallbackMode : int32_t {
+    AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT     = -1,
+    AUDIO_TIMESTRETCH_FALLBACK_DEFAULT        = 0,
+    AUDIO_TIMESTRETCH_FALLBACK_MUTE           = 1,
+    AUDIO_TIMESTRETCH_FALLBACK_FAIL           = 2,
+};
+
+struct AudioPlaybackRate {
+    float mSpeed;
+    float mPitch;
+    enum AudioTimestretchStretchMode  mStretchMode;
+    enum AudioTimestretchFallbackMode mFallbackMode;
+};
+
+static const AudioPlaybackRate AUDIO_PLAYBACK_RATE_DEFAULT = {
+        AUDIO_TIMESTRETCH_SPEED_NORMAL,
+        AUDIO_TIMESTRETCH_PITCH_NORMAL,
+        AUDIO_TIMESTRETCH_STRETCH_DEFAULT,
+        AUDIO_TIMESTRETCH_FALLBACK_DEFAULT
+};
+
+static inline bool isAudioPlaybackRateEqual(const AudioPlaybackRate &pr1,
+        const AudioPlaybackRate &pr2) {
+    return fabs(pr1.mSpeed - pr2.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
+           fabs(pr1.mPitch - pr2.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA &&
+           pr1.mStretchMode == pr2.mStretchMode &&
+           pr1.mFallbackMode == pr2.mFallbackMode;
+}
+
+static inline bool isAudioPlaybackRateValid(const AudioPlaybackRate &playbackRate) {
+    if (playbackRate.mFallbackMode == AUDIO_TIMESTRETCH_FALLBACK_FAIL &&
+            (playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_SPEECH ||
+                    playbackRate.mStretchMode == AUDIO_TIMESTRETCH_STRETCH_DEFAULT)) {
+        // test sonic-specific constraints
+        return playbackRate.mSpeed >= TIMESTRETCH_SONIC_SPEED_MIN &&
+                playbackRate.mSpeed <= TIMESTRETCH_SONIC_SPEED_MAX &&
+                playbackRate.mPitch >= AUDIO_TIMESTRETCH_PITCH_MIN &&
+                playbackRate.mPitch <= AUDIO_TIMESTRETCH_PITCH_MAX;
+    } else {
+        return playbackRate.mSpeed >= AUDIO_TIMESTRETCH_SPEED_MIN &&
+                playbackRate.mSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX &&
+                playbackRate.mPitch >= AUDIO_TIMESTRETCH_PITCH_MIN &&
+                playbackRate.mPitch <= AUDIO_TIMESTRETCH_PITCH_MAX;
+    }
+}
+
+// TODO: Consider putting these inlines into a class scope
+
+// Returns the source frames needed to resample to destination frames.  This is not a precise
+// value and depends on the resampler (and possibly how it handles rounding internally).
+// Nevertheless, this should be an upper bound on the requirements of the resampler.
+// If srcSampleRate and dstSampleRate are equal, then it returns destination frames, which
+// may not be true if the resampler is asynchronous.
+static inline size_t sourceFramesNeeded(
+        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate) {
+    // +1 for rounding - always do this even if matched ratio (resampler may use phases not ratio)
+    // +1 for additional sample needed for interpolation
+    return srcSampleRate == dstSampleRate ? dstFramesRequired :
+            size_t((uint64_t)dstFramesRequired * srcSampleRate / dstSampleRate + 1 + 1);
+}
+
+// An upper bound for the number of destination frames possible from srcFrames
+// after sample rate conversion.  This may be used for buffer sizing.
+static inline size_t destinationFramesPossible(size_t srcFrames, uint32_t srcSampleRate,
+        uint32_t dstSampleRate) {
+    if (srcSampleRate == dstSampleRate) {
+        return srcFrames;
+    }
+    uint64_t dstFrames = (uint64_t)srcFrames * dstSampleRate / srcSampleRate;
+    return dstFrames > 2 ? dstFrames - 2 : 0;
+}
+
+static inline size_t sourceFramesNeededWithTimestretch(
+        uint32_t srcSampleRate, size_t dstFramesRequired, uint32_t dstSampleRate,
+        float speed) {
+    // required is the number of input frames the resampler needs
+    size_t required = sourceFramesNeeded(srcSampleRate, dstFramesRequired, dstSampleRate);
+    // to deliver this, the time stretcher requires:
+    return required * (double)speed + 1 + 1; // accounting for rounding dependencies
+}
+
+// Identifies sample rates that we associate with music
+// and thus eligible for better resampling and fast capture.
+// This is somewhat less than 44100 to allow for pitch correction
+// involving resampling as well as asynchronous resampling.
+#define AUDIO_PROCESSING_MUSIC_RATE 40000
+
+static inline bool isMusicRate(uint32_t sampleRate) {
+    return sampleRate >= AUDIO_PROCESSING_MUSIC_RATE;
+}
+
+} // namespace android
+
+// ---------------------------------------------------------------------------
+
 #endif // ANDROID_AUDIO_RESAMPLER_PUBLIC_H
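
The sizing helpers added above exist for buffer allocation around a resampler and time stretcher. A small worked example, assuming a 44.1 kHz source feeding a 48 kHz sink at 1.5x speed; the concrete values are illustrative only.

    // Worked example of the new sizing and validation helpers.
    static void sizeBuffersExample() {
        const uint32_t srcRate = 44100;
        const uint32_t dstRate = 48000;
        const size_t   dstFramesWanted = 960;       // 20 ms at 48 kHz
        const float    speed = 1.5f;

        // Upper bound on source frames the resampler alone needs (+2 covers
        // rounding and the extra interpolation sample).
        size_t srcNeeded = sourceFramesNeeded(srcRate, dstFramesWanted, dstRate);

        // Same question with the time stretcher in front of the resampler.
        size_t srcNeededStretched =
                sourceFramesNeededWithTimestretch(srcRate, dstFramesWanted, dstRate, speed);

        // Upper bound on destination frames producible from a fixed source buffer.
        size_t dstPossible = destinationFramesPossible(/*srcFrames*/ 1024, srcRate, dstRate);

        // Validate a playback-rate request before applying it.
        AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
        rate.mSpeed = speed;                        // 1.5f is within [SPEED_MIN, SPEED_MAX]
        bool valid = isAudioPlaybackRateValid(rate);

        (void)srcNeeded; (void)srcNeededStretched; (void)dstPossible; (void)valid;
    }
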
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 843a354..06116a5 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -19,6 +19,7 @@
 
 #include <hardware/audio_effect.h>
 #include <media/AudioPolicy.h>
+#include <media/AudioIoDescriptor.h>
 #include <media/IAudioFlingerClient.h>
 #include <media/IAudioPolicyServiceClient.h>
 #include <system/audio.h>
@@ -29,6 +30,7 @@
 namespace android {
 
 typedef void (*audio_error_callback)(status_t err);
+typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
 
 class IAudioFlinger;
 class IAudioPolicyService;
@@ -89,6 +91,7 @@
     static String8  getParameters(const String8& keys);
 
     static void setErrorCallback(audio_error_callback cb);
+    static void setDynPolicyCallback(dynamic_policy_callback cb);
 
     // helper function to obtain AudioFlinger service handle
     static const sp<IAudioFlinger> get_audio_flinger();
@@ -98,10 +101,13 @@
 
     // Returned samplingRate and frameCount output values are guaranteed
     // to be non-zero if status == NO_ERROR
+    // FIXME This API assumes a route, and so should be deprecated.
     static status_t getOutputSamplingRate(uint32_t* samplingRate,
             audio_stream_type_t stream);
+    // FIXME This API assumes a route, and so should be deprecated.
     static status_t getOutputFrameCount(size_t* frameCount,
             audio_stream_type_t stream);
+    // FIXME This API assumes a route, and so should be deprecated.
     static status_t getOutputLatency(uint32_t* latency,
             audio_stream_type_t stream);
     static status_t getSamplingRate(audio_io_handle_t output,
@@ -110,19 +116,20 @@
     // audio_stream->get_buffer_size()/audio_stream_out_frame_size()
     static status_t getFrameCount(audio_io_handle_t output,
                                   size_t* frameCount);
-    // returns the audio output stream latency in ms. Corresponds to
+    // returns the audio output latency in ms. Corresponds to
     // audio_stream_out->get_latency()
     static status_t getLatency(audio_io_handle_t output,
                                uint32_t* latency);
 
     // return status NO_ERROR implies *buffSize > 0
+    // FIXME This API assumes a route, and so should be deprecated.
     static status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
         audio_channel_mask_t channelMask, size_t* buffSize);
 
     static status_t setVoiceVolume(float volume);
 
     // return the number of audio frames written by AudioFlinger to audio HAL and
-    // audio dsp to DAC since the specified output I/O handle has exited standby.
+    // audio dsp to DAC since the specified output has exited standby.
     // returned status (from utils/Errors.h) can be:
     // - NO_ERROR: successful operation, halFrames and dspFrames point to valid data
     // - INVALID_OPERATION: Not supported on current hardware platform
@@ -151,32 +158,8 @@
     // or no HW sync source is used.
     static audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
 
-    // types of io configuration change events received with ioConfigChanged()
-    enum io_config_event {
-        OUTPUT_OPENED,
-        OUTPUT_CLOSED,
-        OUTPUT_CONFIG_CHANGED,
-        INPUT_OPENED,
-        INPUT_CLOSED,
-        INPUT_CONFIG_CHANGED,
-        STREAM_CONFIG_CHANGED,
-        NUM_CONFIG_EVENTS
-    };
-
-    // audio output descriptor used to cache output configurations in client process to avoid
-    // frequent calls through IAudioFlinger
-    class OutputDescriptor {
-    public:
-        OutputDescriptor()
-        : samplingRate(0), format(AUDIO_FORMAT_DEFAULT), channelMask(0), frameCount(0), latency(0)
-            {}
-
-        uint32_t samplingRate;
-        audio_format_t format;
-        audio_channel_mask_t channelMask;
-        size_t frameCount;
-        uint32_t latency;
-    };
+    // Indicates that Java services are ready (scheduling, power management, ...)
+    static status_t systemReady();
 
     // Events used to synchronize actions between audio sessions.
     // For instance SYNC_EVENT_PRESENTATION_COMPLETE can be used to delay recording start until
@@ -201,7 +184,7 @@
     // IAudioPolicyService interface (see AudioPolicyInterface for method descriptions)
     //
     static status_t setDeviceConnectionState(audio_devices_t device, audio_policy_dev_state_t state,
-                                                const char *device_address);
+                                             const char *device_address, const char *device_name);
     static audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                 const char *device_address);
     static status_t setPhoneState(audio_mode_t state);
@@ -217,14 +200,16 @@
                                         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL);
     static status_t getOutputForAttr(const audio_attributes_t *attr,
-                                        audio_io_handle_t *output,
-                                        audio_session_t session,
-                                        audio_stream_type_t *stream,
-                                        uint32_t samplingRate = 0,
-                                        audio_format_t format = AUDIO_FORMAT_DEFAULT,
-                                        audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
-                                        audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                                        const audio_offload_info_t *offloadInfo = NULL);
+                                     audio_io_handle_t *output,
+                                     audio_session_t session,
+                                     audio_stream_type_t *stream,
+                                     uid_t uid,
+                                     uint32_t samplingRate = 0,
+                                     audio_format_t format = AUDIO_FORMAT_DEFAULT,
+                                     audio_channel_mask_t channelMask = AUDIO_CHANNEL_OUT_STEREO,
+                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+                                     const audio_offload_info_t *offloadInfo = NULL);
     static status_t startOutput(audio_io_handle_t output,
                                 audio_stream_type_t stream,
                                 audio_session_t session);
@@ -240,10 +225,12 @@
     static status_t getInputForAttr(const audio_attributes_t *attr,
                                     audio_io_handle_t *input,
                                     audio_session_t session,
+                                    uid_t uid,
                                     uint32_t samplingRate,
                                     audio_format_t format,
                                     audio_channel_mask_t channelMask,
-                                    audio_input_flags_t flags);
+                                    audio_input_flags_t flags,
+                                    audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
 
     static status_t startInput(audio_io_handle_t input,
                                audio_session_t session);
@@ -327,6 +314,12 @@
 
     static status_t registerPolicyMixes(Vector<AudioMix> mixes, bool registration);
 
+    static status_t startAudioSource(const struct audio_port_config *source,
+                                      const audio_attributes_t *attributes,
+                                      audio_io_handle_t *handle);
+    static status_t stopAudioSource(audio_io_handle_t handle);
+
+
     // ----------------------------------------------------------------------------
 
     class AudioPortCallback : public RefBase
@@ -342,16 +335,42 @@
 
     };
 
-    static void setAudioPortCallback(sp<AudioPortCallback> callBack);
+    static status_t addAudioPortCallback(const sp<AudioPortCallback>& callback);
+    static status_t removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+
+    class AudioDeviceCallback : public RefBase
+    {
+    public:
+
+                AudioDeviceCallback() {}
+        virtual ~AudioDeviceCallback() {}
+
+        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
+                                         audio_port_handle_t deviceId) = 0;
+    };
+
+    static status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                           audio_io_handle_t audioIo);
+    static status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                              audio_io_handle_t audioIo);
+
+    static audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
 
 private:
 
     class AudioFlingerClient: public IBinder::DeathRecipient, public BnAudioFlingerClient
     {
     public:
-        AudioFlingerClient() {
+        AudioFlingerClient() :
+            mInBuffSize(0), mInSamplingRate(0),
+            mInFormat(AUDIO_FORMAT_DEFAULT), mInChannelMask(AUDIO_CHANNEL_NONE) {
         }
 
+        void clearIoCache();
+        status_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
+                                    audio_channel_mask_t channelMask, size_t* buffSize);
+        sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
 
@@ -359,7 +378,27 @@
 
         // indicate a change in the configuration of an output or input: keeps the cached
         // values for output/input parameters up-to-date in client process
-        virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+        virtual void ioConfigChanged(audio_io_config_event event,
+                                     const sp<AudioIoDescriptor>& ioDesc);
+
+
+        status_t addAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                               audio_io_handle_t audioIo);
+        status_t removeAudioDeviceCallback(const sp<AudioDeviceCallback>& callback,
+                                           audio_io_handle_t audioIo);
+
+        audio_port_handle_t getDeviceIdForIo(audio_io_handle_t audioIo);
+
+    private:
+        Mutex                               mLock;
+        DefaultKeyedVector<audio_io_handle_t, sp<AudioIoDescriptor> >   mIoDescriptors;
+        DefaultKeyedVector<audio_io_handle_t, Vector < sp<AudioDeviceCallback> > >
+                                                                        mAudioDeviceCallbacks;
+        // cached values for recording getInputBufferSize() queries
+        size_t                              mInBuffSize;    // zero indicates cache is invalid
+        uint32_t                            mInSamplingRate;
+        audio_format_t                      mInFormat;
+        audio_channel_mask_t                mInChannelMask;
     };
 
     class AudioPolicyServiceClient: public IBinder::DeathRecipient,
@@ -369,26 +408,35 @@
         AudioPolicyServiceClient() {
         }
 
+        int addAudioPortCallback(const sp<AudioPortCallback>& callback);
+        int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
+
         // DeathRecipient
         virtual void binderDied(const wp<IBinder>& who);
 
         // IAudioPolicyServiceClient
         virtual void onAudioPortListUpdate();
         virtual void onAudioPatchListUpdate();
+        virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+
+    private:
+        Mutex                               mLock;
+        Vector <sp <AudioPortCallback> >    mAudioPortCallbacks;
     };
 
+    static const sp<AudioFlingerClient> getAudioFlingerClient();
+    static sp<AudioIoDescriptor> getIoDescriptor(audio_io_handle_t ioHandle);
+
     static sp<AudioFlingerClient> gAudioFlingerClient;
     static sp<AudioPolicyServiceClient> gAudioPolicyServiceClient;
     friend class AudioFlingerClient;
     friend class AudioPolicyServiceClient;
 
     static Mutex gLock;      // protects gAudioFlinger and gAudioErrorCallback,
-    static Mutex gLockCache; // protects gOutputs, gPrevInSamplingRate, gPrevInFormat,
-                             // gPrevInChannelMask and gInBuffSize
     static Mutex gLockAPS;   // protects gAudioPolicyService and gAudioPolicyServiceClient
-    static Mutex gLockAPC;   // protects gAudioPortCallback
     static sp<IAudioFlinger> gAudioFlinger;
     static audio_error_callback gAudioErrorCallback;
+    static dynamic_policy_callback gDynPolicyCallback;
 
     static size_t gInBuffSize;
     // previous parameters for recording buffer size queries
@@ -397,12 +445,6 @@
     static audio_channel_mask_t gPrevInChannelMask;
 
     static sp<IAudioPolicyService> gAudioPolicyService;
-
-    // list of output descriptors containing cached parameters
-    // (sampling rate, framecount, channel count...)
-    static DefaultKeyedVector<audio_io_handle_t, OutputDescriptor *> gOutputs;
-
-    static sp<AudioPortCallback> gAudioPortCallback;
 };
 
 };  // namespace android
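
A minimal usage sketch for the AudioDeviceCallback / addAudioDeviceCallback() additions above
(not part of the patch): RoutingLogger and watchOutput() are illustrative names, and the
framework-internal headers are assumed to be on the include path.

    #define LOG_TAG "RoutingLogger"
    #include <utils/Log.h>
    #include <media/AudioSystem.h>

    namespace android {

    // Logs every device change reported for a given io handle.
    class RoutingLogger : public AudioSystem::AudioDeviceCallback {
    public:
        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                         audio_port_handle_t deviceId) {
            ALOGI("io %d is now routed to device %d", audioIo, deviceId);
        }
    };

    status_t watchOutput(audio_io_handle_t output) {
        sp<AudioSystem::AudioDeviceCallback> cb = new RoutingLogger();
        // AudioFlingerClient keeps one callback list per io handle and holds a strong
        // reference, so the callback outlives this scope; duplicates are rejected.
        return AudioSystem::addAudioDeviceCallback(cb, output);
    }

    }  // namespace android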
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index fd51b8f..e02f1b7 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -21,6 +21,7 @@
 #include <media/AudioSystem.h>
 #include <media/AudioTimestamp.h>
 #include <media/IAudioTrack.h>
+#include <media/AudioResamplerPublic.h>
 #include <utils/threads.h>
 
 namespace android {
@@ -42,28 +43,38 @@
      */
     enum event_type {
         EVENT_MORE_DATA = 0,        // Request to write more data to buffer.
+                                    // This event only occurs for TRANSFER_CALLBACK.
                                     // If this event is delivered but the callback handler
-                                    // does not want to write more data, the handler must explicitly
+                                    // does not want to write more data, the handler must
                                     // ignore the event by setting frameCount to zero.
-        EVENT_UNDERRUN = 1,         // Buffer underrun occurred.
+                                    // This might occur, for example, if the application is
+                                    // waiting for source data or is at the end of stream.
+                                    //
+                                    // For data filling, it is preferred that the callback
+                                    // does not block and instead returns a short count on
+                                    // the amount of data actually delivered
+                                    // (or 0, if no data is currently available).
+        EVENT_UNDERRUN = 1,         // Buffer underrun occurred. This will not occur for
+                                    // static tracks.
         EVENT_LOOP_END = 2,         // Sample loop end was reached; playback restarted from
-                                    // loop start if loop count was not 0.
+                                    // loop start if loop count was not 0 for a static track.
         EVENT_MARKER = 3,           // Playback head is at the specified marker position
                                     // (See setMarkerPosition()).
         EVENT_NEW_POS = 4,          // Playback head is at a new position
                                     // (See setPositionUpdatePeriod()).
-        EVENT_BUFFER_END = 5,       // Playback head is at the end of the buffer.
-                                    // Not currently used by android.media.AudioTrack.
+        EVENT_BUFFER_END = 5,       // Playback has completed for a static track.
         EVENT_NEW_IAUDIOTRACK = 6,  // IAudioTrack was re-created, either due to re-routing and
                                     // voluntary invalidation by mediaserver, or mediaserver crash.
         EVENT_STREAM_END = 7,       // Sent after all the buffers queued in AF and HW are played
-                                    // back (after stop is called)
+                                    // back (after stop is called) for an offloaded track.
+#if 0   // FIXME not yet implemented
         EVENT_NEW_TIMESTAMP = 8,    // Delivered periodically and when there's a significant change
                                     // in the mapping from frame position to presentation time.
                                     // See AudioTimestamp for the information included with event.
+#endif
     };
 
-    /* Client should declare Buffer on the stack and pass address to obtainBuffer()
+    /* Client should declare a Buffer and pass the address to obtainBuffer()
      * and releaseBuffer().  See also callback_t for EVENT_MORE_DATA.
      */
 
@@ -72,22 +83,26 @@
     public:
         // FIXME use m prefix
         size_t      frameCount;   // number of sample frames corresponding to size;
-                                  // on input it is the number of frames desired,
-                                  // on output is the number of frames actually filled
-                                  // (currently ignored, but will make the primary field in future)
+                                  // on input to obtainBuffer() it is the number of frames desired,
+                                  // on output from obtainBuffer() it is the number of available
+                                  //    [empty slots for] frames to be filled
+                                  // on input to releaseBuffer() it is currently ignored
 
         size_t      size;         // input/output in bytes == frameCount * frameSize
-                                  // on input it is unused
-                                  // on output is the number of bytes actually filled
-                                  // FIXME this is redundant with respect to frameCount,
-                                  // and TRANSFER_OBTAIN mode is broken for 8-bit data
-                                  // since we don't define the frame format
+                                  // on input to obtainBuffer() it is ignored
+                                  // on output from obtainBuffer() it is the number of available
+                                  //    [empty slots for] bytes to be filled,
+                                  //    which is frameCount * frameSize
+                                  // on input to releaseBuffer() it is the number of bytes to
+                                  //    release
+                                  // FIXME This is redundant with respect to frameCount.  Consider
+                                  //    removing size and making frameCount the primary field.
 
         union {
             void*       raw;
             short*      i16;      // signed 16-bit
             int8_t*     i8;       // unsigned 8-bit, offset by 0x80
-        };                        // input: unused, output: pointer to buffer
+        };                        // input to obtainBuffer(): unused, output: pointer to buffer
     };
 
     /* As a convenience, if a callback is supplied, a handler thread
@@ -121,6 +136,7 @@
      *  - BAD_VALUE: unsupported configuration
      * frameCount is guaranteed to be non-zero if status is NO_ERROR,
      * and is undefined otherwise.
+     * FIXME This API assumes a route, and so should be deprecated.
      */
 
     static status_t getMinFrameCount(size_t* frameCount,
@@ -132,7 +148,7 @@
     enum transfer_type {
         TRANSFER_DEFAULT,   // not specified explicitly; determine from the other parameters
         TRANSFER_CALLBACK,  // callback EVENT_MORE_DATA
-        TRANSFER_OBTAIN,    // FIXME deprecated: call obtainBuffer() and releaseBuffer()
+        TRANSFER_OBTAIN,    // call obtainBuffer() and releaseBuffer()
         TRANSFER_SYNC,      // synchronous write()
         TRANSFER_SHARED,    // shared memory
     };
@@ -145,18 +161,15 @@
     /* Creates an AudioTrack object and registers it with AudioFlinger.
      * Once created, the track needs to be started before it can be used.
      * Unspecified values are set to appropriate default values.
-     * With this constructor, the track is configured for streaming mode.
-     * Data to be rendered is supplied by write() or by the callback EVENT_MORE_DATA.
-     * Intermixing a combination of write() and non-ignored EVENT_MORE_DATA is not allowed.
      *
      * Parameters:
      *
      * streamType:         Select the type of audio stream this track is attached to
      *                     (e.g. AUDIO_STREAM_MUSIC).
      * sampleRate:         Data source sampling rate in Hz.
-     * format:             Audio format.  For mixed tracks, any PCM format supported by server is OK
-     *                     or AUDIO_FORMAT_PCM_8_BIT which is handled on client side.  For direct
-     *                     and offloaded tracks, the possible format(s) depends on the output sink.
+     * format:             Audio format. For mixed tracks, any PCM format supported by server is OK.
+     *                     For direct and offloaded tracks, the possible format(s) depends on the
+     *                     output sink.
      * channelMask:        Channel mask, such that audio_is_output_channel(channelMask) is true.
      * frameCount:         Minimum size of track PCM buffer in frames. This defines the
      *                     application's contribution to the
@@ -165,20 +178,32 @@
      *                     configuration.  Zero means to use a default value.
      * flags:              See comments on audio_output_flags_t in <system/audio.h>.
      * cbf:                Callback function. If not null, this function is called periodically
-     *                     to provide new data and inform of marker, position updates, etc.
+     *                     to provide new data in TRANSFER_CALLBACK mode
+     *                     and inform of marker, position updates, etc.
      * user:               Context for use by the callback receiver.
      * notificationFrames: The callback function is called each time notificationFrames PCM
      *                     frames have been consumed from track input buffer.
      *                     This is expressed in units of frames at the initial source sample rate.
      * sessionId:          Specific session ID, or zero to use default.
      * transferType:       How data is transferred to AudioTrack.
+     * offloadInfo:        If not NULL, provides offload parameters for
+     *                     AudioSystem::getOutputForAttr().
+     * uid:                User ID of the app which initially requested this AudioTrack
+     *                     for power management tracking, or -1 for current user ID.
+     * pid:                Process ID of the app which initially requested this AudioTrack
+     *                     for power management tracking, or -1 for current process ID.
+     * pAttributes:        If not NULL, supersedes streamType for use case selection.
+     * doNotReconnect:     If set to true, AudioTrack won't automatically recreate the IAudioTrack
+     *                     binder to AudioFlinger.
+     *                     It will return an error instead.  The application will recreate
+     *                     the track based on offloading or different channel configuration, etc.
      * threadCanCallJava:  Not present in parameter list, and so is fixed at false.
      */
 
                         AudioTrack( audio_stream_type_t streamType,
                                     uint32_t sampleRate,
                                     audio_format_t format,
-                                    audio_channel_mask_t,
+                                    audio_channel_mask_t channelMask,
                                     size_t frameCount    = 0,
                                     audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
                                     callback_t cbf       = NULL,
@@ -189,13 +214,15 @@
                                     const audio_offload_info_t *offloadInfo = NULL,
                                     int uid = -1,
                                     pid_t pid = -1,
-                                    const audio_attributes_t* pAttributes = NULL);
+                                    const audio_attributes_t* pAttributes = NULL,
+                                    bool doNotReconnect = false);
 
     /* Creates an audio track and registers it with AudioFlinger.
      * With this constructor, the track is configured for static buffer mode.
-     * The format must not be 8-bit linear PCM.
      * Data to be rendered is passed in a shared memory buffer
-     * identified by the argument sharedBuffer, which must be non-0.
+     * identified by the argument sharedBuffer, which should be non-0.
+     * If sharedBuffer is zero, this constructor is equivalent to the previous constructor
+     * but without the ability to specify a non-zero value for the frameCount parameter.
      * The memory should be initialized to the desired data before calling start().
      * The write() method is not supported in this case.
      * It is recommended to pass a callback function to be notified of playback end by an
@@ -216,7 +243,8 @@
                                     const audio_offload_info_t *offloadInfo = NULL,
                                     int uid = -1,
                                     pid_t pid = -1,
-                                    const audio_attributes_t* pAttributes = NULL);
+                                    const audio_attributes_t* pAttributes = NULL,
+                                    bool doNotReconnect = false);
 
     /* Terminates the AudioTrack and unregisters it from AudioFlinger.
      * Also destroys all resources associated with the AudioTrack.
@@ -227,6 +255,7 @@
 
     /* Initialize an AudioTrack that was created using the AudioTrack() constructor.
      * Don't call set() more than once, or after the AudioTrack() constructors that take parameters.
+     * set() is not multi-thread safe.
      * Returned status (from utils/Errors.h) can be:
      *  - NO_ERROR: successful initialization
      *  - INVALID_OPERATION: AudioTrack is already initialized
@@ -259,7 +288,8 @@
                             const audio_offload_info_t *offloadInfo = NULL,
                             int uid = -1,
                             pid_t pid = -1,
-                            const audio_attributes_t* pAttributes = NULL);
+                            const audio_attributes_t* pAttributes = NULL,
+                            bool doNotReconnect = false);
 
     /* Result of constructing the AudioTrack. This must be checked for successful initialization
      * before using any AudioTrack API (except for set()), because using
@@ -347,6 +377,26 @@
     /* Return current source sample rate in Hz */
             uint32_t    getSampleRate() const;
 
+    /* Return the original source sample rate in Hz. This corresponds to the sample rate
+     * that would apply if the playback rate had normal speed and pitch.
+     */
+            uint32_t    getOriginalSampleRate() const;
+
+    /* Set source playback rate for timestretch
+     * 1.0 is normal speed: < 1.0 is slower, > 1.0 is faster
+     * 1.0 is normal pitch: < 1.0 is lower pitch, > 1.0 is higher pitch
+     *
+     * AUDIO_TIMESTRETCH_SPEED_MIN <= speed <= AUDIO_TIMESTRETCH_SPEED_MAX
+     * AUDIO_TIMESTRETCH_PITCH_MIN <= pitch <= AUDIO_TIMESTRETCH_PITCH_MAX
+     *
+     * Speed increases the playback rate of media, but does not alter pitch.
+     * Pitch increases the "tonal frequency" of media, but does not affect the playback rate.
+     */
+            status_t    setPlaybackRate(const AudioPlaybackRate &playbackRate);
+
+    /* Return current playback rate */
+            const AudioPlaybackRate& getPlaybackRate() const;
+
     /* Enables looping and sets the start and end points of looping.
      * Only supported for static buffer mode.
      *
@@ -461,7 +511,38 @@
      *  handle on audio hardware output, or AUDIO_IO_HANDLE_NONE if the
      *  track needed to be re-created but that failed
      */
+private:
             audio_io_handle_t    getOutput() const;
+public:
+
+    /* Selects the audio device to use for output of this AudioTrack. A value of
+     * AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+     *
+     * Parameters:
+     *  The device ID of the selected device (as returned by the AudioDevicesManager API).
+     *
+     * Returned value:
+     *  - NO_ERROR: successful operation
+     *    TODO: what else can happen here?
+     */
+            status_t    setOutputDevice(audio_port_handle_t deviceId);
+
+    /* Returns the ID of the audio device selected for this AudioTrack.
+     * A value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+     *
+     * Parameters:
+     *  none.
+     */
+     audio_port_handle_t getOutputDevice();
+
+     /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
+      * attached.
+      * A value of AUDIO_PORT_HANDLE_NONE indicates the audio track is not attached to any output.
+      *
+      * Parameters:
+      *  none.
+      */
+     audio_port_handle_t getRoutedDeviceId();
 
     /* Returns the unique session ID associated with this track.
      *
@@ -487,10 +568,18 @@
      */
             status_t    attachAuxEffect(int effectId);
 
-    /* Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
+    /* Public API for TRANSFER_OBTAIN mode.
+     * Obtains a buffer of up to "audioBuffer->frameCount" empty slots for frames.
      * After filling these slots with data, the caller should release them with releaseBuffer().
      * If the track buffer is not full, obtainBuffer() returns as many contiguous
      * [empty slots for] frames as are available immediately.
+     *
+     * If nonContig is non-NULL, it is an output parameter that will be set to the number of
+     * additional non-contiguous frames that are predicted to be available immediately,
+     * if the client were to release the first frames and then call obtainBuffer() again.
+     * This value is only a prediction, and needs to be confirmed.
+     * It will be set to zero for an error return.
+     *
      * If the track buffer is full and track is stopped, obtainBuffer() returns WOULD_BLOCK
      * regardless of the value of waitCount.
      * If the track buffer is full and track is not stopped, obtainBuffer() blocks with a
@@ -499,10 +588,6 @@
      * is exhausted, at which point obtainBuffer() will either block
      * or return WOULD_BLOCK depending on the value of the "waitCount"
      * parameter.
-     * Each sample is 16-bit signed PCM.
-     *
-     * obtainBuffer() and releaseBuffer() are deprecated for direct use by applications,
-     * which should use write() or callback EVENT_MORE_DATA instead.
      *
      * Interpretation of waitCount:
      *  +n  limits wait time to n * WAIT_PERIOD_MS,
@@ -511,24 +596,27 @@
      *
      * Buffer fields
      * On entry:
-     *  frameCount  number of frames requested
+     *  frameCount  number of [empty slots for] frames requested
+     *  size        ignored
+     *  raw         ignored
      * After error return:
      *  frameCount  0
      *  size        0
      *  raw         undefined
      * After successful return:
-     *  frameCount  actual number of frames available, <= number requested
+     *  frameCount  actual number of [empty slots for] frames available, <= number requested
      *  size        actual number of bytes available
      *  raw         pointer to the buffer
      */
-
-    /* FIXME Deprecated public API for TRANSFER_OBTAIN mode */
-            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
-                                __attribute__((__deprecated__));
+            status_t    obtainBuffer(Buffer* audioBuffer, int32_t waitCount,
+                                size_t *nonContig = NULL);
 
 private:
     /* If nonContig is non-NULL, it is an output parameter that will be set to the number of
-     * additional non-contiguous frames that are available immediately.
+     * additional non-contiguous frames that are predicted to be available immediately,
+     * if the client were to release the first frames and then call obtainBuffer() again.
+     * This value is only a prediction, and needs to be confirmed.
+     * It will be set to zero for an error return.
      * FIXME We could pass an array of Buffers instead of only one Buffer to obtainBuffer(),
      * in case the requested amount of frames is in two or more non-contiguous regions.
      * FIXME requested and elapsed are both relative times.  Consider changing to absolute time.
@@ -537,9 +625,15 @@
                                      struct timespec *elapsed = NULL, size_t *nonContig = NULL);
 public:
 
-    /* Release a filled buffer of "audioBuffer->frameCount" frames for AudioFlinger to process. */
-    // FIXME make private when obtainBuffer() for TRANSFER_OBTAIN is removed
-            void        releaseBuffer(Buffer* audioBuffer);
+    /* Public API for TRANSFER_OBTAIN mode.
+     * Release a filled buffer of frames for AudioFlinger to process.
+     *
+     * Buffer fields:
+     *  frameCount  currently ignored, but recommended to be set to the number of frames filled
+     *  size        actual number of bytes filled, must be multiple of frameSize
+     *  raw         ignored
+     */
+            void        releaseBuffer(const Buffer* audioBuffer);
 
     /* As a convenience we provide a write() interface to the audio buffer.
      * Input parameter 'size' is in byte units.
@@ -550,8 +644,14 @@
      *      BAD_VALUE           size is invalid
      *      WOULD_BLOCK         when obtainBuffer() returns same, or
      *                          AudioTrack was stopped during the write
+     *      DEAD_OBJECT         when AudioFlinger dies or the output device changes and
+     *                          the track cannot be automatically restored.
+     *                          The application needs to recreate the AudioTrack
+     *                          because the audio device changed or AudioFlinger died.
+     *                          This typically occurs for direct or offload tracks
+     *                          or if mDoNotReconnect is true.
      *      or any other error code returned by IAudioTrack::start() or restoreTrack_l().
-     * Default behavior is to only return until all data has been transferred. Set 'blocking' to
+     * Default behavior is to only return when all data has been transferred. Set 'blocking' to
      * false for the method to return immediately without waiting to try multiple times to write
      * the full content of the buffer.
      */
@@ -559,6 +659,7 @@
 
     /*
      * Dumps the state of an audio track.
+     * Not a general-purpose API; intended only for use by media player service to dump its tracks.
      */
             status_t    dump(int fd, const Vector<String16>& args) const;
 
@@ -589,19 +690,45 @@
      *                     overall hardware latency to physical output. In WOULD_BLOCK cases,
      *                     one might poll again, or use getPosition(), or use 0 position and
      *                     current time for the timestamp.
+     *         DEAD_OBJECT if AudioFlinger dies or the output device changes and
+     *                     the track cannot be automatically restored.
+     *                     The application needs to recreate the AudioTrack
+     *                     because the audio device changed or AudioFlinger died.
+     *                     This typically occurs for direct or offload tracks
+     *                     or if mDoNotReconnect is true.
      *         INVALID_OPERATION  if called on a FastTrack, wrong state, or some other error.
      *
      * The timestamp parameter is undefined on return, if status is not NO_ERROR.
      */
             status_t    getTimestamp(AudioTimestamp& timestamp);
 
+    /* Add an AudioDeviceCallback. The caller will be notified when the audio device to which this
+     * AudioTrack is routed is updated.
+     * Replaces any previously installed callback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the same callback is already installed.
+     *         NO_INIT or PERMISSION_DENIED if the AudioFlinger service is not reachable
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+
+    /* Remove an AudioDeviceCallback.
+     * Parameters:
+     *  callback:  The callback interface
+     * Returns NO_ERROR if successful.
+     *         INVALID_OPERATION if the callback is not installed
+     *         BAD_VALUE if the callback is NULL
+     */
+            status_t removeAudioDeviceCallback(
+                    const sp<AudioSystem::AudioDeviceCallback>& callback);
+
 protected:
     /* copying audio tracks is not allowed */
                         AudioTrack(const AudioTrack& other);
             AudioTrack& operator = (const AudioTrack& other);
 
-            void        setAttributesFromStreamType(audio_stream_type_t streamType);
-
     /* a small internal class to handle the callback */
     class AudioTrackThread : public Thread
     {
@@ -614,6 +741,7 @@
 
                 void        pause();    // suspend thread from execution at next loop boundary
                 void        resume();   // allow thread to execute, if not requested to exit
+                void        wake();     // wake to handle changed notification conditions.
 
     private:
                 void        pauseInternal(nsecs_t ns = 0LL);
@@ -628,7 +756,9 @@
         bool                mPaused;    // whether thread is requested to pause at next loop entry
         bool                mPausedInt; // whether thread internally requests pause
         nsecs_t             mPausedNs;  // if mPausedInt then associated timeout, otherwise ignored
-        bool                mIgnoreNextPausedInt;   // whether to ignore next mPausedInt request
+        bool                mIgnoreNextPausedInt;   // skip any internal pause and go immediately
+                                        // to processAudioBuffer() as state may have changed
+                                        // since the pause time was calculated.
     };
 
             // body of AudioTrackThread::threadLoop()
@@ -641,10 +771,6 @@
             static const nsecs_t NS_WHENEVER = -1, NS_INACTIVE = -2, NS_NEVER = -3;
             nsecs_t processAudioBuffer();
 
-            bool     isOffloaded() const;
-            bool     isDirect() const;
-            bool     isOffloadedOrDirect() const;
-
             // caller must hold lock on mLock for all _l methods
 
             status_t createTrack_l();
@@ -657,6 +783,10 @@
             // FIXME enum is faster than strcmp() for parameter 'from'
             status_t restoreTrack_l(const char *from);
 
+            bool     isOffloaded() const;
+            bool     isDirect() const;
+            bool     isOffloadedOrDirect() const;
+
             bool     isOffloaded_l() const
                 { return (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0; }
 
@@ -670,6 +800,9 @@
             // increment mPosition by the delta of mServer, and return new value of mPosition
             uint32_t updateAndGetPosition_l();
 
+            // check sample rate and speed is compatible with AudioTrack
+            bool     isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
+
     // Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
     sp<IAudioTrack>         mAudioTrack;
     sp<IMemory>             mCblkMemory;
@@ -680,12 +813,21 @@
 
     float                   mVolume[2];
     float                   mSendLevel;
-    mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it.
+    mutable uint32_t        mSampleRate;            // mutable because getSampleRate() can update it
+    uint32_t                mOriginalSampleRate;
+    AudioPlaybackRate       mPlaybackRate;
     size_t                  mFrameCount;            // corresponds to current IAudioTrack, value is
                                                     // reported back by AudioFlinger to the client
     size_t                  mReqFrameCount;         // frame count to request the first or next time
                                                     // a new IAudioTrack is needed, non-decreasing
 
+    // The following AudioFlinger server-side values are cached in createAudioTrack_l().
+    // These values can be used for informational purposes until the track is invalidated,
+    // whereupon restoreTrack_l() calls createTrack_l() to update the values.
+    uint32_t                mAfLatency;             // AudioFlinger latency in ms
+    size_t                  mAfFrameCount;          // AudioFlinger frame count
+    uint32_t                mAfSampleRate;          // AudioFlinger sample rate
+
     // constant after constructor or set()
     audio_format_t          mFormat;                // as requested by client, not forced to 16-bit
     audio_stream_type_t     mStreamType;            // mStreamType == AUDIO_STREAM_DEFAULT implies
@@ -698,10 +840,7 @@
     const audio_offload_info_t* mOffloadInfo;
     audio_attributes_t      mAttributes;
 
-    // mFrameSize is equal to mFrameSizeAF for non-PCM or 16-bit PCM data.  For 8-bit PCM data, it's
-    // twice as large as mFrameSize because data is expanded to 16-bit before it's stored in buffer.
-    size_t                  mFrameSize;             // app-level frame size
-    size_t                  mFrameSizeAF;           // AudioFlinger frame size
+    size_t                  mFrameSize;             // frame size in bytes
 
     status_t                mStatus;
 
@@ -732,17 +871,25 @@
     bool                    mRefreshRemaining;      // processAudioBuffer() should refresh
                                                     // mRemainingFrames and mRetryOnPartialBuffer
 
+                                                    // used for static track cbf and restoration
+    int32_t                 mLoopCount;             // last setLoop loopCount; zero means disabled
+    uint32_t                mLoopStart;             // last setLoop loopStart
+    uint32_t                mLoopEnd;               // last setLoop loopEnd
+    int32_t                 mLoopCountNotified;     // the last loopCount notified by callback.
+                                                    // mLoopCountNotified counts down, matching
+                                                    // the remaining loop count for static track
+                                                    // playback.
+
     // These are private to processAudioBuffer(), and are not protected by a lock
     uint32_t                mRemainingFrames;       // number of frames to request in obtainBuffer()
     bool                    mRetryOnPartialBuffer;  // sleep and retry after partial obtainBuffer()
     uint32_t                mObservedSequence;      // last observed value of mSequence
 
-    uint32_t                mLoopPeriod;            // in frames, zero means looping is disabled
-
     uint32_t                mMarkerPosition;        // in wrapping (overflow) frame units
     bool                    mMarkerReached;
     uint32_t                mNewPosition;           // in frames
     uint32_t                mUpdatePeriod;          // in frames, zero means no EVENT_NEW_POS
+
     uint32_t                mServer;                // in frames, last known mProxy->getPosition()
                                                     // which is count of frames consumed by server,
                                                     // reset by new IAudioTrack,
@@ -758,10 +905,17 @@
     int64_t                 mStartUs;               // the start time after flush or stop.
                                                     // only used for offloaded and direct tracks.
 
+    bool                    mPreviousTimestampValid;// true if mPreviousTimestamp is valid
+    bool                    mTimestampStartupGlitchReported; // reduce log spam
+    bool                    mRetrogradeMotionReported; // reduce log spam
+    AudioTimestamp          mPreviousTimestamp;     // used to detect retrograde motion
+
     audio_output_flags_t    mFlags;
         // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
         // mLock must be held to read or write those bits reliably.
 
+    bool                    mDoNotReconnect;
+
     int                     mSessionId;
     int                     mAuxEffectId;
 
@@ -783,6 +937,10 @@
     bool                    mInUnderrun;            // whether track is currently in underrun state
     uint32_t                mPausedPosition;
 
+    // For Device Selection API
+    //  a value of AUDIO_PORT_HANDLE_NONE indicates default (AudioPolicyManager) routing.
+    audio_port_handle_t     mSelectedDeviceId;
+
 private:
     class DeathNotifier : public IBinder::DeathRecipient {
     public:
@@ -797,6 +955,8 @@
     uint32_t                mSequence;              // incremented for each new IAudioTrack attempt
     int                     mClientUid;
     pid_t                   mClientPid;
+
+    sp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
 };
 
 class TimedAudioTrack : public AudioTrack
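
A hedged usage sketch for the timestretch and device-selection additions to AudioTrack (not part
of the patch). It assumes an already-initialized track; the AudioPlaybackRate field names
mSpeed/mPitch are assumed from AudioResamplerPublic.h, and playFaster() is an illustrative helper.

    #include <media/AudioTrack.h>

    using namespace android;

    status_t playFaster(const sp<AudioTrack>& track, audio_port_handle_t preferredDevice) {
        // Start from the current rate so the stretch/fallback modes keep their defaults.
        AudioPlaybackRate rate = track->getPlaybackRate();
        rate.mSpeed = 1.5f;   // 1.0 is normal speed
        rate.mPitch = 1.0f;   // keep the original pitch
        status_t status = track->setPlaybackRate(rate);
        if (status != NO_ERROR) {
            return status;
        }
        // AUDIO_PORT_HANDLE_NONE would restore default (AudioPolicyManager) routing.
        return track->setOutputDevice(preferredDevice);
    }

With doNotReconnect set at construction, write() and getTimestamp() report DEAD_OBJECT instead of
silently re-creating the IAudioTrack, so a caller of this helper would also be expected to rebuild
the track itself when that error is returned.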
diff --git a/include/media/EffectsFactoryApi.h b/include/media/EffectsFactoryApi.h
index b1ed7b0..64a3212 100644
--- a/include/media/EffectsFactoryApi.h
+++ b/include/media/EffectsFactoryApi.h
@@ -171,6 +171,8 @@
 ////////////////////////////////////////////////////////////////////////////////
 int EffectIsNullUuid(const effect_uuid_t *pEffectUuid);
 
+int EffectDumpEffects(int fd);
+
 #if __cplusplus
 }  // extern "C"
 #endif
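
A minimal sketch of the new EffectDumpEffects() entry point, which writes the effects factory
state to a caller-supplied file descriptor; dumpEffectsTo() and the discarded return value are
illustrative choices, not part of the patch.

    #include <fcntl.h>
    #include <unistd.h>
    #include <media/EffectsFactoryApi.h>

    void dumpEffectsTo(const char *path) {
        int fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0) {
            return;
        }
        // Writes the loaded effect libraries and descriptors to fd.
        (void)EffectDumpEffects(fd);
        close(fd);
    }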
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 31a14f0..5051aff 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -85,15 +85,19 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
+                                const String16& callingPackage,
                                 size_t *pFrameCount,
                                 track_flags_t *flags,
                                 pid_t tid,  // -1 means unused, otherwise must be valid non-0
+                                int clientUid,
                                 int *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
                                 sp<IMemory>& buffers,   // return value 0 means it follows cblk
                                 status_t *status) = 0;
 
+    // FIXME Surprisingly, sampleRate/format/frameCount/latency don't work for input handles
+
     /* query the audio hardware state. This state never changes,
      * and therefore can be cached.
      */
@@ -142,6 +146,7 @@
     virtual void registerClient(const sp<IAudioFlingerClient>& client) = 0;
 
     // retrieve the audio recording buffer size
+    // FIXME This API assumes a route, and so should be deprecated.
     virtual size_t getInputBufferSize(uint32_t sampleRate, audio_format_t format,
             audio_channel_mask_t channelMask) const = 0;
 
@@ -195,6 +200,7 @@
                                     // AudioFlinger doesn't take over handle reference from client
                                     audio_io_handle_t output,
                                     int sessionId,
+                                    const String16& callingPackage,
                                     status_t *status,
                                     int *id,
                                     int *enabled) = 0;
@@ -237,6 +243,9 @@
 
     /* Get the HW synchronization source used for an audio session */
     virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) = 0;
+
+    /* Indicate JAVA services are ready (scheduling, power management ...) */
+    virtual status_t systemReady() = 0;
 };
 
 
diff --git a/include/media/IAudioFlingerClient.h b/include/media/IAudioFlingerClient.h
index 75a9971..0080bc9 100644
--- a/include/media/IAudioFlingerClient.h
+++ b/include/media/IAudioFlingerClient.h
@@ -22,6 +22,7 @@
 #include <binder/IInterface.h>
 #include <utils/KeyedVector.h>
 #include <system/audio.h>
+#include <media/AudioIoDescriptor.h>
 
 namespace android {
 
@@ -33,7 +34,8 @@
     DECLARE_META_INTERFACE(AudioFlingerClient);
 
     // Notifies a change of audio input/output configuration.
-    virtual void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2) = 0;
+    virtual void ioConfigChanged(audio_io_config_event event,
+                                 const sp<AudioIoDescriptor>& ioDesc) = 0;
 
 };
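
A sketch of a client adapting to the new ioConfigChanged() shape, where the former opaque param2
is replaced by an AudioIoDescriptor; the mIoHandle field name is assumed from AudioIoDescriptor.h
and IoConfigObserver is an illustrative name.

    #define LOG_TAG "IoConfigObserver"
    #include <utils/Log.h>
    #include <media/AudioIoDescriptor.h>
    #include <media/IAudioFlingerClient.h>

    namespace android {

    class IoConfigObserver : public BnAudioFlingerClient {
    public:
        virtual void ioConfigChanged(audio_io_config_event event,
                                     const sp<AudioIoDescriptor>& ioDesc) {
            if (ioDesc == 0) {
                return;
            }
            // The descriptor carries what used to arrive through param2
            // (sampling rate, format, channel mask, frame count, latency).
            ALOGV("event %d on io %d", event, ioDesc->mIoHandle);
        }
    };

    }  // namespace android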
 
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index c98c475..6b93f6f 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -44,7 +44,8 @@
     //
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
-                                              const char *device_address) = 0;
+                                              const char *device_address,
+                                              const char *device_name) = 0;
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                   const char *device_address) = 0;
     virtual status_t setPhoneState(audio_mode_t state) = 0;
@@ -61,10 +62,12 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate = 0,
                                         audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                         audio_channel_mask_t channelMask = 0,
                                         audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                        audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                         const audio_offload_info_t *offloadInfo = NULL) = 0;
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
@@ -76,12 +79,14 @@
                                audio_stream_type_t stream,
                                audio_session_t session) = 0;
     virtual status_t  getInputForAttr(const audio_attributes_t *attr,
-                                      audio_io_handle_t *input,
-                                      audio_session_t session,
-                                      uint32_t samplingRate,
-                                      audio_format_t format,
-                                      audio_channel_mask_t channelMask,
-                                      audio_input_flags_t flags) = 0;
+                              audio_io_handle_t *input,
+                              audio_session_t session,
+                              uid_t uid,
+                              uint32_t samplingRate,
+                              audio_format_t format,
+                              audio_channel_mask_t channelMask,
+                              audio_input_flags_t flags,
+                              audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE) = 0;
     virtual status_t startInput(audio_io_handle_t input,
                                 audio_session_t session) = 0;
     virtual status_t stopInput(audio_io_handle_t input,
@@ -144,6 +149,8 @@
 
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client) = 0;
 
+    virtual void setAudioPortCallbacksEnabled(bool enabled) = 0;
+
     virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                            audio_io_handle_t *ioHandle,
                                            audio_devices_t *device) = 0;
@@ -153,6 +160,11 @@
     virtual audio_mode_t getPhoneState() = 0;
 
     virtual status_t registerPolicyMixes(Vector<AudioMix> mixes, bool registration) = 0;
+
+    virtual status_t startAudioSource(const struct audio_port_config *source,
+                                      const audio_attributes_t *attributes,
+                                      audio_io_handle_t *handle) = 0;
+    virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
 };
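
A hedged sketch of the new startAudioSource()/stopAudioSource() pair, driven through the matching
AudioSystem wrappers added earlier in this patch. The device port configuration (for example an FM
tuner port) is taken as given, and startFmPlayback() is an illustrative name.

    #include <string.h>
    #include <media/AudioSystem.h>
    #include <system/audio.h>

    using namespace android;

    status_t startFmPlayback(const struct audio_port_config& tunerPortConfig,
                             audio_io_handle_t *outHandle) {
        audio_attributes_t attr;
        memset(&attr, 0, sizeof(attr));
        attr.usage = AUDIO_USAGE_MEDIA;
        attr.content_type = AUDIO_CONTENT_TYPE_MUSIC;
        // On success, *outHandle identifies the source for a later stopAudioSource().
        return AudioSystem::startAudioSource(&tunerPortConfig, &attr, outHandle);
    }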
 
 
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
index 59df046..a7f2cc3 100644
--- a/include/media/IAudioPolicyServiceClient.h
+++ b/include/media/IAudioPolicyServiceClient.h
@@ -35,6 +35,8 @@
     virtual void onAudioPortListUpdate() = 0;
     // Notifies a change of audio patch configuration.
     virtual void onAudioPatchListUpdate() = 0;
+    // Notifies a change in the mixing state of a specific mix in a dynamic audio policy
+    virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
 };
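
A sketch of a policy-service client receiving the new dynamic-mix notification; the meaning of the
state values is left to the dynamic policy API, and PolicyMixObserver is an illustrative name.

    #define LOG_TAG "PolicyMixObserver"
    #include <utils/Log.h>
    #include <utils/String8.h>
    #include <media/IAudioPolicyServiceClient.h>

    namespace android {

    class PolicyMixObserver : public BnAudioPolicyServiceClient {
    public:
        virtual void onAudioPortListUpdate() {}
        virtual void onAudioPatchListUpdate() {}
        virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) {
            // regId identifies the registered AudioMix; state reports its mixing activity.
            ALOGV("mix %s changed to state %d", regId.string(), state);
        }
    };

    }  // namespace android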
 
 
diff --git a/include/media/ICrypto.h b/include/media/ICrypto.h
index 07742ca..ea316de 100644
--- a/include/media/ICrypto.h
+++ b/include/media/ICrypto.h
@@ -25,6 +25,7 @@
 namespace android {
 
 struct AString;
+class IMemory;
 
 struct ICrypto : public IInterface {
     DECLARE_META_INTERFACE(Crypto);
@@ -43,12 +44,14 @@
 
     virtual void notifyResolution(uint32_t width, uint32_t height) = 0;
 
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
+
     virtual ssize_t decrypt(
             bool secure,
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
-            const void *srcPtr,
+            const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg) = 0;
@@ -61,6 +64,9 @@
     virtual status_t onTransact(
             uint32_t code, const Parcel &data, Parcel *reply,
             uint32_t flags = 0);
+private:
+    void readVector(const Parcel &data, Vector<uint8_t> &vector) const;
+    void writeVector(Parcel *reply, Vector<uint8_t> const &vector) const;
 };
 
 }  // namespace android
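
With this change the decrypt() source travels as a shared IMemory plus an offset instead of a raw
pointer. A hedged sketch of how a caller might stage the input follows; MemoryDealer, the single
fully-encrypted subsample and the fixed AES-CTR mode are illustrative choices.

    #include <string.h>
    #include <binder/MemoryDealer.h>
    #include <media/ICrypto.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    ssize_t decryptOneSubsample(const sp<ICrypto>& crypto,
                                const uint8_t key[16], const uint8_t iv[16],
                                const void *src, size_t size, void *dst) {
        // Stage the encrypted input in shareable memory; the proxy sends the IMemory
        // handle and the offset across binder instead of a raw pointer.
        sp<MemoryDealer> dealer = new MemoryDealer(size, "decrypt-src");
        sp<IMemory> source = dealer->allocate(size);
        if (source == NULL) {
            return NO_MEMORY;
        }
        memcpy(source->pointer(), src, size);

        CryptoPlugin::SubSample sub;
        sub.mNumBytesOfClearData = 0;
        sub.mNumBytesOfEncryptedData = size;

        AString errorDetail;
        return crypto->decrypt(false /* secure */, key, iv, CryptoPlugin::kMode_AES_CTR,
                               source, 0 /* offset */, &sub, 1 /* numSubSamples */,
                               dst, &errorDetail);
    }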
diff --git a/include/media/IDataSource.h b/include/media/IDataSource.h
new file mode 100644
index 0000000..07e46f7
--- /dev/null
+++ b/include/media/IDataSource.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IDATASOURCE_H
+#define ANDROID_IDATASOURCE_H
+
+#include <binder/IInterface.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+class IMemory;
+
+// A binder interface for implementing a stagefright DataSource remotely.
+class IDataSource : public IInterface {
+public:
+    DECLARE_META_INTERFACE(DataSource);
+
+    // Get the memory that readAt writes into.
+    virtual sp<IMemory> getIMemory() = 0;
+    // Read up to |size| bytes into the memory returned by getIMemory(). Returns
+    // the number of bytes read, or -1 on error. |size| must not be larger than
+    // the buffer.
+    virtual ssize_t readAt(off64_t offset, size_t size) = 0;
+    // Get the size, or -1 if the size is unknown.
+    virtual status_t getSize(off64_t* size) = 0;
+    // This should be called before deleting |this|. The other methods may
+    // return errors if they're called after calling close().
+    virtual void close() = 0;
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(IDataSource);
+};
+
+// ----------------------------------------------------------------------------
+
+class BnDataSource : public BnInterface<IDataSource> {
+public:
+    virtual status_t onTransact(uint32_t code,
+                                const Parcel& data,
+                                Parcel* reply,
+                                uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IDATASOURCE_H
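
A hedged sketch of a concrete BnDataSource illustrating the intended contract of the new
interface: readAt() fills the fixed shared buffer returned by getIMemory(). FileDataSource, its
buffer size and the use of MemoryDealer are illustrative choices, not part of the patch.

    #include <fcntl.h>
    #include <unistd.h>
    #include <binder/MemoryDealer.h>
    #include <media/IDataSource.h>

    namespace android {

    class FileDataSource : public BnDataSource {
    public:
        FileDataSource(int fd, off64_t length)
            : mFd(fd), mLength(length),
              mDealer(new MemoryDealer(kBufferSize, "FileDataSource")),
              mMemory(mDealer->allocate(kBufferSize)) {}

        virtual sp<IMemory> getIMemory() { return mMemory; }

        virtual ssize_t readAt(off64_t offset, size_t size) {
            if (mMemory == NULL || size > kBufferSize) {
                return -1;
            }
            // Fill the shared buffer; the caller reads the result back out of it.
            return pread64(mFd, mMemory->pointer(), size, offset);
        }

        virtual status_t getSize(off64_t* size) { *size = mLength; return OK; }

        virtual void close() { ::close(mFd); mFd = -1; }

    private:
        static const size_t kBufferSize = 64 * 1024;
        int mFd;
        off64_t mLength;
        sp<MemoryDealer> mDealer;
        sp<IMemory> mMemory;
    };

    }  // namespace android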
diff --git a/include/media/IDrm.h b/include/media/IDrm.h
index affcbd7..9449beb6 100644
--- a/include/media/IDrm.h
+++ b/include/media/IDrm.h
@@ -47,7 +47,8 @@
                       Vector<uint8_t> const &initData,
                       String8 const &mimeType, DrmPlugin::KeyType keyType,
                       KeyedVector<String8, String8> const &optionalParameters,
-                      Vector<uint8_t> &request, String8 &defaultUrl) = 0;
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType) = 0;
 
     virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                         Vector<uint8_t> const &response,
diff --git a/include/media/IMediaCodecList.h b/include/media/IMediaCodecList.h
index e93ea8b..12b52d7 100644
--- a/include/media/IMediaCodecList.h
+++ b/include/media/IMediaCodecList.h
@@ -21,6 +21,8 @@
 #include <binder/IInterface.h>
 #include <binder/Parcel.h>
 
+#include <media/stagefright/foundation/AMessage.h>
+
 namespace android {
 
 struct MediaCodecInfo;
@@ -33,6 +35,8 @@
     virtual size_t countCodecs() const = 0;
     virtual sp<MediaCodecInfo> getCodecInfo(size_t index) const = 0;
 
+    virtual const sp<AMessage> getGlobalSettings() const = 0;
+
     virtual ssize_t findCodecByType(
             const char *type, bool encoder, size_t startIndex = 0) const = 0;
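
The new getGlobalSettings() exposes codec-list-wide settings as an AMessage. A small reading
sketch follows; MediaCodecList::getInstance() and AMessage::debugString() are assumed from the
stagefright headers, and dumpGlobalCodecSettings() is an illustrative name.

    #define LOG_TAG "CodecSettingsDump"
    #include <utils/Log.h>
    #include <media/IMediaCodecList.h>
    #include <media/stagefright/MediaCodecList.h>

    using namespace android;

    void dumpGlobalCodecSettings() {
        sp<IMediaCodecList> list = MediaCodecList::getInstance();
        if (list == NULL) {
            return;
        }
        const sp<AMessage> settings = list->getGlobalSettings();
        ALOGI("codec global settings: %s",
              settings != NULL ? settings->debugString().c_str() : "<none>");
    }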
 
diff --git a/include/media/IMediaMetadataRetriever.h b/include/media/IMediaMetadataRetriever.h
index 2529800..c90f254 100644
--- a/include/media/IMediaMetadataRetriever.h
+++ b/include/media/IMediaMetadataRetriever.h
@@ -26,6 +26,7 @@
 
 namespace android {
 
+class IDataSource;
 struct IMediaHTTPService;
 
 class IMediaMetadataRetriever: public IInterface
@@ -40,6 +41,7 @@
             const KeyedVector<String8, String8> *headers = NULL) = 0;
 
     virtual status_t        setDataSource(int fd, int64_t offset, int64_t length) = 0;
+    virtual status_t        setDataSource(const sp<IDataSource>& dataSource) = 0;
     virtual sp<IMemory>     getFrameAtTime(int64_t timeUs, int option) = 0;
     virtual sp<IMemory>     extractAlbumArt() = 0;
     virtual const char*     extractMetadata(int keyCode) = 0;
diff --git a/include/media/IMediaPlayer.h b/include/media/IMediaPlayer.h
index db62cd5..0fd8933 100644
--- a/include/media/IMediaPlayer.h
+++ b/include/media/IMediaPlayer.h
@@ -31,9 +31,12 @@
 
 class Parcel;
 class Surface;
-class IStreamSource;
+class IDataSource;
+struct IStreamSource;
 class IGraphicBufferProducer;
 struct IMediaHTTPService;
+struct AudioPlaybackRate;
+struct AVSyncSettings;
 
 class IMediaPlayer: public IInterface
 {
@@ -49,6 +52,7 @@
 
     virtual status_t        setDataSource(int fd, int64_t offset, int64_t length) = 0;
     virtual status_t        setDataSource(const sp<IStreamSource>& source) = 0;
+    virtual status_t        setDataSource(const sp<IDataSource>& source) = 0;
     virtual status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer) = 0;
     virtual status_t        prepareAsync() = 0;
@@ -56,6 +60,11 @@
     virtual status_t        stop() = 0;
     virtual status_t        pause() = 0;
     virtual status_t        isPlaying(bool* state) = 0;
+    virtual status_t        setPlaybackSettings(const AudioPlaybackRate& rate) = 0;
+    virtual status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) = 0;
+    virtual status_t        setSyncSettings(const AVSyncSettings& sync, float videoFpsHint) = 0;
+    virtual status_t        getSyncSettings(AVSyncSettings* sync /* nonnull */,
+                                            float* videoFps /* nonnull */) = 0;
     virtual status_t        seekTo(int msec) = 0;
     virtual status_t        getCurrentPosition(int* msec) = 0;
     virtual status_t        getDuration(int* msec) = 0;
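
A hedged sketch of the new playback-settings calls on IMediaPlayer; in practice these are normally
reached through the MediaPlayer client class. The AudioPlaybackRate field names are assumed from
AudioResamplerPublic.h, and setHalfSpeed() is an illustrative helper.

    #include <media/AudioResamplerPublic.h>
    #include <media/IMediaPlayer.h>

    using namespace android;

    status_t setHalfSpeed(const sp<IMediaPlayer>& player) {
        AudioPlaybackRate rate;
        status_t status = player->getPlaybackSettings(&rate);
        if (status != NO_ERROR) {
            return status;
        }
        rate.mSpeed = 0.5f;   // slower than normal
        rate.mPitch = 1.0f;   // leave the pitch untouched
        return player->setPlaybackSettings(rate);
    }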
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 67b599a..a316ce2 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -47,9 +47,10 @@
 public:
     DECLARE_META_INTERFACE(MediaPlayerService);
 
-    virtual sp<IMediaRecorder> createMediaRecorder() = 0;
+    virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName) = 0;
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
-    virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0) = 0;
+    virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0)
+            = 0;
 
     virtual sp<IOMX>            getOMX() = 0;
     virtual sp<ICrypto>         makeCrypto() = 0;
@@ -64,8 +65,8 @@
     // display client when display connection, disconnection or errors occur.
     // The assumption is that at most one remote display will be connected to the
     // provided interface at a time.
-    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
-            const String8& iface) = 0;
+    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
+            const sp<IRemoteDisplayClient>& client, const String8& iface) = 0;
 
     // codecs and audio devices usage tracking for the battery app
     enum BatteryDataBits {
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index 3e67550..77ed5d3 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -26,6 +26,7 @@
 class ICamera;
 class ICameraRecordingProxy;
 class IMediaRecorderClient;
+class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 
 class IMediaRecorder: public IInterface
@@ -41,7 +42,6 @@
     virtual status_t setOutputFormat(int of) = 0;
     virtual status_t setVideoEncoder(int ve) = 0;
     virtual status_t setAudioEncoder(int ae) = 0;
-    virtual status_t setOutputFile(const char* path) = 0;
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
     virtual status_t setVideoSize(int width, int height) = 0;
     virtual status_t setVideoFrameRate(int frames_per_second) = 0;
@@ -56,6 +56,7 @@
     virtual status_t init() = 0;
     virtual status_t close() = 0;
     virtual status_t release() = 0;
+    virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() = 0;
 };
 
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 627f23b..3d29e4a 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -20,10 +20,15 @@
 
 #include <binder/IInterface.h>
 #include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
 #include <ui/GraphicBuffer.h>
 #include <utils/List.h>
 #include <utils/String8.h>
 
+#include <list>
+
+#include <media/hardware/MetadataBufferType.h>
+
 #include <OMX_Core.h>
 #include <OMX_Video.h>
 
@@ -80,14 +85,16 @@
     virtual status_t getState(
             node_id node, OMX_STATETYPE* state) = 0;
 
+    // This will set *type to the previous metadata buffer type on OMX error (not on binder
+    // error), and to the new metadata buffer type on success.
     virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable) = 0;
+            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type = NULL) = 0;
 
     virtual status_t prepareForAdaptivePlayback(
             node_id node, OMX_U32 portIndex, OMX_BOOL enable,
             OMX_U32 maxFrameWidth, OMX_U32 maxFrameHeight) = 0;
 
-   virtual status_t configureVideoTunnelMode(
+    virtual status_t configureVideoTunnelMode(
             node_id node, OMX_U32 portIndex, OMX_BOOL tunneled,
             OMX_U32 audioHwSync, native_handle_t **sidebandHandle) = 0;
 
@@ -97,9 +104,10 @@
     virtual status_t getGraphicBufferUsage(
             node_id node, OMX_U32 port_index, OMX_U32* usage) = 0;
 
+    // Use |params| as an OMX buffer, but limit the size of the OMX buffer to |allottedSize|.
     virtual status_t useBuffer(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer) = 0;
+            buffer_id *buffer, OMX_U32 allottedSize) = 0;
 
     virtual status_t useGraphicBuffer(
             node_id node, OMX_U32 port_index,
@@ -109,9 +117,23 @@
             node_id node, OMX_U32 port_index,
             const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer) = 0;
 
+    // This will set *type to the resulting metadata buffer type on OMX error (not on binder
+    // error) as well as on success.
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
-            sp<IGraphicBufferProducer> *bufferProducer) = 0;
+            sp<IGraphicBufferProducer> *bufferProducer,
+            MetadataBufferType *type = NULL) = 0;
+
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer) = 0;
+
+    // This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
+    // well as on success.
+    virtual status_t setInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer,
+            MetadataBufferType *type) = 0;
 
     virtual status_t signalEndOfInputStream(node_id node) = 0;
 
@@ -123,20 +145,32 @@
             node_id node, OMX_U32 port_index, size_t size,
             buffer_id *buffer, void **buffer_data) = 0;
 
+    // Allocate an OMX buffer of size |allottedSize|. Use |params| as the backup buffer, which
+    // may be larger.
     virtual status_t allocateBufferWithBackup(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer) = 0;
+            buffer_id *buffer, OMX_U32 allottedSize) = 0;
 
     virtual status_t freeBuffer(
             node_id node, OMX_U32 port_index, buffer_id buffer) = 0;
 
-    virtual status_t fillBuffer(node_id node, buffer_id buffer) = 0;
+    enum {
+        kFenceTimeoutMs = 1000
+    };
+    // Calls OMX_FillBuffer on buffer, and passes |fenceFd| to component if it supports
+    // fences. Otherwise, it waits on |fenceFd| before calling OMX_FillBuffer.
+    // Takes ownership of |fenceFd| even if this call fails.
+    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd = -1) = 0;
 
+    // Calls OMX_EmptyBuffer on buffer (after updating buffer header with |range_offset|,
+    // |range_length|, |flags| and |timestamp|). Passes |fenceFd| to component if it
+    // supports fences. Otherwise, it waits on |fenceFd| before calling OMX_EmptyBuffer.
+    // Takes ownership of |fenceFd| even if this call fails.
     virtual status_t emptyBuffer(
             node_id node,
             buffer_id buffer,
             OMX_U32 range_offset, OMX_U32 range_length,
-            OMX_U32 flags, OMX_TICKS timestamp) = 0;
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd = -1) = 0;
 
     virtual status_t getExtensionIndex(
             node_id node,
@@ -147,6 +181,7 @@
         INTERNAL_OPTION_SUSPEND,  // data is a bool
         INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY,  // data is an int64_t
         INTERNAL_OPTION_MAX_TIMESTAMP_GAP, // data is int64_t
+        INTERNAL_OPTION_MAX_FPS, // data is float
         INTERNAL_OPTION_START_TIME, // data is an int64_t
         INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
     };
@@ -163,10 +198,11 @@
         EVENT,
         EMPTY_BUFFER_DONE,
         FILL_BUFFER_DONE,
-
+        FRAME_RENDERED,
     } type;
 
     IOMX::node_id node;
+    int fenceFd; // used for EMPTY_BUFFER_DONE and FILL_BUFFER_DONE; client must close this
 
     union {
         // if type == EVENT
@@ -190,6 +226,11 @@
             OMX_TICKS timestamp;
         } extended_buffer_data;
 
+        // if type == FRAME_RENDERED
+        struct {
+            OMX_TICKS timestamp;
+            OMX_S64 nanoTime;
+        } render_data;
     } u;
 };
 
@@ -197,7 +238,8 @@
 public:
     DECLARE_META_INTERFACE(OMXObserver);
 
-    virtual void onMessage(const omx_message &msg) = 0;
+    // Handle (list of) messages.
+    virtual void onMessages(const std::list<omx_message> &messages) = 0;
 };
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -223,4 +265,15 @@
 
 }  // namespace android
 
+inline static const char *asString(android::MetadataBufferType i, const char *def = "??") {
+    using namespace android;
+    switch (i) {
+        case kMetadataBufferTypeCameraSource:   return "CameraSource";
+        case kMetadataBufferTypeGrallocSource:  return "GrallocSource";
+        case kMetadataBufferTypeANWBuffer:      return "ANWBuffer";
+        case kMetadataBufferTypeInvalid:        return "Invalid";
+        default:                                return def;
+    }
+}
+
 #endif  // ANDROID_IOMX_H_
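
The observer interface above now delivers OMX callbacks in batches via onMessages(), and EMPTY_BUFFER_DONE / FILL_BUFFER_DONE carry a fence fd that the client owns. A minimal sketch of an observer honoring that contract, assuming the BnOMXObserver binder stub declared elsewhere in this header (the class name and comments are illustrative only):

    #include <unistd.h>
    #include <list>
    #include <media/IOMX.h>

    using namespace android;

    struct ExampleOMXObserver : public BnOMXObserver {
        // Handle a whole batch of OMX messages delivered in one binder call.
        virtual void onMessages(const std::list<omx_message> &messages) {
            for (std::list<omx_message>::const_iterator it = messages.begin();
                    it != messages.end(); ++it) {
                const omx_message &msg = *it;
                switch (msg.type) {
                    case omx_message::EMPTY_BUFFER_DONE:
                    case omx_message::FILL_BUFFER_DONE:
                        // The client owns msg.fenceFd and must close it once it is
                        // done waiting on (or passing along) the fence.
                        if (msg.fenceFd >= 0) {
                            ::close(msg.fenceFd);
                        }
                        break;
                    case omx_message::FRAME_RENDERED:
                        // msg.u.render_data.timestamp / nanoTime report when a
                        // frame actually reached the display.
                        break;
                    default:
                        break;
                }
            }
        }
    };
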
diff --git a/include/media/IResourceManagerClient.h b/include/media/IResourceManagerClient.h
new file mode 100644
index 0000000..aa0cd88
--- /dev/null
+++ b/include/media/IResourceManagerClient.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IRESOURCEMANAGERCLIENT_H
+#define ANDROID_IRESOURCEMANAGERCLIENT_H
+
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class IResourceManagerClient: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(ResourceManagerClient);
+
+    virtual bool reclaimResource() = 0;
+    virtual String8 getName() = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnResourceManagerClient: public BnInterface<IResourceManagerClient>
+{
+public:
+    virtual status_t onTransact(uint32_t code,
+                                const Parcel &data,
+                                Parcel *reply,
+                                uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IRESOURCEMANAGERCLIENT_H
diff --git a/include/media/IResourceManagerService.h b/include/media/IResourceManagerService.h
new file mode 100644
index 0000000..1e4f6de
--- /dev/null
+++ b/include/media/IResourceManagerService.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_IRESOURCEMANAGERSERVICE_H
+#define ANDROID_IRESOURCEMANAGERSERVICE_H
+
+#include <utils/Errors.h>  // for status_t
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <media/IResourceManagerClient.h>
+#include <media/MediaResource.h>
+#include <media/MediaResourcePolicy.h>
+
+namespace android {
+
+class IResourceManagerService: public IInterface
+{
+public:
+    DECLARE_META_INTERFACE(ResourceManagerService);
+
+    virtual void config(const Vector<MediaResourcePolicy> &policies) = 0;
+
+    virtual void addResource(
+            int pid,
+            int64_t clientId,
+            const sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources) = 0;
+
+    virtual void removeResource(int pid, int64_t clientId) = 0;
+
+    virtual bool reclaimResource(
+            int callingPid,
+            const Vector<MediaResource> &resources) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnResourceManagerService: public BnInterface<IResourceManagerService>
+{
+public:
+    virtual status_t onTransact(uint32_t code,
+                                const Parcel &data,
+                                Parcel *reply,
+                                uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif // ANDROID_IRESOURCEMANAGERSERVICE_H
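
Taken together, IResourceManagerClient and IResourceManagerService let a media component advertise the resources it holds and be asked to give them up under memory or codec pressure. A rough registration sketch, assuming the service is published in the service manager under the name "media.resource_manager" (that name, the client class, and the chosen resource values are illustrative, not taken from this diff):

    #include <binder/IServiceManager.h>
    #include <media/IResourceManagerClient.h>
    #include <media/IResourceManagerService.h>
    #include <media/MediaResource.h>

    using namespace android;

    // Client object the service calls back into when it needs resources freed.
    struct CodecResourceClient : public BnResourceManagerClient {
        virtual bool reclaimResource() {
            // Release the codec here; return true if anything was freed.
            return true;
        }
        virtual String8 getName() {
            return String8("example-codec-client");
        }
    };

    static void registerWithResourceManager(int pid, int64_t clientId) {
        sp<IBinder> binder = defaultServiceManager()->getService(
                String16("media.resource_manager"));   // assumed service name
        sp<IResourceManagerService> service =
                interface_cast<IResourceManagerService>(binder);
        if (service == NULL) {
            return;
        }
        sp<IResourceManagerClient> client = new CodecResourceClient;
        Vector<MediaResource> resources;
        resources.push_back(
                MediaResource(String8(kResourceSecureCodec), 1 /* count */));
        service->addResource(pid, clientId, client, resources);
    }
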
diff --git a/include/media/IStreamSource.h b/include/media/IStreamSource.h
index 677119b..4a6aafd 100644
--- a/include/media/IStreamSource.h
+++ b/include/media/IStreamSource.h
@@ -23,7 +23,7 @@
 namespace android {
 
 struct AMessage;
-struct IMemory;
+class IMemory;
 struct IStreamListener;
 
 struct IStreamSource : public IInterface {
@@ -81,6 +81,13 @@
     // with the next PTS occurring in the stream. The value is of type int64_t.
     static const char *const kKeyMediaTimeUs;
 
+    // Optionally signalled as part of a discontinuity that includes
+    // DISCONTINUITY_TIME. It indicates the media time (in us) of a recent
+    // sample from the same content, and is used as a hint for the parser to
+    // handle PTS wraparound. This is required when a new parser is created
+    // to continue parsing content from the same timeline.
+    static const char *const kKeyRecentMediaTimeUs;
+
     virtual void issueCommand(
             Command cmd, bool synchronous, const sp<AMessage> &msg = NULL) = 0;
 };
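
kKeyRecentMediaTimeUs rides along with a time discontinuity so that a freshly created parser can resolve PTS wraparound. A sketch of how a source might attach it, assuming |listener| is the IStreamListener the player handed over via IStreamSource::setListener() and the time values are placeholders:

    #include <media/IStreamSource.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    static void signalTimeDiscontinuity(
            const sp<IStreamListener> &listener,
            int64_t newMediaTimeUs, int64_t recentMediaTimeUs) {
        sp<AMessage> extra = new AMessage;
        extra->setInt64(IStreamListener::kKeyMediaTimeUs, newMediaTimeUs);
        // New in this change: hint for PTS-wraparound handling in a new parser.
        extra->setInt64(IStreamListener::kKeyRecentMediaTimeUs, recentMediaTimeUs);
        listener->issueCommand(
                IStreamListener::DISCONTINUITY, false /* synchronous */, extra);
    }
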
diff --git a/include/media/MediaCodecInfo.h b/include/media/MediaCodecInfo.h
index cd56adb..4067b47 100644
--- a/include/media/MediaCodecInfo.h
+++ b/include/media/MediaCodecInfo.h
@@ -32,9 +32,11 @@
 namespace android {
 
 struct AMessage;
-struct Parcel;
+class Parcel;
 struct CodecCapabilities;
 
+typedef KeyedVector<AString, AString> CodecSettings;
+
 struct MediaCodecInfo : public RefBase {
     struct ProfileLevel {
         uint32_t mProfile;
@@ -104,6 +106,7 @@
     MediaCodecInfo(AString name, bool encoder, const char *mime);
     void addQuirk(const char *name);
     status_t addMime(const char *mime);
+    status_t updateMime(const char *mime);
     status_t initializeCapabilities(const CodecCapabilities &caps);
     void addDetail(const AString &key, const AString &value);
     void addFeature(const AString &key, int32_t value);
@@ -114,6 +117,7 @@
     DISALLOW_EVIL_CONSTRUCTORS(MediaCodecInfo);
 
     friend class MediaCodecList;
+    friend class MediaCodecListOverridesTest;
 };
 
 }  // namespace android
diff --git a/include/media/MediaMetadataRetrieverInterface.h b/include/media/MediaMetadataRetrieverInterface.h
index 38dbb20..bce6ee3 100644
--- a/include/media/MediaMetadataRetrieverInterface.h
+++ b/include/media/MediaMetadataRetrieverInterface.h
@@ -25,6 +25,7 @@
 
 namespace android {
 
+class DataSource;
 struct IMediaHTTPService;
 
 // Abstract base class
@@ -40,6 +41,7 @@
             const KeyedVector<String8, String8> *headers = NULL) = 0;
 
     virtual status_t    setDataSource(int fd, int64_t offset, int64_t length) = 0;
+    virtual status_t setDataSource(const sp<DataSource>& source) = 0;
     virtual VideoFrame* getFrameAtTime(int64_t timeUs, int option) = 0;
     virtual MediaAlbumArt* extractAlbumArt() = 0;
     virtual const char* extractMetadata(int keyCode) = 0;
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 4a6bf28..de82554 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -26,8 +26,10 @@
 #include <utils/RefBase.h>
 
 #include <media/mediaplayer.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
 #include <media/AudioTimestamp.h>
+#include <media/AVSyncSettings.h>
 #include <media/Metadata.h>
 
 // Fwd decl to make sure everyone agrees that the scope of struct sockaddr_in is
@@ -36,6 +38,7 @@
 
 namespace android {
 
+class DataSource;
 class Parcel;
 class Surface;
 class IGraphicBufferProducer;
@@ -110,20 +113,35 @@
                 AudioCallback cb = NULL,
                 void *cookie = NULL,
                 audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                const audio_offload_info_t *offloadInfo = NULL) = 0;
+                const audio_offload_info_t *offloadInfo = NULL,
+                bool doNotReconnect = false,
+                uint32_t suggestedFrameCount = 0) = 0;
 
         virtual status_t    start() = 0;
-        virtual ssize_t     write(const void* buffer, size_t size) = 0;
+
+        /* Input parameter |size| is the number of bytes of data stored in |buffer|.
+         * Data is copied over and actual number of bytes written (>= 0)
+         * is returned, or no data is copied and a negative status code
+         * is returned (even when |blocking| is true).
+         * When |blocking| is false, AudioSink will return immediately after
+         * part or all of |buffer| has been copied over.
+         * When |blocking| is true, AudioSink will wait to copy the entire
+         * buffer, unless an error occurs or the copy operation is
+         * prematurely stopped.
+         */
+        virtual ssize_t     write(const void* buffer, size_t size, bool blocking = true) = 0;
+
         virtual void        stop() = 0;
         virtual void        flush() = 0;
         virtual void        pause() = 0;
         virtual void        close() = 0;
 
-        virtual status_t    setPlaybackRatePermille(int32_t rate) { return INVALID_OPERATION; }
+        virtual status_t    setPlaybackRate(const AudioPlaybackRate& rate) = 0;
+        virtual status_t    getPlaybackRate(AudioPlaybackRate* rate /* nonnull */) = 0;
         virtual bool        needsTrailingPadding() { return true; }
 
-        virtual status_t    setParameters(const String8& keyValuePairs) { return NO_ERROR; };
-        virtual String8     getParameters(const String8& keys) { return String8::empty(); };
+        virtual status_t    setParameters(const String8& /* keyValuePairs */) { return NO_ERROR; }
+        virtual String8     getParameters(const String8& /* keys */) { return String8::empty(); }
     };
 
                         MediaPlayerBase() : mCookie(0), mNotify(0) {}
@@ -131,7 +149,7 @@
     virtual status_t    initCheck() = 0;
     virtual bool        hardwareOutput() = 0;
 
-    virtual status_t    setUID(uid_t uid) {
+    virtual status_t    setUID(uid_t /* uid */) {
         return INVALID_OPERATION;
     }
 
@@ -142,7 +160,11 @@
 
     virtual status_t    setDataSource(int fd, int64_t offset, int64_t length) = 0;
 
-    virtual status_t    setDataSource(const sp<IStreamSource> &source) {
+    virtual status_t    setDataSource(const sp<IStreamSource>& /* source */) {
+        return INVALID_OPERATION;
+    }
+
+    virtual status_t    setDataSource(const sp<DataSource>& /* source */) {
         return INVALID_OPERATION;
     }
 
@@ -156,6 +178,31 @@
     virtual status_t    stop() = 0;
     virtual status_t    pause() = 0;
     virtual bool        isPlaying() = 0;
+    virtual status_t    setPlaybackSettings(const AudioPlaybackRate& rate) {
+        // by default, players only support setting rate to the default
+        if (!isAudioPlaybackRateEqual(rate, AUDIO_PLAYBACK_RATE_DEFAULT)) {
+            return BAD_VALUE;
+        }
+        return OK;
+    }
+    virtual status_t    getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */) {
+        *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+        return OK;
+    }
+    virtual status_t    setSyncSettings(const AVSyncSettings& sync, float /* videoFps */) {
+        // By default, players only support setting sync source to default; all other sync
+        // settings are ignored. There is no requirement for getters to return set values.
+        if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+            return BAD_VALUE;
+        }
+        return OK;
+    }
+    virtual status_t    getSyncSettings(
+                                AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */) {
+        *sync = AVSyncSettings();
+        *videoFps = -1.f;
+        return OK;
+    }
     virtual status_t    seekTo(int msec) = 0;
     virtual status_t    getCurrentPosition(int *msec) = 0;
     virtual status_t    getDuration(int *msec) = 0;
@@ -166,13 +213,13 @@
     virtual status_t    getParameter(int key, Parcel *reply) = 0;
 
     // default no-op implementation of optional extensions
-    virtual status_t setRetransmitEndpoint(const struct sockaddr_in* endpoint) {
+    virtual status_t setRetransmitEndpoint(const struct sockaddr_in* /* endpoint */) {
         return INVALID_OPERATION;
     }
-    virtual status_t getRetransmitEndpoint(struct sockaddr_in* endpoint) {
+    virtual status_t getRetransmitEndpoint(struct sockaddr_in* /* endpoint */) {
         return INVALID_OPERATION;
     }
-    virtual status_t setNextPlayer(const sp<MediaPlayerBase>& next) {
+    virtual status_t setNextPlayer(const sp<MediaPlayerBase>& /* next */) {
         return OK;
     }
 
@@ -192,8 +239,8 @@
     //            the known metadata should be returned.
     // @param[inout] records Parcel where the player appends its metadata.
     // @return OK if the call was successful.
-    virtual status_t    getMetadata(const media::Metadata::Filter& ids,
-                                    Parcel *records) {
+    virtual status_t    getMetadata(const media::Metadata::Filter& /* ids */,
+                                    Parcel* /* records */) {
         return INVALID_OPERATION;
     };
 
@@ -216,7 +263,7 @@
         if (notifyCB) notifyCB(cookie, msg, ext1, ext2, obj);
     }
 
-    virtual status_t dump(int fd, const Vector<String16> &args) const {
+    virtual status_t dump(int /* fd */, const Vector<String16>& /* args */) const {
         return INVALID_OPERATION;
     }
 
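
The new setPlaybackSettings()/setSyncSettings() defaults mean a player that does not override them only accepts the default rate and sync source. A small sketch of what a caller sees against such a player (the probe function itself is illustrative; AUDIO_PLAYBACK_RATE_DEFAULT and the mSpeed field come from AudioResamplerPublic.h):

    #include <media/MediaPlayerInterface.h>

    using namespace android;

    // |player| is any MediaPlayerBase that keeps the default implementations.
    static void probeDefaultSettings(const sp<MediaPlayerBase> &player) {
        AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
        rate.mSpeed = 2.0f;  // ask for 2x speed
        // BAD_VALUE: the default only accepts AUDIO_PLAYBACK_RATE_DEFAULT.
        status_t err = player->setPlaybackSettings(rate);

        AVSyncSettings sync;
        sync.mSource = AVSYNC_SOURCE_DEFAULT;
        // OK: the default sync source is the only one accepted.
        err = player->setSyncSettings(sync, -1.f /* videoFps unknown */);
        (void)err;
    }
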
diff --git a/include/media/MediaProfiles.h b/include/media/MediaProfiles.h
index f061d22..e02918f 100644
--- a/include/media/MediaProfiles.h
+++ b/include/media/MediaProfiles.h
@@ -58,24 +58,6 @@
     CAMCORDER_QUALITY_HIGH_SPEED_LIST_END = 2005,
 };
 
-/**
- * Set CIF as default maximum import and export resolution of video editor.
- * The maximum import and export resolutions are platform specific,
- * which should be defined in media_profiles.xml.
- * Set default maximum prefetch YUV frames to 6, which means video editor can
- * queue up to 6 YUV frames in the video encoder source.
- * This value is used to limit the amount of memory used by video editor
- * engine when the encoder consumes YUV frames at a lower speed
- * than video editor engine produces.
- */
-enum videoeditor_capability {
-    VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH = 352,
-    VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT = 288,
-    VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH = 352,
-    VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT = 288,
-    VIDEOEDITOR_DEFAULT_MAX_PREFETCH_YUV_FRAMES = 6
-};
-
 enum video_decoder {
     VIDEO_DECODER_WMV,
 };
@@ -148,32 +130,6 @@
     int getVideoEncoderParamByName(const char *name, video_encoder codec) const;
 
     /**
-     * Returns the value for the given param name for the video editor cap
-     * param or -1 if error.
-     * Supported param name are:
-     * videoeditor.input.width.max - max input video frame width
-     * videoeditor.input.height.max - max input video frame height
-     * videoeditor.output.width.max - max output video frame width
-     * videoeditor.output.height.max - max output video frame height
-     * maxPrefetchYUVFrames - max prefetch YUV frames in video editor engine. This value is used
-     * to limit the memory consumption.
-     */
-    int getVideoEditorCapParamByName(const char *name) const;
-
-    /**
-     * Returns the value for the given param name for the video editor export codec format
-     * param or -1 if error.
-     * Supported param name are:
-     * videoeditor.export.profile - export video profile
-     * videoeditor.export.level - export video level
-     * Supported param codec are:
-     * 1 for h263
-     * 2 for h264
-     * 3 for mpeg4
-     */
-    int getVideoEditorExportParamByName(const char *name, int codec) const;
-
-    /**
      * Returns the audio encoders supported.
      */
     Vector<audio_encoder> getAudioEncoders() const;
@@ -221,7 +177,7 @@
 
     MediaProfiles& operator=(const MediaProfiles&);  // Don't call me
     MediaProfiles(const MediaProfiles&);             // Don't call me
-    MediaProfiles() { mVideoEditorCap = NULL; }        // Dummy default constructor
+    MediaProfiles() {}                               // Dummy default constructor
     ~MediaProfiles();                                // Don't delete me
 
     struct VideoCodec {
@@ -366,31 +322,6 @@
         int mCameraId;
         Vector<int> mLevels;
     };
-    struct ExportVideoProfile {
-        ExportVideoProfile(int codec, int profile, int level)
-            :mCodec(codec),mProfile(profile),mLevel(level) {}
-        ~ExportVideoProfile() {}
-        int mCodec;
-        int mProfile;
-        int mLevel;
-    };
-    struct VideoEditorCap {
-        VideoEditorCap(int inFrameWidth, int inFrameHeight,
-            int outFrameWidth, int outFrameHeight, int frames)
-            : mMaxInputFrameWidth(inFrameWidth),
-              mMaxInputFrameHeight(inFrameHeight),
-              mMaxOutputFrameWidth(outFrameWidth),
-              mMaxOutputFrameHeight(outFrameHeight),
-              mMaxPrefetchYUVFrames(frames) {}
-
-        ~VideoEditorCap() {}
-
-        int mMaxInputFrameWidth;
-        int mMaxInputFrameHeight;
-        int mMaxOutputFrameWidth;
-        int mMaxOutputFrameHeight;
-        int mMaxPrefetchYUVFrames;
-    };
 
     int getCamcorderProfileIndex(int cameraId, camcorder_quality quality) const;
     void initRequiredProfileRefs(const Vector<int>& cameraIds);
@@ -403,7 +334,6 @@
     static void logAudioEncoderCap(const AudioEncoderCap& cap);
     static void logVideoDecoderCap(const VideoDecoderCap& cap);
     static void logAudioDecoderCap(const AudioDecoderCap& cap);
-    static void logVideoEditorCap(const VideoEditorCap& cap);
 
     // If the xml configuration file does exist, use the settings
     // from the xml
@@ -415,9 +345,6 @@
     static VideoDecoderCap* createVideoDecoderCap(const char **atts);
     static VideoEncoderCap* createVideoEncoderCap(const char **atts);
     static AudioEncoderCap* createAudioEncoderCap(const char **atts);
-    static VideoEditorCap* createVideoEditorCap(
-                const char **atts, MediaProfiles *profiles);
-    static ExportVideoProfile* createExportVideoProfile(const char **atts);
 
     static CamcorderProfile* createCamcorderProfile(
                 int cameraId, const char **atts, Vector<int>& cameraIds);
@@ -461,8 +388,6 @@
     static void createDefaultEncoderOutputFileFormats(MediaProfiles *profiles);
     static void createDefaultImageEncodingQualityLevels(MediaProfiles *profiles);
     static void createDefaultImageDecodingMaxMemory(MediaProfiles *profiles);
-    static void createDefaultVideoEditorCap(MediaProfiles *profiles);
-    static void createDefaultExportVideoProfiles(MediaProfiles *profiles);
 
     static VideoEncoderCap* createDefaultH263VideoEncoderCap();
     static VideoEncoderCap* createDefaultM4vVideoEncoderCap();
@@ -520,8 +445,6 @@
 
     RequiredProfiles *mRequiredProfileRefs;
     Vector<int>              mCameraIds;
-    VideoEditorCap* mVideoEditorCap;
-    Vector<ExportVideoProfile*> mVideoEditorExportProfiles;
 };
 
 }; // namespace android
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index d7ac302..d6cc4bb 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -26,10 +26,12 @@
 
 class ICameraRecordingProxy;
 class Surface;
+class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 
 struct MediaRecorderBase {
-    MediaRecorderBase() {}
+    MediaRecorderBase(const String16 &opPackageName)
+        : mOpPackageName(opPackageName) {}
     virtual ~MediaRecorderBase() {}
 
     virtual status_t init() = 0;
@@ -43,7 +45,6 @@
     virtual status_t setCamera(const sp<ICamera>& camera,
                                const sp<ICameraRecordingProxy>& proxy) = 0;
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
-    virtual status_t setOutputFile(const char *path) = 0;
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
     virtual status_t setOutputFileAuxiliary(int fd) {return INVALID_OPERATION;}
     virtual status_t setParameters(const String8& params) = 0;
@@ -56,8 +57,13 @@
     virtual status_t reset() = 0;
     virtual status_t getMaxAmplitude(int *max) = 0;
     virtual status_t dump(int fd, const Vector<String16>& args) const = 0;
+    virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface) = 0;
     virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const = 0;
 
+
+protected:
+    String16 mOpPackageName;
+
 private:
     MediaRecorderBase(const MediaRecorderBase &);
     MediaRecorderBase &operator=(const MediaRecorderBase &);
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
new file mode 100644
index 0000000..20f2cad
--- /dev/null
+++ b/include/media/MediaResource.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_MEDIA_RESOURCE_H
+#define ANDROID_MEDIA_RESOURCE_H
+
+#include <binder/Parcel.h>
+#include <utils/String8.h>
+
+namespace android {
+
+extern const char kResourceSecureCodec[];
+extern const char kResourceNonSecureCodec[];
+extern const char kResourceAudioCodec[];
+extern const char kResourceVideoCodec[];
+extern const char kResourceGraphicMemory[];
+
+class MediaResource {
+public:
+    MediaResource();
+    MediaResource(String8 type, uint64_t value);
+    MediaResource(String8 type, String8 subType, uint64_t value);
+
+    void readFromParcel(const Parcel &parcel);
+    void writeToParcel(Parcel *parcel) const;
+
+    String8 toString() const;
+
+    bool operator==(const MediaResource &other) const;
+    bool operator!=(const MediaResource &other) const;
+
+    String8 mType;
+    String8 mSubType;
+    uint64_t mValue;
+};
+
+}; // namespace android
+
+#endif  // ANDROID_MEDIA_RESOURCE_H
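
MediaResource is a small parcelable value type; clients typically build a Vector of them to describe what a codec instance consumes. A tiny construction/comparison sketch (the chosen values are arbitrary):

    #include <media/MediaResource.h>

    using namespace android;

    static void mediaResourceExample() {
        MediaResource secure(String8(kResourceSecureCodec), 1);
        MediaResource graphic(String8(kResourceGraphicMemory), 8 * 1024 * 1024);

        // operator== compares type, subtype and value.
        bool same = (secure == graphic);   // false
        (void)same;
    }
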
diff --git a/include/media/MediaResourcePolicy.h b/include/media/MediaResourcePolicy.h
new file mode 100644
index 0000000..9bc2eec
--- /dev/null
+++ b/include/media/MediaResourcePolicy.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_MEDIA_RESOURCE_POLICY_H
+#define ANDROID_MEDIA_RESOURCE_POLICY_H
+
+#include <binder/Parcel.h>
+#include <utils/String8.h>
+
+namespace android {
+
+extern const char kPolicySupportsMultipleSecureCodecs[];
+extern const char kPolicySupportsSecureWithNonSecureCodec[];
+
+class MediaResourcePolicy {
+public:
+    MediaResourcePolicy();
+    MediaResourcePolicy(String8 type, String8 value);
+
+    void readFromParcel(const Parcel &parcel);
+    void writeToParcel(Parcel *parcel) const;
+
+    String8 toString() const;
+
+    String8 mType;
+    String8 mValue;
+};
+
+}; // namespace android
+
+#endif  // ANDROID_MEDIA_RESOURCE_POLICY_H
diff --git a/include/media/RingBuffer.h b/include/media/RingBuffer.h
new file mode 100644
index 0000000..df7c00e
--- /dev/null
+++ b/include/media/RingBuffer.h
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_SERVICE_UTILS_RING_BUFFER_H
+#define ANDROID_SERVICE_UTILS_RING_BUFFER_H
+
+#include <utils/Log.h>
+#include <cutils/compiler.h>
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+namespace android {
+
+/**
+ * A RingBuffer class that maintains an array of objects that can grow up to a certain size.
+ * Elements added to the RingBuffer are inserted at the logical front of the buffer, and
+ * invalidate all current iterators for that RingBuffer object.
+ */
+template <class T>
+class RingBuffer final {
+public:
+
+    /**
+     * Construct a RingBuffer that can grow up to the given length.
+     */
+    RingBuffer(size_t length);
+
+    /**
+     * Forward iterator to this class.  Implements a std::forward_iterator.
+     */
+    class iterator : public std::iterator<std::forward_iterator_tag, T> {
+    public:
+        iterator(T* ptr, size_t size, size_t pos, size_t ctr);
+
+        iterator& operator++();
+
+        iterator operator++(int);
+
+        bool operator==(const iterator& rhs);
+
+        bool operator!=(const iterator& rhs);
+
+        T& operator*();
+
+        T* operator->();
+
+    private:
+        T* mPtr;
+        size_t mSize;
+        size_t mPos;
+        size_t mCtr;
+    };
+
+    /**
+     * Constant forward iterator to this class.  Implements a std::forward_iterator.
+     */
+    class const_iterator : public std::iterator<std::forward_iterator_tag, T> {
+    public:
+        const_iterator(const T* ptr, size_t size, size_t pos, size_t ctr);
+
+        const_iterator& operator++();
+
+        const_iterator operator++(int);
+
+        bool operator==(const const_iterator& rhs);
+
+        bool operator!=(const const_iterator& rhs);
+
+        const T& operator*();
+
+        const T* operator->();
+
+    private:
+        const T* mPtr;
+        size_t mSize;
+        size_t mPos;
+        size_t mCtr;
+    };
+
+    /**
+     * Adds item to the front of this RingBuffer.  If the RingBuffer is at its maximum length,
+     * this will result in the last element being replaced (this is done using the element's
+     * assignment operator).
+     *
+     * All current iterators are invalidated.
+     */
+    void add(const T& item);
+
+    /**
+     * Moves item to the front of this RingBuffer.  Following a call to this, item should no
+     * longer be used.  If the RingBuffer is at its maximum length, this will result in the
+     * last element being replaced (this is done using the element's assignment operator).
+     *
+     * All current iterators are invalidated.
+     */
+    void add(T&& item);
+
+    /**
+     * Construct item in-place in the front of this RingBuffer using the given arguments.  If
+     * the RingBuffer is at its maximum length, this will result in the last element being
+     * replaced (this is done using the element's assignment operator).
+     *
+     * All current iterators are invalidated.
+     */
+    template <class... Args>
+    void emplace(Args&&... args);
+
+    /**
+     * Get an iterator to the front of this RingBuffer.
+     */
+    iterator begin();
+
+    /**
+     * Get an iterator to the end of this RingBuffer.
+     */
+    iterator end();
+
+    /**
+     * Get a const_iterator to the front of this RingBuffer.
+     */
+    const_iterator begin() const;
+
+    /**
+     * Get a const_iterator to the end of this RingBuffer.
+     */
+    const_iterator end() const;
+
+    /**
+     * Return a reference to the element at a given index.  If the index is outside the valid
+     * range [0, size) for this RingBuffer, the behavior is undefined.
+     */
+    T& operator[](size_t index);
+
+    /**
+     * Return a const reference to the element at a given index.  If the index is outside the
+     * valid range [0, size) for this RingBuffer, the behavior is undefined.
+     */
+    const T& operator[](size_t index) const;
+
+    /**
+     * Return the current size of this RingBuffer.
+     */
+    size_t size() const;
+
+    /**
+     * Remove all elements from this RingBuffer and set the size to 0.
+     */
+    void clear();
+
+private:
+    size_t mFrontIdx;
+    size_t mMaxBufferSize;
+    std::vector<T> mBuffer;
+}; // class RingBuffer
+
+
+template <class T>
+RingBuffer<T>::RingBuffer(size_t length) : mFrontIdx{0}, mMaxBufferSize{length} {}
+
+template <class T>
+RingBuffer<T>::iterator::iterator(T* ptr, size_t size, size_t pos, size_t ctr) :
+        mPtr{ptr}, mSize{size}, mPos{pos}, mCtr{ctr} {}
+
+template <class T>
+typename RingBuffer<T>::iterator& RingBuffer<T>::iterator::operator++() {
+    ++mCtr;
+
+    if (CC_UNLIKELY(mCtr == mSize)) {
+        mPos = mSize;
+        return *this;
+    }
+
+    mPos = ((CC_UNLIKELY(mPos == 0)) ? mSize - 1 : mPos - 1);
+    return *this;
+}
+
+template <class T>
+typename RingBuffer<T>::iterator RingBuffer<T>::iterator::operator++(int) {
+    iterator tmp{mPtr, mSize, mPos, mCtr};
+    ++(*this);
+    return tmp;
+}
+
+template <class T>
+bool RingBuffer<T>::iterator::operator==(const iterator& rhs) {
+    return (mPtr + mPos) == (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+bool RingBuffer<T>::iterator::operator!=(const iterator& rhs) {
+    return (mPtr + mPos) != (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+T& RingBuffer<T>::iterator::operator*() {
+    return *(mPtr + mPos);
+}
+
+template <class T>
+T* RingBuffer<T>::iterator::operator->() {
+    return mPtr + mPos;
+}
+
+template <class T>
+RingBuffer<T>::const_iterator::const_iterator(const T* ptr, size_t size, size_t pos, size_t ctr) :
+        mPtr{ptr}, mSize{size}, mPos{pos}, mCtr{ctr} {}
+
+template <class T>
+typename RingBuffer<T>::const_iterator& RingBuffer<T>::const_iterator::operator++() {
+    ++mCtr;
+
+    if (CC_UNLIKELY(mCtr == mSize)) {
+        mPos = mSize;
+        return *this;
+    }
+
+    mPos = ((CC_UNLIKELY(mPos == 0)) ? mSize - 1 : mPos - 1);
+    return *this;
+}
+
+template <class T>
+typename RingBuffer<T>::const_iterator RingBuffer<T>::const_iterator::operator++(int) {
+    const_iterator tmp{mPtr, mSize, mPos, mCtr};
+    ++(*this);
+    return tmp;
+}
+
+template <class T>
+bool RingBuffer<T>::const_iterator::operator==(const const_iterator& rhs) {
+    return (mPtr + mPos) == (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+bool RingBuffer<T>::const_iterator::operator!=(const const_iterator& rhs) {
+    return (mPtr + mPos) != (rhs.mPtr + rhs.mPos);
+}
+
+template <class T>
+const T& RingBuffer<T>::const_iterator::operator*() {
+    return *(mPtr + mPos);
+}
+
+template <class T>
+const T* RingBuffer<T>::const_iterator::operator->() {
+    return mPtr + mPos;
+}
+
+template <class T>
+void RingBuffer<T>::add(const T& item) {
+    if (mBuffer.size() < mMaxBufferSize) {
+        mBuffer.push_back(item);
+        mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+        return;
+    }
+
+    mBuffer[mFrontIdx] = item;
+    mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+}
+
+template <class T>
+void RingBuffer<T>::add(T&& item) {
+    if (mBuffer.size() != mMaxBufferSize) {
+        mBuffer.push_back(std::forward<T>(item));
+        mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+        return;
+    }
+
+    // Only works for types with move assignment operator
+    mBuffer[mFrontIdx] = std::forward<T>(item);
+    mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+}
+
+template <class T>
+template <class... Args>
+void RingBuffer<T>::emplace(Args&&... args) {
+    if (mBuffer.size() != mMaxBufferSize) {
+        mBuffer.emplace_back(std::forward<Args>(args)...);
+        mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+        return;
+    }
+
+    // Only works for types with move assignment operator
+    mBuffer[mFrontIdx] = T(std::forward<Args>(args)...);
+    mFrontIdx = ((mFrontIdx + 1) % mMaxBufferSize);
+}
+
+template <class T>
+typename RingBuffer<T>::iterator RingBuffer<T>::begin() {
+    size_t tmp = (mBuffer.size() == 0) ? 0 : mBuffer.size() - 1;
+    return iterator(mBuffer.data(), mBuffer.size(), (mFrontIdx == 0) ? tmp : mFrontIdx - 1, 0);
+}
+
+template <class T>
+typename RingBuffer<T>::iterator RingBuffer<T>::end() {
+    size_t s = mBuffer.size();
+    return iterator(mBuffer.data(), s, s, s);
+}
+
+template <class T>
+typename RingBuffer<T>::const_iterator RingBuffer<T>::begin() const {
+    size_t tmp = (mBuffer.size() == 0) ? 0 : mBuffer.size() - 1;
+    return const_iterator(mBuffer.data(), mBuffer.size(),
+            (mFrontIdx == 0) ? tmp : mFrontIdx - 1, 0);
+}
+
+template <class T>
+typename RingBuffer<T>::const_iterator RingBuffer<T>::end() const {
+    size_t s = mBuffer.size();
+    return const_iterator(mBuffer.data(), s, s, s);
+}
+
+template <class T>
+T& RingBuffer<T>::operator[](size_t index) {
+    LOG_ALWAYS_FATAL_IF(index >= mBuffer.size(), "Index %zu out of bounds, size is %zu.",
+            index, mBuffer.size());
+    size_t pos = (index >= mFrontIdx) ?
+            mBuffer.size() - 1 - (index - mFrontIdx) : mFrontIdx - 1 - index;
+    return mBuffer[pos];
+}
+
+template <class T>
+const T& RingBuffer<T>::operator[](size_t index) const {
+    LOG_ALWAYS_FATAL_IF(index >= mBuffer.size(), "Index %zu out of bounds, size is %zu.",
+            index, mBuffer.size());
+    size_t pos = (index >= mFrontIdx) ?
+            mBuffer.size() - 1 - (index - mFrontIdx) : mFrontIdx - 1 - index;
+    return mBuffer[pos];
+}
+
+template <class T>
+size_t RingBuffer<T>::size() const {
+    return mBuffer.size();
+}
+
+template <class T>
+void RingBuffer<T>::clear() {
+    mBuffer.clear();
+    mFrontIdx = 0;
+}
+
+}; // namespace android
+
+#endif // ANDROID_SERVICE_UTILS_RING_BUFFER_H
+
+
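
Since add() always inserts at the logical front and overwrites the oldest entry once the buffer is full, indexing and iteration run from newest to oldest. A quick usage sketch (the tag and values are arbitrary):

    #define LOG_TAG "RingBufferExample"
    #include <media/RingBuffer.h>
    #include <utils/Log.h>

    using namespace android;

    static void ringBufferExample() {
        RingBuffer<int> history(3);      // keeps at most the 3 most recent values

        for (int i = 1; i <= 5; ++i) {
            history.add(i);              // 4 and 5 overwrite the two oldest entries
        }

        // history[0] is the newest element (5); history[2] is the oldest kept (3).
        for (size_t i = 0; i < history.size(); ++i) {
            ALOGV("history[%zu] = %d", i, history[i]);
        }

        // Iterators also walk from the logical front (newest) to the back (oldest).
        for (RingBuffer<int>::iterator it = history.begin(); it != history.end(); ++it) {
            ALOGV("value %d", *it);
        }
    }
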
diff --git a/include/media/SingleStateQueue.h b/include/media/SingleStateQueue.h
index 04c5fd0..d423962 100644
--- a/include/media/SingleStateQueue.h
+++ b/include/media/SingleStateQueue.h
@@ -21,6 +21,7 @@
 // Non-blocking single-reader / single-writer multi-word atomic load / store
 
 #include <stdint.h>
+#include <cutils/atomic.h>
 
 namespace android {
 
@@ -31,6 +32,12 @@
     class Mutator;
     class Observer;
 
+    enum SSQ_STATUS {
+        SSQ_PENDING, /* = 0 */
+        SSQ_READ,
+        SSQ_DONE,
+    };
+
     struct Shared {
         // needs to be part of a union so don't define constructor or destructor
 
@@ -41,28 +48,56 @@
         void                init() { mAck = 0; mSequence = 0; }
 
         volatile int32_t    mAck;
-#if 0
-        int                 mPad[7];
-        // cache line boundary
-#endif
         volatile int32_t    mSequence;
         T                   mValue;
     };
 
     class Mutator {
     public:
-        Mutator(Shared *shared);
-        /*virtual*/ ~Mutator() { }
+        Mutator(Shared *shared)
+            : mSequence(0), mShared(shared)
+        {
+            // exactly one of Mutator and Observer must initialize, currently it is Observer
+            // shared->init();
+        }
 
         // push new value onto state queue, overwriting previous value;
         // returns a sequence number which can be used with ack()
-        int32_t push(const T& value);
+        int32_t push(const T& value)
+        {
+            Shared *shared = mShared;
+            int32_t sequence = mSequence;
+            sequence++;
+            android_atomic_acquire_store(sequence, &shared->mSequence);
+            shared->mValue = value;
+            sequence++;
+            android_atomic_release_store(sequence, &shared->mSequence);
+            mSequence = sequence;
+            // consider signalling a futex here, if we know that observer is waiting
+            return sequence;
+        }
 
-        // return true if most recent push has been observed
-        bool ack();
+        // returns the status of the last state push.  This may be a stale value.
+        //
+        // SSQ_PENDING, or 0, means it has not been observed
+        // SSQ_READ means it has been read
+        // SSQ_DONE means it has been acted upon, after Observer::done() is called
+        enum SSQ_STATUS ack() const
+        {
+            // In the case of SSQ_DONE, prevent subtle data races where subsequent reads
+            // could be performed (out of order) before the ack read, in case the caller
+            // depends on the sequential ordering of reads.
+            const int32_t ack = android_atomic_acquire_load(&mShared->mAck);
+            return ack - mSequence & ~1 ? SSQ_PENDING /* seq differ */ :
+                    ack & 1 ? SSQ_DONE : SSQ_READ;
+        }
 
         // return true if a push with specified sequence number or later has been observed
-        bool ack(int32_t sequence);
+        bool ack(int32_t sequence) const
+        {
+            // this relies on 2's complement rollover to detect an ancient sequence number
+            return mShared->mAck - sequence >= 0;
+        }
 
     private:
         int32_t     mSequence;
@@ -71,11 +106,54 @@
 
     class Observer {
     public:
-        Observer(Shared *shared);
-        /*virtual*/ ~Observer() { }
+        Observer(Shared *shared)
+            : mSequence(0), mSeed(1), mShared(shared)
+        {
+            // exactly one of Mutator and Observer must initialize, currently it is Observer
+            shared->init();
+        }
 
         // return true if value has changed
-        bool poll(T& value);
+        bool poll(T& value)
+        {
+            Shared *shared = mShared;
+            int32_t before = shared->mSequence;
+            if (before == mSequence) {
+                return false;
+            }
+            for (int tries = 0; ; ) {
+                const int MAX_TRIES = 5;
+                if (before & 1) {
+                    if (++tries >= MAX_TRIES) {
+                        return false;
+                    }
+                    before = shared->mSequence;
+                } else {
+                    android_memory_barrier();
+                    T temp = shared->mValue;
+                    int32_t after = android_atomic_release_load(&shared->mSequence);
+                    if (after == before) {
+                        value = temp;
+                        shared->mAck = before;
+                        mSequence = before; // mSequence is even after poll success
+                        return true;
+                    }
+                    if (++tries >= MAX_TRIES) {
+                        return false;
+                    }
+                    before = after;
+                }
+            }
+        }
+
+        // (optional) used to indicate to the Mutator that the state that has been polled
+        // has also been acted upon.
+        void done()
+        {
+            const int32_t ack = mShared->mAck + 1;
+            // ensure all previous writes have been performed.
+            android_atomic_release_store(ack, &mShared->mAck); // mSequence is odd after "done"
+        }
 
     private:
         int32_t     mSequence;
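
With push(), poll(), ack() and done() now defined inline above, the handshake can be exercised directly. A single-threaded sketch of the protocol (the Position payload type is made up for the example; in real use Shared lives in memory shared between the two sides):

    #include <media/SingleStateQueue.h>

    using namespace android;

    struct Position {            // example payload; copied by assignment
        int64_t frames;
        int64_t timeNs;
    };

    static void singleStateQueueExample() {
        SingleStateQueue<Position>::Shared shared;
        // Exactly one side initializes the shared block; currently the Observer.
        SingleStateQueue<Position>::Observer observer(&shared);
        SingleStateQueue<Position>::Mutator mutator(&shared);

        Position p = { 480, 10000000 };
        mutator.push(p);                 // writer publishes the new state
        // mutator.ack() == SSQ_PENDING: the observer has not read it yet

        Position seen;
        if (observer.poll(seen)) {       // reader picks up the latest value
            // mutator.ack() == SSQ_READ at this point
            observer.done();             // reader has acted on the state
        }
        // mutator.ack() == SSQ_DONE
    }
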
diff --git a/include/media/StringArray.h b/include/media/StringArray.h
index ae47085..48d98bf 100644
--- a/include/media/StringArray.h
+++ b/include/media/StringArray.h
@@ -16,7 +16,7 @@
 
 //
 // Sortable array of strings.  STL-ish, but STL-free.
-//  
+//
 #ifndef _LIBS_MEDIA_STRING_ARRAY_H
 #define _LIBS_MEDIA_STRING_ARRAY_H
 
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 6167dd6..186e018 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -65,7 +65,8 @@
     /* Constructor.
      * See AudioEffect constructor for details on parameters.
      */
-                        Visualizer(int32_t priority = 0,
+                        Visualizer(const String16& opPackageName,
+                                   int32_t priority = 0,
                                    effect_callback_t cbf = NULL,
                                    void* user = NULL,
                                    int sessionId = 0);
@@ -94,7 +95,8 @@
 
     // install a callback to receive periodic captures. The capture rate is specified in milliHertz
     // and the capture format is according to flags  (see callback_flags).
-    status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate);
+    status_t setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags, uint32_t rate,
+                                bool force = false);
 
     // set the capture size; the capture size must be a power of two in the range
     // [VISUALIZER_CAPTURE_SIZE_MIN, VISUALIZER_CAPTURE_SIZE_MAX]
diff --git a/include/media/mediametadataretriever.h b/include/media/mediametadataretriever.h
index b35cf32..f655f35 100644
--- a/include/media/mediametadataretriever.h
+++ b/include/media/mediametadataretriever.h
@@ -25,6 +25,7 @@
 
 namespace android {
 
+class IDataSource;
 struct IMediaHTTPService;
 class IMediaPlayerService;
 class IMediaMetadataRetriever;
@@ -57,6 +58,7 @@
     METADATA_KEY_IS_DRM          = 22,
     METADATA_KEY_LOCATION        = 23,
     METADATA_KEY_VIDEO_ROTATION  = 24,
+    METADATA_KEY_CAPTURE_FRAMERATE = 25,
 
     // Add more here...
 };
@@ -74,6 +76,7 @@
             const KeyedVector<String8, String8> *headers = NULL);
 
     status_t setDataSource(int fd, int64_t offset, int64_t length);
+    status_t setDataSource(const sp<IDataSource>& dataSource);
     sp<IMemory> getFrameAtTime(int64_t timeUs, int option);
     sp<IMemory> extractAlbumArt();
     const char* extractMetadata(int keyCode);
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 5830933..3fe749c 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -20,6 +20,8 @@
 #include <arpa/inet.h>
 
 #include <binder/IMemory.h>
+
+#include <media/AudioResamplerPublic.h>
 #include <media/IMediaPlayerClient.h>
 #include <media/IMediaPlayer.h>
 #include <media/IMediaDeathNotifier.h>
@@ -32,8 +34,9 @@
 
 namespace android {
 
-class Surface;
+struct AVSyncSettings;
 class IGraphicBufferProducer;
+class Surface;
 
 enum media_event_type {
     MEDIA_NOP               = 0, // interface test message
@@ -50,6 +53,7 @@
     MEDIA_ERROR             = 100,
     MEDIA_INFO              = 200,
     MEDIA_SUBTITLE_DATA     = 201,
+    MEDIA_META_DATA         = 202,
 };
 
 // Generic error codes for the media player framework.  Errors are fatal, the
@@ -183,6 +187,7 @@
     MEDIA_TRACK_TYPE_AUDIO = 2,
     MEDIA_TRACK_TYPE_TIMEDTEXT = 3,
     MEDIA_TRACK_TYPE_SUBTITLE = 4,
+    MEDIA_TRACK_TYPE_METADATA = 5,
 };
 
 // ----------------------------------------------------------------------------
@@ -211,6 +216,7 @@
 
             status_t        setDataSource(int fd, int64_t offset, int64_t length);
             status_t        setDataSource(const sp<IStreamSource> &source);
+            status_t        setDataSource(const sp<IDataSource> &source);
             status_t        setVideoSurfaceTexture(
                                     const sp<IGraphicBufferProducer>& bufferProducer);
             status_t        setListener(const sp<MediaPlayerListener>& listener);
@@ -220,6 +226,12 @@
             status_t        stop();
             status_t        pause();
             bool            isPlaying();
+            status_t        setPlaybackSettings(const AudioPlaybackRate& rate);
+            status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
+            status_t        setSyncSettings(const AVSyncSettings& sync, float videoFpsHint);
+            status_t        getSyncSettings(
+                                    AVSyncSettings* sync /* nonnull */,
+                                    float* videoFps /* nonnull */);
             status_t        getVideoWidth(int *w);
             status_t        getVideoHeight(int *h);
             status_t        seekTo(int msec);
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index b0a62a7..15ff82d 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -32,6 +32,7 @@
 class ICamera;
 class ICameraRecordingProxy;
 class IGraphicBufferProducer;
+struct PersistentSurface;
 class Surface;
 
 typedef void (*media_completion_f)(status_t status, void *cookie);
@@ -209,7 +210,7 @@
                       public virtual IMediaDeathNotifier
 {
 public:
-    MediaRecorder();
+    MediaRecorder(const String16& opPackageName);
     ~MediaRecorder();
 
     void        died();
@@ -221,7 +222,6 @@
     status_t    setOutputFormat(int of);
     status_t    setVideoEncoder(int ve);
     status_t    setAudioEncoder(int ae);
-    status_t    setOutputFile(const char* path);
     status_t    setOutputFile(int fd, int64_t offset, int64_t length);
     status_t    setVideoSize(int width, int height);
     status_t    setVideoFrameRate(int frames_per_second);
@@ -237,6 +237,7 @@
     status_t    close();
     status_t    release();
     void        notify(int msg, int ext1, int ext2);
+    status_t    setInputSurface(const sp<PersistentSurface>& surface);
     sp<IGraphicBufferProducer>     querySurfaceMediaSourceFromMediaServer();
 
 private:
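
The constructor now takes the calling package name and setInputSurface() lets the recorder consume frames from a persistent input surface instead of a camera. A rough client-side sketch; the package name, the VIDEO_SOURCE_SURFACE / OUTPUT_FORMAT_MPEG_4 / VIDEO_ENCODER_H264 constants, and obtaining the PersistentSurface from MediaCodec::CreatePersistentInputSurface() are assumptions about companion changes not shown in this diff:

    #include <media/mediarecorder.h>
    #include <media/stagefright/MediaCodec.h>
    #include <media/stagefright/PersistentSurface.h>

    using namespace android;

    // |fd| is an already-open, writable file descriptor for the output file.
    static sp<MediaRecorder> startSurfaceRecording(int fd) {
        sp<MediaRecorder> recorder = new MediaRecorder(String16("com.example.app"));

        recorder->setVideoSource(VIDEO_SOURCE_SURFACE);   // assumed constant name
        recorder->setOutputFormat(OUTPUT_FORMAT_MPEG_4);
        recorder->setVideoEncoder(VIDEO_ENCODER_H264);
        recorder->setOutputFile(fd, 0 /* offset */, 0 /* length */);

        // Persistent input surface created elsewhere (here via MediaCodec, an
        // assumption about the companion change) and handed over before prepare().
        sp<PersistentSurface> surface = MediaCodec::CreatePersistentInputSurface();
        recorder->setInputSurface(surface);

        recorder->prepare();
        recorder->start();
        return recorder;
    }
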
diff --git a/include/media/nbaio/NBAIO.h b/include/media/nbaio/NBAIO.h
index d422576..d9bbc8d 100644
--- a/include/media/nbaio/NBAIO.h
+++ b/include/media/nbaio/NBAIO.h
@@ -231,7 +231,8 @@
     virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
 
 protected:
-    NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { }
+    NBAIO_Sink(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0)
+            { }
     virtual ~NBAIO_Sink() { }
 
     // Implementations are free to ignore these if they don't need them
@@ -322,7 +323,8 @@
     virtual void    onTimestamp(const AudioTimestamp& timestamp) { }
 
 protected:
-    NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0) { }
+    NBAIO_Source(const NBAIO_Format& format = Format_Invalid) : NBAIO_Port(format), mFramesRead(0)
+            { }
     virtual ~NBAIO_Source() { }
 
     // Implementations are free to ignore these if they don't need them
diff --git a/include/media/nbaio/NBLog.h b/include/media/nbaio/NBLog.h
index bcbbc04..1297b51 100644
--- a/include/media/nbaio/NBLog.h
+++ b/include/media/nbaio/NBLog.h
@@ -21,7 +21,7 @@
 
 #include <binder/IMemory.h>
 #include <utils/Mutex.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
 
 namespace android {
 
diff --git a/include/media/stagefright/AACWriter.h b/include/media/stagefright/AACWriter.h
index d22707a..aa60a19 100644
--- a/include/media/stagefright/AACWriter.h
+++ b/include/media/stagefright/AACWriter.h
@@ -24,10 +24,9 @@
 namespace android {
 
 struct MediaSource;
-struct MetaData;
+class MetaData;
 
 struct AACWriter : public MediaWriter {
-    AACWriter(const char *filename);
     AACWriter(int fd);
 
     status_t initCheck() const;
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 7825508..8b5b862 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -20,9 +20,11 @@
 
 #include <stdint.h>
 #include <android/native_window.h>
+#include <media/hardware/MetadataBufferType.h>
 #include <media/IOMX.h>
 #include <media/stagefright/foundation/AHierarchicalStateMachine.h>
 #include <media/stagefright/CodecBase.h>
+#include <media/stagefright/FrameRenderTracker.h>
 #include <media/stagefright/SkipCutBuffer.h>
 #include <OMX_Audio.h>
 
@@ -44,9 +46,12 @@
     virtual void initiateAllocateComponent(const sp<AMessage> &msg);
     virtual void initiateConfigureComponent(const sp<AMessage> &msg);
     virtual void initiateCreateInputSurface();
+    virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface);
     virtual void initiateStart();
     virtual void initiateShutdown(bool keepComponentAllocated = false);
 
+    virtual status_t setSurface(const sp<Surface> &surface);
+
     virtual void signalFlush();
     virtual void signalResume();
 
@@ -105,6 +110,10 @@
     enum {
         kWhatSetup                   = 'setu',
         kWhatOMXMessage              = 'omx ',
+        // same as kWhatOMXMessage - but only used with
+        // handleMessage during OMX message-list handling
+        kWhatOMXMessageItem          = 'omxI',
+        kWhatOMXMessageList          = 'omxL',
         kWhatInputBufferFilled       = 'inpF',
         kWhatOutputBufferDrained     = 'outD',
         kWhatShutdown                = 'shut',
@@ -113,12 +122,14 @@
         kWhatDrainDeferredMessages   = 'drai',
         kWhatAllocateComponent       = 'allo',
         kWhatConfigureComponent      = 'conf',
+        kWhatSetSurface              = 'setS',
         kWhatCreateInputSurface      = 'cisf',
+        kWhatSetInputSurface         = 'sisf',
         kWhatSignalEndOfInputStream  = 'eois',
         kWhatStart                   = 'star',
         kWhatRequestIDRFrame         = 'ridr',
         kWhatSetParameters           = 'setP',
-        kWhatSubmitOutputMetaDataBufferIfEOS = 'subm',
+        kWhatSubmitOutputMetadataBufferIfEOS = 'subm',
         kWhatOMXDied                 = 'OMXd',
         kWhatReleaseCodecInstance    = 'relC',
     };
@@ -134,6 +145,12 @@
         kFlagIsGrallocUsageProtected                  = 4,
     };
 
+    enum {
+        kVideoGrallocUsage = (GRALLOC_USAGE_HW_TEXTURE
+                            | GRALLOC_USAGE_HW_COMPOSER
+                            | GRALLOC_USAGE_EXTERNAL_DISP),
+    };
+
     struct BufferInfo {
         enum Status {
             OWNED_BY_US,
@@ -141,16 +158,39 @@
             OWNED_BY_UPSTREAM,
             OWNED_BY_DOWNSTREAM,
             OWNED_BY_NATIVE_WINDOW,
+            UNRECOGNIZED,            // not a tracked buffer
         };
 
+        static inline Status getSafeStatus(BufferInfo *info) {
+            return info == NULL ? UNRECOGNIZED : info->mStatus;
+        }
+
         IOMX::buffer_id mBufferID;
         Status mStatus;
         unsigned mDequeuedAt;
 
         sp<ABuffer> mData;
         sp<GraphicBuffer> mGraphicBuffer;
+        int mFenceFd;
+        FrameRenderTracker::Info *mRenderInfo;
+
+        // The following field and 4 methods are used for debugging only
+        bool mIsReadFence;
+        // Store |fenceFd| and set the read/write flag. Log an error if there is already a fence stored.
+        void setReadFence(int fenceFd, const char *dbg);
+        void setWriteFence(int fenceFd, const char *dbg);
+        // Log an error if the current fence is not a read/write fence.
+        void checkReadFence(const char *dbg);
+        void checkWriteFence(const char *dbg);
     };
 
+    static const char *_asString(BufferInfo::Status s);
+    void dumpBuffers(OMX_U32 portIndex);
+
+    // If |fd| is non-negative, waits for fence with |fd| and logs an error if it fails. Returns
+    // the error code or OK on success. If |fd| is negative, it returns OK.
+    status_t waitForFence(int fd, const char *dbg);
+
 #if TRACK_BUFFER_TIMING
     struct BufferStats {
         int64_t mEmptyBufferTimeUs;
@@ -181,10 +221,12 @@
     sp<MemoryDealer> mDealer[2];
 
     sp<ANativeWindow> mNativeWindow;
+    int mNativeWindowUsageBits;
     sp<AMessage> mInputFormat;
     sp<AMessage> mOutputFormat;
     sp<AMessage> mBaseOutputFormat;
 
+    FrameRenderTracker mRenderTracker; // render information for buffers rendered by ACodec
     Vector<BufferInfo> mBuffers[2];
     bool mPortEOS[2];
     status_t mInputEOSResult;
@@ -192,8 +234,8 @@
     List<sp<AMessage> > mDeferredQueue;
 
     bool mSentFormat;
+    bool mIsVideo;
     bool mIsEncoder;
-    bool mUseMetadataOnEncoderOutput;
     bool mFatalError;
     bool mShutdownInProgress;
     bool mExplicitShutdown;
@@ -209,12 +251,15 @@
     bool mChannelMaskPresent;
     int32_t mChannelMask;
     unsigned mDequeueCounter;
-    bool mStoreMetaDataInOutputBuffers;
-    int32_t mMetaDataBuffersToSubmit;
+    MetadataBufferType mInputMetadataType;
+    MetadataBufferType mOutputMetadataType;
+    bool mLegacyAdaptiveExperiment;
+    int32_t mMetadataBuffersToSubmit;
     size_t mNumUndequeuedBuffers;
 
     int64_t mRepeatFrameDelayUs;
     int64_t mMaxPtsGapUs;
+    float mMaxFps;
 
     int64_t mTimePerFrameUs;
     int64_t mTimePerCaptureUs;
@@ -228,17 +273,29 @@
     status_t freeBuffersOnPort(OMX_U32 portIndex);
     status_t freeBuffer(OMX_U32 portIndex, size_t i);
 
+    status_t handleSetSurface(const sp<Surface> &surface);
+    status_t setupNativeWindowSizeFormatAndUsage(
+            ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */);
+
     status_t configureOutputBuffersFromNativeWindow(
             OMX_U32 *nBufferCount, OMX_U32 *nBufferSize,
             OMX_U32 *nMinUndequeuedBuffers);
-    status_t allocateOutputMetaDataBuffers();
-    status_t submitOutputMetaDataBuffer();
-    void signalSubmitOutputMetaDataBufferIfEOS_workaround();
+    status_t allocateOutputMetadataBuffers();
+    status_t submitOutputMetadataBuffer();
+    void signalSubmitOutputMetadataBufferIfEOS_workaround();
     status_t allocateOutputBuffersFromNativeWindow();
     status_t cancelBufferToNativeWindow(BufferInfo *info);
     status_t freeOutputBuffersNotOwnedByComponent();
     BufferInfo *dequeueBufferFromNativeWindow();
 
+    inline bool storingMetadataInDecodedBuffers() {
+        return mOutputMetadataType >= 0 && !mIsEncoder;
+    }
+
+    inline bool usingMetadataOnEncoderOutput() {
+        return mOutputMetadataType >= 0 && mIsEncoder;
+    }
+
     BufferInfo *findBufferByID(
             uint32_t portIndex, IOMX::buffer_id bufferID,
             ssize_t *index = NULL);
@@ -299,6 +356,9 @@
     status_t setupRawAudioFormat(
             OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
 
+    status_t setPriority(int32_t priority);
+    status_t setOperatingRate(float rateFloat, bool isVideo);
+
     status_t setMinBufferSize(OMX_U32 portIndex, size_t size);
 
     status_t setupMPEG4EncoderParameters(const sp<AMessage> &msg);
@@ -316,8 +376,6 @@
 
     status_t initNativeWindow();
 
-    status_t pushBlankBuffersToNativeWindow();
-
     // Returns true iff all buffers on the given port have status
     // OWNED_BY_US or OWNED_BY_NATIVE_WINDOW.
     bool allYourBuffersAreBelongToUs(OMX_U32 portIndex);
@@ -332,6 +390,23 @@
     void deferMessage(const sp<AMessage> &msg);
     void processDeferredMessages();
 
+    void onFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano);
+    // called when we have dequeued a buffer |buf| from the native window to track render info.
+    // |fenceFd| is the dequeue fence, and |info| points to the buffer info where this buffer is
+    // stored.
+    void updateRenderInfoForDequeuedBuffer(
+            ANativeWindowBuffer *buf, int fenceFd, BufferInfo *info);
+
+    // Checks whether any frames have rendered up until |until|, and notifies the client
+    // (MediaCodec) of rendered frames up to the frame pointed to by |until| or the first
+    // unrendered frame. These frames are removed from the render queue.
+    // If |dropIncomplete| is true, unrendered frames up to |until| are also dropped from the
+    // queue, so that all rendered frames up to that point can be notified.
+    // (This effectively clears the render queue up to (and including) |until|.)
+    // If |until| is NULL, or is not in the render queue, this method checks all frames.
+    void notifyOfRenderedFrames(
+            bool dropIncomplete = false, FrameRenderTracker::Info *until = NULL);
+
     void sendFormatChange(const sp<AMessage> &reply);
     status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify);
 
diff --git a/include/media/stagefright/AMRWriter.h b/include/media/stagefright/AMRWriter.h
index 392f968..b38be55 100644
--- a/include/media/stagefright/AMRWriter.h
+++ b/include/media/stagefright/AMRWriter.h
@@ -26,10 +26,9 @@
 namespace android {
 
 struct MediaSource;
-struct MetaData;
+class MetaData;
 
 struct AMRWriter : public MediaWriter {
-    AMRWriter(const char *filename);
     AMRWriter(int fd);
 
     status_t initCheck() const;
diff --git a/include/media/stagefright/AudioPlayer.h b/include/media/stagefright/AudioPlayer.h
index 14afb85..e0cd965 100644
--- a/include/media/stagefright/AudioPlayer.h
+++ b/include/media/stagefright/AudioPlayer.h
@@ -25,9 +25,10 @@
 
 namespace android {
 
-class MediaSource;
+struct AudioPlaybackRate;
 class AudioTrack;
-class AwesomePlayer;
+struct AwesomePlayer;
+class MediaSource;
 
 class AudioPlayer : public TimeSource {
 public:
@@ -73,7 +74,8 @@
     bool isSeeking();
     bool reachedEOS(status_t *finalStatus);
 
-    status_t setPlaybackRatePermille(int32_t ratePermille);
+    status_t setPlaybackRate(const AudioPlaybackRate &rate);
+    status_t getPlaybackRate(AudioPlaybackRate *rate /* nonnull */);
 
     void notifyAudioEOS();
 
diff --git a/include/media/stagefright/AudioSource.h b/include/media/stagefright/AudioSource.h
index 4c9aaad..3074910 100644
--- a/include/media/stagefright/AudioSource.h
+++ b/include/media/stagefright/AudioSource.h
@@ -35,8 +35,10 @@
     // _not_ a bitmask of audio_channels_t constants.
     AudioSource(
             audio_source_t inputSource,
+            const String16 &opPackageName,
             uint32_t sampleRate,
-            uint32_t channels = 1);
+            uint32_t channels,
+            uint32_t outSampleRate = 0);
 
     status_t initCheck() const;
 
@@ -77,11 +79,13 @@
     status_t mInitCheck;
     bool mStarted;
     int32_t mSampleRate;
+    int32_t mOutSampleRate;
 
     bool mTrackMaxAmplitude;
     int64_t mStartTimeUs;
     int16_t mMaxAmplitude;
     int64_t mPrevSampleTimeUs;
+    int64_t mFirstSampleTimeUs;
     int64_t mInitialReadTimeUs;
     int64_t mNumFramesReceived;
     int64_t mNumClientOwnedBuffers;
diff --git a/include/media/stagefright/BufferProducerWrapper.h b/include/media/stagefright/BufferProducerWrapper.h
index d8acf30..4caa2c6 100644
--- a/include/media/stagefright/BufferProducerWrapper.h
+++ b/include/media/stagefright/BufferProducerWrapper.h
@@ -19,6 +19,7 @@
 #define BUFFER_PRODUCER_WRAPPER_H_
 
 #include <gui/IGraphicBufferProducer.h>
+#include <media/stagefright/foundation/ABase.h>
 
 namespace android {
 
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index dd0a106..069e897 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -83,7 +83,7 @@
                                           Size videoSize,
                                           int32_t frameRate,
                                           const sp<IGraphicBufferProducer>& surface,
-                                          bool storeMetaDataInVideoBuffers = false);
+                                          bool storeMetaDataInVideoBuffers = true);
 
     virtual ~CameraSource();
 
@@ -149,6 +149,8 @@
     int32_t  mNumInputBuffers;
     int32_t  mVideoFrameRate;
     int32_t  mColorFormat;
+    int32_t  mEncoderFormat;
+    int32_t  mEncoderDataSpace;
     status_t mInitCheck;
 
     sp<Camera>   mCamera;
@@ -188,7 +190,7 @@
     void releaseCamera();
 
 private:
-    friend class CameraSourceListener;
+    friend struct CameraSourceListener;
 
     Mutex mLock;
     Condition mFrameAvailableCondition;
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index 1bf27a6..bb36052 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -26,6 +26,7 @@
 namespace android {
 
 struct ABuffer;
+struct PersistentSurface;
 
 struct CodecBase : public AHandler {
     enum {
@@ -39,8 +40,10 @@
         kWhatComponentAllocated  = 'cAll',
         kWhatComponentConfigured = 'cCon',
         kWhatInputSurfaceCreated = 'isfc',
+        kWhatInputSurfaceAccepted = 'isfa',
         kWhatSignaledInputEOS    = 'seos',
         kWhatBuffersAllocated    = 'allc',
+        kWhatOutputFramesRendered = 'outR',
     };
 
     virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
@@ -48,12 +51,16 @@
     virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
     virtual void initiateConfigureComponent(const sp<AMessage> &msg) = 0;
     virtual void initiateCreateInputSurface() = 0;
+    virtual void initiateSetInputSurface(
+            const sp<PersistentSurface> &surface) = 0;
     virtual void initiateStart() = 0;
     virtual void initiateShutdown(bool keepComponentAllocated = false) = 0;
 
     // require an explicit message handler
     virtual void onMessageReceived(const sp<AMessage> &msg) = 0;
 
+    virtual status_t setSurface(const sp<Surface> &surface) { return INVALID_OPERATION; }
+
     virtual void signalFlush() = 0;
     virtual void signalResume() = 0;
 
diff --git a/include/media/stagefright/DataSource.h b/include/media/stagefright/DataSource.h
index 3630263..dcde36f 100644
--- a/include/media/stagefright/DataSource.h
+++ b/include/media/stagefright/DataSource.h
@@ -32,6 +32,7 @@
 
 struct AMessage;
 struct AString;
+class  IDataSource;
 struct IMediaHTTPService;
 class String8;
 struct HTTPBase;
@@ -53,11 +54,15 @@
             HTTPBase *httpSource = NULL);
 
     static sp<DataSource> CreateMediaHTTP(const sp<IMediaHTTPService> &httpService);
+    static sp<DataSource> CreateFromIDataSource(const sp<IDataSource> &source);
 
     DataSource() {}
 
     virtual status_t initCheck() const = 0;
 
+    // Returns the number of bytes read, or -1 on failure. It's not an error if
+    // this returns zero; it just means the given offset is equal to, or
+    // beyond, the end of the source.
     virtual ssize_t readAt(off64_t offset, void *data, size_t size) = 0;
 
     // Convenience methods:
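
The new comment on DataSource::readAt() above spells out its contract: a negative return is a failure, zero means the offset is at or beyond the end of the source, and a short positive count is not an error. A hedged sketch of a read-fully loop built on that contract follows; DataSourceLike and MemorySource are stand-ins invented for the example, and int64_t stands in for off64_t/ssize_t for portability.

// read_fully_sketch.cpp -- models the readAt() contract documented above.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct DataSourceLike {
    virtual ~DataSourceLike() {}
    // >0: bytes read, 0: at or beyond end of source, <0: error.
    virtual int64_t readAt(int64_t offset, void *data, size_t size) = 0;
};

// In-memory source used only to exercise the loop below.
struct MemorySource : DataSourceLike {
    std::vector<uint8_t> mData;
    explicit MemorySource(size_t n) : mData(n, 0xAB) {}
    int64_t readAt(int64_t offset, void *data, size_t size) override {
        if (offset >= (int64_t)mData.size()) {
            return 0;                          // EOF is not an error
        }
        size_t avail = mData.size() - (size_t)offset;
        size_t n = size < avail ? size : avail;
        memcpy(data, mData.data() + offset, n);
        return (int64_t)n;
    }
};

// Reads up to |size| bytes starting at |offset|, looping over short reads.
// Returns the number of bytes actually read, or a negative error code.
static int64_t readFullyAt(DataSourceLike *src, int64_t offset, void *data, size_t size) {
    size_t total = 0;
    while (total < size) {
        int64_t n = src->readAt(offset + (int64_t)total,
                                (uint8_t *)data + total, size - total);
        if (n < 0) return n;                   // propagate the failure
        if (n == 0) break;                     // end of source reached
        total += (size_t)n;
    }
    return (int64_t)total;
}

int main() {
    MemorySource src(10);
    uint8_t buf[16];
    // Only 6 bytes remain past offset 4, so this reports 6, not an error.
    printf("read %lld of 16 bytes at offset 4\n",
           (long long)readFullyAt(&src, 4, buf, sizeof(buf)));
    return 0;
}
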
diff --git a/include/media/stagefright/FrameRenderTracker.h b/include/media/stagefright/FrameRenderTracker.h
new file mode 100644
index 0000000..9333e8f
--- /dev/null
+++ b/include/media/stagefright/FrameRenderTracker.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_RENDER_TRACKER_H_
+
+#define FRAME_RENDER_TRACKER_H_
+
+#include <utils/RefBase.h>
+#include <utils/Timers.h>
+#include <system/window.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <list>
+
+namespace android {
+
+class Fence;
+class GraphicBuffer;
+
+struct FrameRenderTracker : public RefBase {
+    // Tracks the render information about a frame. Frames go through several states while
+    // the render information is tracked:
+    //
+    // 1. queued frame: mMediaTime and mGraphicBuffer are set for the frame. mFence is the
+    // queue fence (read fence). mIndex is negative, and mRenderTimeNs is invalid.
+    // Key characteristics: mFence is not NULL and mIndex is negative.
+    //
+    // 2. dequeued frame: mFence is updated with the dequeue fence (write fence). mIndex is set.
+    // Key characteristics: mFence is not NULL and mIndex is non-negative. mRenderTime is still
+    // invalid.
+    //
+    // 3. rendered frame or frame: mFence is cleared, mRenderTimeNs is set.
+    // Key characteristics: mFence is NULL.
+    //
+    struct Info {
+        // set by client during onFrameQueued or onFrameRendered
+        int64_t getMediaTimeUs() const  { return mMediaTimeUs; }
+
+        // -1 if frame is not yet rendered
+        nsecs_t getRenderTimeNs() const { return mRenderTimeNs; }
+
+        // set by client during updateRenderInfoForDequeuedBuffer; -1 otherwise
+        ssize_t getIndex() const        { return mIndex; }
+
+        // creates information for a queued frame
+        Info(int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer, const sp<Fence> &fence)
+            : mMediaTimeUs(mediaTimeUs),
+              mRenderTimeNs(-1),
+              mIndex(-1),
+              mGraphicBuffer(graphicBuffer),
+              mFence(fence) {
+        }
+
+        // creates information for a frame rendered on a tunneled surface
+        Info(int64_t mediaTimeUs, nsecs_t renderTimeNs)
+            : mMediaTimeUs(mediaTimeUs),
+              mRenderTimeNs(renderTimeNs),
+              mIndex(-1),
+              mGraphicBuffer(NULL),
+              mFence(NULL) {
+        }
+
+    private:
+        int64_t mMediaTimeUs;
+        nsecs_t mRenderTimeNs;
+        ssize_t mIndex;         // to be used by client
+        sp<GraphicBuffer> mGraphicBuffer;
+        sp<Fence> mFence;
+
+        friend class FrameRenderTracker;
+    };
+
+    FrameRenderTracker();
+
+    void setComponentName(const AString &componentName);
+
+    // clears all tracked frames, and resets last render time
+    void clear(nsecs_t lastRenderTimeNs);
+
+    // called when |graphicBuffer| corresponding to |mediaTimeUs| is
+    // queued to the output surface using |fence|.
+    void onFrameQueued(
+            int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer, const sp<Fence> &fence);
+
+    // Called when we have dequeued a buffer |buf| from the native window to track render info.
+    // |fenceFd| is the dequeue fence, and |index| is a positive buffer ID to be usable by the
+    // client to track this render info among the dequeued buffers.
+    // Returns pointer to the tracked info, or NULL if buffer is not tracked or if |index|
+    // is negative.
+    Info *updateInfoForDequeuedBuffer(ANativeWindowBuffer *buf, int fenceFd, int index);
+
+    // called when tunneled codec signals frame rendered event
+    // returns BAD_VALUE if systemNano is not monotonic. Otherwise, returns OK.
+    status_t onFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano);
+
+    // Checks whether any frames have rendered up until |until|. If |until| is NULL or not a
+    // tracked info, this method searches the entire render queue.
+    // Returns the list of rendered frames up to the frame pointed to by |until| or the first
+    // unrendered frame, as well as any dropped frames (those with an invalid fence) up to |until|.
+    // These frames are removed from the render queue.
+    // If |dropIncomplete| is true, unrendered frames up to |until| are also dropped from the
+    // queue, so that all rendered frames up to that point can be returned.
+    // (This effectively clears the render queue up to (and including) |until|.)
+    std::list<Info> checkFencesAndGetRenderedFrames(const Info *until, bool dropIncomplete);
+
+    // Stop tracking a queued frame (e.g. if the frame has been discarded). If |info| is NULL or is
+    // not tracked, this method is a no-op. If |index| is specified, all indices larger than |index|
+    // are decremented. This is useful if the untracked frame is deleted from the frame vector.
+    void untrackFrame(const Info *info, ssize_t index = SSIZE_MAX);
+
+    void dumpRenderQueue() const;
+
+    virtual ~FrameRenderTracker();
+
+private:
+
+    // Render information for buffers. Regular surface buffers are queued in the order of
+    // rendering. Tunneled buffers are queued in the order of receipt.
+    std::list<Info> mRenderQueue;
+    nsecs_t mLastRenderTimeNs;
+    AString mComponentName;
+
+    DISALLOW_EVIL_CONSTRUCTORS(FrameRenderTracker);
+};
+
+}  // namespace android
+
+#endif  // FRAME_RENDER_TRACKER_H_
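
FrameRenderTracker (added above) keeps a render queue, and per the checkFencesAndGetRenderedFrames() comment it hands back rendered frames only up to the first unrendered one. Below is a simplified standalone model of that queue walk; a plain bool stands in for "the buffer's fence has signaled", whereas the real tracker inspects Fence objects, render timestamps, and the |until| / |dropIncomplete| arguments.

// render_queue_sketch.cpp -- simplified model of the render-queue scan.
#include <cstdint>
#include <cstdio>
#include <list>

struct FrameInfo {
    int64_t mediaTimeUs;
    bool    rendered;        // stand-in for "fence signaled / render time known"
};

// Pops rendered frames from the front of |queue| up to the first frame that
// has not rendered yet, and returns them in display order.
static std::list<FrameInfo> takeRenderedFrames(std::list<FrameInfo> &queue) {
    std::list<FrameInfo> done;
    while (!queue.empty() && queue.front().rendered) {
        done.push_back(queue.front());
        queue.pop_front();
    }
    return done;
}

int main() {
    std::list<FrameInfo> queue = {
        { 0,      true  },
        { 33333,  true  },
        { 66666,  false },   // first unrendered frame stops the scan
        { 100000, true  },   // stays queued even though it already rendered
    };
    for (const FrameInfo &info : takeRenderedFrames(queue)) {
        printf("rendered frame at %lld us\n", (long long)info.mediaTimeUs);
    }
    printf("%zu frame(s) still queued\n", queue.size());
    return 0;
}
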
diff --git a/include/media/stagefright/MPEG2TSWriter.h b/include/media/stagefright/MPEG2TSWriter.h
index 2e2922e..3d7960b 100644
--- a/include/media/stagefright/MPEG2TSWriter.h
+++ b/include/media/stagefright/MPEG2TSWriter.h
@@ -29,7 +29,6 @@
 
 struct MPEG2TSWriter : public MediaWriter {
     MPEG2TSWriter(int fd);
-    MPEG2TSWriter(const char *filename);
 
     MPEG2TSWriter(
             void *cookie,
diff --git a/include/media/stagefright/MPEG4Writer.h b/include/media/stagefright/MPEG4Writer.h
index 26ce5f9..a195fe8 100644
--- a/include/media/stagefright/MPEG4Writer.h
+++ b/include/media/stagefright/MPEG4Writer.h
@@ -26,13 +26,13 @@
 
 namespace android {
 
+class AMessage;
 class MediaBuffer;
 class MediaSource;
 class MetaData;
 
 class MPEG4Writer : public MediaWriter {
 public:
-    MPEG4Writer(const char *filename);
     MPEG4Writer(int fd);
 
     // Limitations
@@ -49,6 +49,7 @@
     virtual status_t dump(int fd, const Vector<String16>& args);
 
     void beginBox(const char *fourcc);
+    void beginBox(uint32_t id);
     void writeInt8(int8_t x);
     void writeInt16(int16_t x);
     void writeInt32(int32_t x);
@@ -63,6 +64,7 @@
     int32_t getTimeScale() const { return mTimeScale; }
 
     status_t setGeoData(int latitudex10000, int longitudex10000);
+    status_t setCaptureRate(float captureFps);
     virtual void setStartTimeOffsetMs(int ms) { mStartTimeOffsetMs = ms; }
     virtual int32_t getStartTimeOffsetMs() const { return mStartTimeOffsetMs; }
 
@@ -89,6 +91,7 @@
     off64_t mFreeBoxOffset;
     bool mStreamableFile;
     off64_t mEstimatedMoovBoxSize;
+    off64_t mMoovExtraSize;
     uint32_t mInterleaveDurationUs;
     int32_t mTimeScale;
     int64_t mStartTimestampUs;
@@ -103,6 +106,8 @@
 
     List<off64_t> mBoxes;
 
+    sp<AMessage> mMetaKeys;
+
     void setStartTimestampUs(int64_t timeUs);
     int64_t getStartTimestampUs();  // Not const
     status_t startTracks(MetaData *params);
@@ -196,6 +201,12 @@
     void writeGeoDataBox();
     void writeLatitude(int degreex10000);
     void writeLongitude(int degreex10000);
+
+    void addDeviceMeta();
+    void writeHdlr();
+    void writeKeys();
+    void writeIlst();
+    void writeMetaBox();
     void sendSessionSummary();
     void release();
     status_t reset();
diff --git a/include/media/stagefright/MediaClock.h b/include/media/stagefright/MediaClock.h
new file mode 100644
index 0000000..dd1a809
--- /dev/null
+++ b/include/media/stagefright/MediaClock.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CLOCK_H_
+
+#define MEDIA_CLOCK_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct AMessage;
+
+struct MediaClock : public RefBase {
+    MediaClock();
+
+    void setStartingTimeMedia(int64_t startingTimeMediaUs);
+
+    void clearAnchor();
+    // In paused state, the timestamp of the just-rendered frame must be
+    // used as the anchor time.
+    void updateAnchor(
+            int64_t anchorTimeMediaUs,
+            int64_t anchorTimeRealUs,
+            int64_t maxTimeMediaUs = INT64_MAX);
+
+    void updateMaxTimeMedia(int64_t maxTimeMediaUs);
+
+    void setPlaybackRate(float rate);
+    float getPlaybackRate() const;
+
+    // query media time corresponding to real time |realUs|, and save the
+    // result in |outMediaUs|.
+    status_t getMediaTime(
+            int64_t realUs,
+            int64_t *outMediaUs,
+            bool allowPastMaxTime = false) const;
+    // query real time corresponding to media time |targetMediaUs|.
+    // The result is saved in |outRealUs|.
+    status_t getRealTimeFor(int64_t targetMediaUs, int64_t *outRealUs) const;
+
+protected:
+    virtual ~MediaClock();
+
+private:
+    status_t getMediaTime_l(
+            int64_t realUs,
+            int64_t *outMediaUs,
+            bool allowPastMaxTime) const;
+
+    mutable Mutex mLock;
+
+    int64_t mAnchorTimeMediaUs;
+    int64_t mAnchorTimeRealUs;
+    int64_t mMaxTimeMediaUs;
+    int64_t mStartingTimeMediaUs;
+
+    float mPlaybackRate;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaClock);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_CLOCK_H_
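
MediaClock (added above) maps real time to media time through an anchor pair plus a playback rate, clamped to a maximum media time. The sketch below models only that documented arithmetic; ClockModel is an invented stand-in and omits locking, clearAnchor(), setStartingTimeMedia(), and getRealTimeFor().

// media_clock_sketch.cpp -- models the anchor arithmetic implied by
// updateAnchor()/getMediaTime() above.
#include <cstdint>
#include <cstdio>

struct ClockModel {
    int64_t anchorMediaUs = -1;        // -1 means "no anchor set"
    int64_t anchorRealUs  = -1;
    int64_t maxMediaUs    = INT64_MAX;
    float   rate          = 1.0f;

    void updateAnchor(int64_t mediaUs, int64_t realUs, int64_t maxUs = INT64_MAX) {
        anchorMediaUs = mediaUs;
        anchorRealUs  = realUs;
        maxMediaUs    = maxUs;
    }

    // Media time corresponding to |realUs|; returns false if no anchor is set.
    bool getMediaTime(int64_t realUs, int64_t *outMediaUs, bool allowPastMax = false) const {
        if (anchorMediaUs < 0) {
            return false;
        }
        int64_t mediaUs = anchorMediaUs
                + (int64_t)((realUs - anchorRealUs) * (double)rate);
        if (!allowPastMax && mediaUs > maxMediaUs) {
            mediaUs = maxMediaUs;      // don't report media time that hasn't been queued yet
        }
        *outMediaUs = mediaUs;
        return true;
    }
};

int main() {
    ClockModel clock;
    clock.updateAnchor(/* mediaUs */ 0, /* realUs */ 1000000);
    int64_t mediaUs = 0;
    if (clock.getMediaTime(/* realUs */ 1500000, &mediaUs)) {
        printf("media time after 0.5s of real time: %lld us\n", (long long)mediaUs);
    }
    return 0;
}

At rate 1.0 this prints 500000 us for a query half a second of real time past the anchor; halving the rate would halve the reported advance.
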
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index d448097..c10963d 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -20,17 +20,25 @@
 
 #include <gui/IGraphicBufferProducer.h>
 #include <media/hardware/CryptoAPI.h>
+#include <media/MediaResource.h>
 #include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/FrameRenderTracker.h>
 #include <utils/Vector.h>
 
 namespace android {
 
 struct ABuffer;
 struct AMessage;
+struct AReplyToken;
 struct AString;
 struct CodecBase;
-struct ICrypto;
 struct IBatteryStats;
+struct ICrypto;
+class IMemory;
+struct MemoryDealer;
+class IResourceManagerClient;
+class IResourceManagerService;
+struct PersistentSurface;
 struct SoftwareRenderer;
 struct Surface;
 
@@ -50,15 +58,20 @@
         CB_OUTPUT_AVAILABLE = 2,
         CB_ERROR = 3,
         CB_OUTPUT_FORMAT_CHANGED = 4,
+        CB_RESOURCE_RECLAIMED = 5,
     };
 
-    struct BatteryNotifier;
+    static const pid_t kNoPid = -1;
 
     static sp<MediaCodec> CreateByType(
-            const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err = NULL);
+            const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err = NULL,
+            pid_t pid = kNoPid);
 
     static sp<MediaCodec> CreateByComponentName(
-            const sp<ALooper> &looper, const char *name, status_t *err = NULL);
+            const sp<ALooper> &looper, const char *name, status_t *err = NULL,
+            pid_t pid = kNoPid);
+
+    static sp<PersistentSurface> CreatePersistentInputSurface();
 
     status_t configure(
             const sp<AMessage> &format,
@@ -68,8 +81,12 @@
 
     status_t setCallback(const sp<AMessage> &callback);
 
+    status_t setOnFrameRenderedNotification(const sp<AMessage> &notify);
+
     status_t createInputSurface(sp<IGraphicBufferProducer>* bufferProducer);
 
+    status_t setInputSurface(const sp<PersistentSurface> &surface);
+
     status_t start();
 
     // Returns to a state in which the component remains allocated but
@@ -125,6 +142,8 @@
     status_t getOutputFormat(sp<AMessage> *format) const;
     status_t getInputFormat(sp<AMessage> *format) const;
 
+    status_t getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const;
+
     status_t getInputBuffers(Vector<sp<ABuffer> > *buffers) const;
     status_t getOutputBuffers(Vector<sp<ABuffer> > *buffers) const;
 
@@ -132,6 +151,8 @@
     status_t getOutputFormat(size_t index, sp<AMessage> *format);
     status_t getInputBuffer(size_t index, sp<ABuffer> *buffer);
 
+    status_t setSurface(const sp<Surface> &nativeWindow);
+
     status_t requestIDRFrame();
 
     // Notification will be posted once there "is something to do", i.e.
@@ -143,11 +164,22 @@
 
     status_t setParameters(const sp<AMessage> &params);
 
+    // Create a MediaCodec notification message from a list of rendered or dropped render infos
+    // by adding rendered frame information to a base notification message. Returns the number
+    // of frames that were rendered.
+    static size_t CreateFramesRenderedMessage(
+            std::list<FrameRenderTracker::Info> done, sp<AMessage> &msg);
+
 protected:
     virtual ~MediaCodec();
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
 private:
+    // used by ResourceManagerClient
+    status_t reclaim();
+    friend struct ResourceManagerClient;
+
+private:
     enum State {
         UNINITIALIZED,
         INITIALIZING,
@@ -170,7 +202,9 @@
     enum {
         kWhatInit                           = 'init',
         kWhatConfigure                      = 'conf',
+        kWhatSetSurface                     = 'sSur',
         kWhatCreateInputSurface             = 'cisf',
+        kWhatSetInputSurface                = 'sisf',
         kWhatStart                          = 'strt',
         kWhatStop                           = 'stop',
         kWhatRelease                        = 'rele',
@@ -191,6 +225,7 @@
         kWhatGetName                        = 'getN',
         kWhatSetParameters                  = 'setP',
         kWhatSetCallback                    = 'setC',
+        kWhatSetNotification                = 'setN',
     };
 
     enum {
@@ -206,39 +241,78 @@
         kFlagGatherCodecSpecificData    = 512,
         kFlagIsAsync                    = 1024,
         kFlagIsComponentAllocated       = 2048,
+        kFlagPushBlankBuffersOnShutdown = 4096,
     };
 
     struct BufferInfo {
         uint32_t mBufferID;
         sp<ABuffer> mData;
         sp<ABuffer> mEncryptedData;
+        sp<IMemory> mSharedEncryptedBuffer;
         sp<AMessage> mNotify;
         sp<AMessage> mFormat;
         bool mOwnedByClient;
     };
 
+    struct ResourceManagerServiceProxy : public IBinder::DeathRecipient {
+        ResourceManagerServiceProxy(pid_t pid);
+        ~ResourceManagerServiceProxy();
+
+        void init();
+
+        // implements DeathRecipient
+        virtual void binderDied(const wp<IBinder>& /*who*/);
+
+        void addResource(
+                int64_t clientId,
+                const sp<IResourceManagerClient> client,
+                const Vector<MediaResource> &resources);
+
+        void removeResource(int64_t clientId);
+
+        bool reclaimResource(const Vector<MediaResource> &resources);
+
+    private:
+        Mutex mLock;
+        sp<IResourceManagerService> mService;
+        pid_t mPid;
+    };
+
     State mState;
+    bool mReleasedByResourceManager;
     sp<ALooper> mLooper;
     sp<ALooper> mCodecLooper;
     sp<CodecBase> mCodec;
     AString mComponentName;
-    uint32_t mReplyID;
+    sp<AReplyToken> mReplyID;
     uint32_t mFlags;
     status_t mStickyError;
-    sp<Surface> mNativeWindow;
+    sp<Surface> mSurface;
     SoftwareRenderer *mSoftRenderer;
+
     sp<AMessage> mOutputFormat;
     sp<AMessage> mInputFormat;
     sp<AMessage> mCallback;
+    sp<AMessage> mOnFrameRenderedNotification;
+    sp<MemoryDealer> mDealer;
+
+    sp<IResourceManagerClient> mResourceManagerClient;
+    sp<ResourceManagerServiceProxy> mResourceManagerService;
 
     bool mBatteryStatNotified;
     bool mIsVideo;
+    int32_t mVideoWidth;
+    int32_t mVideoHeight;
+    int32_t mRotationDegrees;
 
     // initial create parameters
     AString mInitName;
     bool mInitNameIsType;
     bool mInitIsEncoder;
 
+    // configure parameter
+    sp<AMessage> mConfigureMsg;
+
     // Used only to synchronize asynchronous getBufferAndFormat
     // across all the other (synchronous) buffer state change
     // operations, such as de/queueIn/OutputBuffer, start and
@@ -249,10 +323,10 @@
     Vector<BufferInfo> mPortBuffers[2];
 
     int32_t mDequeueInputTimeoutGeneration;
-    uint32_t mDequeueInputReplyID;
+    sp<AReplyToken> mDequeueInputReplyID;
 
     int32_t mDequeueOutputTimeoutGeneration;
-    uint32_t mDequeueOutputReplyID;
+    sp<AReplyToken> mDequeueOutputReplyID;
 
     sp<ICrypto> mCrypto;
 
@@ -261,13 +335,14 @@
     sp<AMessage> mActivityNotify;
 
     bool mHaveInputSurface;
+    bool mHavePendingInputBuffers;
 
-    MediaCodec(const sp<ALooper> &looper);
+    MediaCodec(const sp<ALooper> &looper, pid_t pid);
 
     static status_t PostAndAwaitResponse(
             const sp<AMessage> &msg, sp<AMessage> *response);
 
-    static void PostReplyWithError(int32_t replyID, int32_t err);
+    void PostReplyWithError(const sp<AReplyToken> &replyID, int32_t err);
 
     status_t init(const AString &name, bool nameIsType, bool encoder);
 
@@ -283,15 +358,16 @@
             size_t portIndex, size_t index,
             sp<ABuffer> *buffer, sp<AMessage> *format);
 
-    bool handleDequeueInputBuffer(uint32_t replyID, bool newRequest = false);
-    bool handleDequeueOutputBuffer(uint32_t replyID, bool newRequest = false);
+    bool handleDequeueInputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
+    bool handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool newRequest = false);
     void cancelPendingDequeueOperations();
 
     void extractCSD(const sp<AMessage> &format);
     status_t queueCSDInputBuffer(size_t bufferIndex);
 
-    status_t setNativeWindow(
-            const sp<Surface> &surface);
+    status_t handleSetSurface(const sp<Surface> &surface);
+    status_t connectToSurface(const sp<Surface> &surface);
+    status_t disconnectFromSurface();
 
     void postActivityNotificationIfPossible();
 
@@ -306,6 +382,9 @@
     void updateBatteryStat();
     bool isExecuting() const;
 
+    uint64_t getGraphicBufferSize();
+    void addResource(const String8 &type, const String8 &subtype, uint64_t value);
+
     /* called to get the last codec error when the sticky flag is set.
      * if no such codec error is found, returns UNKNOWN_ERROR.
      */
diff --git a/include/media/stagefright/MediaCodecList.h b/include/media/stagefright/MediaCodecList.h
index c2bbe4d..3aaa032 100644
--- a/include/media/stagefright/MediaCodecList.h
+++ b/include/media/stagefright/MediaCodecList.h
@@ -32,6 +32,8 @@
 
 namespace android {
 
+extern const char *kMaxEncoderInputBuffers;
+
 struct AMessage;
 
 struct MediaCodecList : public BnMediaCodecList {
@@ -48,9 +50,17 @@
         return mCodecInfos.itemAt(index);
     }
 
+    virtual const sp<AMessage> getGlobalSettings() const;
+
     // to be used by MediaPlayerService alone
     static sp<IMediaCodecList> getLocalInstance();
 
+    // only to be used by getLocalInstance
+    static void *profilerThreadWrapper(void * /*arg*/);
+
+    // only to be used by MediaPlayerService
+    void parseTopLevelXMLFile(const char *path, bool ignore_errors = false);
+
 private:
     class BinderDeathObserver : public IBinder::DeathRecipient {
         void binderDied(const wp<IBinder> &the_late_who __unused);
@@ -60,6 +70,7 @@
 
     enum Section {
         SECTION_TOPLEVEL,
+        SECTION_SETTINGS,
         SECTION_DECODERS,
         SECTION_DECODER,
         SECTION_DECODER_TYPE,
@@ -74,10 +85,14 @@
 
     status_t mInitCheck;
     Section mCurrentSection;
+    bool mUpdate;
     Vector<Section> mPastSections;
     int32_t mDepth;
     AString mHrefBase;
 
+    sp<AMessage> mGlobalSettings;
+    KeyedVector<AString, CodecSettings> mOverrides;
+
     Vector<sp<MediaCodecInfo> > mCodecInfos;
     sp<MediaCodecInfo> mCurrentInfo;
     sp<IOMX> mOMX;
@@ -87,7 +102,6 @@
 
     status_t initCheck() const;
     void parseXMLFile(const char *path);
-    void parseTopLevelXMLFile(const char *path);
 
     static void StartElementHandlerWrapper(
             void *me, const char *name, const char **attrs);
@@ -98,9 +112,12 @@
     void endElementHandler(const char *name);
 
     status_t includeXMLFile(const char **attrs);
+    status_t addSettingFromAttributes(const char **attrs);
     status_t addMediaCodecFromAttributes(bool encoder, const char **attrs);
     void addMediaCodec(bool encoder, const char *name, const char *type = NULL);
 
+    void setCurrentCodecInfo(bool encoder, const char *name, const char *type);
+
     status_t addQuirk(const char **attrs);
     status_t addTypeFromAttributes(const char **attrs);
     status_t addLimit(const char **attrs);
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index 0970b2b..71f58a9 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -23,9 +23,11 @@
 
 namespace android {
 
-class ALooper;
+struct ALooper;
 class AMessage;
+struct AReplyToken;
 class IGraphicBufferProducer;
+class IGraphicBufferConsumer;
 class MediaCodec;
 class MetaData;
 
@@ -40,6 +42,7 @@
             const sp<ALooper> &looper,
             const sp<AMessage> &format,
             const sp<MediaSource> &source,
+            const sp<IGraphicBufferConsumer> &consumer = NULL,
             uint32_t flags = 0);
 
     bool isVideo() const { return mIsVideo; }
@@ -78,6 +81,7 @@
             const sp<ALooper> &looper,
             const sp<AMessage> &outputFormat,
             const sp<MediaSource> &source,
+            const sp<IGraphicBufferConsumer> &consumer,
             uint32_t flags = 0);
 
     status_t onStart(MetaData *params);
@@ -99,13 +103,17 @@
     sp<Puller> mPuller;
     sp<MediaCodec> mEncoder;
     uint32_t mFlags;
-    List<uint32_t> mStopReplyIDQueue;
+    List<sp<AReplyToken>> mStopReplyIDQueue;
     bool mIsVideo;
     bool mStarted;
     bool mStopping;
     bool mDoMoreWorkPending;
+    bool mSetEncoderFormat;
+    int mEncoderFormat;
+    int mEncoderDataSpace;
     sp<AMessage> mEncoderActivityNotify;
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
+    sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
     List<MediaBuffer *> mInputBufferQueue;
     List<size_t> mAvailEncoderInputIndices;
     List<int64_t> mDecodingTimeQueue; // decoding time (us) for video
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index a0036e0..21eb04a 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -64,6 +64,7 @@
 extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
 extern const char *MEDIA_MIMETYPE_TEXT_VTT;
 extern const char *MEDIA_MIMETYPE_TEXT_CEA_608;
+extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
 
 }  // namespace android
 
diff --git a/include/media/stagefright/MediaFilter.h b/include/media/stagefright/MediaFilter.h
new file mode 100644
index 0000000..d0a572c
--- /dev/null
+++ b/include/media/stagefright/MediaFilter.h
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_FILTER_H_
+#define MEDIA_FILTER_H_
+
+#include <media/stagefright/CodecBase.h>
+
+namespace android {
+
+struct ABuffer;
+struct GraphicBufferListener;
+struct MemoryDealer;
+struct SimpleFilter;
+
+struct MediaFilter : public CodecBase {
+    MediaFilter();
+
+    virtual void setNotificationMessage(const sp<AMessage> &msg);
+
+    virtual void initiateAllocateComponent(const sp<AMessage> &msg);
+    virtual void initiateConfigureComponent(const sp<AMessage> &msg);
+    virtual void initiateCreateInputSurface();
+    virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface);
+
+    virtual void initiateStart();
+    virtual void initiateShutdown(bool keepComponentAllocated = false);
+
+    virtual void signalFlush();
+    virtual void signalResume();
+
+    virtual void signalRequestIDRFrame();
+    virtual void signalSetParameters(const sp<AMessage> &msg);
+    virtual void signalEndOfInputStream();
+
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+    struct PortDescription : public CodecBase::PortDescription {
+        virtual size_t countBuffers();
+        virtual IOMX::buffer_id bufferIDAt(size_t index) const;
+        virtual sp<ABuffer> bufferAt(size_t index) const;
+
+    protected:
+        PortDescription();
+
+    private:
+        friend struct MediaFilter;
+
+        Vector<IOMX::buffer_id> mBufferIDs;
+        Vector<sp<ABuffer> > mBuffers;
+
+        void addBuffer(IOMX::buffer_id id, const sp<ABuffer> &buffer);
+
+        DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
+    };
+
+protected:
+    virtual ~MediaFilter();
+
+private:
+    struct BufferInfo {
+        enum Status {
+            OWNED_BY_US,
+            OWNED_BY_UPSTREAM,
+        };
+
+        IOMX::buffer_id mBufferID;
+        int32_t mGeneration;
+        int32_t mOutputFlags;
+        Status mStatus;
+
+        sp<ABuffer> mData;
+    };
+
+    enum State {
+      UNINITIALIZED,
+      INITIALIZED,
+      CONFIGURED,
+      STARTED,
+    };
+
+    enum {
+        kWhatInputBufferFilled       = 'inpF',
+        kWhatOutputBufferDrained     = 'outD',
+        kWhatShutdown                = 'shut',
+        kWhatFlush                   = 'flus',
+        kWhatResume                  = 'resm',
+        kWhatAllocateComponent       = 'allo',
+        kWhatConfigureComponent      = 'conf',
+        kWhatCreateInputSurface      = 'cisf',
+        kWhatSignalEndOfInputStream  = 'eois',
+        kWhatStart                   = 'star',
+        kWhatSetParameters           = 'setP',
+        kWhatProcessBuffers          = 'proc',
+    };
+
+    enum {
+        kPortIndexInput  = 0,
+        kPortIndexOutput = 1
+    };
+
+    // member variables
+    AString mComponentName;
+    State mState;
+    status_t mInputEOSResult;
+    int32_t mWidth, mHeight;
+    int32_t mStride, mSliceHeight;
+    int32_t mColorFormatIn, mColorFormatOut;
+    size_t mMaxInputSize, mMaxOutputSize;
+    int32_t mGeneration;
+    sp<AMessage> mNotify;
+    sp<AMessage> mInputFormat;
+    sp<AMessage> mOutputFormat;
+
+    sp<MemoryDealer> mDealer[2];
+    Vector<BufferInfo> mBuffers[2];
+    Vector<BufferInfo*> mAvailableInputBuffers;
+    Vector<BufferInfo*> mAvailableOutputBuffers;
+    bool mPortEOS[2];
+
+    sp<SimpleFilter> mFilter;
+    sp<GraphicBufferListener> mGraphicBufferListener;
+
+    // helper functions
+    void signalProcessBuffers();
+    void signalError(status_t error);
+
+    status_t allocateBuffersOnPort(OMX_U32 portIndex);
+    BufferInfo *findBufferByID(
+            uint32_t portIndex, IOMX::buffer_id bufferID,
+            ssize_t *index = NULL);
+    void postFillThisBuffer(BufferInfo *info);
+    void postDrainThisBuffer(BufferInfo *info);
+    void postEOS();
+    void sendFormatChange();
+    void requestFillEmptyInput();
+    void processBuffers();
+
+    void onAllocateComponent(const sp<AMessage> &msg);
+    void onConfigureComponent(const sp<AMessage> &msg);
+    void onStart();
+    void onInputBufferFilled(const sp<AMessage> &msg);
+    void onOutputBufferDrained(const sp<AMessage> &msg);
+    void onShutdown(const sp<AMessage> &msg);
+    void onFlush();
+    void onSetParameters(const sp<AMessage> &msg);
+    void onCreateInputSurface();
+    void onInputFrameAvailable();
+    void onSignalEndOfInputStream();
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaFilter);
+};
+
+}  // namespace android
+
+#endif  // MEDIA_FILTER_H_
diff --git a/include/media/stagefright/MediaMuxer.h b/include/media/stagefright/MediaMuxer.h
index 9da98d9..fa855a8 100644
--- a/include/media/stagefright/MediaMuxer.h
+++ b/include/media/stagefright/MediaMuxer.h
@@ -29,9 +29,9 @@
 struct ABuffer;
 struct AMessage;
 struct MediaAdapter;
-struct MediaBuffer;
+class MediaBuffer;
 struct MediaSource;
-struct MetaData;
+class MetaData;
 struct MediaWriter;
 
 // MediaMuxer is used to mux multiple tracks into a video. Currently, we only
@@ -50,9 +50,6 @@
         OUTPUT_FORMAT_LIST_END // must be last - used to validate format type
     };
 
-    // Construct the muxer with the output file path.
-    MediaMuxer(const char *path, OutputFormat format);
-
     // Construct the muxer with the file descriptor. Note that the MediaMuxer
     // will close this file at stop().
     MediaMuxer(int fd, OutputFormat format);
diff --git a/include/media/stagefright/MediaSync.h b/include/media/stagefright/MediaSync.h
new file mode 100644
index 0000000..ef8cb23
--- /dev/null
+++ b/include/media/stagefright/MediaSync.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_SYNC_H
+#define MEDIA_SYNC_H
+
+#include <gui/IConsumerListener.h>
+#include <gui/IProducerListener.h>
+
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+#include <media/stagefright/foundation/AHandler.h>
+
+#include <utils/Condition.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+class AudioTrack;
+class BufferItem;
+class Fence;
+class GraphicBuffer;
+class IGraphicBufferConsumer;
+class IGraphicBufferProducer;
+struct MediaClock;
+struct VideoFrameScheduler;
+
+// MediaSync manages media playback and its synchronization to a media clock
+// source. It can also be used for video-only playback.
+//
+// For video playback, it requires an output surface and provides an input
+// surface. It then controls the rendering of input buffers (buffer queued to
+// the input surface) on the output surface to happen at the appropriate time.
+//
+// For audio playback, it requires an audio track and takes updates of
+// information of rendered audio data to maintain media clock when audio track
+// serves as media clock source. (TODO: move audio rendering from JAVA to
+// native code).
+//
+// It can use the audio or video track as media clock source, as well as an
+// external clock. (TODO: actually support an external clock as the media clock
+// source; use the video track as media clock source for audio-and-video streams).
+//
+// In video-only mode, MediaSync will play back every video frame even if a
+// frame arrives late based on its own timestamp and the previous frame's.
+//
+// The client needs to configure surface (for output video rendering) and audio
+// track (for querying information of audio rendering) for MediaSync.
+//
+// Then the client needs to obtain a surface from MediaSync and render video
+// frames onto that surface. Internally, the MediaSync will receive those video
+// frames and render them onto the output surface at the appropriate time.
+//
+// The client needs to call updateQueuedAudioData() immediately after it writes
+// audio data to the audio track. Such information will be used to update media
+// clock.
+//
+class MediaSync : public AHandler {
+public:
+    // Create an instance of MediaSync.
+    static sp<MediaSync> create();
+
+    // Called when MediaSync is used to render video. It should be called
+    // before createInputSurface().
+    status_t setSurface(const sp<IGraphicBufferProducer> &output);
+
+    // Called when audio track is used as media clock source. It should be
+    // called before updateQueuedAudioData().
+    status_t setAudioTrack(const sp<AudioTrack> &audioTrack);
+
+    // Create the input surface for the client to render video frames on.
+    // Frames rendered onto this surface by the client will be internally
+    // directed to the output surface for rendering at the appropriate
+    // time.
+    status_t createInputSurface(sp<IGraphicBufferProducer> *outBufferProducer);
+
+    // Update just-rendered audio data size and the presentation timestamp of
+    // the first frame of that audio data. It should be called immediately
+    // after the client writes audio data into the AudioTrack.
+    // This function assumes a continuous audio stream.
+    // TODO: support gap or backwards updates.
+    status_t updateQueuedAudioData(
+            size_t sizeInBytes, int64_t presentationTimeUs);
+
+    // Set the consumer name of the input queue.
+    void setName(const AString &name);
+
+    // Get the media clock used by the MediaSync so that the client can obtain
+    // corresponding media time or real time via
+    // MediaClock::getMediaTime() and MediaClock::getRealTimeFor().
+    sp<const MediaClock> getMediaClock();
+
+    // Flush mediasync
+    void flush();
+
+    // Set the video frame rate hint - this is used by the video FrameScheduler
+    status_t setVideoFrameRateHint(float rate);
+
+    // Get the video frame rate measurement from the FrameScheduler
+    // returns -1 if there is no measurement
+    float getVideoFrameRate();
+
+    // Set the sync settings parameters.
+    status_t setSyncSettings(const AVSyncSettings &syncSettings);
+
+    // Gets the sync settings parameters.
+    void getSyncSettings(AVSyncSettings *syncSettings /* nonnull */);
+
+    // Sets the playback rate using playback settings.
+    // This method can be called any time.
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+
+    // Gets the playback rate (playback settings parameters).
+    void getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+
+    // Get the play time for pending audio frames in audio sink.
+    status_t getPlayTimeForPendingAudioFrames(int64_t *outTimeUs);
+
+protected:
+    virtual void onMessageReceived(const sp<AMessage> &msg);
+
+private:
+    enum {
+        kWhatDrainVideo = 'dVid',
+    };
+
+    // This is a thin wrapper class that lets us listen to
+    // IConsumerListener::onFrameAvailable from mInput.
+    class InputListener : public BnConsumerListener,
+                          public IBinder::DeathRecipient {
+    public:
+        InputListener(const sp<MediaSync> &sync);
+        virtual ~InputListener();
+
+        // From IConsumerListener
+        virtual void onFrameAvailable(const BufferItem &item);
+
+        // From IConsumerListener
+        // We don't care about released buffers because we detach each buffer as
+        // soon as we acquire it. See the comment for onBufferReleased below for
+        // some clarifying notes about the name.
+        virtual void onBuffersReleased() {}
+
+        // From IConsumerListener
+        // We don't care about sideband streams, since we won't relay them.
+        virtual void onSidebandStreamChanged();
+
+        // From IBinder::DeathRecipient
+        virtual void binderDied(const wp<IBinder> &who);
+
+    private:
+        sp<MediaSync> mSync;
+    };
+
+    // This is a thin wrapper class that lets us listen to
+    // IProducerListener::onBufferReleased from mOutput.
+    class OutputListener : public BnProducerListener,
+                           public IBinder::DeathRecipient {
+    public:
+        OutputListener(const sp<MediaSync> &sync, const sp<IGraphicBufferProducer> &output);
+        virtual ~OutputListener();
+
+        // From IProducerListener
+        virtual void onBufferReleased();
+
+        // From IBinder::DeathRecipient
+        virtual void binderDied(const wp<IBinder> &who);
+
+    private:
+        sp<MediaSync> mSync;
+        sp<IGraphicBufferProducer> mOutput;
+    };
+
+    // mIsAbandoned is set to true when the input or output dies.
+    // Once the MediaSync has been abandoned by one side, it will disconnect
+    // from the other side and not attempt to communicate with it further.
+    bool mIsAbandoned;
+
+    mutable Mutex mMutex;
+    Condition mReleaseCondition;
+    size_t mNumOutstandingBuffers;
+    sp<IGraphicBufferConsumer> mInput;
+    sp<IGraphicBufferProducer> mOutput;
+    int mUsageFlagsFromOutput;
+    uint32_t mMaxAcquiredBufferCount; // max acquired buffer count
+    bool mReturnPendingInputFrame;    // set while we are pending before acquiring an input frame
+
+    sp<AudioTrack> mAudioTrack;
+    uint32_t mNativeSampleRateInHz;
+    int64_t mNumFramesWritten;
+    bool mHasAudio;
+
+    int64_t mNextBufferItemMediaUs;
+    List<BufferItem> mBufferItems;
+    sp<VideoFrameScheduler> mFrameScheduler;
+
+    // Keep track of buffers received from |mInput|. This is needed because
+    // it's possible the consumer of |mOutput| could return a different
+    // GraphicBuffer::handle (e.g., due to passing buffers through IPC),
+    // and that could cause problems if the producer of |mInput| only
+    // supports pre-registered buffers.
+    KeyedVector<uint64_t, sp<GraphicBuffer> > mBuffersFromInput;
+
+    // Keep track of buffers sent to |mOutput|. When a new output surface comes
+    // in, those buffers will be returned to the input and the old output surface will
+    // be disconnected immediately.
+    KeyedVector<uint64_t, sp<GraphicBuffer> > mBuffersSentToOutput;
+
+    sp<ALooper> mLooper;
+    float mPlaybackRate;
+
+    AudioPlaybackRate mPlaybackSettings;
+    AVSyncSettings mSyncSettings;
+
+    sp<MediaClock> mMediaClock;
+
+    MediaSync();
+
+    // Must be accessed through RefBase
+    virtual ~MediaSync();
+
+    int64_t getRealTime(int64_t mediaTimeUs, int64_t nowUs);
+    int64_t getDurationIfPlayedAtNativeSampleRate_l(int64_t numFrames);
+    int64_t getPlayedOutAudioDurationMedia_l(int64_t nowUs);
+
+    void onDrainVideo_l();
+
+    // This implements the onFrameAvailable callback from IConsumerListener.
+    // It gets called from an InputListener.
+    // During this callback, we detach the buffer from the input, and queue
+    // it for rendering on the output. This call can block if there are too
+    // many outstanding buffers. If it blocks, it will resume when
+    // onBufferReleasedByOutput releases a buffer back to the input.
+    void onFrameAvailableFromInput();
+
+    // Send |bufferItem| to the output for rendering.
+    void renderOneBufferItem_l(const BufferItem &bufferItem);
+
+    // This implements the onBufferReleased callback from IProducerListener.
+    // It gets called from an OutputListener.
+    // During this callback, we detach the buffer from the output, and release
+    // it to the input. A blocked onFrameAvailable call will be allowed to proceed.
+    void onBufferReleasedByOutput(sp<IGraphicBufferProducer> &output);
+
+    // Return |buffer| back to the input.
+    void returnBufferToInput_l(const sp<GraphicBuffer> &buffer, const sp<Fence> &fence);
+
+    // When this is called, the MediaSync disconnects from (i.e., abandons) its
+    // input or output, and signals any waiting onFrameAvailable calls to wake
+    // up. This must be called with mMutex locked.
+    void onAbandoned_l(bool isInput);
+
+    // Set the playback in a desired speed.
+    // This method can be called any time.
+    // |rate| is the ratio between desired speed and the normal one, and should
+    // be non-negative. The meaning of rate values:
+    // 1.0 -- normal playback
+    // 0.0 -- stop or pause
+    // larger than 1.0 -- faster than normal speed
+    // between 0.0 and 1.0 -- slower than normal speed
+    void updatePlaybackRate_l(float rate);
+
+    // apply new sync settings
+    void resync_l();
+
+    // apply playback settings only - without resyncing or updating playback rate
+    status_t setPlaybackSettings_l(const AudioPlaybackRate &rate);
+
+    // helper.
+    bool isPlaying() { return mPlaybackRate != 0.0; }
+
+    DISALLOW_EVIL_CONSTRUCTORS(MediaSync);
+};
+
+} // namespace android
+
+#endif
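
The updateQueuedAudioData() comment above says the client reports each audio write so MediaSync can track how far audio playback has progressed when the audio track serves as the media clock source. The standalone sketch below models that bookkeeping under assumed parameters (48 kHz, 16-bit stereo, so 4 bytes per frame); it is not the actual MediaSync logic, which also accounts for the AudioTrack's real playback position and the playback rate.

// audio_clock_sketch.cpp -- models the write-driven audio clock bookkeeping.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct AudioClockModel {
    int64_t sampleRateHz  = 48000;     // assumption for the sketch
    int64_t bytesPerFrame = 4;         // assumption: 16-bit stereo PCM
    int64_t framesWritten = 0;
    int64_t firstPresentationTimeUs = -1;

    // Called after every write to the audio track, mirroring the contract of
    // MediaSync::updateQueuedAudioData() (continuous stream, no gaps).
    void updateQueuedAudioData(size_t sizeInBytes, int64_t presentationTimeUs) {
        if (firstPresentationTimeUs < 0) {
            firstPresentationTimeUs = presentationTimeUs;
        }
        framesWritten += (int64_t)sizeInBytes / bytesPerFrame;
    }

    // Media time reached once everything written so far has played out.
    int64_t playedOutMediaTimeUs() const {
        if (firstPresentationTimeUs < 0) {
            return -1;
        }
        return firstPresentationTimeUs + framesWritten * 1000000LL / sampleRateHz;
    }
};

int main() {
    AudioClockModel clock;
    clock.updateQueuedAudioData(/* sizeInBytes */ 19200, /* presentationTimeUs */ 0);  // 100 ms
    clock.updateQueuedAudioData(19200, 100000);                                        // 100 ms more
    printf("audio clock reaches %lld us after both writes\n",
           (long long)clock.playedOutMediaTimeUs());
    return 0;
}
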
diff --git a/include/media/stagefright/MediaWriter.h b/include/media/stagefright/MediaWriter.h
index e27ea1d..8e02506 100644
--- a/include/media/stagefright/MediaWriter.h
+++ b/include/media/stagefright/MediaWriter.h
@@ -24,7 +24,7 @@
 namespace android {
 
 struct MediaSource;
-struct MetaData;
+class MetaData;
 
 struct MediaWriter : public RefBase {
     MediaWriter()
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 3f42790..8d4e15a 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -70,11 +70,15 @@
     kKeyDriftTime         = 'dftT',  // int64_t (usecs)
     kKeyAnchorTime        = 'ancT',  // int64_t (usecs)
     kKeyDuration          = 'dura',  // int64_t (usecs)
-    kKeyColorFormat       = 'colf',
+    kKeyPixelFormat       = 'pixf',  // int32_t
+    kKeyColorFormat       = 'colf',  // int32_t
+    kKeyColorSpace        = 'cols',  // int32_t
     kKeyPlatformPrivate   = 'priv',  // pointer
     kKeyDecoderComponent  = 'decC',  // cstring
     kKeyBufferID          = 'bfID',
     kKeyMaxInputSize      = 'inpS',
+    kKeyMaxWidth          = 'maxW',
+    kKeyMaxHeight         = 'maxH',
     kKeyThumbnailTime     = 'thbT',  // int64_t (usecs)
     kKeyTrackID           = 'trID',
     kKeyIsDRM             = 'idrm',  // int32_t (bool)
@@ -98,6 +102,7 @@
     kKeyCompilation       = 'cpil',  // cstring
     kKeyLocation          = 'loc ',  // cstring
     kKeyTimeScale         = 'tmsl',  // int32_t
+    kKeyCaptureFramerate  = 'capF',  // float (capture fps)
 
     // video profile and level
     kKeyVideoProfile      = 'vprf',  // int32_t
@@ -173,6 +178,9 @@
     kKeyTrackIsDefault    = 'dflt', // bool (int32_t)
     // Similar to MediaFormat.KEY_IS_FORCED_SUBTITLE but pertains to av tracks as well.
     kKeyTrackIsForced     = 'frcd', // bool (int32_t)
+
+    // H264 supplemental enhancement information offsets/sizes
+    kKeySEI               = 'sei ', // raw data
 };
 
 enum {
diff --git a/include/media/stagefright/NativeWindowWrapper.h b/include/media/stagefright/NativeWindowWrapper.h
deleted file mode 100644
index cfeec22..0000000
--- a/include/media/stagefright/NativeWindowWrapper.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NATIVE_WINDOW_WRAPPER_H_
-
-#define NATIVE_WINDOW_WRAPPER_H_
-
-#include <gui/Surface.h>
-
-namespace android {
-
-// Surface derives from ANativeWindow which derives from multiple
-// base classes, in order to carry it in AMessages, we'll temporarily wrap it
-// into a NativeWindowWrapper.
-
-struct NativeWindowWrapper : RefBase {
-    NativeWindowWrapper(
-            const sp<Surface> &surfaceTextureClient) :
-        mSurfaceTextureClient(surfaceTextureClient) { }
-
-    sp<ANativeWindow> getNativeWindow() const {
-        return mSurfaceTextureClient;
-    }
-
-    sp<Surface> getSurfaceTextureClient() const {
-        return mSurfaceTextureClient;
-    }
-
-private:
-    const sp<Surface> mSurfaceTextureClient;
-
-    DISALLOW_EVIL_CONSTRUCTORS(NativeWindowWrapper);
-};
-
-}  // namespace android
-
-#endif  // NATIVE_WINDOW_WRAPPER_H_
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index 402e7f8..fd74452 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -30,12 +30,12 @@
 
 struct ABuffer;
 struct AMessage;
-struct DataSource;
+class DataSource;
 struct IMediaHTTPService;
-struct MediaBuffer;
+class MediaBuffer;
 struct MediaExtractor;
 struct MediaSource;
-struct MetaData;
+class MetaData;
 
 struct NuMediaExtractor : public RefBase {
     enum SampleFlags {
diff --git a/include/media/stagefright/OMXCodec.h b/include/media/stagefright/OMXCodec.h
index 84b1b1a..7fabcb3 100644
--- a/include/media/stagefright/OMXCodec.h
+++ b/include/media/stagefright/OMXCodec.h
@@ -298,7 +298,6 @@
     status_t queueBufferToNativeWindow(BufferInfo *info);
     status_t cancelBufferToNativeWindow(BufferInfo *info);
     BufferInfo* dequeueBufferFromNativeWindow();
-    status_t pushBlankBuffersToNativeWindow();
 
     status_t freeBuffersOnPort(
             OMX_U32 portIndex, bool onlyThoseWeOwn = false);
@@ -347,7 +346,6 @@
 
     status_t configureCodec(const sp<MetaData> &meta);
 
-    status_t applyRotation();
     status_t waitForBufferFilled_l();
 
     int64_t getDecodingTimeUs();
diff --git a/include/media/stagefright/PersistentSurface.h b/include/media/stagefright/PersistentSurface.h
new file mode 100644
index 0000000..a35b9f1
--- /dev/null
+++ b/include/media/stagefright/PersistentSurface.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PERSISTENT_SURFACE_H_
+
+#define PERSISTENT_SURFACE_H_
+
+#include <gui/IGraphicBufferProducer.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct PersistentSurface : public RefBase {
+    PersistentSurface(
+            const sp<IGraphicBufferProducer>& bufferProducer,
+            const sp<IGraphicBufferConsumer>& bufferConsumer) :
+        mBufferProducer(bufferProducer),
+        mBufferConsumer(bufferConsumer) { }
+
+    sp<IGraphicBufferProducer> getBufferProducer() const {
+        return mBufferProducer;
+    }
+
+    sp<IGraphicBufferConsumer> getBufferConsumer() const {
+        return mBufferConsumer;
+    }
+
+private:
+    const sp<IGraphicBufferProducer> mBufferProducer;
+    const sp<IGraphicBufferConsumer> mBufferConsumer;
+
+    DISALLOW_EVIL_CONSTRUCTORS(PersistentSurface);
+};
+
+}  // namespace android
+
+#endif  // PERSISTENT_SURFACE_H_
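
PersistentSurface is just a holder for the two ends of a buffer queue. As a rough usage sketch (the BufferQueue::createBufferQueue() call is an assumption based on the libgui API of this period, not part of this change):

// Sketch only: building a PersistentSurface from a fresh BufferQueue pair.
// BufferQueue::createBufferQueue() is assumed from libgui; this builds only
// inside the framework tree.
#include <gui/BufferQueue.h>
#include <media/stagefright/PersistentSurface.h>

using namespace android;

static sp<PersistentSurface> makePersistentSurface() {
    sp<IGraphicBufferProducer> producer;
    sp<IGraphicBufferConsumer> consumer;
    BufferQueue::createBufferQueue(&producer, &consumer);

    // The producer end is later handed to a codec or camera as its input
    // surface; the consumer end stays with whoever owns the surface.
    return new PersistentSurface(producer, consumer);
}
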
diff --git a/include/media/stagefright/ProcessInfo.h b/include/media/stagefright/ProcessInfo.h
new file mode 100644
index 0000000..ec0cdff
--- /dev/null
+++ b/include/media/stagefright/ProcessInfo.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef PROCESS_INFO_H_
+
+#define PROCESS_INFO_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace android {
+
+struct ProcessInfo : public ProcessInfoInterface {
+    ProcessInfo();
+
+    virtual bool getPriority(int pid, int* priority);
+
+protected:
+    virtual ~ProcessInfo();
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(ProcessInfo);
+};
+
+}  // namespace android
+
+#endif  // PROCESS_INFO_H_
diff --git a/services/audiopolicy/AudioPolicyFactory.cpp b/include/media/stagefright/ProcessInfoInterface.h
similarity index 60%
copy from services/audiopolicy/AudioPolicyFactory.cpp
copy to include/media/stagefright/ProcessInfoInterface.h
index 2ae7bc1..222f92d 100644
--- a/services/audiopolicy/AudioPolicyFactory.cpp
+++ b/include/media/stagefright/ProcessInfoInterface.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,19 +14,20 @@
  * limitations under the License.
  */
 
-#include "AudioPolicyManager.h"
+#ifndef PROCESS_INFO_INTERFACE_H_
+#define PROCESS_INFO_INTERFACE_H_
+
+#include <utils/RefBase.h>
 
 namespace android {
 
-extern "C" AudioPolicyInterface* createAudioPolicyManager(
-        AudioPolicyClientInterface *clientInterface)
-{
-    return new AudioPolicyManager(clientInterface);
-}
+struct ProcessInfoInterface : public RefBase {
+    virtual bool getPriority(int pid, int* priority) = 0;
 
-extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
-{
-    delete interface;
-}
+protected:
+    virtual ~ProcessInfoInterface() {}
+};
 
-}; // namespace android
+}  // namespace android
+
+#endif  // PROCESS_INFO_INTERFACE_H_
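
Because ProcessInfoInterface exposes a single pure-virtual hook, tests can swap in a trivial double; a minimal sketch, with the hard-coded priority being an arbitrary illustrative value:

// Minimal test double; the hard-coded priority is an arbitrary example value.
#include <media/stagefright/ProcessInfoInterface.h>

namespace android {

struct FakeProcessInfo : public ProcessInfoInterface {
    virtual bool getPriority(int /* pid */, int* priority) {
        *priority = 0;  // pretend every process is top priority
        return true;
    }
};

}  // namespace android
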
diff --git a/include/media/stagefright/RenderScriptWrapper.h b/include/media/stagefright/RenderScriptWrapper.h
new file mode 100644
index 0000000..b42649e
--- /dev/null
+++ b/include/media/stagefright/RenderScriptWrapper.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RENDERSCRIPT_WRAPPER_H_
+#define RENDERSCRIPT_WRAPPER_H_
+
+#include <RenderScript.h>
+
+namespace android {
+
+struct RenderScriptWrapper : public RefBase {
+public:
+    struct RSFilterCallback : public RefBase {
+    public:
+        // called by RSFilter to process each input buffer
+        virtual status_t processBuffers(
+                RSC::Allocation* inBuffer,
+                RSC::Allocation* outBuffer) = 0;
+
+        virtual status_t handleSetParameters(const sp<AMessage> &msg) = 0;
+    };
+
+    sp<RSFilterCallback> mCallback;
+    RSC::sp<RSC::RS> mContext;
+};
+
+}   // namespace android
+
+#endif  // RENDERSCRIPT_WRAPPER_H_
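
RenderScriptWrapper itself only carries a callback and an RS context; the interesting part is the RSFilterCallback a filter implementor supplies. A skeleton implementation, with the kernel launch omitted and the class name purely illustrative:

// Skeleton RSFilterCallback; a real filter would launch its RenderScript
// kernel in processBuffers().  The class name is illustrative.
#include <utils/Errors.h>
#include <media/stagefright/RenderScriptWrapper.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

struct NoopRSFilterCallback : public RenderScriptWrapper::RSFilterCallback {
    virtual status_t processBuffers(
            RSC::Allocation * /* inBuffer */, RSC::Allocation * /* outBuffer */) {
        // Kernel launch omitted in this sketch.
        return OK;
    }

    virtual status_t handleSetParameters(const sp<AMessage> & /* msg */) {
        return OK;
    }
};
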
diff --git a/include/media/stagefright/SurfaceUtils.h b/include/media/stagefright/SurfaceUtils.h
new file mode 100644
index 0000000..c1a9c0a
--- /dev/null
+++ b/include/media/stagefright/SurfaceUtils.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SURFACE_UTILS_H_
+
+#define SURFACE_UTILS_H_
+
+#include <utils/Errors.h>
+
+struct ANativeWindow;
+
+namespace android {
+
+status_t setNativeWindowSizeFormatAndUsage(
+        ANativeWindow *nativeWindow /* nonnull */,
+        int width, int height, int format, int rotation, int usage);
+status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */);
+
+} // namespace android
+
+#endif  // SURFACE_UTILS_H_
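
A hedged sketch of how these helpers might be called when (re)configuring a decoder's output surface; the pixel format, usage flag, and dimensions are illustrative assumptions, while the two function signatures come from the header above:

// Sketch: configuring a native window before queueing decoder output to it.
// Format, usage, and dimensions are illustrative assumptions.
#include <hardware/gralloc.h>
#include <system/graphics.h>
#include <gui/Surface.h>
#include <media/stagefright/SurfaceUtils.h>

using namespace android;

static status_t prepareOutputWindow(const sp<Surface> &surface) {
    ANativeWindow *window = surface.get();

    status_t err = setNativeWindowSizeFormatAndUsage(
            window, 1280, 720, HAL_PIXEL_FORMAT_YV12,
            0 /* rotation */, GRALLOC_USAGE_HW_TEXTURE);
    if (err != OK) {
        return err;
    }

    // On teardown, a decoder can clear the last frame by pushing blank buffers.
    return pushBlankBuffersToNativeWindow(window);
}
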
diff --git a/include/media/stagefright/Utils.h b/include/media/stagefright/Utils.h
index a795c80..5e9d7d4 100644
--- a/include/media/stagefright/Utils.h
+++ b/include/media/stagefright/Utils.h
@@ -41,7 +41,7 @@
 uint64_t ntoh64(uint64_t x);
 uint64_t hton64(uint64_t x);
 
-struct MetaData;
+class MetaData;
 struct AMessage;
 status_t convertMetaDataToMessage(
         const sp<MetaData> &meta, sp<AMessage> *format);
@@ -65,6 +65,26 @@
 
 AString uriDebugString(const AString &uri, bool incognito = false);
 
+struct HLSTime {
+    int32_t mSeq;
+    int64_t mTimeUs;
+    sp<AMessage> mMeta;
+
+    HLSTime(const sp<AMessage> &meta = NULL);
+    int64_t getSegmentTimeUs() const;
+};
+
+bool operator <(const HLSTime &t0, const HLSTime &t1);
+
+// read and write various objects to/from AMessage
+
+void writeToAMessage(sp<AMessage> msg, const AudioPlaybackRate &rate);
+void readFromAMessage(const sp<AMessage> &msg, AudioPlaybackRate *rate /* nonnull */);
+
+void writeToAMessage(sp<AMessage> msg, const AVSyncSettings &sync, float videoFpsHint);
+void readFromAMessage(
+        const sp<AMessage> &msg, AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
 }  // namespace android
 
 #endif  // UTILS_H_
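
The new writeToAMessage()/readFromAMessage() overloads round-trip playback settings through an AMessage. A small sketch, assuming the mSpeed/mPitch fields and AUDIO_PLAYBACK_RATE_DEFAULT from media/AudioResamplerPublic.h:

// Sketch: shuttling an AudioPlaybackRate through an AMessage, e.g. between
// MediaPlayerService and NuPlayer.  mSpeed/mPitch are assumed field names.
#include <media/AudioResamplerPublic.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

static void roundTripPlaybackRate() {
    AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
    rate.mSpeed = 2.0f;  // double speed
    rate.mPitch = 1.0f;  // keep the original pitch

    sp<AMessage> msg = new AMessage;
    writeToAMessage(msg, rate);

    AudioPlaybackRate decoded;
    readFromAMessage(msg, &decoded);  // decoded now mirrors 'rate'
}
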
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.h b/include/media/stagefright/VideoFrameScheduler.h
similarity index 92%
rename from media/libmediaplayerservice/VideoFrameScheduler.h
rename to include/media/stagefright/VideoFrameScheduler.h
index 84b27b4..9d97dfd 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.h
+++ b/include/media/stagefright/VideoFrameScheduler.h
@@ -24,7 +24,7 @@
 
 namespace android {
 
-struct ISurfaceComposer;
+class ISurfaceComposer;
 
 struct VideoFrameScheduler : public RefBase {
     VideoFrameScheduler();
@@ -39,6 +39,9 @@
     // returns the vsync period for the main display
     nsecs_t getVsyncPeriod();
 
+    // returns the current frames-per-second, or 0.f if not primed
+    float getFrameRate();
+
     void release();
 
     static const size_t kHistorySize = 8;
@@ -54,8 +57,9 @@
         void reset(float fps = -1);
         // keep current estimate, but restart phase
         void restart();
-        // returns period
+        // returns period or 0 if not yet primed
         nsecs_t addSample(nsecs_t time);
+        nsecs_t getPeriod() const;
 
     private:
         nsecs_t mPeriod;
diff --git a/include/media/stagefright/foundation/ABase.h b/include/media/stagefright/foundation/ABase.h
index 72e3d87..ef1e010 100644
--- a/include/media/stagefright/foundation/ABase.h
+++ b/include/media/stagefright/foundation/ABase.h
@@ -18,7 +18,9 @@
 
 #define A_BASE_H_
 
+#ifndef ARRAY_SIZE
 #define ARRAY_SIZE(a) (sizeof(a) / sizeof(*(a)))
+#endif
 
 #define DISALLOW_EVIL_CONSTRUCTORS(name) \
     name(const name &); \
diff --git a/include/media/stagefright/foundation/ADebug.h b/include/media/stagefright/foundation/ADebug.h
index 1d0e2cb..65f415a 100644
--- a/include/media/stagefright/foundation/ADebug.h
+++ b/include/media/stagefright/foundation/ADebug.h
@@ -24,6 +24,31 @@
 #include <media/stagefright/foundation/AString.h>
 #include <utils/Log.h>
 
+inline static const char *asString(android::status_t i, const char *def = "??") {
+    using namespace android;
+    switch (i) {
+        case NO_ERROR:              return "NO_ERROR";
+        case UNKNOWN_ERROR:         return "UNKNOWN_ERROR";
+        case NO_MEMORY:             return "NO_MEMORY";
+        case INVALID_OPERATION:     return "INVALID_OPERATION";
+        case BAD_VALUE:             return "BAD_VALUE";
+        case BAD_TYPE:              return "BAD_TYPE";
+        case NAME_NOT_FOUND:        return "NAME_NOT_FOUND";
+        case PERMISSION_DENIED:     return "PERMISSION_DENIED";
+        case NO_INIT:               return "NO_INIT";
+        case ALREADY_EXISTS:        return "ALREADY_EXISTS";
+        case DEAD_OBJECT:           return "DEAD_OBJECT";
+        case FAILED_TRANSACTION:    return "FAILED_TRANSACTION";
+        case BAD_INDEX:             return "BAD_INDEX";
+        case NOT_ENOUGH_DATA:       return "NOT_ENOUGH_DATA";
+        case WOULD_BLOCK:           return "WOULD_BLOCK";
+        case TIMED_OUT:             return "TIMED_OUT";
+        case UNKNOWN_TRANSACTION:   return "UNKNOWN_TRANSACTION";
+        case FDS_NOT_ALLOWED:       return "FDS_NOT_ALLOWED";
+        default:                    return def;
+    }
+}
+
 namespace android {
 
 #define LITERAL_TO_STRING_INTERNAL(x)    #x
@@ -92,7 +117,7 @@
 
     };
 
-    // parse the property or string to get the debug level for a component name
+    // parse the property or string to get a long-type level for a component name
     // string format is:
     // <level>[:<glob>][,<level>[:<glob>]...]
     // - <level> is 0-5 corresponding to ADebug::Level
@@ -100,14 +125,38 @@
     //   matches all components
     // - string is read left-to-right, and the last matching level is returned, or
     //   the def if no terms matched
+    static long GetLevelFromSettingsString(
+            const char *name, const char *value, long def);
+    static long GetLevelFromProperty(
+            const char *name, const char *value, long def);
+
+    // same for ADebug::Level - performs clamping to valid debug ranges
     static Level GetDebugLevelFromProperty(
             const char *name, const char *propertyName, Level def = kDebugNone);
-    static Level GetDebugLevelFromString(
-            const char *name, const char *value, Level def = kDebugNone);
 
     // remove redundant segments of a codec name, and return a newly allocated
     // string suitable for debugging
     static char *GetDebugName(const char *name);
+
+    inline static bool isExperimentEnabled(
+            const char *name __unused /* nonnull */, bool allow __unused = true) {
+#ifdef ENABLE_STAGEFRIGHT_EXPERIMENTS
+        if (!strcmp(name, "legacy-adaptive")) {
+            return getExperimentFlag(allow, name, 2, 1); // every other day
+        } else if (!strcmp(name, "legacy-setsurface")) {
+            return getExperimentFlag(allow, name, 3, 1); // every third day
+        } else {
+            ALOGE("unknown experiment '%s' (disabled)", name);
+        }
+#endif
+        return false;
+    }
+
+private:
+    // pass in allow, so we can print in the log if the experiment is disabled
+    static bool getExperimentFlag(
+            bool allow, const char *name, uint64_t modulo, uint64_t limit,
+            uint64_t plus = 0, uint64_t timeDivisor = 24 * 60 * 60 /* 1 day */);
 };
 
 }  // namespace android
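
Two quick illustrations of the additions above: asString() maps a status_t to a readable token for logging, and GetLevelFromSettingsString() parses the <level>[:<glob>] grammar documented in the header. The component name and settings string below are made-up examples:

// Sketch: readable status logging plus a per-component debug level lookup.
// "video.decoder" and the settings string are illustrative only.
#define LOG_TAG "ADebugExample"
#include <media/stagefright/foundation/ADebug.h>

using namespace android;

static void debugHelpers(status_t err) {
    ALOGE("configure failed: %s (%d)", asString(err), err);

    // "1,3:*decoder*" => default level 1, level 3 for names matching *decoder*
    // (the last matching term wins).
    long level = ADebug::GetLevelFromSettingsString(
            "video.decoder", "1,3:*decoder*", 0 /* def */);
    ALOGI("debug level for video.decoder: %ld", level);
}
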
diff --git a/include/media/stagefright/foundation/AHandler.h b/include/media/stagefright/foundation/AHandler.h
index 41ade77..fe02a86 100644
--- a/include/media/stagefright/foundation/AHandler.h
+++ b/include/media/stagefright/foundation/AHandler.h
@@ -29,6 +29,7 @@
 struct AHandler : public RefBase {
     AHandler()
         : mID(0),
+          mVerboseStats(false),
           mMessageCounter(0) {
     }
 
@@ -36,23 +37,40 @@
         return mID;
     }
 
-    sp<ALooper> looper();
+    sp<ALooper> looper() const {
+        return mLooper.promote();
+    }
+
+    wp<ALooper> getLooper() const {
+        return mLooper;
+    }
+
+    wp<AHandler> getHandler() const {
+        // allow getting a weak reference to a const handler
+        return const_cast<AHandler *>(this);
+    }
 
 protected:
     virtual void onMessageReceived(const sp<AMessage> &msg) = 0;
 
 private:
-    friend struct ALooperRoster;
+    friend struct AMessage;      // deliverMessage()
+    friend struct ALooperRoster; // setID()
 
     ALooper::handler_id mID;
+    wp<ALooper> mLooper;
 
-    void setID(ALooper::handler_id id) {
+    inline void setID(ALooper::handler_id id, wp<ALooper> looper) {
         mID = id;
+        mLooper = looper;
     }
 
+    bool mVerboseStats;
     uint32_t mMessageCounter;
     KeyedVector<uint32_t, uint32_t> mMessages;
 
+    void deliverMessage(const sp<AMessage> &msg);
+
     DISALLOW_EVIL_CONSTRUCTORS(AHandler);
 };
 
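
With the looper now cached on the handler itself, message delivery no longer goes through ALooperRoster lookups. A minimal sketch of the registration and post flow under the new wiring; the handler class, message id, and looper name are illustrative:

// Sketch: a trivial handler using the new AHandler/AMessage wiring.
// The class, message id, and looper name are illustrative.
#define LOG_TAG "PingExample"
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/Log.h>

using namespace android;

struct PingHandler : public AHandler {
    enum { kWhatPing = 'ping' };

protected:
    virtual void onMessageReceived(const sp<AMessage> &msg) {
        if (msg->what() == kWhatPing) {
            ALOGV("ping on looper '%s'", looper()->getName());
        }
    }
};

static void startPing() {
    sp<ALooper> looper = new ALooper;
    looper->setName("ping-looper");
    looper->start();

    sp<PingHandler> handler = new PingHandler;
    looper->registerHandler(handler);  // stores the id and the looper on the handler

    // Messages now target a handler directly instead of a raw handler_id.
    sp<AMessage> msg = new AMessage(PingHandler::kWhatPing, handler);
    msg->post();
}
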
diff --git a/include/media/stagefright/foundation/ALooper.h b/include/media/stagefright/foundation/ALooper.h
index 70e0c5e..09c469b 100644
--- a/include/media/stagefright/foundation/ALooper.h
+++ b/include/media/stagefright/foundation/ALooper.h
@@ -30,6 +30,7 @@
 
 struct AHandler;
 struct AMessage;
+struct AReplyToken;
 
 struct ALooper : public RefBase {
     typedef int32_t event_id;
@@ -53,11 +54,15 @@
 
     static int64_t GetNowUs();
 
+    const char *getName() const {
+        return mName.c_str();
+    }
+
 protected:
     virtual ~ALooper();
 
 private:
-    friend struct ALooperRoster;
+    friend struct AMessage;       // post()
 
     struct Event {
         int64_t mWhenUs;
@@ -75,12 +80,32 @@
     sp<LooperThread> mThread;
     bool mRunningLocally;
 
+    // use a separate lock for reply handling, as it is always on another thread
+    // use a central lock, however, to avoid creating a mutex for each reply
+    Mutex mRepliesLock;
+    Condition mRepliesCondition;
+
+    // START --- methods used only by AMessage
+
+    // posts a message on this looper with the given timeout
     void post(const sp<AMessage> &msg, int64_t delayUs);
+
+    // creates a reply token to be used with this looper
+    sp<AReplyToken> createReplyToken();
+    // waits for a response for the reply token.  If status is OK, the response
+    // is stored into the supplied variable.  Otherwise, it is unchanged.
+    status_t awaitResponse(const sp<AReplyToken> &replyToken, sp<AMessage> *response);
+    // posts a reply for a reply token.  If the reply could be successfully posted,
+    // it returns OK. Otherwise, it returns an error value.
+    status_t postReply(const sp<AReplyToken> &replyToken, const sp<AMessage> &msg);
+
+    // END --- methods used only by AMessage
+
     bool loop();
 
     DISALLOW_EVIL_CONSTRUCTORS(ALooper);
 };
 
-}  // namespace android
+} // namespace android
 
 #endif  // A_LOOPER_H_
diff --git a/include/media/stagefright/foundation/ALooperRoster.h b/include/media/stagefright/foundation/ALooperRoster.h
index a0be8eb..9912455 100644
--- a/include/media/stagefright/foundation/ALooperRoster.h
+++ b/include/media/stagefright/foundation/ALooperRoster.h
@@ -33,16 +33,6 @@
     void unregisterHandler(ALooper::handler_id handlerID);
     void unregisterStaleHandlers();
 
-    status_t postMessage(const sp<AMessage> &msg, int64_t delayUs = 0);
-    void deliverMessage(const sp<AMessage> &msg);
-
-    status_t postAndAwaitResponse(
-            const sp<AMessage> &msg, sp<AMessage> *response);
-
-    void postReply(uint32_t replyID, const sp<AMessage> &reply);
-
-    sp<ALooper> findLooper(ALooper::handler_id handlerID);
-
     void dump(int fd, const Vector<String16>& args);
 
 private:
@@ -54,10 +44,6 @@
     Mutex mLock;
     KeyedVector<ALooper::handler_id, HandlerInfo> mHandlers;
     ALooper::handler_id mNextHandlerID;
-    uint32_t mNextReplyID;
-    Condition mRepliesCondition;
-
-    KeyedVector<uint32_t, sp<AMessage> > mReplies;
 
     DISALLOW_EVIL_CONSTRUCTORS(ALooperRoster);
 };
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index a9e235b..83b9444 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -26,11 +26,41 @@
 namespace android {
 
 struct ABuffer;
+struct AHandler;
 struct AString;
-struct Parcel;
+class Parcel;
+
+struct AReplyToken : public RefBase {
+    AReplyToken(const sp<ALooper> &looper)
+        : mLooper(looper),
+          mReplied(false) {
+    }
+
+private:
+    friend struct AMessage;
+    friend struct ALooper;
+    wp<ALooper> mLooper;
+    sp<AMessage> mReply;
+    bool mReplied;
+
+    sp<ALooper> getLooper() const {
+        return mLooper.promote();
+    }
+    // if reply is not set, returns false; otherwise, it retrieves the reply and returns true
+    bool retrieveReply(sp<AMessage> *reply) {
+        if (mReplied) {
+            *reply = mReply;
+            mReply.clear();
+        }
+        return mReplied;
+    }
+    // sets the reply for this token. returns OK or error
+    status_t setReply(const sp<AMessage> &reply);
+};
 
 struct AMessage : public RefBase {
-    AMessage(uint32_t what = 0, ALooper::handler_id target = 0);
+    AMessage();
+    AMessage(uint32_t what, const sp<const AHandler> &handler);
 
     static sp<AMessage> FromParcel(const Parcel &parcel);
     void writeToParcel(Parcel *parcel) const;
@@ -38,8 +68,7 @@
     void setWhat(uint32_t what);
     uint32_t what() const;
 
-    void setTarget(ALooper::handler_id target);
-    ALooper::handler_id target() const;
+    void setTarget(const sp<const AHandler> &handler);
 
     void clear();
 
@@ -76,18 +105,22 @@
             const char *name,
             int32_t *left, int32_t *top, int32_t *right, int32_t *bottom) const;
 
-    void post(int64_t delayUs = 0);
+    status_t post(int64_t delayUs = 0);
 
     // Posts the message to its target and waits for a response (or error)
     // before returning.
     status_t postAndAwaitResponse(sp<AMessage> *response);
 
     // If this returns true, the sender of this message is synchronously
-    // awaiting a response, the "replyID" can be used to send the response
-    // via "postReply" below.
-    bool senderAwaitsResponse(uint32_t *replyID) const;
+    // awaiting a response and the reply token is consumed from the message
+    // and stored into replyID. The reply token must be used to send the response
+    // using "postReply" below.
+    bool senderAwaitsResponse(sp<AReplyToken> *replyID);
 
-    void postReply(uint32_t replyID);
+    // Posts the message as a response to a reply token.  A reply token can
+    // only be used once. Returns OK if the response could be posted; otherwise,
+    // an error.
+    status_t postReply(const sp<AReplyToken> &replyID);
 
     // Performs a deep-copy of "this", contained messages are in turn "dup'ed".
     // Warning: RefBase items, i.e. "objects" are _not_ copied but only have
@@ -117,9 +150,16 @@
     virtual ~AMessage();
 
 private:
+    friend struct ALooper; // deliver()
+
     uint32_t mWhat;
+
+    // used only for debugging
     ALooper::handler_id mTarget;
 
+    wp<AHandler> mHandler;
+    wp<ALooper> mLooper;
+
     struct Rect {
         int32_t mLeft, mTop, mRight, mBottom;
     };
@@ -157,6 +197,8 @@
 
     size_t findItemIndex(const char *name, size_t len) const;
 
+    void deliver();
+
     DISALLOW_EVIL_CONSTRUCTORS(AMessage);
 };
 
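
The AReplyToken type replaces the old uint32_t reply IDs for synchronous calls. A sketch of both halves of the pattern; the message id, payload key, and helper names are illustrative:

// Sketch: synchronous request/reply using AReplyToken.
// The message id 'quer' and the "result" key are illustrative.
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

// Caller side: block until the target handler posts a reply (or an error occurs).
static status_t queryHandler(const sp<AHandler> &handler, int32_t *result) {
    sp<AMessage> query = new AMessage('quer', handler);
    sp<AMessage> response;
    status_t err = query->postAndAwaitResponse(&response);
    if (err != OK) {
        return err;
    }
    CHECK(response->findInt32("result", result));
    return OK;
}

// Handler side, inside onMessageReceived(): the token may be used exactly once.
static void answerQuery(const sp<AMessage> &msg) {
    sp<AReplyToken> replyID;
    CHECK(msg->senderAwaitsResponse(&replyID));

    sp<AMessage> response = new AMessage;
    response->setInt32("result", 42);
    response->postReply(replyID);
}
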
diff --git a/include/media/stagefright/foundation/AString.h b/include/media/stagefright/foundation/AString.h
index 822dbb3..2f6d532 100644
--- a/include/media/stagefright/foundation/AString.h
+++ b/include/media/stagefright/foundation/AString.h
@@ -24,7 +24,7 @@
 namespace android {
 
 class String8;
-struct Parcel;
+class Parcel;
 
 struct AString {
     AString();
diff --git a/include/media/stagefright/timedtext/TimedTextDriver.h b/include/media/stagefright/timedtext/TimedTextDriver.h
index 37ef674..6f7c693 100644
--- a/include/media/stagefright/timedtext/TimedTextDriver.h
+++ b/include/media/stagefright/timedtext/TimedTextDriver.h
@@ -24,7 +24,7 @@
 
 namespace android {
 
-class ALooper;
+struct ALooper;
 struct IMediaHTTPService;
 class MediaPlayerBase;
 class MediaSource;
diff --git a/include/ndk/NdkMediaCodec.h b/include/ndk/NdkMediaCodec.h
index c07f4c9..4f6a1ef 100644
--- a/include/ndk/NdkMediaCodec.h
+++ b/include/ndk/NdkMediaCodec.h
@@ -142,7 +142,8 @@
 /**
  * Get the index of the next available buffer of processed data.
  */
-ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info, int64_t timeoutUs);
+ssize_t AMediaCodec_dequeueOutputBuffer(AMediaCodec*, AMediaCodecBufferInfo *info,
+        int64_t timeoutUs);
 AMediaFormat* AMediaCodec_getOutputFormat(AMediaCodec*);
 
 /**
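
For reference, a hedged NDK-side sketch of draining output with the declaration reformatted above; the AMEDIACODEC_INFO_* constants are assumed from the same header, and the timeout and render flag are arbitrary choices:

// Sketch: draining decoded output from an AMediaCodec.
// The 10ms timeout and the render flag are arbitrary illustrative choices.
#include <media/NdkMediaCodec.h>

static void drainOutput(AMediaCodec *codec) {
    AMediaCodecBufferInfo info;
    while (true) {
        ssize_t index = AMediaCodec_dequeueOutputBuffer(codec, &info, 10000 /* us */);
        if (index >= 0) {
            AMediaCodec_releaseOutputBuffer(codec, index, true /* render */);
        } else if (index == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
            AMediaFormat *format = AMediaCodec_getOutputFormat(codec);
            AMediaFormat_delete(format);
        } else if (index == AMEDIACODEC_INFO_TRY_AGAIN_LATER) {
            break;  // nothing ready within the timeout
        }
        // AMEDIACODEC_INFO_OUTPUT_BUFFERS_CHANGED needs no action here.
    }
}
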
diff --git a/include/ndk/NdkMediaExtractor.h b/include/ndk/NdkMediaExtractor.h
index 7a4e702..7324d31 100644
--- a/include/ndk/NdkMediaExtractor.h
+++ b/include/ndk/NdkMediaExtractor.h
@@ -55,12 +55,14 @@
 /**
  *  Set the file descriptor from which the extractor will read.
  */
-media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset, off64_t length);
+media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor*, int fd, off64_t offset,
+        off64_t length);
 
 /**
  * Set the URI from which the extractor will read.
  */
-media_status_t AMediaExtractor_setDataSource(AMediaExtractor*, const char *location); // TODO support headers
+media_status_t AMediaExtractor_setDataSource(AMediaExtractor*, const char *location);
+        // TODO support headers
 
 /**
  * Return the number of tracks in the previously specified media file
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index 31dff36..1e5064f 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -24,9 +24,9 @@
 #include <utils/threads.h>
 #include <utils/Log.h>
 #include <utils/RefBase.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/SingleStateQueue.h>
-#include <private/media/StaticAudioTrackState.h>
 
 namespace android {
 
@@ -54,24 +54,68 @@
 struct AudioTrackSharedStreaming {
     // similar to NBAIO MonoPipe
     // in continuously incrementing frame units, take modulo buffer size, which must be a power of 2
-    volatile int32_t mFront;    // read by server
-    volatile int32_t mRear;     // write by client
+    volatile int32_t mFront;    // read by consumer (output: server, input: client)
+    volatile int32_t mRear;     // written by producer (output: client, input: server)
     volatile int32_t mFlush;    // incremented by client to indicate a request to flush;
                                 // server notices and discards all data between mFront and mRear
     volatile uint32_t mUnderrunFrames;  // server increments for each unavailable but desired frame
 };
 
+// Represents a single state of an AudioTrack that was created in static mode (shared memory buffer
+// supplied by the client).  This state needs to be communicated from the client to server.  As this
+// state is too large to be updated atomically without a mutex, and mutexes aren't allowed here, the
+// state is wrapped by a SingleStateQueue.
+struct StaticAudioTrackState {
+    // Do not define constructors, destructors, or virtual methods as this is part of a
+    // union in shared memory and they will not get called properly.
+
+    // These fields should both be size_t, but since they are located in shared memory we
+    // force to 32-bit.  The client and server may have different typedefs for size_t.
+
+    // The state has a sequence counter to indicate whether changes are made to loop or position.
+    // The sequence counter also currently indicates whether loop or position is first depending
+    // on which is greater; it jumps by max(mLoopSequence, mPositionSequence) + 1.
+
+    uint32_t    mLoopStart;
+    uint32_t    mLoopEnd;
+    int32_t     mLoopCount;
+    uint32_t    mLoopSequence; // a sequence counter to indicate changes to loop
+    uint32_t    mPosition;
+    uint32_t    mPositionSequence; // a sequence counter to indicate changes to position
+};
+
 typedef SingleStateQueue<StaticAudioTrackState> StaticAudioTrackSingleStateQueue;
 
+struct StaticAudioTrackPosLoop {
+    // Do not define constructors, destructors, or virtual methods as this is part of a
+    // union in shared memory and they will not get called properly.
+
+    // These fields should both be size_t, but since they are located in shared memory we
+    // force to 32-bit.  The client and server may have different typedefs for size_t.
+
+    // This struct information is stored in a single state queue to communicate the
+    // static AudioTrack server state to the client while data is consumed.
+    // It is smaller than StaticAudioTrackState to prevent unnecessary information from
+    // being sent.
+
+    uint32_t mBufferPosition;
+    int32_t  mLoopCount;
+};
+
+typedef SingleStateQueue<StaticAudioTrackPosLoop> StaticAudioTrackPosLoopQueue;
+
 struct AudioTrackSharedStatic {
+    // client requests to the server for loop or position changes.
     StaticAudioTrackSingleStateQueue::Shared
                     mSingleStateQueue;
-    // This field should be a size_t, but since it is located in shared memory we
-    // force to 32-bit.  The client and server may have different typedefs for size_t.
-    uint32_t        mBufferPosition;    // updated asynchronously by server,
-                                        // "for entertainment purposes only"
+    // position info updated asynchronously by server and read by client,
+    // "for entertainment purposes only"
+    StaticAudioTrackPosLoopQueue::Shared
+                    mPosLoopQueue;
 };
 
+typedef SingleStateQueue<AudioPlaybackRate> PlaybackRateQueue;
+
 // ----------------------------------------------------------------------------
 
 // Important: do not add any virtual methods, including ~
@@ -96,7 +140,8 @@
                 uint32_t    mServer;    // Number of filled frames consumed by server (mIsOut),
                                         // or filled frames provided by server (!mIsOut).
                                         // It is updated asynchronously by server without a barrier.
-                                        // The value should be used "for entertainment purposes only",
+                                        // The value should be used
+                                        // "for entertainment purposes only",
                                         // which means don't make important decisions based on it.
 
                 uint32_t    mPad1;      // unused
@@ -117,6 +162,8 @@
                 uint32_t    mSampleRate;    // AudioTrack only: client's requested sample rate in Hz
                                             // or 0 == default. Write-only client, read-only server.
 
+                PlaybackRateQueue::Shared mPlaybackRateQueue;
+
                 // client write-only, server read-only
                 uint16_t    mSendLevel;      // Fixed point U4.12 so 0x1000 means 1.0
 
@@ -271,7 +318,8 @@
     AudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false)
         : ClientProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/,
-          clientInServer) { }
+          clientInServer),
+          mPlaybackRateMutator(&cblk->mPlaybackRateQueue) { }
     virtual ~AudioTrackClientProxy() { }
 
     // No barriers on the following operations, so the ordering of loads/stores
@@ -291,6 +339,10 @@
         mCblk->mSampleRate = sampleRate;
     }
 
+    void        setPlaybackRate(const AudioPlaybackRate& playbackRate) {
+        mPlaybackRateMutator.push(playbackRate);
+    }
+
     virtual void flush();
 
     virtual uint32_t    getUnderrunFrames() const {
@@ -302,6 +354,9 @@
     bool        getStreamEndDone() const;
 
     status_t    waitStreamEndDone(const struct timespec *requested);
+
+private:
+    PlaybackRateQueue::Mutator   mPlaybackRateMutator;
 };
 
 class StaticAudioTrackClientProxy : public AudioTrackClientProxy {
@@ -313,8 +368,28 @@
     virtual void    flush();
 
 #define MIN_LOOP    16  // minimum length of each loop iteration in frames
+
+            // setLoop(), setBufferPosition(), and setBufferPositionAndLoop() set the
+            // static buffer position and looping parameters.  These commands are not
+            // synchronous (they do not wait or block); instead they take effect at the
+            // next buffer data read from the server side. However, the client side
+            // getters will read a cached version of the position and loop variables
+            // until the setting takes effect.
+            //
+            // setBufferPositionAndLoop() is equivalent to calling, in order, setLoop() and
+            // setBufferPosition().
+            //
+            // The functions should not be relied upon to do parameter or state checking.
+            // That is done at the AudioTrack level.
+
             void    setLoop(size_t loopStart, size_t loopEnd, int loopCount);
+            void    setBufferPosition(size_t position);
+            void    setBufferPositionAndLoop(size_t position, size_t loopStart, size_t loopEnd,
+                                             int loopCount);
             size_t  getBufferPosition();
+                    // getBufferPositionAndLoopCount() provides the proper snapshot of
+                    // position and loopCount together.
+            void    getBufferPositionAndLoopCount(size_t *position, int *loopCount);
 
     virtual size_t  getMisalignment() {
         return 0;
@@ -326,7 +401,9 @@
 
 private:
     StaticAudioTrackSingleStateQueue::Mutator   mMutator;
-    size_t          mBufferPosition;    // so that getBufferPosition() appears to be synchronous
+    StaticAudioTrackPosLoopQueue::Observer      mPosLoopObserver;
+                        StaticAudioTrackState   mState;   // last communicated state to server
+                        StaticAudioTrackPosLoop mPosLoop; // snapshot of position and loop.
 };
 
 // ----------------------------------------------------------------------------
@@ -394,8 +471,10 @@
 public:
     AudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
             size_t frameSize, bool clientInServer = false, uint32_t sampleRate = 0)
-        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer) {
+        : ServerProxy(cblk, buffers, frameCount, frameSize, true /*isOut*/, clientInServer),
+          mPlaybackRateObserver(&cblk->mPlaybackRateQueue) {
         mCblk->mSampleRate = sampleRate;
+        mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
     }
 protected:
     virtual ~AudioTrackServerProxy() { }
@@ -429,6 +508,13 @@
 
     // Return the total number of frames that AudioFlinger has obtained and released
     virtual size_t      framesReleased() const { return mCblk->mServer; }
+
+    // Return the playback speed and pitch read atomically. Not multi-thread safe on server side.
+    AudioPlaybackRate getPlaybackRate();
+
+private:
+    AudioPlaybackRate             mPlaybackRate;  // last observed playback rate
+    PlaybackRateQueue::Observer   mPlaybackRateObserver;
 };
 
 class StaticAudioTrackServerProxy : public AudioTrackServerProxy {
@@ -447,10 +533,13 @@
     virtual uint32_t    getUnderrunFrames() const { return 0; }
 
 private:
+    status_t            updateStateWithLoop(StaticAudioTrackState *localState,
+                                            const StaticAudioTrackState &update) const;
+    status_t            updateStateWithPosition(StaticAudioTrackState *localState,
+                                                const StaticAudioTrackState &update) const;
     ssize_t             pollPosition(); // poll for state queue update, and return current position
     StaticAudioTrackSingleStateQueue::Observer  mObserver;
-    size_t              mPosition;  // server's current play position in frames, relative to 0
-
+    StaticAudioTrackPosLoopQueue::Mutator       mPosLoopMutator;
     size_t              mFramesReadySafe; // Assuming size_t read/writes are atomic on 32 / 64 bit
                                           // processors, this is a thread-safe version of
                                           // mFramesReady.
@@ -459,7 +548,8 @@
                                           // can cause a track to appear to have a large number
                                           // of frames. INT64_MAX means an infinite loop.
     bool                mFramesReadyIsCalledByMultipleThreads;
-    StaticAudioTrackState   mState;
+    StaticAudioTrackState mState;         // Server side state. Any updates from client must be
+                                          // passed by the mObserver SingleStateQueue.
 };
 
 // Proxy used by AudioFlinger for servicing AudioRecord
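
A short sketch of the new playback-rate path: the client proxy pushes an AudioPlaybackRate through the single-state queue and the server proxy later observes the latest value. The proxy objects are assumed to be the ones AudioTrack and AudioFlinger already own, and mSpeed is an assumed field of AudioPlaybackRate:

// Sketch: the client pushes a playback rate, the server observes the latest
// value.  The proxies are assumed to be the ones AudioTrack / AudioFlinger own,
// and mSpeed is an assumed field of AudioPlaybackRate.
#include <media/AudioResamplerPublic.h>
#include <private/media/AudioTrackShared.h>

using namespace android;

static void clientSetRate(const sp<AudioTrackClientProxy> &proxy) {
    AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
    rate.mSpeed = 1.5f;
    proxy->setPlaybackRate(rate);  // lock-free push into mPlaybackRateQueue
}

static void serverMixerStep(const sp<AudioTrackServerProxy> &proxy) {
    // Not multi-thread safe on the server side: call from the mixer thread only.
    AudioPlaybackRate rate = proxy->getPlaybackRate();
    (void)rate;  // would be fed to the resampler / timestretcher
}
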
diff --git a/include/private/media/StaticAudioTrackState.h b/include/private/media/StaticAudioTrackState.h
deleted file mode 100644
index d483061..0000000
--- a/include/private/media/StaticAudioTrackState.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef STATIC_AUDIO_TRACK_STATE_H
-#define STATIC_AUDIO_TRACK_STATE_H
-
-namespace android {
-
-// Represents a single state of an AudioTrack that was created in static mode (shared memory buffer
-// supplied by the client).  This state needs to be communicated from the client to server.  As this
-// state is too large to be updated atomically without a mutex, and mutexes aren't allowed here, the
-// state is wrapped by a SingleStateQueue.
-struct StaticAudioTrackState {
-    // do not define constructors, destructors, or virtual methods
-
-    // These fields should both be size_t, but since they are located in shared memory we
-    // force to 32-bit.  The client and server may have different typedefs for size_t.
-    uint32_t    mLoopStart;
-    uint32_t    mLoopEnd;
-
-    int         mLoopCount;
-};
-
-}   // namespace android
-
-#endif  // STATIC_AUDIO_TRACK_STATE_H
diff --git a/include/radio/IRadio.h b/include/radio/IRadio.h
new file mode 100644
index 0000000..1877f8f
--- /dev/null
+++ b/include/radio/IRadio.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_IRADIO_H
+#define ANDROID_HARDWARE_IRADIO_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <system/radio.h>
+
+namespace android {
+
+class IRadio : public IInterface
+{
+public:
+
+    DECLARE_META_INTERFACE(Radio);
+
+    virtual void detach() = 0;
+
+    virtual status_t setConfiguration(const struct radio_band_config *config) = 0;
+
+    virtual status_t getConfiguration(struct radio_band_config *config) = 0;
+
+    virtual status_t setMute(bool mute) = 0;
+
+    virtual status_t getMute(bool *mute) = 0;
+
+    virtual status_t step(radio_direction_t direction, bool skipSubChannel) = 0;
+
+    virtual status_t scan(radio_direction_t direction, bool skipSubChannel) = 0;
+
+    virtual status_t tune(unsigned int channel, unsigned int subChannel) = 0;
+
+    virtual status_t cancel() = 0;
+
+    virtual status_t getProgramInformation(struct radio_program_info *info) = 0;
+
+    virtual status_t hasControl(bool *hasControl) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnRadio: public BnInterface<IRadio>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif //ANDROID_HARDWARE_IRADIO_H
diff --git a/include/radio/IRadioClient.h b/include/radio/IRadioClient.h
new file mode 100644
index 0000000..9062ad6
--- /dev/null
+++ b/include/radio/IRadioClient.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_IRADIO_CLIENT_H
+#define ANDROID_HARDWARE_IRADIO_CLIENT_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+
+namespace android {
+
+class IRadioClient : public IInterface
+{
+public:
+
+    DECLARE_META_INTERFACE(RadioClient);
+
+    virtual void onEvent(const sp<IMemory>& eventMemory) = 0;
+
+};
+
+// ----------------------------------------------------------------------------
+
+class BnRadioClient : public BnInterface<IRadioClient>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif //ANDROID_HARDWARE_IRADIO_CLIENT_H
diff --git a/include/radio/IRadioService.h b/include/radio/IRadioService.h
new file mode 100644
index 0000000..a946dd5
--- /dev/null
+++ b/include/radio/IRadioService.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_IRADIO_SERVICE_H
+#define ANDROID_HARDWARE_IRADIO_SERVICE_H
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+#include <system/radio.h>
+
+namespace android {
+
+class IRadio;
+class IRadioClient;
+
+class IRadioService : public IInterface
+{
+public:
+
+    DECLARE_META_INTERFACE(RadioService);
+
+    virtual status_t listModules(struct radio_properties *properties,
+                                 uint32_t *numModules) = 0;
+
+    virtual status_t attach(const radio_handle_t handle,
+                            const sp<IRadioClient>& client,
+                            const struct radio_band_config *config,
+                            bool withAudio,
+                            sp<IRadio>& radio) = 0;
+};
+
+// ----------------------------------------------------------------------------
+
+class BnRadioService: public BnInterface<IRadioService>
+{
+public:
+    virtual status_t    onTransact( uint32_t code,
+                                    const Parcel& data,
+                                    Parcel* reply,
+                                    uint32_t flags = 0);
+};
+
+}; // namespace android
+
+#endif //ANDROID_HARDWARE_IRADIO_SERVICE_H
diff --git a/include/radio/Radio.h b/include/radio/Radio.h
new file mode 100644
index 0000000..302bf16
--- /dev/null
+++ b/include/radio/Radio.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_H
+#define ANDROID_HARDWARE_RADIO_H
+
+#include <binder/IBinder.h>
+#include <utils/threads.h>
+#include <radio/RadioCallback.h>
+#include <radio/IRadio.h>
+#include <radio/IRadioService.h>
+#include <radio/IRadioClient.h>
+#include <system/radio.h>
+
+namespace android {
+
+class MemoryDealer;
+
+class Radio : public BnRadioClient,
+                        public IBinder::DeathRecipient
+{
+public:
+
+    virtual ~Radio();
+
+    static  status_t listModules(struct radio_properties *properties,
+                                 uint32_t *numModules);
+    static  sp<Radio> attach(radio_handle_t handle,
+                             const struct radio_band_config *config,
+                             bool withAudio,
+                             const sp<RadioCallback>& callback);
+
+
+            void detach();
+
+            status_t setConfiguration(const struct radio_band_config *config);
+
+            status_t getConfiguration(struct radio_band_config *config);
+
+            status_t setMute(bool mute);
+
+            status_t getMute(bool *mute);
+
+            status_t step(radio_direction_t direction, bool skipSubChannel);
+
+            status_t scan(radio_direction_t direction, bool skipSubChannel);
+
+            status_t tune(unsigned int channel, unsigned int subChannel);
+
+            status_t cancel();
+
+            status_t getProgramInformation(struct radio_program_info *info);
+
+            status_t hasControl(bool *hasControl);
+
+            // BpRadioClient
+            virtual void onEvent(const sp<IMemory>& eventMemory);
+
+            //IBinder::DeathRecipient
+            virtual void binderDied(const wp<IBinder>& who);
+
+private:
+            Radio(radio_handle_t handle,
+                            const sp<RadioCallback>&);
+            static const sp<IRadioService>& getRadioService();
+
+            Mutex                   mLock;
+            sp<IRadio>              mIRadio;
+            const radio_handle_t    mHandle;
+            sp<RadioCallback>       mCallback;
+};
+
+}; // namespace android
+
+#endif //ANDROID_HARDWARE_RADIO_H
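
A client-side sketch for the new Radio API; the callback subclass, the zero-initialized band config, the handle field of radio_properties, and the tune() arguments are illustrative assumptions, and error handling is minimal:

// Sketch: attaching to the first radio module and tuning.  The handle field of
// radio_properties, the zeroed band config, and the tune() arguments are
// illustrative assumptions.
#include <string.h>
#include <radio/Radio.h>
#include <radio/RadioCallback.h>

using namespace android;

struct PrintingCallback : public RadioCallback {
    virtual void onEvent(struct radio_event *event) {
        (void)event;  // a real client would inspect the event type here
    }
};

static sp<Radio> openFirstTuner() {
    struct radio_properties props[1];
    uint32_t numModules = 1;
    if (Radio::listModules(props, &numModules) != OK || numModules == 0) {
        return NULL;
    }

    struct radio_band_config config;
    memset(&config, 0, sizeof(config));  // a real client picks a band from props

    sp<Radio> radio = Radio::attach(props[0].handle, &config,
                                    true /* withAudio */, new PrintingCallback);
    if (radio != NULL) {
        radio->tune(94900 /* channel */, 0 /* subChannel */);
    }
    return radio;
}
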
diff --git a/include/radio/RadioCallback.h b/include/radio/RadioCallback.h
new file mode 100644
index 0000000..4a7f1a6
--- /dev/null
+++ b/include/radio/RadioCallback.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_CALLBACK_H
+#define ANDROID_HARDWARE_RADIO_CALLBACK_H
+
+#include <utils/RefBase.h>
+#include <system/radio.h>
+
+namespace android {
+
+class RadioCallback : public RefBase
+{
+public:
+
+            RadioCallback() {}
+    virtual ~RadioCallback() {}
+
+    virtual void onEvent(struct radio_event *event) = 0;
+
+};
+
+}; // namespace android
+
+#endif //ANDROID_HARDWARE_RADIO_CALLBACK_H
diff --git a/media/img_utils/include/img_utils/DngUtils.h b/media/img_utils/include/img_utils/DngUtils.h
index 4389b02..3dcedc5 100644
--- a/media/img_utils/include/img_utils/DngUtils.h
+++ b/media/img_utils/include/img_utils/DngUtils.h
@@ -31,6 +31,7 @@
 namespace img_utils {
 
 #define NELEMS(x) ((int) (sizeof(x) / sizeof((x)[0])))
+#define CLAMP(x, low, high) (((x) > (high)) ? (high) : (((x) < (low)) ? (low) : (x)))
 
 /**
  * Utility class for building values for the OpcodeList tags specified
@@ -107,13 +108,49 @@
                                     uint32_t mapPlanes,
                                     const float* mapGains);
 
+        /**
+         * Add WarpRectilinear opcode for the given metadata parameters.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        virtual status_t addWarpRectilinearForMetadata(const float* kCoeffs,
+                                                       uint32_t activeArrayWidth,
+                                                       uint32_t activeArrayHeight,
+                                                       float opticalCenterX,
+                                                       float opticalCenterY);
+
+        /**
+         * Add a WarpRectilinear opcode.
+         *
+         * numPlanes - Number of planes included in this opcode.
+         * opticalCenterX, opticalCenterY - Normalized x,y coordinates of the sensor optical
+         *          center relative to the top,left pixel of the produced images (e.g. [0.5, 0.5]
+         *          gives a sensor optical center in the image center).
+         * kCoeffs - A list of coefficients for the polynomial equation representing the distortion
+         *          correction.  For each plane, 6 coefficients must be included:
+         *          {k_r0, k_r1, k_r2, k_r3, k_t0, k_t1}.  See the DNG 1.4 specification for an
+         *          outline of the polynomial used here.
+         *
+         * Returns OK on success, or a negative error code.
+         */
+        virtual status_t addWarpRectilinear(uint32_t numPlanes,
+                                            double opticalCenterX,
+                                            double opticalCenterY,
+                                            const double* kCoeffs);
+
         // TODO: Add other Opcode methods
     protected:
         static const uint32_t FLAG_OPTIONAL = 0x1u;
         static const uint32_t FLAG_OPTIONAL_FOR_PREVIEW = 0x2u;
 
+        // Opcode IDs
         enum {
+            WARP_RECTILINEAR_ID = 1,
             GAIN_MAP_ID = 9,
+        };
+
+        // LSM mosaic indices
+        enum {
             LSM_R_IND = 0,
             LSM_GE_IND = 1,
             LSM_GO_IND = 2,
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index d3b4a35..b213403 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -16,6 +16,10 @@
 
 #include <img_utils/DngUtils.h>
 
+#include <inttypes.h>
+
+#include <math.h>
+
 namespace android {
 namespace img_utils {
 
@@ -229,7 +233,7 @@
     err = mEndianOut.write(version, 0, NELEMS(version));
     if (err != OK) return err;
 
-    // Do not include optional flag for preview, as this can have a large effect on the output.
+    // Allow this opcode to be skipped if not supported
     uint32_t flags = FLAG_OPTIONAL;
 
     err = mEndianOut.write(&flags, 0, 1);
@@ -278,5 +282,96 @@
     return OK;
 }
 
+status_t OpcodeListBuilder::addWarpRectilinearForMetadata(const float* kCoeffs,
+                                                          uint32_t activeArrayWidth,
+                                                          uint32_t activeArrayHeight,
+                                                          float opticalCenterX,
+                                                          float opticalCenterY) {
+    if (activeArrayWidth <= 1 || activeArrayHeight <= 1) {
+        ALOGE("%s: Cannot add opcode for active array with dimensions w=%" PRIu32 ", h=%" PRIu32,
+                __FUNCTION__, activeArrayWidth, activeArrayHeight);
+        return BAD_VALUE;
+    }
+
+    double normalizedOCX = opticalCenterX / static_cast<double>(activeArrayWidth - 1);
+    double normalizedOCY = opticalCenterY / static_cast<double>(activeArrayHeight - 1);
+
+    normalizedOCX = CLAMP(normalizedOCX, 0, 1);
+    normalizedOCY = CLAMP(normalizedOCY, 0, 1);
+
+    // Conversion factors from Camera2 K factors to DNG spec. K factors:
+    //
+    //      Note: these are necessary because our unit system assumes a
+    //      normalized max radius of sqrt(2), whereas the DNG spec's
+    //      WarpRectilinear opcode assumes a normalized max radius of 1.
+    //      Thus, each K coefficient must include the domain scaling
+    //      factor (the DNG domain is scaled by sqrt(2) to emulate the
+    //      domain used by the Camera2 specification).
+
+    const double c_0 = sqrt(2);
+    const double c_1 = 2 * sqrt(2);
+    const double c_2 = 4 * sqrt(2);
+    const double c_3 = 8 * sqrt(2);
+    const double c_4 = 2;
+    const double c_5 = 2;
+
+    const double coeffs[] = { c_0 * kCoeffs[0],
+                              c_1 * kCoeffs[1],
+                              c_2 * kCoeffs[2],
+                              c_3 * kCoeffs[3],
+                              c_4 * kCoeffs[4],
+                              c_5 * kCoeffs[5] };
+
+
+    return addWarpRectilinear(/*numPlanes*/1,
+                              /*opticalCenterX*/normalizedOCX,
+                              /*opticalCenterY*/normalizedOCY,
+                              coeffs);
+}
+
+status_t OpcodeListBuilder::addWarpRectilinear(uint32_t numPlanes,
+                                               double opticalCenterX,
+                                               double opticalCenterY,
+                                               const double* kCoeffs) {
+
+    uint32_t opcodeId = WARP_RECTILINEAR_ID;
+
+    status_t err = mEndianOut.write(&opcodeId, 0, 1);
+    if (err != OK) return err;
+
+    uint8_t version[] = {1, 3, 0, 0};
+    err = mEndianOut.write(version, 0, NELEMS(version));
+    if (err != OK) return err;
+
+    // Allow this opcode to be skipped if not supported
+    uint32_t flags = FLAG_OPTIONAL;
+
+    err = mEndianOut.write(&flags, 0, 1);
+    if (err != OK) return err;
+
+    const uint32_t NUMBER_CENTER_ARGS = 2;
+    const uint32_t NUMBER_COEFFS = numPlanes * 6;
+    uint32_t totalSize = (NUMBER_CENTER_ARGS + NUMBER_COEFFS) * sizeof(double) + sizeof(uint32_t);
+
+    err = mEndianOut.write(&totalSize, 0, 1);
+    if (err != OK) return err;
+
+    err = mEndianOut.write(&numPlanes, 0, 1);
+    if (err != OK) return err;
+
+    err = mEndianOut.write(kCoeffs, 0, NUMBER_COEFFS);
+    if (err != OK) return err;
+
+    err = mEndianOut.write(&opticalCenterX, 0, 1);
+    if (err != OK) return err;
+
+    err = mEndianOut.write(&opticalCenterY, 0, 1);
+    if (err != OK) return err;
+
+    mCount++;
+
+    return OK;
+}
+
 } /*namespace img_utils*/
 } /*namespace android*/
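
A usage sketch for the new opcode builder entry point: feeding Camera2-style radial/tangential distortion coefficients into addWarpRectilinearForMetadata(), which performs the sqrt(2) domain rescaling described in the comment above. The coefficient values and array geometry are illustrative:

// Sketch: emitting a WarpRectilinear opcode from Camera2-style distortion
// coefficients.  Coefficient values and array geometry are illustrative.
#include <img_utils/DngUtils.h>

using namespace android;
using namespace android::img_utils;

static status_t writeDistortionOpcode(OpcodeListBuilder *builder) {
    // {k_r0, k_r1, k_r2, k_r3, k_t0, k_t1}; the builder rescales these from the
    // Camera2 domain (max radius sqrt(2)) to the DNG domain (max radius 1).
    const float kCoeffs[6] = { 1.0f, 0.01f, 0.001f, 0.0f, 0.0f, 0.0f };

    return builder->addWarpRectilinearForMetadata(
            kCoeffs,
            4000 /* activeArrayWidth */, 3000 /* activeArrayHeight */,
            1999.5f /* opticalCenterX */, 1499.5f /* opticalCenterY */);
}
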
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index 6d30d64..db7865a 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -24,10 +24,12 @@
 
 #include <cutils/misc.h>
 #include <cutils/config_utils.h>
+#include <cutils/properties.h>
 #include <audio_effects/audio_effects_conf.h>
 
 static list_elem_t *gEffectList; // list of effect_entry_t: all currently created effects
 static list_elem_t *gLibraryList; // list of lib_entry_t: all currently loaded libraries
+static list_elem_t *gSkippedEffects; // list of effects skipped because of duplicate uuid
 // list of effect_descriptor and list of sub effects : all currently loaded
 // It does not contain effects without sub effects.
 static list_sub_elem_t *gSubEffectList;
@@ -63,10 +65,10 @@
                lib_entry_t **lib,
                effect_descriptor_t **desc);
 // To search a subeffect in the gSubEffectList
-int findSubEffect(const effect_uuid_t *uuid,
+static int findSubEffect(const effect_uuid_t *uuid,
                lib_entry_t **lib,
                effect_descriptor_t **desc);
-static void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len);
+static void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len, int indent);
 static int stringToUuid(const char *str, effect_uuid_t *uuid);
 static int uuidToString(const effect_uuid_t *uuid, char *str, size_t maxLen);
 
@@ -237,8 +239,8 @@
     }
 
 #if (LOG_NDEBUG == 0)
-    char str[256];
-    dumpEffectDescriptor(pDescriptor, str, 256);
+    char str[512];
+    dumpEffectDescriptor(pDescriptor, str, sizeof(str), 0 /* indent */);
     ALOGV("EffectQueryEffect() desc:%s", str);
 #endif
     pthread_mutex_unlock(&gLibLock);
@@ -446,12 +448,19 @@
         return 0;
     }
 
+    // Check whether effects listed in the configuration files should be ignored
+    const bool ignoreFxConfFiles = property_get_bool(PROPERTY_IGNORE_EFFECTS, false);
+
     pthread_mutex_init(&gLibLock, NULL);
 
-    if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
-        loadEffectConfigFile(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
-    } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
-        loadEffectConfigFile(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
+    if (ignoreFxConfFiles) {
+        ALOGI("Audio effects in configuration files will be ignored");
+    } else {
+        if (access(AUDIO_EFFECT_VENDOR_CONFIG_FILE, R_OK) == 0) {
+            loadEffectConfigFile(AUDIO_EFFECT_VENDOR_CONFIG_FILE);
+        } else if (access(AUDIO_EFFECT_DEFAULT_CONFIG_FILE, R_OK) == 0) {
+            loadEffectConfigFile(AUDIO_EFFECT_DEFAULT_CONFIG_FILE);
+        }
     }
 
     updateNumEffects();
@@ -503,15 +512,31 @@
     audio_effect_library_t *desc;
     list_elem_t *e;
     lib_entry_t *l;
+    char path[PATH_MAX];
+    char *str;
+    size_t len;
 
     node = config_find(root, PATH_TAG);
     if (node == NULL) {
         return -EINVAL;
     }
+    // audio_effects.conf always specifies 32 bit lib path: convert to 64 bit path if needed
+    strlcpy(path, node->value, PATH_MAX);
+#ifdef __LP64__
+    str = strstr(path, "/lib/");
+    if (str == NULL)
+        return -EINVAL;
+    len = str - path;
+    path[len] = '\0';
+    strlcat(path, "/lib64/", PATH_MAX);
+    strlcat(path, node->value + len + strlen("/lib/"), PATH_MAX);
+#endif
+    if (strlen(path) >= PATH_MAX - 1)
+        return -EINVAL;
 
-    hdl = dlopen(node->value, RTLD_NOW);
+    hdl = dlopen(path, RTLD_NOW);
     if (hdl == NULL) {
-        ALOGW("loadLibrary() failed to open %s", node->value);
+        ALOGW("loadLibrary() failed to open %s", path);
         goto error;
     }
 
@@ -535,7 +560,7 @@
     // add entry for library in gLibraryList
     l = malloc(sizeof(lib_entry_t));
     l->name = strndup(name, PATH_MAX);
-    l->path = strndup(node->value, PATH_MAX);
+    l->path = strndup(path, PATH_MAX);
     l->handle = hdl;
     l->desc = desc;
     l->effects = NULL;
@@ -547,7 +572,7 @@
     e->next = gLibraryList;
     gLibraryList = e;
     pthread_mutex_unlock(&gLibLock);
-    ALOGV("getLibrary() linked library %p for path %s", l, node->value);
+    ALOGV("getLibrary() linked library %p for path %s", l, path);
 
     return 0;
 
@@ -595,8 +620,8 @@
         return -EINVAL;
     }
 #if (LOG_NDEBUG==0)
-    char s[256];
-    dumpEffectDescriptor(d, s, 256);
+    char s[512];
+    dumpEffectDescriptor(d, s, sizeof(s), 0 /* indent */);
     ALOGV("addSubEffect() read descriptor %p:%s",d, s);
 #endif
     if (EFFECT_API_VERSION_MAJOR(d->apiVersion) !=
@@ -660,6 +685,13 @@
         ALOGW("loadEffect() invalid uuid %s", node->value);
         return -EINVAL;
     }
+    lib_entry_t *tmp;
+    bool skip = false;
+    if (findEffect(NULL, &uuid, &tmp, NULL) == 0) {
+        ALOGW("skipping duplicate uuid %s %s", node->value,
+                node->next ? "and its sub-effects" : "");
+        skip = true;
+    }
 
     d = malloc(sizeof(effect_descriptor_t));
     if (l->desc->get_descriptor(&uuid, d) != 0) {
@@ -670,8 +702,8 @@
         return -EINVAL;
     }
 #if (LOG_NDEBUG==0)
-    char s[256];
-    dumpEffectDescriptor(d, s, 256);
+    char s[512];
+    dumpEffectDescriptor(d, s, sizeof(s), 0 /* indent */);
     ALOGV("loadEffect() read descriptor %p:%s",d, s);
 #endif
     if (EFFECT_API_VERSION_MAJOR(d->apiVersion) !=
@@ -682,8 +714,14 @@
     }
     e = malloc(sizeof(list_elem_t));
     e->object = d;
-    e->next = l->effects;
-    l->effects = e;
+    if (skip) {
+        e->next = gSkippedEffects;
+        gSkippedEffects = e;
+        return -EINVAL;
+    } else {
+        e->next = l->effects;
+        l->effects = e;
+    }
 
     // After the UUID node in the config_tree, if node->next is valid,
     // that would be sub effect node.
@@ -876,22 +914,30 @@
     return ret;
 }
 
-void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len) {
+void dumpEffectDescriptor(effect_descriptor_t *desc, char *str, size_t len, int indent) {
     char s[256];
+    char ss[256];
+    char idt[indent + 1];
 
-    snprintf(str, len, "\nEffect Descriptor %p:\n", desc);
-    strncat(str, "- TYPE: ", len);
-    uuidToString(&desc->uuid, s, 256);
-    snprintf(str, len, "- UUID: %s\n", s);
-    uuidToString(&desc->type, s, 256);
-    snprintf(str, len, "- TYPE: %s\n", s);
-    sprintf(s, "- apiVersion: %08X\n- flags: %08X\n",
-            desc->apiVersion, desc->flags);
-    strncat(str, s, len);
-    sprintf(s, "- name: %s\n", desc->name);
-    strncat(str, s, len);
-    sprintf(s, "- implementor: %s\n", desc->implementor);
-    strncat(str, s, len);
+    memset(idt, ' ', indent);
+    idt[indent] = 0;
+
+    str[0] = 0;
+
+    snprintf(s, sizeof(s), "%s%s / %s\n", idt, desc->name, desc->implementor);
+    strlcat(str, s, len);
+
+    uuidToString(&desc->uuid, s, sizeof(s));
+    snprintf(ss, sizeof(ss), "%s  UUID: %s\n", idt, s);
+    strlcat(str, ss, len);
+
+    uuidToString(&desc->type, s, sizeof(s));
+    snprintf(ss, sizeof(ss), "%s  TYPE: %s\n", idt, s);
+    strlcat(str, ss, len);
+
+    sprintf(s, "%s  apiVersion: %08X\n%s  flags: %08X\n", idt,
+            desc->apiVersion, idt, desc->flags);
+    strlcat(str, s, len);
 }
 
 int stringToUuid(const char *str, effect_uuid_t *uuid)
@@ -934,3 +980,40 @@
     return 0;
 }
 
+int EffectDumpEffects(int fd) {
+    char s[512];
+    list_elem_t *e = gLibraryList;
+    lib_entry_t *l = NULL;
+    effect_descriptor_t *d = NULL;
+    int found = 0;
+    int ret = 0;
+
+    while (e) {
+        l = (lib_entry_t *)e->object;
+        list_elem_t *efx = l->effects;
+        dprintf(fd, "Library %s\n", l->name);
+        if (!efx) {
+            dprintf(fd, "  (no effects)\n");
+        }
+        while (efx) {
+            d = (effect_descriptor_t *)efx->object;
+            dumpEffectDescriptor(d, s, sizeof(s), 2);
+            dprintf(fd, "%s", s);
+            efx = efx->next;
+        }
+        e = e->next;
+    }
+
+    e = gSkippedEffects;
+    if (e) {
+        dprintf(fd, "Skipped effects\n");
+        while(e) {
+            d = (effect_descriptor_t *)e->object;
+            dumpEffectDescriptor(d, s, sizeof(s), 2 /* indent */);
+            dprintf(fd, "%s", s);
+            e = e->next;
+        }
+    }
+    return ret;
+}
+
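Editor's note: the loadLibrary() change above rewrites the first "/lib/" component of the configured library path to "/lib64/" on LP64 builds before calling dlopen(). A minimal standalone sketch of that rewrite using std::string; the real code works over char buffers with strlcpy/strlcat and returns -EINVAL when no "/lib/" component is present:

    #include <iostream>
    #include <string>

    // Model of the 32-bit -> 64-bit library path rewrite done in loadLibrary():
    // audio_effects.conf always lists /system/lib/... paths, so on LP64 builds
    // the first "/lib/" component is replaced with "/lib64/".
    static std::string to64BitPath(const std::string& path32) {
        const std::string needle = "/lib/";
        auto pos = path32.find(needle);
        if (pos == std::string::npos) {
            return path32;            // the real code rejects such paths instead
        }
        return path32.substr(0, pos) + "/lib64/" + path32.substr(pos + needle.size());
    }

    int main() {
        std::cout << to64BitPath("/system/lib/soundfx/libbundlewrapper.so") << "\n";
        // prints /system/lib64/soundfx/libbundlewrapper.so
        return 0;
    }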
diff --git a/media/libeffects/factory/EffectsFactory.h b/media/libeffects/factory/EffectsFactory.h
index 560b485..518800d 100644
--- a/media/libeffects/factory/EffectsFactory.h
+++ b/media/libeffects/factory/EffectsFactory.h
@@ -26,6 +26,7 @@
 extern "C" {
 #endif
 
+#define PROPERTY_IGNORE_EFFECTS "ro.audio.ignore_effects"
 
 typedef struct list_elem_s {
     void *object;
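Editor's note: EffectInit() now consults this property via property_get_bool() before parsing any audio_effects.conf file. A minimal standalone sketch of that gate, with getenv() standing in for the cutils property API; the environment key used here is purely illustrative:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Sketch of the gate added to EffectInit(): when the ignore-effects
    // property reads true, the vendor/default audio_effects.conf files are
    // never parsed. getenv() stands in for property_get_bool() here.
    static bool ignoreEffectsConfigured() {
        const char* v = std::getenv("RO_AUDIO_IGNORE_EFFECTS");  // stand-in key
        return v != nullptr && std::strcmp(v, "true") == 0;
    }

    int main() {
        if (ignoreEffectsConfigured()) {
            std::printf("Audio effects in configuration files will be ignored\n");
        } else {
            std::printf("loading audio_effects.conf\n");
        }
        return 0;
    }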
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
index 40c7fef..af904a6 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.cpp
@@ -27,6 +27,7 @@
 
 #include <cutils/log.h>
 #include "EffectBundle.h"
+#include "math.h"
 
 
 // effect_handle_t interface implementation for bass boost
@@ -830,32 +831,69 @@
     int gainCorrection = 0;
     //Count the energy contribution per band for EQ and BassBoost only if they are active.
     float energyContribution = 0;
+    float energyCross = 0;
+    float energyBassBoost = 0;
+    float crossCorrection = 0;
 
     //EQ contribution
     if (pContext->pBundledContext->bEqualizerEnabled == LVM_TRUE) {
         for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
-            float bandEnergy = (pContext->pBundledContext->bandGaindB[i] *
-                    LimitLevel_bandEnergyContribution[i])/15.0;
+            float bandFactor = pContext->pBundledContext->bandGaindB[i]/15.0;
+            float bandCoefficient = LimitLevel_bandEnergyCoefficient[i];
+            float bandEnergy = bandFactor * bandCoefficient * bandCoefficient;
             if (bandEnergy > 0)
                 energyContribution += bandEnergy;
         }
+
+        //cross EQ coefficients
+        float bandFactorSum = 0;
+        for (int i = 0; i < FIVEBAND_NUMBANDS-1; i++) {
+            float bandFactor1 = pContext->pBundledContext->bandGaindB[i]/15.0;
+            float bandFactor2 = pContext->pBundledContext->bandGaindB[i+1]/15.0;
+
+            if (bandFactor1 > 0 && bandFactor2 > 0) {
+                float crossEnergy = bandFactor1 * bandFactor2 *
+                        LimitLevel_bandEnergyCrossCoefficient[i];
+                bandFactorSum += bandFactor1 * bandFactor2;
+
+                if (crossEnergy > 0)
+                    energyCross += crossEnergy;
+            }
+        }
+        bandFactorSum -= 1.0;
+        if (bandFactorSum > 0)
+            crossCorrection = bandFactorSum * 0.7;
     }
 
     //BassBoost contribution
     if (pContext->pBundledContext->bBassEnabled == LVM_TRUE) {
-        float bandEnergy = (pContext->pBundledContext->BassStrengthSaved *
-                LimitLevel_bassBoostEnergyContribution)/1000.0;
-        if (bandEnergy > 0)
-            energyContribution += bandEnergy;
+        float boostFactor = (pContext->pBundledContext->BassStrengthSaved)/1000.0;
+        float boostCoefficient = LimitLevel_bassBoostEnergyCoefficient;
+
+        energyContribution += boostFactor * boostCoefficient * boostCoefficient;
+
+        for (int i = 0; i < FIVEBAND_NUMBANDS; i++) {
+            float bandFactor = pContext->pBundledContext->bandGaindB[i]/15.0;
+            float bandCrossCoefficient = LimitLevel_bassBoostEnergyCrossCoefficient[i];
+            float bandEnergy = boostFactor * bandFactor *
+                    bandCrossCoefficient;
+            if (bandEnergy > 0)
+                energyBassBoost += bandEnergy;
+        }
     }
 
     //Virtualizer contribution
     if (pContext->pBundledContext->bVirtualizerEnabled == LVM_TRUE) {
-                   energyContribution += LimitLevel_virtualizerContribution;
-        }
+        energyContribution += LimitLevel_virtualizerContribution *
+                LimitLevel_virtualizerContribution;
+    }
+
+    double totalEnergyEstimation = sqrt(energyContribution + energyCross + energyBassBoost) -
+            crossCorrection;
+    ALOGV(" TOTAL energy estimation: %0.2f", totalEnergyEstimation);
 
     //roundoff
-    int maxLevelRound = (int)(energyContribution + 0.99);
+    int maxLevelRound = (int)(totalEnergyEstimation + 0.99);
     if (maxLevelRound + pContext->pBundledContext->volume > 0) {
         gainCorrection = maxLevelRound + pContext->pBundledContext->volume;
     }
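Editor's note: the limiter headroom estimate above changes from a linear sum of per-band contributions to one that squares the band coefficients, adds cross terms between adjacent EQ bands and between bass boost and the bands, and takes sqrt(total) minus a cross correction. A standalone numeric sketch of that computation, using the coefficient tables introduced in the EffectBundle.h hunk below and assuming all EQ bands at +15 dB, full bass strength, and the virtualizer enabled:

    #include <cmath>
    #include <cstdio>

    // Standalone model of the new headroom estimate: per-band energies use
    // squared coefficients, adjacent EQ bands and the bass boost add cross
    // terms, and the total is sqrt(sum) - crossCorrection.
    static const int   kBands = 5;
    static const float kBandCoeff[kBands]          = { 7.56f, 9.69f, 9.59f, 7.37f, 2.88f };
    static const float kBandCrossCoeff[kBands - 1] = { 126.0f, 115.0f, 125.0f, 104.0f };
    static const float kBassCrossCoeff[kBands]     = { 221.21f, 208.10f, 28.16f, 0.0f, 0.0f };
    static const float kBassCoeff = 7.12f;
    static const float kVirtContribution = 1.9f;

    int main() {
        float bandGaindB[kBands] = { 15, 15, 15, 15, 15 };   // example settings
        int   bassStrength = 1000;

        float energy = 0, energyCross = 0, energyBass = 0, crossCorrection = 0;
        float bandFactorSum = 0;
        for (int i = 0; i < kBands; i++) {
            float f = bandGaindB[i] / 15.0f;
            float e = f * kBandCoeff[i] * kBandCoeff[i];
            if (e > 0) energy += e;
        }
        for (int i = 0; i < kBands - 1; i++) {
            float f1 = bandGaindB[i] / 15.0f, f2 = bandGaindB[i + 1] / 15.0f;
            if (f1 > 0 && f2 > 0) {
                float c = f1 * f2 * kBandCrossCoeff[i];
                bandFactorSum += f1 * f2;
                if (c > 0) energyCross += c;
            }
        }
        bandFactorSum -= 1.0f;
        if (bandFactorSum > 0) crossCorrection = bandFactorSum * 0.7f;

        float boostFactor = bassStrength / 1000.0f;
        energy += boostFactor * kBassCoeff * kBassCoeff;
        for (int i = 0; i < kBands; i++) {
            float e = boostFactor * (bandGaindB[i] / 15.0f) * kBassCrossCoeff[i];
            if (e > 0) energyBass += e;
        }
        energy += kVirtContribution * kVirtContribution;       // virtualizer on

        double total = std::sqrt(energy + energyCross + energyBass) - crossCorrection;
        std::printf("TOTAL energy estimation: %0.2f\n", total);
        return 0;
    }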
diff --git a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
index b3071f4..9459b87 100644
--- a/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
+++ b/media/libeffects/lvm/wrapper/Bundle/EffectBundle.h
@@ -200,10 +200,16 @@
  * updated.
  */
 
-static const float LimitLevel_bandEnergyContribution[FIVEBAND_NUMBANDS] = {
-        5.0, 6.5, 6.45, 4.8, 1.7 };
+static const float LimitLevel_bandEnergyCoefficient[FIVEBAND_NUMBANDS] = {
+        7.56, 9.69, 9.59, 7.37, 2.88};
 
-static const float LimitLevel_bassBoostEnergyContribution = 6.7;
+static const float LimitLevel_bandEnergyCrossCoefficient[FIVEBAND_NUMBANDS-1] = {
+        126.0, 115.0, 125.0, 104.0 };
+
+static const float LimitLevel_bassBoostEnergyCrossCoefficient[FIVEBAND_NUMBANDS] = {
+        221.21, 208.10, 28.16, 0.0, 0.0 };
+
+static const float LimitLevel_bassBoostEnergyCoefficient = 7.12;
 
 static const float LimitLevel_virtualizerContribution = 1.9;
 
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 6c585fb..a3c3d3c 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -7,6 +7,9 @@
 LOCAL_MODULE:= libmedia_helper
 LOCAL_MODULE_TAGS := optional
 
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CLANG := true
+
 include $(BUILD_STATIC_LIBRARY)
 
 include $(CLEAR_VARS)
@@ -19,6 +22,7 @@
     IAudioTrack.cpp \
     IAudioRecord.cpp \
     ICrypto.cpp \
+    IDataSource.cpp \
     IDrm.cpp \
     IDrmClient.cpp \
     IHDCP.cpp \
@@ -36,6 +40,8 @@
     IMediaRecorder.cpp \
     IRemoteDisplay.cpp \
     IRemoteDisplayClient.cpp \
+    IResourceManagerClient.cpp \
+    IResourceManagerService.cpp \
     IStreamSource.cpp \
     MediaCodecInfo.cpp \
     Metadata.cpp \
@@ -53,6 +59,8 @@
     CharacterEncodingDetector.cpp \
     IMediaDeathNotifier.cpp \
     MediaProfiles.cpp \
+    MediaResource.cpp \
+    MediaResourcePolicy.cpp \
     IEffect.cpp \
     IEffectClient.cpp \
     AudioEffect.cpp \
@@ -61,15 +69,11 @@
     StringArray.cpp \
     AudioPolicy.cpp
 
-LOCAL_SRC_FILES += ../libnbaio/roundup.c
-
 LOCAL_SHARED_LIBRARIES := \
 	libui liblog libcutils libutils libbinder libsonivox libicuuc libicui18n libexpat \
         libcamera_client libstagefright_foundation \
         libgui libdl libaudioutils libnbaio
 
-LOCAL_STATIC_LIBRARIES += libinstantssq
-
 LOCAL_WHOLE_STATIC_LIBRARIES := libmedia_helper
 
 LOCAL_MODULE:= libmedia
@@ -83,14 +87,8 @@
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CLANG := true
+
 include $(BUILD_SHARED_LIBRARY)
 
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES += SingleStateQueue.cpp
-LOCAL_CFLAGS += -DSINGLE_STATE_QUEUE_INSTANTIATIONS='"SingleStateQueueInstantiations.cpp"'
-
-LOCAL_MODULE := libinstantssq
-LOCAL_MODULE_TAGS := optional
-
-include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index af103c1..ff82544 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -35,13 +35,14 @@
 
 // ---------------------------------------------------------------------------
 
-AudioEffect::AudioEffect()
-    : mStatus(NO_INIT)
+AudioEffect::AudioEffect(const String16& opPackageName)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
 }
 
 
 AudioEffect::AudioEffect(const effect_uuid_t *type,
+                const String16& opPackageName,
                 const effect_uuid_t *uuid,
                 int32_t priority,
                 effect_callback_t cbf,
@@ -49,12 +50,13 @@
                 int sessionId,
                 audio_io_handle_t io
                 )
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
     mStatus = set(type, uuid, priority, cbf, user, sessionId, io);
 }
 
 AudioEffect::AudioEffect(const char *typeStr,
+                const String16& opPackageName,
                 const char *uuidStr,
                 int32_t priority,
                 effect_callback_t cbf,
@@ -62,7 +64,7 @@
                 int sessionId,
                 audio_io_handle_t io
                 )
-    : mStatus(NO_INIT)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName)
 {
     effect_uuid_t type;
     effect_uuid_t *pType = NULL;
@@ -128,16 +130,18 @@
     mIEffectClient = new EffectClient(this);
 
     iEffect = audioFlinger->createEffect((effect_descriptor_t *)&mDescriptor,
-            mIEffectClient, priority, io, mSessionId, &mStatus, &mId, &enabled);
+            mIEffectClient, priority, io, mSessionId, mOpPackageName, &mStatus, &mId, &enabled);
 
     if (iEffect == 0 || (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS)) {
         ALOGE("set(): AudioFlinger could not create effect, status: %d", mStatus);
+        if (iEffect == 0) {
+            mStatus = NO_INIT;
+        }
         return mStatus;
     }
 
     mEnabled = (volatile int32_t)enabled;
 
-    mIEffect = iEffect;
     cblk = iEffect->getCblk();
     if (cblk == 0) {
         mStatus = NO_INIT;
@@ -145,6 +149,7 @@
         return mStatus;
     }
 
+    mIEffect = iEffect;
     mCblkMemory = cblk;
     mCblk = static_cast<effect_param_cblk_t*>(cblk->pointer());
     int bufOffset = ((sizeof(effect_param_cblk_t) - 1) / sizeof(int) + 1) * sizeof(int);
@@ -175,11 +180,11 @@
             mIEffect->disconnect();
             IInterface::asBinder(mIEffect)->unlinkToDeath(mIEffectClient);
         }
+        mIEffect.clear();
+        mCblkMemory.clear();
+        mIEffectClient.clear();
         IPCThreadState::self()->flushCommands();
     }
-    mIEffect.clear();
-    mIEffectClient.clear();
-    mCblkMemory.clear();
 }
 
 
@@ -486,4 +491,4 @@
 }
 
 
-}; // namespace android
+} // namespace android
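Editor's note: besides threading opPackageName through the constructors, the hunk above delays the mIEffect assignment until the control block has been validated and clears the sp<> members before flushCommands() in the destructor. A minimal standalone model of the commit-after-validation ordering, with std::shared_ptr standing in for the Binder sp<> types (the class and member names below are illustrative):

    #include <cstdio>
    #include <memory>

    // Model of the ordering change in AudioEffect::set(): the remote effect
    // interface is committed to the member only after its control block has
    // been validated, so a failed set() leaves no half-initialized state for
    // the destructor to tear down.
    struct RemoteEffect {
        std::shared_ptr<int> cblk;
        std::shared_ptr<int> getCblk() const { return cblk; }
    };

    struct EffectClient {
        std::shared_ptr<RemoteEffect> mIEffect;    // committed only on success
        std::shared_ptr<int>          mCblkMemory;

        bool set(const std::shared_ptr<RemoteEffect>& iEffect) {
            if (!iEffect) {
                return false;                      // nothing committed
            }
            std::shared_ptr<int> cblk = iEffect->getCblk();
            if (!cblk) {
                return false;                      // still nothing committed
            }
            mIEffect = iEffect;                    // commit only after validation
            mCblkMemory = cblk;
            return true;
        }
    };

    int main() {
        EffectClient c;
        std::printf("set() with no cblk: %s\n",
                c.set(std::make_shared<RemoteEffect>()) ? "ok" : "rejected");
        return 0;
    }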
diff --git a/media/libmedia/AudioParameter.cpp b/media/libmedia/AudioParameter.cpp
index 33dbf0b..8c8cf45 100644
--- a/media/libmedia/AudioParameter.cpp
+++ b/media/libmedia/AudioParameter.cpp
@@ -180,4 +180,4 @@
     }
 }
 
-};  // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libmedia/AudioPolicy.cpp
index d2d0971..9d07011 100644
--- a/media/libmedia/AudioPolicy.cpp
+++ b/media/libmedia/AudioPolicy.cpp
@@ -68,6 +68,7 @@
     mFormat.format = (audio_format_t)parcel->readInt32();
     mRouteFlags = parcel->readInt32();
     mRegistrationId = parcel->readString8();
+    mCbFlags = (uint32_t)parcel->readInt32();
     size_t size = (size_t)parcel->readInt32();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
@@ -89,6 +90,7 @@
     parcel->writeInt32(mFormat.format);
     parcel->writeInt32(mRouteFlags);
     parcel->writeString8(mRegistrationId);
+    parcel->writeInt32(mCbFlags);
     size_t size = mCriteria.size();
     if (size > MAX_CRITERIA_PER_MIX) {
         size = MAX_CRITERIA_PER_MIX;
@@ -112,4 +114,4 @@
     return NO_ERROR;
 }
 
-}; // namespace android
+} // namespace android
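Editor's note: the AudioMix parceling gains an mCbFlags field, written immediately after mRegistrationId and read back at the same position; Parcel fields are purely positional, so the read order must mirror the write order exactly. A minimal standalone model of that symmetry, with a plain int32 vector standing in for the Binder Parcel:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Positional stream: readers consume values in exactly the order written.
    struct Stream {
        std::vector<int32_t> data;
        std::size_t pos = 0;
        void writeInt32(int32_t v) { data.push_back(v); }
        int32_t readInt32()        { return data[pos++]; }
    };

    struct Mix {
        int32_t routeFlags = 0;
        int32_t cbFlags = 0;
        int32_t criteriaCount = 0;
        void write(Stream& s) const {
            s.writeInt32(routeFlags);
            s.writeInt32(cbFlags);      // new field: written right before the count
            s.writeInt32(criteriaCount);
        }
        void read(Stream& s) {
            routeFlags = s.readInt32();
            cbFlags = s.readInt32();    // must be read at the matching position
            criteriaCount = s.readInt32();
        }
    };

    int main() {
        Stream s;
        Mix out;
        out.routeFlags = 3; out.cbFlags = 1; out.criteriaCount = 2;
        out.write(s);
        Mix in;
        in.read(s);
        std::printf("cbFlags %d criteria %d\n", in.cbFlags, in.criteriaCount);
        return 0;
    }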
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index 07ca14f..011b31f 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -65,9 +65,10 @@
 
 // ---------------------------------------------------------------------------
 
-AudioRecord::AudioRecord()
-    : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
-      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT)
+AudioRecord::AudioRecord(const String16 &opPackageName)
+    : mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+      mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
 }
 
@@ -76,6 +77,7 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
+        const String16& opPackageName,
         size_t frameCount,
         callback_t cbf,
         void* user,
@@ -83,15 +85,20 @@
         int sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
+        int uid,
+        pid_t pid,
         const audio_attributes_t* pAttributes)
-    : mStatus(NO_INIT), mSessionId(AUDIO_SESSION_ALLOCATE),
+    : mStatus(NO_INIT),
+      mOpPackageName(opPackageName),
+      mSessionId(AUDIO_SESSION_ALLOCATE),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mProxy(NULL)
+      mProxy(NULL),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
             notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
-            pAttributes);
+            uid, pid, pAttributes);
 }
 
 AudioRecord::~AudioRecord()
@@ -107,12 +114,18 @@
             mAudioRecordThread->requestExitAndWait();
             mAudioRecordThread.clear();
         }
+        // No lock here: worst case we remove a NULL callback which will be a nop
+        if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+        }
         IInterface::asBinder(mAudioRecord)->unlinkToDeath(mDeathNotifier, this);
         mAudioRecord.clear();
         mCblkMemory.clear();
         mBufferMemory.clear();
         IPCThreadState::self()->flushCommands();
-        AudioSystem::releaseAudioSessionId(mSessionId, -1);
+        ALOGV("~AudioRecord, releasing session id %d",
+                mSessionId);
+        AudioSystem::releaseAudioSessionId(mSessionId, -1 /*pid*/);
     }
 }
 
@@ -129,12 +142,15 @@
         int sessionId,
         transfer_type transferType,
         audio_input_flags_t flags,
+        int uid,
+        pid_t pid,
         const audio_attributes_t* pAttributes)
 {
     ALOGV("set(): inputSource %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
-          "notificationFrames %u, sessionId %d, transferType %d, flags %#x",
+          "notificationFrames %u, sessionId %d, transferType %d, flags %#x, opPackageName %s "
+          "uid %d, pid %d",
           inputSource, sampleRate, format, channelMask, frameCount, notificationFrames,
-          sessionId, transferType, flags);
+          sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -159,8 +175,6 @@
     }
     mTransfer = transferType;
 
-    AutoMutex lock(mLock);
-
     // invariant that mAudioRecord != 0 is true only after set() returns successfully
     if (mAudioRecord != 0) {
         ALOGE("Track already in use");
@@ -189,13 +203,9 @@
     }
 
     // validate parameters
-    if (!audio_is_valid_format(format)) {
-        ALOGE("Invalid format %#x", format);
-        return BAD_VALUE;
-    }
-    // Temporary restriction: AudioFlinger currently supports 16-bit PCM only
-    if (format != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGE("Format %#x is not supported", format);
+    // AudioFlinger capture only supports linear PCM
+    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
+        ALOGE("Format %#x is not linear pcm", format);
         return BAD_VALUE;
     }
     mFormat = format;
@@ -227,16 +237,30 @@
     }
     ALOGV("set(): mSessionId %d", mSessionId);
 
+    int callingpid = IPCThreadState::self()->getCallingPid();
+    int mypid = getpid();
+    if (uid == -1 || (callingpid != mypid)) {
+        mClientUid = IPCThreadState::self()->getCallingUid();
+    } else {
+        mClientUid = uid;
+    }
+    if (pid == -1 || (callingpid != mypid)) {
+        mClientPid = callingpid;
+    } else {
+        mClientPid = pid;
+    }
+
     mFlags = flags;
     mCbf = cbf;
 
     if (cbf != NULL) {
         mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
         mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
+        // thread begins in paused state, and will not reference us until start()
     }
 
     // create the IAudioRecord
-    status_t status = openRecord_l(0 /*epoch*/);
+    status_t status = openRecord_l(0 /*epoch*/, mOpPackageName);
 
     if (status != NO_ERROR) {
         if (mAudioRecordThread != 0) {
@@ -284,9 +308,10 @@
     mNewPosition = mProxy->getPosition() + mUpdatePeriod;
     int32_t flags = android_atomic_acquire_load(&mCblk->mFlags);
 
+    mActive = true;
+
     status_t status = NO_ERROR;
     if (!(flags & CBLK_INVALID)) {
-        ALOGV("mAudioRecord->start()");
         status = mAudioRecord->start(event, triggerSession);
         if (status == DEAD_OBJECT) {
             flags |= CBLK_INVALID;
@@ -297,9 +322,9 @@
     }
 
     if (status != NO_ERROR) {
+        mActive = false;
         ALOGE("start() status %d", status);
     } else {
-        mActive = true;
         sp<AudioRecordThread> t = mAudioRecordThread;
         if (t != 0) {
             t->resume();
@@ -352,6 +377,10 @@
     mMarkerPosition = marker;
     mMarkerReached = false;
 
+    sp<AudioRecordThread> t = mAudioRecordThread;
+    if (t != 0) {
+        t->wake();
+    }
     return NO_ERROR;
 }
 
@@ -378,6 +407,10 @@
     mNewPosition = mProxy->getPosition() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
+    sp<AudioRecordThread> t = mAudioRecordThread;
+    if (t != 0) {
+        t->wake();
+    }
     return NO_ERROR;
 }
 
@@ -408,15 +441,42 @@
 uint32_t AudioRecord::getInputFramesLost() const
 {
     // no need to check mActive, because if inactive this will return 0, which is what we want
-    return AudioSystem::getInputFramesLost(getInput());
+    return AudioSystem::getInputFramesLost(getInputPrivate());
+}
+
+// ---- Explicit Routing ---------------------------------------------------
+status_t AudioRecord::setInputDevice(audio_port_handle_t deviceId) {
+    AutoMutex lock(mLock);
+    if (mSelectedDeviceId != deviceId) {
+        mSelectedDeviceId = deviceId;
+        // stop capture so that audio policy manager does not reject the new instance start request
+        // as only one capture can be active at a time.
+        if (mAudioRecord != 0 && mActive) {
+            mAudioRecord->stop();
+        }
+        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+    }
+    return NO_ERROR;
+}
+
+audio_port_handle_t AudioRecord::getInputDevice() {
+    AutoMutex lock(mLock);
+    return mSelectedDeviceId;
+}
+
+audio_port_handle_t AudioRecord::getRoutedDeviceId() {
+    AutoMutex lock(mLock);
+    if (mInput == AUDIO_IO_HANDLE_NONE) {
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+    return AudioSystem::getDeviceIdForIo(mInput);
 }
 
 // -------------------------------------------------------------------------
 
 // must be called with mLock held
-status_t AudioRecord::openRecord_l(size_t epoch)
+status_t AudioRecord::openRecord_l(size_t epoch, const String16& opPackageName)
 {
-    status_t status;
     const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
     if (audioFlinger == 0) {
         ALOGE("Could not get audioflinger");
@@ -431,12 +491,16 @@
     }
 
     // Client can only express a preference for FAST.  Server will perform additional tests.
-    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !(
-            // use case: callback transfer mode
-            (mTransfer == TRANSFER_CALLBACK) &&
+    if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !((
+            // either of these use cases:
+            // use case 1: callback transfer mode
+            (mTransfer == TRANSFER_CALLBACK) ||
+            // use case 2: obtain/release mode
+            (mTransfer == TRANSFER_OBTAIN)) &&
             // matching sample rate
             (mSampleRate == afSampleRate))) {
-        ALOGW("AUDIO_INPUT_FLAG_FAST denied by client");
+        ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, primary %u Hz",
+                mTransfer, mSampleRate, afSampleRate);
         // once denied, do not request again if IAudioRecord is re-created
         mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
     }
@@ -451,9 +515,16 @@
         }
     }
 
+    if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+
     audio_io_handle_t input;
-    status = AudioSystem::getInputForAttr(&mAttributes, &input, (audio_session_t)mSessionId,
-                                        mSampleRate, mFormat, mChannelMask, mFlags);
+    status_t status = AudioSystem::getInputForAttr(&mAttributes, &input,
+                                        (audio_session_t)mSessionId,
+                                        IPCThreadState::self()->getCallingUid(),
+                                        mSampleRate, mFormat, mChannelMask,
+                                        mFlags, mSelectedDeviceId);
 
     if (status != NO_ERROR) {
         ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
@@ -476,11 +547,14 @@
     sp<IMemory> iMem;           // for cblk
     sp<IMemory> bufferMem;
     sp<IAudioRecord> record = audioFlinger->openRecord(input,
-                                                       mSampleRate, mFormat,
+                                                       mSampleRate,
+                                                       mFormat,
                                                        mChannelMask,
+                                                       opPackageName,
                                                        &temp,
                                                        &trackFlags,
                                                        tid,
+                                                       mClientUid,
                                                        &mSessionId,
                                                        &notificationFrames,
                                                        iMem,
@@ -577,6 +651,10 @@
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioRecord)->linkToDeath(mDeathNotifier, this);
 
+    if (mDeviceCallback != 0) {
+        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+
     return NO_ERROR;
     }
 
@@ -588,15 +666,21 @@
     return status;
 }
 
-status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return BAD_VALUE;
     }
     if (mTransfer != TRANSFER_OBTAIN) {
         audioBuffer->frameCount = 0;
         audioBuffer->size = 0;
         audioBuffer->raw = NULL;
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return INVALID_OPERATION;
     }
 
@@ -615,7 +699,7 @@
         ALOGE("%s invalid waitCount %d", __func__, waitCount);
         requested = NULL;
     }
-    return obtainBuffer(audioBuffer, requested);
+    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
 }
 
 status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
@@ -684,9 +768,9 @@
     return status;
 }
 
-void AudioRecord::releaseBuffer(Buffer* audioBuffer)
+void AudioRecord::releaseBuffer(const Buffer* audioBuffer)
 {
-    // all TRANSFER_* are valid
+    // FIXME add error checking on mode, by adding an internal version
 
     size_t stepCount = audioBuffer->size / mFrameSize;
     if (stepCount == 0) {
@@ -704,7 +788,7 @@
     // the server does not automatically disable recorder on overrun, so no need to restart
 }
 
-audio_io_handle_t AudioRecord::getInput() const
+audio_io_handle_t AudioRecord::getInputPrivate() const
 {
     AutoMutex lock(mLock);
     return mInput;
@@ -712,7 +796,7 @@
 
 // -------------------------------------------------------------------------
 
-ssize_t AudioRecord::read(void* buffer, size_t userSize)
+ssize_t AudioRecord::read(void* buffer, size_t userSize, bool blocking)
 {
     if (mTransfer != TRANSFER_SYNC) {
         return INVALID_OPERATION;
@@ -731,7 +815,8 @@
     while (userSize >= mFrameSize) {
         audioBuffer.frameCount = userSize / mFrameSize;
 
-        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
+        status_t err = obtainBuffer(&audioBuffer,
+                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
         if (err < 0) {
             if (read > 0) {
                 break;
@@ -863,8 +948,11 @@
     if (!markerReached && position < markerPosition) {
         minFrames = markerPosition - position;
     }
-    if (updatePeriod > 0 && updatePeriod < minFrames) {
-        minFrames = updatePeriod;
+    if (updatePeriod > 0) {
+        uint32_t remaining = newPosition - position;
+        if (remaining < minFrames) {
+            minFrames = remaining;
+        }
     }
 
     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
@@ -990,14 +1078,13 @@
 {
     ALOGW("dead IAudioRecord, creating a new one from %s()", from);
     ++mSequence;
-    status_t result;
 
     // if the new IAudioRecord is created, openRecord_l() will modify the
     // following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
     // It will also delete the strong references on previous IAudioRecord and IMemory
     size_t position = mProxy->getPosition();
     mNewPosition = position + mUpdatePeriod;
-    result = openRecord_l(position);
+    status_t result = openRecord_l(position, mOpPackageName);
     if (result == NO_ERROR) {
         if (mActive) {
             // callback thread or sync event hasn't changed
@@ -1013,6 +1100,48 @@
     return result;
 }
 
+status_t AudioRecord::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s adding NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback == callback) {
+        ALOGW("%s adding same callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    status_t status = NO_ERROR;
+    if (mInput != AUDIO_IO_HANDLE_NONE) {
+        if (mDeviceCallback != 0) {
+            ALOGW("%s callback already present!", __FUNCTION__);
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+        }
+        status = AudioSystem::addAudioDeviceCallback(callback, mInput);
+    }
+    mDeviceCallback = callback;
+    return status;
+}
+
+status_t AudioRecord::removeAudioDeviceCallback(
+        const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s removing NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback != callback) {
+        ALOGW("%s removing different callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (mInput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+    }
+    mDeviceCallback = 0;
+    return NO_ERROR;
+}
+
 // =========================================================================
 
 void AudioRecord::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
@@ -1069,8 +1198,8 @@
     case NS_NEVER:
         return false;
     case NS_WHENEVER:
-        // FIXME increase poll interval, or make event-driven
-        ns = 1000000000LL;
+        // Event driven: call wake() when callback notifications conditions change.
+        ns = INT64_MAX;
         // fall through
     default:
         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
@@ -1103,6 +1232,21 @@
     }
 }
 
+void AudioRecord::AudioRecordThread::wake()
+{
+    AutoMutex _l(mMyLock);
+    if (!mPaused) {
+        // wake() might be called while servicing a callback - ignore the next
+        // pause time and call processAudioBuffer.
+        mIgnoreNextPausedInt = true;
+        if (mPausedInt && mPausedNs > 0) {
+            // audio record is active and internally paused with timeout.
+            mPausedInt = false;
+            mMyCond.signal();
+        }
+    }
+}
+
 void AudioRecord::AudioRecordThread::pauseInternal(nsecs_t ns)
 {
     AutoMutex _l(mMyLock);
@@ -1112,4 +1256,4 @@
 
 // -------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
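Editor's note: the AudioRecord changes above add explicit routing (setInputDevice()/getRoutedDeviceId()), per-input device callbacks, an event-driven callback-thread wake(), and a blocking flag on read(). A minimal standalone model of the rerouting path only: changing the preferred device stops the server-side capture and invalidates the control block, so the next buffer operation restores the record on the new device. The identifiers below are illustrative stand-ins, not the real AudioRecord API:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Model of the explicit-routing path: setInputDevice() flags the control
    // block invalid so the next buffer operation goes through the restore
    // path and re-opens the input on the newly selected device.
    enum { CBLK_INVALID = 1 };

    struct Recorder {
        std::atomic<int32_t> cblkFlags{0};
        int selectedDeviceId = 0;          // 0 stands in for AUDIO_PORT_HANDLE_NONE
        bool active = false;

        void setInputDevice(int deviceId) {
            if (selectedDeviceId != deviceId) {
                selectedDeviceId = deviceId;
                if (active) {
                    // the real code stops the server-side capture here so the
                    // policy manager accepts the re-start on the new device
                    std::printf("stopping server-side capture before rerouting\n");
                }
                cblkFlags.fetch_or(CBLK_INVALID);
            }
        }

        void obtainBuffer() {
            if (cblkFlags.load() & CBLK_INVALID) {
                std::printf("restoring record on device %d\n", selectedDeviceId);
                cblkFlags.fetch_and(~CBLK_INVALID);   // re-opened: clear the flag
            }
            std::printf("capturing on device %d\n", selectedDeviceId);
        }
    };

    int main() {
        Recorder r;
        r.active = true;
        r.obtainBuffer();
        r.setInputDevice(42);              // hypothetical port handle
        r.obtainBuffer();
        return 0;
    }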
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 9cae21c..3bfb09a 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -32,23 +32,12 @@
 
 // client singleton for AudioFlinger binder interface
 Mutex AudioSystem::gLock;
-Mutex AudioSystem::gLockCache;
 Mutex AudioSystem::gLockAPS;
-Mutex AudioSystem::gLockAPC;
 sp<IAudioFlinger> AudioSystem::gAudioFlinger;
 sp<AudioSystem::AudioFlingerClient> AudioSystem::gAudioFlingerClient;
 audio_error_callback AudioSystem::gAudioErrorCallback = NULL;
+dynamic_policy_callback AudioSystem::gDynPolicyCallback = NULL;
 
-// Cached values for output handles
-DefaultKeyedVector<audio_io_handle_t, AudioSystem::OutputDescriptor *> AudioSystem::gOutputs(NULL);
-
-// Cached values for recording queries, all protected by gLock
-uint32_t AudioSystem::gPrevInSamplingRate;
-audio_format_t AudioSystem::gPrevInFormat;
-audio_channel_mask_t AudioSystem::gPrevInChannelMask;
-size_t AudioSystem::gInBuffSize = 0;    // zero indicates cache is invalid
-
-sp<AudioSystem::AudioPortCallback> AudioSystem::gAudioPortCallback;
 
 // establish binder interface to AudioFlinger service
 const sp<IAudioFlinger> AudioSystem::get_audio_flinger()
@@ -87,6 +76,25 @@
     return af;
 }
 
+const sp<AudioSystem::AudioFlingerClient> AudioSystem::getAudioFlingerClient()
+{
+    // calling get_audio_flinger() will initialize gAudioFlingerClient if needed
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return 0;
+    Mutex::Autolock _l(gLock);
+    return gAudioFlingerClient;
+}
+
+sp<AudioIoDescriptor> AudioSystem::getIoDescriptor(audio_io_handle_t ioHandle)
+{
+    sp<AudioIoDescriptor> desc;
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc != 0) {
+        desc = afc->getIoDescriptor(ioHandle);
+    }
+    return desc;
+}
+
 /* static */ status_t AudioSystem::checkAudioFlinger()
 {
     if (defaultServiceManager()->checkService(String16("media.audio_flinger")) != 0) {
@@ -260,18 +268,13 @@
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-
-    Mutex::Autolock _l(gLockCache);
-
-    OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == NULL) {
+    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
+    if (outputDesc == 0) {
         ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
-        gLockCache.unlock();
         *samplingRate = af->sampleRate(output);
-        gLockCache.lock();
     } else {
         ALOGV("getOutputSamplingRate() reading from output desc");
-        *samplingRate = outputDesc->samplingRate;
+        *samplingRate = outputDesc->mSamplingRate;
     }
     if (*samplingRate == 0) {
         ALOGE("AudioSystem::getSamplingRate failed for output %d", output);
@@ -304,16 +307,11 @@
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-
-    Mutex::Autolock _l(gLockCache);
-
-    OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == NULL) {
-        gLockCache.unlock();
+    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
+    if (outputDesc == 0) {
         *frameCount = af->frameCount(output);
-        gLockCache.lock();
     } else {
-        *frameCount = outputDesc->frameCount;
+        *frameCount = outputDesc->mFrameCount;
     }
     if (*frameCount == 0) {
         ALOGE("AudioSystem::getFrameCount failed for output %d", output);
@@ -346,16 +344,11 @@
 {
     const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
     if (af == 0) return PERMISSION_DENIED;
-
-    Mutex::Autolock _l(gLockCache);
-
-    OutputDescriptor *outputDesc = AudioSystem::gOutputs.valueFor(output);
-    if (outputDesc == NULL) {
-        gLockCache.unlock();
+    sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
+    if (outputDesc == 0) {
         *latency = af->latency(output);
-        gLockCache.lock();
     } else {
-        *latency = outputDesc->latency;
+        *latency = outputDesc->mLatency;
     }
 
     ALOGV("getLatency() output %d, latency %d", output, *latency);
@@ -366,34 +359,11 @@
 status_t AudioSystem::getInputBufferSize(uint32_t sampleRate, audio_format_t format,
         audio_channel_mask_t channelMask, size_t* buffSize)
 {
-    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
-    if (af == 0) {
-        return PERMISSION_DENIED;
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc == 0) {
+        return NO_INIT;
     }
-    Mutex::Autolock _l(gLockCache);
-    // Do we have a stale gInBufferSize or are we requesting the input buffer size for new values
-    size_t inBuffSize = gInBuffSize;
-    if ((inBuffSize == 0) || (sampleRate != gPrevInSamplingRate) || (format != gPrevInFormat)
-        || (channelMask != gPrevInChannelMask)) {
-        gLockCache.unlock();
-        inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
-        gLockCache.lock();
-        if (inBuffSize == 0) {
-            ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
-                    sampleRate, format, channelMask);
-            return BAD_VALUE;
-        }
-        // A benign race is possible here: we could overwrite a fresher cache entry
-        // save the request params
-        gPrevInSamplingRate = sampleRate;
-        gPrevInFormat = format;
-        gPrevInChannelMask = channelMask;
-
-        gInBuffSize = inBuffSize;
-    }
-    *buffSize = inBuffSize;
-
-    return NO_ERROR;
+    return afc->getInputBufferSize(sampleRate, format, channelMask, buffSize);
 }
 
 status_t AudioSystem::setVoiceVolume(float value)
@@ -453,8 +423,26 @@
     return af->getAudioHwSyncForSession(sessionId);
 }
 
+status_t AudioSystem::systemReady()
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return NO_INIT;
+    return af->systemReady();
+}
+
 // ---------------------------------------------------------------------------
 
+
+void AudioSystem::AudioFlingerClient::clearIoCache()
+{
+    Mutex::Autolock _l(mLock);
+    mIoDescriptors.clear();
+    mInBuffSize = 0;
+    mInSamplingRate = 0;
+    mInFormat = AUDIO_FORMAT_DEFAULT;
+    mInChannelMask = AUDIO_CHANNEL_NONE;
+}
+
 void AudioSystem::AudioFlingerClient::binderDied(const wp<IBinder>& who __unused)
 {
     audio_error_callback cb = NULL;
@@ -464,11 +452,8 @@
         cb = gAudioErrorCallback;
     }
 
-    {
-        // clear output handles and stream to output map caches
-        Mutex::Autolock _l(gLockCache);
-        AudioSystem::gOutputs.clear();
-    }
+    // clear output handles and stream to output map caches
+    clearIoCache();
 
     if (cb) {
         cb(DEAD_OBJECT);
@@ -476,76 +461,191 @@
     ALOGW("AudioFlinger server died!");
 }
 
-void AudioSystem::AudioFlingerClient::ioConfigChanged(int event, audio_io_handle_t ioHandle,
-        const void *param2) {
+void AudioSystem::AudioFlingerClient::ioConfigChanged(audio_io_config_event event,
+                                                      const sp<AudioIoDescriptor>& ioDesc) {
     ALOGV("ioConfigChanged() event %d", event);
-    const OutputDescriptor *desc;
-    audio_stream_type_t stream;
 
-    if (ioHandle == AUDIO_IO_HANDLE_NONE) return;
+    if (ioDesc == 0 || ioDesc->mIoHandle == AUDIO_IO_HANDLE_NONE) return;
 
-    Mutex::Autolock _l(AudioSystem::gLockCache);
+    audio_port_handle_t deviceId = AUDIO_PORT_HANDLE_NONE;
+    Vector < sp<AudioDeviceCallback> > callbacks;
 
-    switch (event) {
-    case STREAM_CONFIG_CHANGED:
-        break;
-    case OUTPUT_OPENED: {
-        if (gOutputs.indexOfKey(ioHandle) >= 0) {
-            ALOGV("ioConfigChanged() opening already existing output! %d", ioHandle);
-            break;
-        }
-        if (param2 == NULL) break;
-        desc = (const OutputDescriptor *)param2;
+    {
+        Mutex::Autolock _l(mLock);
 
-        OutputDescriptor *outputDesc =  new OutputDescriptor(*desc);
-        gOutputs.add(ioHandle, outputDesc);
-        ALOGV("ioConfigChanged() new output samplingRate %u, format %#x channel mask %#x frameCount %zu "
-                "latency %d",
-                outputDesc->samplingRate, outputDesc->format, outputDesc->channelMask,
-                outputDesc->frameCount, outputDesc->latency);
+        switch (event) {
+        case AUDIO_OUTPUT_OPENED:
+        case AUDIO_INPUT_OPENED: {
+            sp<AudioIoDescriptor> oldDesc = getIoDescriptor(ioDesc->mIoHandle);
+            if (oldDesc == 0) {
+                mIoDescriptors.add(ioDesc->mIoHandle, ioDesc);
+            } else {
+                deviceId = oldDesc->getDeviceId();
+                mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+            }
+
+            if (ioDesc->getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
+                deviceId = ioDesc->getDeviceId();
+                ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
+                if (ioIndex >= 0) {
+                    callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+                }
+            }
+            ALOGV("ioConfigChanged() new %s opened %d samplingRate %u, format %#x channel mask %#x "
+                    "frameCount %zu deviceId %d", event == AUDIO_OUTPUT_OPENED ? "output" : "input",
+                    ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat, ioDesc->mChannelMask,
+                    ioDesc->mFrameCount, ioDesc->getDeviceId());
+            } break;
+        case AUDIO_OUTPUT_CLOSED:
+        case AUDIO_INPUT_CLOSED: {
+            if (getIoDescriptor(ioDesc->mIoHandle) == 0) {
+                ALOGW("ioConfigChanged() closing unknown %s %d",
+                      event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+                break;
+            }
+            ALOGV("ioConfigChanged() %s %d closed",
+                  event == AUDIO_OUTPUT_CLOSED ? "output" : "input", ioDesc->mIoHandle);
+
+            mIoDescriptors.removeItem(ioDesc->mIoHandle);
+            mAudioDeviceCallbacks.removeItem(ioDesc->mIoHandle);
+            } break;
+
+        case AUDIO_OUTPUT_CONFIG_CHANGED:
+        case AUDIO_INPUT_CONFIG_CHANGED: {
+            sp<AudioIoDescriptor> oldDesc = getIoDescriptor(ioDesc->mIoHandle);
+            if (oldDesc == 0) {
+                ALOGW("ioConfigChanged() modifying unknown output! %d", ioDesc->mIoHandle);
+                break;
+            }
+
+            deviceId = oldDesc->getDeviceId();
+            mIoDescriptors.replaceValueFor(ioDesc->mIoHandle, ioDesc);
+
+            if (deviceId != ioDesc->getDeviceId()) {
+                deviceId = ioDesc->getDeviceId();
+                ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(ioDesc->mIoHandle);
+                if (ioIndex >= 0) {
+                    callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+                }
+            }
+            ALOGV("ioConfigChanged() new config for %s %d samplingRate %u, format %#x "
+                    "channel mask %#x frameCount %zu deviceId %d",
+                    event == AUDIO_OUTPUT_CONFIG_CHANGED ? "output" : "input",
+                    ioDesc->mIoHandle, ioDesc->mSamplingRate, ioDesc->mFormat,
+                    ioDesc->mChannelMask, ioDesc->mFrameCount, ioDesc->getDeviceId());
+
         } break;
-    case OUTPUT_CLOSED: {
-        if (gOutputs.indexOfKey(ioHandle) < 0) {
-            ALOGW("ioConfigChanged() closing unknown output! %d", ioHandle);
-            break;
         }
-        ALOGV("ioConfigChanged() output %d closed", ioHandle);
-
-        gOutputs.removeItem(ioHandle);
-        } break;
-
-    case OUTPUT_CONFIG_CHANGED: {
-        int index = gOutputs.indexOfKey(ioHandle);
-        if (index < 0) {
-            ALOGW("ioConfigChanged() modifying unknown output! %d", ioHandle);
-            break;
-        }
-        if (param2 == NULL) break;
-        desc = (const OutputDescriptor *)param2;
-
-        ALOGV("ioConfigChanged() new config for output %d samplingRate %u, format %#x channel mask %#x "
-                "frameCount %zu latency %d",
-                ioHandle, desc->samplingRate, desc->format,
-                desc->channelMask, desc->frameCount, desc->latency);
-        OutputDescriptor *outputDesc = gOutputs.valueAt(index);
-        delete outputDesc;
-        outputDesc =  new OutputDescriptor(*desc);
-        gOutputs.replaceValueFor(ioHandle, outputDesc);
-    } break;
-    case INPUT_OPENED:
-    case INPUT_CLOSED:
-    case INPUT_CONFIG_CHANGED:
-        break;
-
+    }
+    // callbacks.size() != 0 =>  ioDesc->mIoHandle and deviceId are valid
+    for (size_t i = 0; i < callbacks.size(); i++) {
+        callbacks[i]->onAudioDeviceUpdate(ioDesc->mIoHandle, deviceId);
     }
 }
 
-void AudioSystem::setErrorCallback(audio_error_callback cb)
+status_t AudioSystem::AudioFlingerClient::getInputBufferSize(
+                                                uint32_t sampleRate, audio_format_t format,
+                                                audio_channel_mask_t channelMask, size_t* buffSize)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) {
+        return PERMISSION_DENIED;
+    }
+    Mutex::Autolock _l(mLock);
+    // Do we have a stale mInBuffSize or are we requesting the input buffer size for new values
+    if ((mInBuffSize == 0) || (sampleRate != mInSamplingRate) || (format != mInFormat)
+        || (channelMask != mInChannelMask)) {
+        size_t inBuffSize = af->getInputBufferSize(sampleRate, format, channelMask);
+        if (inBuffSize == 0) {
+            ALOGE("AudioSystem::getInputBufferSize failed sampleRate %d format %#x channelMask %x",
+                    sampleRate, format, channelMask);
+            return BAD_VALUE;
+        }
+        // A benign race is possible here: we could overwrite a fresher cache entry
+        // save the request params
+        mInSamplingRate = sampleRate;
+        mInFormat = format;
+        mInChannelMask = channelMask;
+
+        mInBuffSize = inBuffSize;
+    }
+
+    *buffSize = mInBuffSize;
+
+    return NO_ERROR;
+}
+
+sp<AudioIoDescriptor> AudioSystem::AudioFlingerClient::getIoDescriptor(audio_io_handle_t ioHandle)
+{
+    sp<AudioIoDescriptor> desc;
+    ssize_t index = mIoDescriptors.indexOfKey(ioHandle);
+    if (index >= 0) {
+        desc = mIoDescriptors.valueAt(index);
+    }
+    return desc;
+}
+
+status_t AudioSystem::AudioFlingerClient::addAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    Mutex::Autolock _l(mLock);
+    Vector < sp<AudioDeviceCallback> > callbacks;
+    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+    if (ioIndex >= 0) {
+        callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+    }
+
+    for (size_t cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
+        if (callbacks[cbIndex] == callback) {
+            return INVALID_OPERATION;
+        }
+    }
+    callbacks.add(callback);
+
+    mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+    return NO_ERROR;
+}
+
+status_t AudioSystem::AudioFlingerClient::removeAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    Mutex::Autolock _l(mLock);
+    ssize_t ioIndex = mAudioDeviceCallbacks.indexOfKey(audioIo);
+    if (ioIndex < 0) {
+        return INVALID_OPERATION;
+    }
+    Vector < sp<AudioDeviceCallback> > callbacks = mAudioDeviceCallbacks.valueAt(ioIndex);
+
+    size_t cbIndex;
+    for (cbIndex = 0; cbIndex < callbacks.size(); cbIndex++) {
+        if (callbacks[cbIndex] == callback) {
+            break;
+        }
+    }
+    if (cbIndex == callbacks.size()) {
+        return INVALID_OPERATION;
+    }
+    callbacks.removeAt(cbIndex);
+    if (callbacks.size() != 0) {
+        mAudioDeviceCallbacks.replaceValueFor(audioIo, callbacks);
+    } else {
+        mAudioDeviceCallbacks.removeItem(audioIo);
+    }
+    return NO_ERROR;
+}
+
+/* static */ void AudioSystem::setErrorCallback(audio_error_callback cb)
 {
     Mutex::Autolock _l(gLock);
     gAudioErrorCallback = cb;
 }
 
+/*static*/ void AudioSystem::setDynPolicyCallback(dynamic_policy_callback cb)
+{
+    Mutex::Autolock _l(gLock);
+    gDynPolicyCallback = cb;
+}
+
 // client singleton for AudioPolicyService binder interface
 // protected by gLockAPS
 sp<IAudioPolicyService> AudioSystem::gAudioPolicyService;
@@ -590,18 +690,22 @@
 
 status_t AudioSystem::setDeviceConnectionState(audio_devices_t device,
                                                audio_policy_dev_state_t state,
-                                               const char *device_address)
+                                               const char *device_address,
+                                               const char *device_name)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     const char *address = "";
+    const char *name = "";
 
     if (aps == 0) return PERMISSION_DENIED;
 
     if (device_address != NULL) {
         address = device_address;
     }
-
-    return aps->setDeviceConnectionState(device, state, address);
+    if (device_name != NULL) {
+        name = device_name;
+    }
+    return aps->setDeviceConnectionState(device, state, address, name);
 }
 
 audio_policy_dev_state_t AudioSystem::getDeviceConnectionState(audio_devices_t device,
@@ -653,17 +757,19 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
                                         audio_output_flags_t flags,
+                                        audio_port_handle_t selectedDeviceId,
                                         const audio_offload_info_t *offloadInfo)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
-    return aps->getOutputForAttr(attr, output, session, stream,
+    return aps->getOutputForAttr(attr, output, session, stream, uid,
                                  samplingRate, format, channelMask,
-                                 flags, offloadInfo);
+                                 flags, selectedDeviceId, offloadInfo);
 }
 
 status_t AudioSystem::startOutput(audio_io_handle_t output,
@@ -696,14 +802,17 @@
 status_t AudioSystem::getInputForAttr(const audio_attributes_t *attr,
                                 audio_io_handle_t *input,
                                 audio_session_t session,
+                                uid_t uid,
                                 uint32_t samplingRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
-                                audio_input_flags_t flags)
+                                audio_input_flags_t flags,
+                                audio_port_handle_t selectedDeviceId)
 {
     const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
     if (aps == 0) return NO_INIT;
-    return aps->getInputForAttr(attr, input, session, samplingRate, format, channelMask, flags);
+    return aps->getInputForAttr(
+            attr, input, session, uid, samplingRate, format, channelMask, flags, selectedDeviceId);
 }
 
 status_t AudioSystem::startInput(audio_io_handle_t input,
@@ -858,18 +967,16 @@
     // called by restoreTrack_l(), which needs new IAudioFlinger and IAudioPolicyService instances
     ALOGV("clearAudioConfigCache()");
     {
-        Mutex::Autolock _l(gLockCache);
-        gOutputs.clear();
-    }
-    {
         Mutex::Autolock _l(gLock);
+        if (gAudioFlingerClient != 0) {
+            gAudioFlingerClient->clearIoCache();
+        }
         gAudioFlinger.clear();
     }
     {
         Mutex::Autolock _l(gLockAPS);
         gAudioPolicyService.clear();
     }
-    // Do not clear gAudioPortCallback
 }
 
 bool AudioSystem::isOffloadSupported(const audio_offload_info_t& info)
@@ -929,10 +1036,75 @@
     return aps->setAudioPortConfig(config);
 }
 
-void AudioSystem::setAudioPortCallback(sp<AudioPortCallback> callBack)
+status_t AudioSystem::addAudioPortCallback(const sp<AudioPortCallback>& callback)
 {
-    Mutex::Autolock _l(gLockAPC);
-    gAudioPortCallback = callBack;
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    Mutex::Autolock _l(gLockAPS);
+    if (gAudioPolicyServiceClient == 0) {
+        return NO_INIT;
+    }
+    int ret = gAudioPolicyServiceClient->addAudioPortCallback(callback);
+    if (ret == 1) {
+        aps->setAudioPortCallbacksEnabled(true);
+    }
+    return (ret < 0) ? INVALID_OPERATION : NO_ERROR;
+}
+
+/*static*/
+status_t AudioSystem::removeAudioPortCallback(const sp<AudioPortCallback>& callback)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+
+    Mutex::Autolock _l(gLockAPS);
+    if (gAudioPolicyServiceClient == 0) {
+        return NO_INIT;
+    }
+    int ret = gAudioPolicyServiceClient->removeAudioPortCallback(callback);
+    if (ret == 0) {
+        aps->setAudioPortCallbacksEnabled(false);
+    }
+    return (ret < 0) ? INVALID_OPERATION : NO_ERROR;
+}
+
+status_t AudioSystem::addAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc == 0) {
+        return NO_INIT;
+    }
+    status_t status = afc->addAudioDeviceCallback(callback, audioIo);
+    if (status == NO_ERROR) {
+        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+        if (af != 0) {
+            af->registerClient(afc);
+        }
+    }
+    return status;
+}
+
+status_t AudioSystem::removeAudioDeviceCallback(
+        const sp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
+{
+    const sp<AudioFlingerClient> afc = getAudioFlingerClient();
+    if (afc == 0) {
+        return NO_INIT;
+    }
+    return afc->removeAudioDeviceCallback(callback, audioIo);
+}
+
+audio_port_handle_t AudioSystem::getDeviceIdForIo(audio_io_handle_t audioIo)
+{
+    const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+    if (af == 0) return PERMISSION_DENIED;
+    const sp<AudioIoDescriptor> desc = getIoDescriptor(audioIo);
+    if (desc == 0) {
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+    return desc->getDeviceId();
 }
 
 status_t AudioSystem::acquireSoundTriggerSession(audio_session_t *session,
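The addAudioPortCallback()/removeAudioPortCallback() pair above keeps the service-side notifications enabled only while at least one client callback is registered: setAudioPortCallbacksEnabled(true) is issued when the list grows to its first entry and setAudioPortCallbacksEnabled(false) when the last entry is removed. A minimal standalone sketch of that reference-counted toggle, using hypothetical Registry/Service types that stand in for AudioPolicyServiceClient and IAudioPolicyService:

    #include <algorithm>
    #include <functional>
    #include <memory>
    #include <mutex>
    #include <vector>

    // Hypothetical stand-in for the policy service proxy.
    struct Service {
        void setCallbacksEnabled(bool enabled) { notificationsEnabled = enabled; }
        bool notificationsEnabled = false;
    };

    // Stand-in for the per-process callback list kept by AudioPolicyServiceClient.
    class Registry {
    public:
        using Callback = std::shared_ptr<std::function<void()>>;

        // Returns the new size, or -1 if already registered (same convention as the patch).
        int add(const Callback& cb) {
            std::lock_guard<std::mutex> lock(mLock);
            if (std::find(mCallbacks.begin(), mCallbacks.end(), cb) != mCallbacks.end()) {
                return -1;
            }
            mCallbacks.push_back(cb);
            return (int)mCallbacks.size();
        }

        // Returns the remaining size, or -1 if the callback was never registered.
        int remove(const Callback& cb) {
            std::lock_guard<std::mutex> lock(mLock);
            auto it = std::find(mCallbacks.begin(), mCallbacks.end(), cb);
            if (it == mCallbacks.end()) {
                return -1;
            }
            mCallbacks.erase(it);
            return (int)mCallbacks.size();
        }

    private:
        std::mutex mLock;
        std::vector<Callback> mCallbacks;
    };

    // Caller-side policy: enable notifications on the first add, disable on the last remove.
    void addWithToggle(Registry& reg, Service& svc, const Registry::Callback& cb) {
        if (reg.add(cb) == 1) svc.setCallbacksEnabled(true);
    }
    void removeWithToggle(Registry& reg, Service& svc, const Registry::Callback& cb) {
        if (reg.remove(cb) == 0) svc.setCallbacksEnabled(false);
    }
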
@@ -965,14 +1137,92 @@
     return aps->registerPolicyMixes(mixes, registration);
 }
 
+status_t AudioSystem::startAudioSource(const struct audio_port_config *source,
+                                       const audio_attributes_t *attributes,
+                                       audio_io_handle_t *handle)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->startAudioSource(source, attributes, handle);
+}
+
+status_t AudioSystem::stopAudioSource(audio_io_handle_t handle)
+{
+    const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+    if (aps == 0) return PERMISSION_DENIED;
+    return aps->stopAudioSource(handle);
+}
+
 // ---------------------------------------------------------------------------
 
+int AudioSystem::AudioPolicyServiceClient::addAudioPortCallback(
+        const sp<AudioPortCallback>& callback)
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+        if (mAudioPortCallbacks[i] == callback) {
+            return -1;
+        }
+    }
+    mAudioPortCallbacks.add(callback);
+    return mAudioPortCallbacks.size();
+}
+
+int AudioSystem::AudioPolicyServiceClient::removeAudioPortCallback(
+        const sp<AudioPortCallback>& callback)
+{
+    Mutex::Autolock _l(mLock);
+    size_t i;
+    for (i = 0; i < mAudioPortCallbacks.size(); i++) {
+        if (mAudioPortCallbacks[i] == callback) {
+            break;
+        }
+    }
+    if (i == mAudioPortCallbacks.size()) {
+        return -1;
+    }
+    mAudioPortCallbacks.removeAt(i);
+    return mAudioPortCallbacks.size();
+}
+
+
+void AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate()
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+        mAudioPortCallbacks[i]->onAudioPortListUpdate();
+    }
+}
+
+void AudioSystem::AudioPolicyServiceClient::onAudioPatchListUpdate()
+{
+    Mutex::Autolock _l(mLock);
+    for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+        mAudioPortCallbacks[i]->onAudioPatchListUpdate();
+    }
+}
+
+void AudioSystem::AudioPolicyServiceClient::onDynamicPolicyMixStateUpdate(
+        String8 regId, int32_t state)
+{
+    ALOGV("AudioPolicyServiceClient::onDynamicPolicyMixStateUpdate(%s, %d)", regId.string(), state);
+    dynamic_policy_callback cb = NULL;
+    {
+        Mutex::Autolock _l(AudioSystem::gLock);
+        cb = gDynPolicyCallback;
+    }
+
+    if (cb != NULL) {
+        cb(DYNAMIC_POLICY_EVENT_MIX_STATE_UPDATE, regId, state);
+    }
+}
+
 void AudioSystem::AudioPolicyServiceClient::binderDied(const wp<IBinder>& who __unused)
 {
     {
-        Mutex::Autolock _l(gLockAPC);
-        if (gAudioPortCallback != 0) {
-            gAudioPortCallback->onServiceDied();
+        Mutex::Autolock _l(mLock);
+        for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
+            mAudioPortCallbacks[i]->onServiceDied();
         }
     }
     {
@@ -983,20 +1233,4 @@
     ALOGW("AudioPolicyService server died!");
 }
 
-void AudioSystem::AudioPolicyServiceClient::onAudioPortListUpdate()
-{
-    Mutex::Autolock _l(gLockAPC);
-    if (gAudioPortCallback != 0) {
-        gAudioPortCallback->onAudioPortListUpdate();
-    }
-}
-
-void AudioSystem::AudioPolicyServiceClient::onAudioPatchListUpdate()
-{
-    Mutex::Autolock _l(gLockAPC);
-    if (gAudioPortCallback != 0) {
-        gAudioPortCallback->onAudioPatchListUpdate();
-    }
-}
-
-}; // namespace android
+} // namespace android
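onDynamicPolicyMixStateUpdate() above snapshots the global callback pointer while holding gLock and invokes it only after releasing the lock, so a callback that re-enters AudioSystem cannot deadlock on the same mutex. A minimal sketch of that pattern with placeholder names (gCallback/notifyMixState are illustrative, not the patch's symbols):

    #include <mutex>

    using dynamic_policy_callback_t = void (*)(int event, const char* regId, int state);

    static std::mutex gLock;
    static dynamic_policy_callback_t gCallback = nullptr;  // registered elsewhere

    void notifyMixState(int event, const char* regId, int state) {
        dynamic_policy_callback_t cb = nullptr;
        {
            std::lock_guard<std::mutex> lock(gLock);  // snapshot the pointer under the lock
            cb = gCallback;
        }
        // Invoke outside the lock: the callback may call back into this module.
        if (cb != nullptr) {
            cb(event, regId, state);
        }
    }
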
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index 735db5c..444f4d8 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -33,11 +33,28 @@
 
 #define WAIT_PERIOD_MS                  10
 #define WAIT_STREAM_END_TIMEOUT_SEC     120
-
+static const int kMaxLoopCountNotifications = 32;
 
 namespace android {
 // ---------------------------------------------------------------------------
 
+// TODO: Move to a separate .h
+
+template <typename T>
+static inline const T &min(const T &x, const T &y) {
+    return x < y ? x : y;
+}
+
+template <typename T>
+static inline const T &max(const T &x, const T &y) {
+    return x > y ? x : y;
+}
+
+static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
+{
+    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
+}
+
 static int64_t convertTimespecToUs(const struct timespec &tv)
 {
     return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
@@ -51,6 +68,42 @@
     return convertTimespecToUs(tv);
 }
 
+// FIXME: we don't use the pitch setting in the time stretcher (not working);
+// instead we emulate it using our sample rate converter.
+static const bool kFixPitch = true; // enable pitch fix
+static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
+{
+    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
+}
+
+static inline float adjustSpeed(float speed, float pitch)
+{
+    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
+}
+
+static inline float adjustPitch(float pitch)
+{
+    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
+}
+
+// Must match similar computation in createTrack_l in Threads.cpp.
+// TODO: Move to a common library
+static size_t calculateMinFrameCount(
+        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
+        uint32_t sampleRate, float speed)
+{
+    // Ensure that buffer depth covers at least audio hardware latency
+    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
+    if (minBufCount < 2) {
+        minBufCount = 2;
+    }
+    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
+            "sampleRate %u  speed %f  minBufCount: %u",
+            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
+    return minBufCount * sourceFramesNeededWithTimestretch(
+            sampleRate, afFrameCount, afSampleRate, speed);
+}
+
 // static
 status_t AudioTrack::getMinFrameCount(
         size_t* frameCount,
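The kFixPitch helpers introduced above emulate pitch by scaling the sample rate up by the pitch factor while dividing the time-stretch speed by the same factor, so the net consumption rate (sampleRate * speed) is preserved and the stretcher itself runs at normal pitch. A small self-contained check of that identity; the AUDIO_TIMESTRETCH_PITCH_* constants are replaced by local stand-in values:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Local stand-ins for the audio.h constants referenced by the patch.
    static const float kPitchMinDelta = 0.0001f;  // ~AUDIO_TIMESTRETCH_PITCH_MIN_DELTA
    static const float kPitchNormal   = 1.0f;     // ~AUDIO_TIMESTRETCH_PITCH_NORMAL

    static uint32_t adjustSampleRate(uint32_t sampleRate, float pitch) {
        return (uint32_t)(sampleRate * pitch + 0.5f);
    }
    static float adjustSpeed(float speed, float pitch) {
        return speed / std::max(pitch, kPitchMinDelta);
    }
    static float adjustPitch(float /*pitch*/) { return kPitchNormal; }

    int main() {
        const uint32_t rate = 48000;
        const float speed = 1.5f, pitch = 1.25f;
        const uint32_t effRate  = adjustSampleRate(rate, pitch);  // 60000
        const float    effSpeed = adjustSpeed(speed, pitch);      // 1.2
        // Net rate is unchanged: 60000 * 1.2 == 48000 * 1.5 == 72000 frames/s.
        assert((uint32_t)(effRate * effSpeed + 0.5f) == (uint32_t)(rate * speed + 0.5f));
        // ...and the stretcher is asked for normal pitch.
        assert(adjustPitch(pitch) == kPitchNormal);
        return 0;
    }
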
@@ -61,12 +114,11 @@
         return BAD_VALUE;
     }
 
-    // FIXME merge with similar code in createTrack_l(), except we're missing
-    //       some information here that is available in createTrack_l():
+    // FIXME handle in server, like createTrack_l(), possible missing info:
     //          audio_io_handle_t output
     //          audio_format_t format
     //          audio_channel_mask_t channelMask
-    //          audio_output_flags_t flags
+    //          audio_output_flags_t flags (FAST)
     uint32_t afSampleRate;
     status_t status;
     status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
@@ -90,23 +142,20 @@
         return status;
     }
 
-    // Ensure that buffer depth covers at least audio hardware latency
-    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
-    if (minBufCount < 2) {
-        minBufCount = 2;
-    }
+    // When called from createTrack, speed is 1.0f (normal speed).
+    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
+    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
 
-    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
-            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
-    // The formula above should always produce a non-zero value, but return an error
-    // in the unlikely event that it does not, as that's part of the API contract.
+    // The formula above should always produce a non-zero value under normal circumstances:
+    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
+    // Return error in the unlikely event that it does not, as that's part of the API contract.
     if (*frameCount == 0) {
-        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
+        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                 streamType, sampleRate);
         return BAD_VALUE;
     }
-    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
-            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
+    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
+            *frameCount, afFrameCount, afSampleRate, afLatency);
     return NO_ERROR;
 }
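getMinFrameCount() now delegates to calculateMinFrameCount(), which covers the reported hardware latency with at least two mixer buffers and then scales to source frames at the track's rate and speed. A rough worked example under stated assumptions; the real sourceFramesNeededWithTimestretch() helper adds extra headroom, so the plain ratio below is only an approximation:

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for sourceFramesNeededWithTimestretch(): source frames
    // needed to produce dstFrames of output (the AOSP helper adds more margin).
    static size_t sourceFramesApprox(uint32_t srcRate, size_t dstFrames,
                                     uint32_t dstRate, float speed) {
        return (size_t)((double)dstFrames * srcRate * speed / dstRate) + 1;
    }

    static size_t minFrameCountApprox(uint32_t afLatencyMs, uint32_t afFrameCount,
                                      uint32_t afSampleRate, uint32_t sampleRate,
                                      float speed) {
        uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
        if (minBufCount < 2) {
            minBufCount = 2;
        }
        return minBufCount * sourceFramesApprox(sampleRate, afFrameCount, afSampleRate, speed);
    }

    int main() {
        // Example: 96 ms latency, 960-frame (20 ms) mix buffer at 48 kHz,
        // 44.1 kHz track at normal speed.
        // minBufCount = 96 / 20 = 4; ~883 source frames per mix buffer -> ~3532 frames.
        printf("min frame count ~ %zu\n", minFrameCountApprox(96, 960, 48000, 44100, 1.0f));
        return 0;
    }
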
 
@@ -117,7 +166,8 @@
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0)
+      mPausedPosition(0),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
     mAttributes.usage = AUDIO_USAGE_UNKNOWN;
@@ -140,17 +190,19 @@
         const audio_offload_info_t *offloadInfo,
         int uid,
         pid_t pid,
-        const audio_attributes_t* pAttributes)
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0)
+      mPausedPosition(0),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             frameCount, flags, cbf, user, notificationFrames,
             0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
-            offloadInfo, uid, pid, pAttributes);
+            offloadInfo, uid, pid, pAttributes, doNotReconnect);
 }
 
 AudioTrack::AudioTrack(
@@ -168,17 +220,19 @@
         const audio_offload_info_t *offloadInfo,
         int uid,
         pid_t pid,
-        const audio_attributes_t* pAttributes)
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect)
     : mStatus(NO_INIT),
       mIsTimed(false),
       mPreviousPriority(ANDROID_PRIORITY_NORMAL),
       mPreviousSchedulingGroup(SP_DEFAULT),
-      mPausedPosition(0)
+      mPausedPosition(0),
+      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
 {
     mStatus = set(streamType, sampleRate, format, channelMask,
             0 /*frameCount*/, flags, cbf, user, notificationFrames,
             sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
-            uid, pid, pAttributes);
+            uid, pid, pAttributes, doNotReconnect);
 }
 
 AudioTrack::~AudioTrack()
@@ -194,13 +248,17 @@
             mAudioTrackThread->requestExitAndWait();
             mAudioTrackThread.clear();
         }
+        // No lock here: worst case we remove a NULL callback which will be a nop
+        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+        }
         IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
         mAudioTrack.clear();
         mCblkMemory.clear();
         mSharedBuffer.clear();
         IPCThreadState::self()->flushCommands();
-        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
-                IPCThreadState::self()->getCallingPid(), mClientPid);
+        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
+                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
         AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
     }
 }
@@ -222,12 +280,13 @@
         const audio_offload_info_t *offloadInfo,
         int uid,
         pid_t pid,
-        const audio_attributes_t* pAttributes)
+        const audio_attributes_t* pAttributes,
+        bool doNotReconnect)
 {
     ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
-          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
+          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
           streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
-          sessionId, transferType);
+          sessionId, transferType, uid, pid);
 
     switch (transferType) {
     case TRANSFER_DEFAULT:
@@ -264,14 +323,13 @@
     }
     mSharedBuffer = sharedBuffer;
     mTransfer = transferType;
+    mDoNotReconnect = doNotReconnect;
 
-    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
+    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
             sharedBuffer->size());
 
     ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
 
-    AutoMutex lock(mLock);
-
     // invariant that mAudioTrack != 0 is true only after set() returns successfully
     if (mAudioTrack != 0) {
         ALOGE("Track already in use");
@@ -295,6 +353,9 @@
         ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                 mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
         mStreamType = AUDIO_STREAM_DEFAULT;
+        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+        }
     }
 
     // these below should probably come from the audioFlinger too...
@@ -317,12 +378,6 @@
     uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
     mChannelCount = channelCount;
 
-    // AudioFlinger does not currently support 8-bit data in shared memory
-    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
-        ALOGE("8-bit data in shared memory is not supported");
-        return BAD_VALUE;
-    }
-
     // force direct flag if format is not linear PCM
     // or offload was requested
     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
@@ -346,12 +401,9 @@
         } else {
             mFrameSize = sizeof(uint8_t);
         }
-        mFrameSizeAF = mFrameSize;
     } else {
         ALOG_ASSERT(audio_is_linear_pcm(format));
         mFrameSize = channelCount * audio_bytes_per_sample(format);
-        mFrameSizeAF = channelCount * audio_bytes_per_sample(
-                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
         // createTrack will return an error if PCM format is not supported by server,
         // so no need to check for specific PCM formats here
     }
@@ -361,6 +413,8 @@
         return BAD_VALUE;
     }
     mSampleRate = sampleRate;
+    mOriginalSampleRate = sampleRate;
+    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
 
     // Make copy of input parameter offloadInfo so that in the future:
     //  (a) createTrack_l doesn't need it as an input parameter
@@ -403,6 +457,7 @@
     if (cbf != NULL) {
         mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
         mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
+        // thread begins in paused state, and will not reference us until start()
     }
 
     // create the IAudioTrack
@@ -420,12 +475,14 @@
     mStatus = NO_ERROR;
     mState = STATE_STOPPED;
     mUserData = user;
-    mLoopPeriod = 0;
+    mLoopCount = 0;
+    mLoopStart = 0;
+    mLoopEnd = 0;
+    mLoopCountNotified = 0;
     mMarkerPosition = 0;
     mMarkerReached = false;
     mNewPosition = 0;
     mUpdatePeriod = 0;
-    mServer = 0;
     mPosition = 0;
     mReleased = 0;
     mStartUs = 0;
@@ -433,6 +490,9 @@
     mSequence = 1;
     mObservedSequence = mSequence;
     mInUnderrun = false;
+    mPreviousTimestampValid = false;
+    mTimestampStartupGlitchReported = false;
+    mRetrogradeMotionReported = false;
 
     return NO_ERROR;
 }
@@ -459,6 +519,10 @@
     if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
         // reset current position as seen by client to 0
         mPosition = 0;
+        mPreviousTimestampValid = false;
+        mTimestampStartupGlitchReported = false;
+        mRetrogradeMotionReported = false;
+
         // For offloaded tracks, we don't know if the hardware counters are really zero here,
         // since the flush is asynchronous and stop may not fully drain.
         // We save the time when the track is started to later verify whether
@@ -531,14 +595,12 @@
     // the playback head position will reset to 0, so if a marker is set, we need
     // to activate it again
     mMarkerReached = false;
-#if 0
-    // Force flush if a shared buffer is used otherwise audioflinger
-    // will not stop before end of buffer is reached.
-    // It may be needed to make sure that we stop playback, likely in case looping is on.
+
     if (mSharedBuffer != 0) {
-        flush_l();
+        // clear buffer position and loop count.
+        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
+                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
     }
-#endif
 
     sp<AudioTrackThread> t = mAudioTrackThread;
     if (t != 0) {
@@ -669,24 +731,31 @@
 
 status_t AudioTrack::setSampleRate(uint32_t rate)
 {
-    if (mIsTimed || isOffloadedOrDirect()) {
+    AutoMutex lock(mLock);
+    if (rate == mSampleRate) {
+        return NO_ERROR;
+    }
+    if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
         return INVALID_OPERATION;
     }
-
-    AutoMutex lock(mLock);
     if (mOutput == AUDIO_IO_HANDLE_NONE) {
         return NO_INIT;
     }
+    // NOTE: it is theoretically possible, but highly unlikely, that a device change
+    // could mean a previously allowed sampling rate is no longer allowed.
     uint32_t afSamplingRate;
     if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
         return NO_INIT;
     }
-    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
+    // pitch is emulated by adjusting speed and sampleRate
+    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
+    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
         return BAD_VALUE;
     }
+    // TODO: Should we also check if the buffer size is compatible?
 
     mSampleRate = rate;
-    mProxy->setSampleRate(rate);
+    mProxy->setSampleRate(effectiveSampleRate);
 
     return NO_ERROR;
 }
@@ -714,6 +783,69 @@
     return mSampleRate;
 }
 
+uint32_t AudioTrack::getOriginalSampleRate() const
+{
+    if (mIsTimed) {
+        return 0;
+    }
+
+    return mOriginalSampleRate;
+}
+
+status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
+{
+    AutoMutex lock(mLock);
+    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
+        return NO_ERROR;
+    }
+    if (mIsTimed || isOffloadedOrDirect_l()) {
+        return INVALID_OPERATION;
+    }
+    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
+        return INVALID_OPERATION;
+    }
+    // pitch is emulated by adjusting speed and sampleRate
+    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
+    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
+    const float effectivePitch = adjustPitch(playbackRate.mPitch);
+    AudioPlaybackRate playbackRateTemp = playbackRate;
+    playbackRateTemp.mSpeed = effectiveSpeed;
+    playbackRateTemp.mPitch = effectivePitch;
+
+    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
+        return BAD_VALUE;
+    }
+    // Check if the buffer size is compatible.
+    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
+        ALOGV("setPlaybackRate(%f, %f) failed", playbackRate.mSpeed, playbackRate.mPitch);
+        return BAD_VALUE;
+    }
+
+    // Check resampler ratios are within bounds
+    if (effectiveRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
+        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
+                playbackRate.mSpeed, playbackRate.mPitch);
+        return BAD_VALUE;
+    }
+
+    if (effectiveRate * AUDIO_RESAMPLER_UP_RATIO_MAX < mSampleRate) {
+        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
+                        playbackRate.mSpeed, playbackRate.mPitch);
+        return BAD_VALUE;
+    }
+    mPlaybackRate = playbackRate;
+    // set effective rates
+    mProxy->setPlaybackRate(playbackRateTemp);
+    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
+    return NO_ERROR;
+}
+
+const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
+{
+    AutoMutex lock(mLock);
+    return mPlaybackRate;
+}
+
 status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
     if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
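setPlaybackRate() above converts the requested (speed, pitch) pair into effective values and then rejects the request if the pitch-adjusted rate falls outside the resampler's ratio limits relative to the track's native rate. A compact sketch of those two bound checks; the AUDIO_RESAMPLER_DOWN/UP_RATIO_MAX values below are illustrative stand-ins, not the real constants:

    #include <cstdint>

    // Illustrative stand-ins for AUDIO_RESAMPLER_DOWN_RATIO_MAX / AUDIO_RESAMPLER_UP_RATIO_MAX.
    static const uint32_t kDownRatioMax = 6;
    static const uint32_t kUpRatioMax   = 16;

    // True if an effective (pitch-adjusted) rate can still be resampled against the
    // track's configured sample rate, mirroring the checks in setPlaybackRate().
    static bool resampleRatioOk(uint32_t effectiveRate, uint32_t trackSampleRate) {
        if ((uint64_t)effectiveRate > (uint64_t)trackSampleRate * kDownRatioMax) {
            return false;  // exceeds the maximum downsampling ratio
        }
        if ((uint64_t)effectiveRate * kUpRatioMax < trackSampleRate) {
            return false;  // exceeds the maximum upsampling ratio
        }
        return true;
    }

    // e.g. check resampleRatioOk(adjustSampleRate(rate, pitch), trackSampleRate)
    // before committing the new playback rate to the proxy.
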
@@ -740,10 +872,15 @@
 
 void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
 {
-    // Setting the loop will reset next notification update period (like setPosition).
-    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
-    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
+    // We do not update the periodic notification point.
+    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
+    mLoopCount = loopCount;
+    mLoopEnd = loopEnd;
+    mLoopStart = loopStart;
+    mLoopCountNotified = loopCount;
     mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
+
+    // Waking the AudioTrackThread is not needed as this cannot be called when active.
 }
 
 status_t AudioTrack::setMarkerPosition(uint32_t marker)
@@ -757,6 +894,10 @@
     mMarkerPosition = marker;
     mMarkerReached = false;
 
+    sp<AudioTrackThread> t = mAudioTrackThread;
+    if (t != 0) {
+        t->wake();
+    }
     return NO_ERROR;
 }
 
@@ -786,6 +927,10 @@
     mNewPosition = updateAndGetPosition_l() + updatePeriod;
     mUpdatePeriod = updatePeriod;
 
+    sp<AudioTrackThread> t = mAudioTrackThread;
+    if (t != 0) {
+        t->wake();
+    }
     return NO_ERROR;
 }
 
@@ -823,12 +968,11 @@
     if (mState == STATE_ACTIVE) {
         return INVALID_OPERATION;
     }
+    // After setting the position, use full update period before notification.
     mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
-    mLoopPeriod = 0;
-    // FIXME Check whether loops and setting position are incompatible in old code.
-    // If we use setLoop for both purposes we lose the capability to set the position while looping.
-    mStaticProxy->setLoop(position, mFrameCount, 0);
+    mStaticProxy->setBufferPosition(position);
 
+    // Waking the AudioTrackThread is not needed as this cannot be called when active.
     return NO_ERROR;
 }
 
@@ -849,15 +993,18 @@
         }
 
         if (mOutput != AUDIO_IO_HANDLE_NONE) {
-            uint32_t halFrames;
-            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
+            uint32_t halFrames; // actually unused
+            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
+            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
         }
         // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
         // due to hardware latency. We leave this behavior for now.
         *position = dspFrames;
     } else {
         if (mCblk->mFlags & CBLK_INVALID) {
-            restoreTrack_l("getPosition");
+            (void) restoreTrack_l("getPosition");
+            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
+            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
         }
 
         // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
@@ -893,10 +1040,19 @@
         return INVALID_OPERATION;
     }
     mNewPosition = mUpdatePeriod;
-    mLoopPeriod = 0;
-    // FIXME The new code cannot reload while keeping a loop specified.
-    // Need to check how the old code handled this, and whether it's a significant change.
-    mStaticProxy->setLoop(0, mFrameCount, 0);
+    (void) updateAndGetPosition_l();
+    mPosition = 0;
+    mPreviousTimestampValid = false;
+#if 0
+    // The documentation is not clear on the behavior of reload() and the restoration
+    // of loop count. Historically we have not restored loop count, start, end,
+    // but it makes sense if one desires to repeat playing a particular sound.
+    if (mLoopCount != 0) {
+        mLoopCountNotified = mLoopCount;
+        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
+    }
+#endif
+    mStaticProxy->setBufferPosition(0);
     return NO_ERROR;
 }
 
@@ -906,6 +1062,28 @@
     return mOutput;
 }
 
+status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
+    AutoMutex lock(mLock);
+    if (mSelectedDeviceId != deviceId) {
+        mSelectedDeviceId = deviceId;
+        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
+    }
+    return NO_ERROR;
+}
+
+audio_port_handle_t AudioTrack::getOutputDevice() {
+    AutoMutex lock(mLock);
+    return mSelectedDeviceId;
+}
+
+audio_port_handle_t AudioTrack::getRoutedDeviceId() {
+    AutoMutex lock(mLock);
+    if (mOutput == AUDIO_IO_HANDLE_NONE) {
+        return AUDIO_PORT_HANDLE_NONE;
+    }
+    return AudioSystem::getDeviceIdForIo(mOutput);
+}
+
 status_t AudioTrack::attachAuxEffect(int effectId)
 {
     AutoMutex lock(mLock);
@@ -935,19 +1113,23 @@
         return NO_INIT;
     }
 
+    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+    }
     audio_io_handle_t output;
     audio_stream_type_t streamType = mStreamType;
     audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
-    status_t status = AudioSystem::getOutputForAttr(attr, &output,
-                                                    (audio_session_t)mSessionId, &streamType,
-                                                    mSampleRate, mFormat, mChannelMask,
-                                                    mFlags, mOffloadInfo);
 
+    status_t status;
+    status = AudioSystem::getOutputForAttr(attr, &output,
+                                           (audio_session_t)mSessionId, &streamType, mClientUid,
+                                           mSampleRate, mFormat, mChannelMask,
+                                           mFlags, mSelectedDeviceId, mOffloadInfo);
 
     if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
-        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
+        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
               " channel mask %#x, flags %#x",
-              streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
+              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
         return BAD_VALUE;
     }
     {
@@ -955,29 +1137,27 @@
     // we must release it ourselves if anything goes wrong.
 
     // Not all of these values are needed under all conditions, but it is easier to get them all
-
-    uint32_t afLatency;
-    status = AudioSystem::getLatency(output, &afLatency);
+    status = AudioSystem::getLatency(output, &mAfLatency);
     if (status != NO_ERROR) {
         ALOGE("getLatency(%d) failed status %d", output, status);
         goto release;
     }
+    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
 
-    size_t afFrameCount;
-    status = AudioSystem::getFrameCount(output, &afFrameCount);
+    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
     if (status != NO_ERROR) {
         ALOGE("getFrameCount(output=%d) status %d", output, status);
         goto release;
     }
 
-    uint32_t afSampleRate;
-    status = AudioSystem::getSamplingRate(output, &afSampleRate);
+    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
     if (status != NO_ERROR) {
         ALOGE("getSamplingRate(output=%d) status %d", output, status);
         goto release;
     }
     if (mSampleRate == 0) {
-        mSampleRate = afSampleRate;
+        mSampleRate = mAfSampleRate;
+        mOriginalSampleRate = mAfSampleRate;
     }
     // Client decides whether the track is TIMED (see below), but can only express a preference
     // for FAST.  Server will perform additional tests.
@@ -986,23 +1166,23 @@
             // use case 1: shared buffer
             (mSharedBuffer != 0) ||
             // use case 2: callback transfer mode
-            (mTransfer == TRANSFER_CALLBACK)) &&
+            (mTransfer == TRANSFER_CALLBACK) ||
+            // use case 3: obtain/release mode
+            (mTransfer == TRANSFER_OBTAIN)) &&
             // matching sample rate
-            (mSampleRate == afSampleRate))) {
-        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
+            (mSampleRate == mAfSampleRate))) {
+        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
+                mTransfer, mSampleRate, mAfSampleRate);
         // once denied, do not request again if IAudioTrack is re-created
         mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
     }
-    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
 
     // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
     //  n = 1   fast track with single buffering; nBuffering is ignored
     //  n = 2   fast track with double buffering
-    //  n = 2   normal track, no sample rate conversion
-    //  n = 3   normal track, with sample rate conversion
-    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
-    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
-    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
+    //  n = 2   normal track (including those with sample rate conversion)
+    //  n >= 3  very high latency or very small notification interval (unused).
+    const uint32_t nBuffering = 2;
 
     mNotificationFramesAct = mNotificationFramesReq;
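The block above clears a requested AUDIO_OUTPUT_FLAG_FAST on the client side unless the transfer mode is shared-buffer, callback, or obtain/release and the track's sample rate matches the output's rate; the server may still deny the flag independently. A minimal sketch of that gate; the TransferMode enum is a placeholder rather than the real AudioTrack enum:

    #include <cstdint>

    enum class TransferMode { Default, Callback, Obtain, Sync, Shared };  // placeholder

    // Client-side precondition for keeping a FAST request:
    // a suitable transfer mode AND no client-side sample rate conversion.
    static bool fastRequestAllowed(bool hasSharedBuffer, TransferMode transfer,
                                   uint32_t trackRate, uint32_t outputRate) {
        const bool transferOk = hasSharedBuffer ||
                                transfer == TransferMode::Callback ||
                                transfer == TransferMode::Obtain;
        return transferOk && trackRate == outputRate;
    }

    // If this returns false, the FAST bit is removed from the requested flags and,
    // as in the patch, is never re-requested when the IAudioTrack is re-created.
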
 
@@ -1013,18 +1193,18 @@
             // Same comment as below about ignoring frameCount parameter for set()
             frameCount = mSharedBuffer->size();
         } else if (frameCount == 0) {
-            frameCount = afFrameCount;
+            frameCount = mAfFrameCount;
         }
         if (mNotificationFramesAct != frameCount) {
             mNotificationFramesAct = frameCount;
         }
     } else if (mSharedBuffer != 0) {
-
-        // Ensure that buffer alignment matches channel count
-        // 8-bit data in shared memory is not currently supported by AudioFlinger
-        size_t alignment = audio_bytes_per_sample(
-                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
+        // FIXME: Ensure client side memory buffers need
+        // not have additional alignment beyond sample
+        // (e.g. 16 bit stereo accessed as 32 bit frame).
+        size_t alignment = audio_bytes_per_sample(mFormat);
         if (alignment & 1) {
+            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
             alignment = 1;
         }
         if (mChannelCount > 1) {
@@ -1042,40 +1222,19 @@
         // there's no frameCount parameter.
         // But when initializing a shared buffer AudioTrack via set(),
         // there _is_ a frameCount parameter.  We silently ignore it.
-        frameCount = mSharedBuffer->size() / mFrameSizeAF;
-
-    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
-
-        // FIXME move these calculations and associated checks to server
-
-        // Ensure that buffer depth covers at least audio hardware latency
-        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
-        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
-                afFrameCount, minBufCount, afSampleRate, afLatency);
-        if (minBufCount <= nBuffering) {
-            minBufCount = nBuffering;
-        }
-
-        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
-        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
-                ", afLatency=%d",
-                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
-
-        if (frameCount == 0) {
-            frameCount = minFrameCount;
-        } else if (frameCount < minFrameCount) {
-            // not ALOGW because it happens all the time when playing key clicks over A2DP
-            ALOGV("Minimum buffer size corrected from %zu to %zu",
-                     frameCount, minFrameCount);
-            frameCount = minFrameCount;
-        }
-        // Make sure that application is notified with sufficient margin before underrun
-        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-            mNotificationFramesAct = frameCount/nBuffering;
-        }
-
+        frameCount = mSharedBuffer->size() / mFrameSize;
     } else {
-        // For fast tracks, the frame count calculations and checks are done by server
+        // For fast tracks the frame count calculations and checks are done by server
+
+        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
+            // for normal tracks precompute the frame count based on speed.
+            const size_t minFrameCount = calculateMinFrameCount(
+                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
+                    mPlaybackRate.mSpeed);
+            if (frameCount < minFrameCount) {
+                frameCount = minFrameCount;
+            }
+        }
     }
 
     IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
@@ -1101,12 +1260,10 @@
 
     size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                 // but we will still need the original value also
+    int originalSessionId = mSessionId;
     sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                       mSampleRate,
-                                                      // AudioFlinger only sees 16-bit PCM
-                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
-                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
-                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
+                                                      mFormat,
                                                       mChannelMask,
                                                       &temp,
                                                       &trackFlags,
@@ -1116,6 +1273,8 @@
                                                       &mSessionId,
                                                       mClientUid,
                                                       &status);
+    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
+            "session ID changed from %d to %d", originalSessionId, mSessionId);
 
     if (status != NO_ERROR) {
         ALOGE("AudioFlinger could not create track, status: %d", status);
@@ -1161,23 +1320,10 @@
         if (trackFlags & IAudioFlinger::TRACK_FAST) {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
             mAwaitBoost = true;
-            if (mSharedBuffer == 0) {
-                // Theoretically double-buffering is not required for fast tracks,
-                // due to tighter scheduling.  But in practice, to accommodate kernels with
-                // scheduling jitter, and apps with computation jitter, we use double-buffering.
-                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-                    mNotificationFramesAct = frameCount/nBuffering;
-                }
-            }
         } else {
             ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
             // once denied, do not request again if IAudioTrack is re-created
             mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
-            if (mSharedBuffer == 0) {
-                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
-                    mNotificationFramesAct = frameCount/nBuffering;
-                }
-            }
         }
     }
     if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
@@ -1200,6 +1346,16 @@
             //return NO_INIT;
         }
     }
+    // Make sure that application is notified with sufficient margin before underrun
+    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
+        // Theoretically double-buffering is not required for fast tracks,
+        // due to tighter scheduling.  But in practice, to accommodate kernels with
+        // scheduling jitter, and apps with computation jitter, we use double-buffering
+        // for fast tracks just like normal streaming tracks.
+        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
+            mNotificationFramesAct = frameCount / nBuffering;
+        }
+    }
 
     // We retain a copy of the I/O handle, but don't own the reference
     mOutput = output;
@@ -1211,14 +1367,19 @@
     // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
     void* buffers;
     if (mSharedBuffer == 0) {
-        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
+        buffers = cblk + 1;
     } else {
         buffers = mSharedBuffer->pointer();
+        if (buffers == NULL) {
+            ALOGE("Could not get buffer pointer");
+            return NO_INIT;
+        }
     }
 
     mAudioTrack->attachAuxEffect(mAuxEffectId);
+    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
     // FIXME don't believe this lie
-    mLatency = afLatency + (1000*frameCount) / mSampleRate;
+    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
 
     mFrameCount = frameCount;
     // If IAudioTrack is re-created, don't let the requested frameCount
@@ -1227,12 +1388,15 @@
         mReqFrameCount = frameCount;
     }
 
+    // reset server position to 0 as we have new cblk.
+    mServer = 0;
+
     // update proxy
     if (mSharedBuffer == 0) {
         mStaticProxy.clear();
-        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
     } else {
-        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
+        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
         mProxy = mStaticProxy;
     }
 
@@ -1241,12 +1405,24 @@
             gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
 
     mProxy->setSendLevel(mSendLevel);
-    mProxy->setSampleRate(mSampleRate);
+    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
+    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
+    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
+    mProxy->setSampleRate(effectiveSampleRate);
+
+    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
+    playbackRateTemp.mSpeed = effectiveSpeed;
+    playbackRateTemp.mPitch = effectivePitch;
+    mProxy->setPlaybackRate(playbackRateTemp);
     mProxy->setMinimum(mNotificationFramesAct);
 
     mDeathNotifier = new DeathNotifier(this);
     IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
 
+    if (mDeviceCallback != 0) {
+        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
+    }
+
     return NO_ERROR;
     }
 
@@ -1258,15 +1434,21 @@
     return status;
 }
 
-status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
+status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
 {
     if (audioBuffer == NULL) {
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return BAD_VALUE;
     }
     if (mTransfer != TRANSFER_OBTAIN) {
         audioBuffer->frameCount = 0;
         audioBuffer->size = 0;
         audioBuffer->raw = NULL;
+        if (nonContig != NULL) {
+            *nonContig = 0;
+        }
         return INVALID_OPERATION;
     }
 
@@ -1285,7 +1467,7 @@
         ALOGE("%s invalid waitCount %d", __func__, waitCount);
         requested = NULL;
     }
-    return obtainBuffer(audioBuffer, requested);
+    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
 }
 
 status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
@@ -1352,7 +1534,7 @@
     } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
 
     audioBuffer->frameCount = buffer.mFrameCount;
-    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
+    audioBuffer->size = buffer.mFrameCount * mFrameSize;
     audioBuffer->raw = buffer.mRaw;
     if (nonContig != NULL) {
         *nonContig = buffer.mNonContig;
@@ -1360,13 +1542,14 @@
     return status;
 }
 
-void AudioTrack::releaseBuffer(Buffer* audioBuffer)
+void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
 {
+    // FIXME add error checking on mode, by adding an internal version
     if (mTransfer == TRANSFER_SHARED) {
         return;
     }
 
-    size_t stepCount = audioBuffer->size / mFrameSizeAF;
+    size_t stepCount = audioBuffer->size / mFrameSize;
     if (stepCount == 0) {
         return;
     }
@@ -1431,15 +1614,8 @@
             return ssize_t(err);
         }
 
-        size_t toWrite;
-        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
-            // Divide capacity by 2 to take expansion into account
-            toWrite = audioBuffer.size >> 1;
-            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
-        } else {
-            toWrite = audioBuffer.size;
-            memcpy(audioBuffer.i8, buffer, toWrite);
-        }
+        size_t toWrite = audioBuffer.size;
+        memcpy(audioBuffer.i8, buffer, toWrite);
         buffer = ((const char *) buffer) + toWrite;
         userSize -= toWrite;
         written += toWrite;
@@ -1558,10 +1734,10 @@
         // AudioSystem cache. We should not exit here but after calling the callback so
         // that the upper layers can recreate the track
         if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
-            status_t status = restoreTrack_l("processAudioBuffer");
-            mLock.unlock();
-            // Run again immediately, but with a new IAudioTrack
-            return 0;
+            status_t status __unused = restoreTrack_l("processAudioBuffer");
+            // FIXME unused status
+            // after restoration, continue below to make sure that the loop and buffer events
+            // are notified because they have been cleared from mCblk->mFlags above.
         }
     }
 
@@ -1610,9 +1786,9 @@
     }
 
     // Cache other fields that will be needed soon
-    uint32_t loopPeriod = mLoopPeriod;
     uint32_t sampleRate = mSampleRate;
-    uint32_t notificationFrames = mNotificationFramesAct;
+    float speed = mPlaybackRate.mSpeed;
+    const uint32_t notificationFrames = mNotificationFramesAct;
     if (mRefreshRemaining) {
         mRefreshRemaining = false;
         mRemainingFrames = notificationFrames;
@@ -1622,13 +1798,42 @@
     uint32_t sequence = mSequence;
     sp<AudioTrackClientProxy> proxy = mProxy;
 
+    // Determine the number of new loop callback(s) that will be needed, while locked.
+    int loopCountNotifications = 0;
+    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
+
+    if (mLoopCount > 0) {
+        int loopCount;
+        size_t bufferPosition;
+        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
+        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
+        mLoopCountNotified = loopCount; // discard any excess notifications
+    } else if (mLoopCount < 0) {
+        // FIXME: The notification count and position are not accurate with infinite looping,
+        // since loopCount from the server side will always return -1 (we could decrement it).
+        size_t bufferPosition = mStaticProxy->getBufferPosition();
+        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
+        loopPeriod = mLoopEnd - bufferPosition;
+    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
+        size_t bufferPosition = mStaticProxy->getBufferPosition();
+        loopPeriod = mFrameCount - bufferPosition;
+    }
+
     // These fields don't need to be cached, because they are assigned only by set():
-    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
+    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
     // mFlags is also assigned by createTrack_l(), but not the bit we care about.
 
     mLock.unlock();
 
+    // get anchor time to account for callbacks.
+    const nsecs_t timeBeforeCallbacks = systemTime();
+
     if (waitStreamEnd) {
+        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
+        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
+        // (and make sure we don't callback for more data while we're stopping).
+        // This helps with position, marker notifications, and track invalidation.
         struct timespec timeout;
         timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
         timeout.tv_nsec = 0;
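With the static-track rework above, processAudioBuffer() no longer relies on a single CBLK_LOOP_CYCLE flag (which could coalesce several wraps between callbacks); for a finite loop it diffs the server's remaining loop count against the last value reported to the app and emits one EVENT_LOOP_END per completed loop, capped at kMaxLoopCountNotifications. A small sketch of that bookkeeping:

    #include <algorithm>

    static const int kMaxLoopCountNotifications = 32;

    // Given the loop count last reported to the app and the loop count currently
    // seen on the server, return how many EVENT_LOOP_END callbacks to deliver and
    // advance the bookmark; wraps beyond the cap are deliberately dropped.
    static int pendingLoopNotifications(int& loopCountNotified, int serverLoopCount) {
        int n = std::min(loopCountNotified - serverLoopCount, kMaxLoopCountNotifications);
        if (n < 0) {
            n = 0;  // defensive: the server count should never run ahead
        }
        loopCountNotified = serverLoopCount;  // discard any excess notifications
        return n;
    }

    // Example: setLoop(start, end, 5) leaves loopCountNotified == 5; if the server
    // has already counted down to 2, three EVENT_LOOP_END callbacks are delivered.
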
@@ -1662,10 +1867,9 @@
     if (newUnderrun) {
         mCbf(EVENT_UNDERRUN, mUserData, NULL);
     }
-    // FIXME we will miss loops if loop cycle was signaled several times since last call
-    //       to processAudioBuffer()
-    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
+    while (loopCountNotifications > 0) {
         mCbf(EVENT_LOOP_END, mUserData, NULL);
+        --loopCountNotifications;
     }
     if (flags & CBLK_BUFFER_END) {
         mCbf(EVENT_BUFFER_END, mUserData, NULL);
@@ -1701,10 +1905,11 @@
         minFrames = markerPosition - position;
     }
     if (loopPeriod > 0 && loopPeriod < minFrames) {
+        // loopPeriod is already adjusted for actual position.
         minFrames = loopPeriod;
     }
-    if (updatePeriod > 0 && updatePeriod < minFrames) {
-        minFrames = updatePeriod;
+    if (updatePeriod > 0) {
+        minFrames = min(minFrames, uint32_t(newPosition - position));
     }
 
     // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
@@ -1713,12 +1918,17 @@
         minFrames = kPoll * notificationFrames;
     }
 
+    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
+    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
+    const nsecs_t timeAfterCallbacks = systemTime();
+
     // Convert frame units to time units
     nsecs_t ns = NS_WHENEVER;
     if (minFrames != (uint32_t) ~0) {
-        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
-        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
-        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
+        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
+        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
+        // TODO: Should we warn if the callback time is too long?
+        if (ns < 0) ns = 0;
     }
 
     // If not supplying data by EVENT_MORE_DATA, then we're done
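The wake-up computation above converts the frame horizon to time with the playback speed folded in, adds a fixed slack period, and subtracts the time already spent in the callbacks so a slow callback does not push the next poll out further. A standalone sketch of that conversion; nsecs_t is modelled as int64_t and the 10 ms slack mirrors WAIT_PERIOD_MS:

    #include <chrono>
    #include <cstdint>

    using nsecs_t = int64_t;
    static const nsecs_t kWaitPeriodNs = 10 * 1000000LL;  // WAIT_PERIOD_MS = 10

    static nsecs_t framesToNanoseconds(int64_t frames, uint32_t sampleRate, float speed) {
        return (nsecs_t)((double)frames * 1000000000 / ((double)sampleRate * speed));
    }

    static nsecs_t nowNs() {
        return std::chrono::duration_cast<std::chrono::nanoseconds>(
                std::chrono::steady_clock::now().time_since_epoch()).count();
    }

    // Sleep budget until the next interesting position, net of the time already
    // spent delivering callbacks since timeBeforeCallbacks was sampled.
    static nsecs_t nextWakeupNs(uint32_t minFrames, uint32_t sampleRate, float speed,
                                nsecs_t timeBeforeCallbacks) {
        nsecs_t ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
        ns -= nowNs() - timeBeforeCallbacks;  // account for callback time
        return ns < 0 ? 0 : ns;
    }
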
@@ -1726,6 +1936,13 @@
         return ns;
     }
 
+    // EVENT_MORE_DATA callback handling.
+    // Timing for linear pcm audio data formats can be derived directly from the
+    // buffer fill level.
+    // Timing for compressed data is not directly available from the buffer fill level,
+    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
+    // to return a certain fill level.
+
     struct timespec timeout;
     const struct timespec *requested = &ClientProxy::kForever;
     if (ns != NS_WHENEVER) {
@@ -1756,24 +1973,21 @@
             return NS_NEVER;
         }
 
-        if (mRetryOnPartialBuffer && !isOffloaded()) {
+        if (mRetryOnPartialBuffer && audio_is_linear_pcm(mFormat)) {
             mRetryOnPartialBuffer = false;
             if (avail < mRemainingFrames) {
-                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
-                if (ns < 0 || myns < ns) {
+                if (ns > 0) { // account for obtain time
+                    const nsecs_t timeNow = systemTime();
+                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
+                }
+                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
+                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                     ns = myns;
                 }
                 return ns;
             }
         }
 
-        // Divide buffer size by 2 to take into account the expansion
-        // due to 8 to 16 bit conversion: the callback must fill only half
-        // of the destination buffer
-        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
-            audioBuffer.size >>= 1;
-        }
-
         size_t reqSize = audioBuffer.size;
         mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
         size_t writtenSize = audioBuffer.size;
@@ -1790,16 +2004,45 @@
             // Keep this thread going to handle timed events and
             // still try to get more data in intervals of WAIT_PERIOD_MS
             // but don't just loop and block the CPU, so wait
-            return WAIT_PERIOD_MS * 1000000LL;
+
+            // mCbf(EVENT_MORE_DATA, ...) might either
+            // (1) Block until it can fill the buffer, returning 0 size on EOS.
+            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
+            // (3) Return 0 size when no data is available, does not wait for more data.
+            //
+            // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
+            // We try to compute the wait time to avoid a tight sleep-wait cycle,
+            // especially for case (3).
+            //
+            // The decision to support (1) and (2) affects the sizing of mRemainingFrames
+            // and this loop; whereas for case (3) we could simply check once with the full
+            // buffer size and skip the loop entirely.
+
+            nsecs_t myns;
+            if (audio_is_linear_pcm(mFormat)) {
+                // time to wait based on buffer occupancy
+                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
+                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
+                // audio flinger thread buffer size (TODO: adjust for fast tracks)
+                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
+                // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
+                myns = datans + (afns / 2);
+            } else {
+                // FIXME: This could ping quite a bit if the buffer isn't full.
+                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
+                myns = kWaitPeriodNs;
+            }
+            if (ns > 0) { // account for obtain and callback time
+                const nsecs_t timeNow = systemTime();
+                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
+            }
+            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
+                ns = myns;
+            }
+            return ns;
         }
 
-        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
-            // 8 to 16 bit conversion, note that source and destination are the same address
-            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
-            audioBuffer.size <<= 1;
-        }
-
-        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
+        size_t releasedFrames = writtenSize / mFrameSize;
         audioBuffer.frameCount = releasedFrames;
         mRemainingFrames -= releasedFrames;
         if (misalignment >= releasedFrames) {
@@ -1827,7 +2070,7 @@
         // that total to a sum == notificationFrames.
         if (0 < misalignment && misalignment <= mRemainingFrames) {
             mRemainingFrames = misalignment;
-            return (mRemainingFrames * 1100000000LL) / sampleRate;
+            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
         }
 #endif
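When the EVENT_MORE_DATA callback returns zero bytes, the code above estimates how long to sleep: for linear PCM it waits until the outstanding request could fit in the buffer, plus half an AudioFlinger mix-buffer period so a full client buffer does not turn into a tight poll; for compressed formats it falls back to the fixed wait period. A sketch of that estimate with its own local copy of the frames-to-time helper:

    #include <cstddef>
    #include <cstdint>

    using nsecs_t = int64_t;
    static const nsecs_t kWaitPeriodNs = 10 * 1000000LL;  // WAIT_PERIOD_MS = 10

    static nsecs_t framesToNs(size_t frames, uint32_t sampleRate, float speed) {
        return (nsecs_t)((double)frames * 1000000000 / ((double)sampleRate * speed));
    }

    static nsecs_t zeroWriteWaitNs(bool linearPcm, size_t remainingFrames, size_t availFrames,
                                   uint32_t sampleRate, float speed,
                                   size_t afFrameCount, uint32_t afSampleRate) {
        if (!linearPcm) {
            return kWaitPeriodNs;  // no fill-level timing available for compressed data
        }
        // Time until the outstanding request could be satisfied from the buffer...
        const nsecs_t datans = remainingFrames <= availFrames ? 0 :
                framesToNs(remainingFrames - availFrames, sampleRate, speed);
        // ...plus half a mixer buffer so an already-full buffer is not polled tightly.
        const nsecs_t afns = framesToNs(afFrameCount, afSampleRate, speed);
        return datans + afns / 2;
    }
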
 
@@ -1844,51 +2087,49 @@
     ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
           isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
     ++mSequence;
-    status_t result;
 
     // refresh the audio configuration cache in this process to make sure we get new
     // output parameters and new IAudioFlinger in createTrack_l()
     AudioSystem::clearAudioConfigCache();
 
-    if (isOffloadedOrDirect_l()) {
-        // FIXME re-creation of offloaded tracks is not yet implemented
+    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
+        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
+        // reconsider enabling for linear PCM encodings when position can be preserved.
         return DEAD_OBJECT;
     }
 
     // save the old static buffer position
-    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
+    size_t bufferPosition = 0;
+    int loopCount = 0;
+    if (mStaticProxy != 0) {
+        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
+    }
 
     // If a new IAudioTrack is successfully created, createTrack_l() will modify the
     // following member variables: mAudioTrack, mCblkMemory and mCblk.
     // It will also delete the strong references on previous IAudioTrack and IMemory.
     // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
-    result = createTrack_l();
-
-    // take the frames that will be lost by track recreation into account in saved position
-    (void) updateAndGetPosition_l();
-    mPosition = mReleased;
+    status_t result = createTrack_l();
 
     if (result == NO_ERROR) {
-        // continue playback from last known position, but
-        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
-        if (mStaticProxy != NULL) {
-            mLoopPeriod = 0;
-            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
+        // take the frames that will be lost by track recreation into account in saved position
+        // For streaming tracks, this is the amount we obtained from the user/client
+        // (not the number actually consumed at the server - those are already lost).
+        if (mStaticProxy == 0) {
+            mPosition = mReleased;
         }
-        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
-        //       track destruction have been played? This is critical for SoundPool implementation
-        //       This must be broken, and needs to be tested/debugged.
-#if 0
-        // restore write index and set other indexes to reflect empty buffer status
-        if (!strcmp(from, "start")) {
-            // Make sure that a client relying on callback events indicating underrun or
-            // the actual amount of audio frames played (e.g SoundPool) receives them.
-            if (mSharedBuffer == 0) {
-                // restart playback even if buffer is not completely filled.
-                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+        // Continue playback from last known position and restore loop.
+        if (mStaticProxy != 0) {
+            if (loopCount != 0) {
+                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
+                        mLoopStart, mLoopEnd, loopCount);
+            } else {
+                mStaticProxy->setBufferPosition(bufferPosition);
+                if (bufferPosition == mFrameCount) {
+                    ALOGD("restoring track at end of static buffer");
+                }
             }
         }
-#endif
         if (mState == STATE_ACTIVE) {
             result = mAudioTrack->start();
         }
@@ -1923,6 +2164,19 @@
     return mPosition += (uint32_t) delta;
 }
 
+bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
+{
+    // applicable for mixing tracks only (not offloaded or direct)
+    if (mStaticProxy != 0) {
+        return true; // static tracks do not have issues with buffer sizing.
+    }
+    const size_t minFrameCount =
+            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed);
+    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
+            mFrameCount, minFrameCount);
+    return mFrameCount >= minFrameCount;
+}
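
isSampleRateSpeedAllowed_l() above recomputes the minimum client frame count for the requested rate and speed and accepts the change only if the track was created with at least that many frames. calculateMinFrameCount() is not shown in this hunk, so the sketch below uses an illustrative stand-in formula (hardware periods derived from the mixer latency, scaled by the source-to-sink rate ratio and the timestretch speed), not the exact implementation:

    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <initializer_list>

    // Illustrative stand-in for calculateMinFrameCount(): the client buffer must
    // cover a couple of hardware periods' worth of *source* frames, and faster
    // playback (speed > 1) consumes source frames proportionally faster.
    static size_t minFrameCountSketch(uint32_t afLatencyMs, size_t afFrameCount,
                                      uint32_t afSampleRate, uint32_t sampleRate, float speed) {
        uint32_t periodMs = (uint32_t)(1000 * afFrameCount / afSampleRate);
        uint32_t minBufCount = std::max<uint32_t>(2, periodMs ? afLatencyMs / periodMs : 2);
        double sourceFramesPerPeriod = (double)afFrameCount * sampleRate * speed / afSampleRate;
        return (size_t)(minBufCount * std::ceil(sourceFramesPerPeriod));
    }

    int main() {
        const size_t frameCount = 2048;  // what the track was created with
        // Doubling the speed roughly doubles the requirement, which is why a
        // track sized for 1.0x may reject setPlaybackRate() at higher speeds.
        for (float speed : {1.0f, 2.0f, 4.0f}) {
            size_t need = minFrameCountSketch(50, 960, 48000, 44100, speed);
            printf("speed %.1f: need %zu, have %zu -> %s\n", speed, need, frameCount,
                   frameCount >= need ? "allowed" : "rejected");
        }
        return 0;
    }
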
+
 status_t AudioTrack::setParameters(const String8& keyValuePairs)
 {
     AutoMutex lock(mLock);
@@ -1932,6 +2186,11 @@
 status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
 {
     AutoMutex lock(mLock);
+
+    bool previousTimestampValid = mPreviousTimestampValid;
+    // Set false here to cover all the error return cases.
+    mPreviousTimestampValid = false;
+
     // FIXME not implemented for fast tracks; should use proxy and SSQ
     if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
         return INVALID_OPERATION;
@@ -1956,7 +2215,12 @@
     }
 
     if (mCblk->mFlags & CBLK_INVALID) {
-        restoreTrack_l("getTimestamp");
+        const status_t status = restoreTrack_l("getTimestamp");
+        if (status != OK) {
+            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
+            // recommending that the track be recreated.
+            return DEAD_OBJECT;
+        }
     }
 
     // The presented frame count must always lag behind the consumed frame count.
@@ -1975,7 +2239,12 @@
         }
 
         // Check whether a pending flush or stop has completed, as those commands may
-        // be asynchronous or return near finish.
+        // be asynchronous or return near finish or exhibit glitchy behavior.
+        //
+        // Originally this showed up as the first timestamp being a continuation of
+        // the previous song under gapless playback.
+        // However, we sometimes see zero timestamps, then a glitch of
+        // the previous song's position, and then correct timestamps afterwards.
         if (mStartUs != 0 && mSampleRate != 0) {
             static const int kTimeJitterUs = 100000; // 100 ms
             static const int k1SecUs = 1000000;
@@ -1988,20 +2257,34 @@
                     return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                 }
                 const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
-                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
+                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
+                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
 
                 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                     // Verify that the counter can't count faster than the sample rate
-                    // since the start time.  If greater, then that means we have failed
+                    // since the start time.  If greater, then that means we may have failed
                     // to completely flush or stop the previous playing track.
-                    ALOGW("incomplete flush or stop:"
+                    ALOGW_IF(!mTimestampStartupGlitchReported,
+                            "getTimestamp startup glitch detected"
                             " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
                             (long long)deltaTimeUs, (long long)deltaPositionByUs,
                             timestamp.mPosition);
+                    mTimestampStartupGlitchReported = true;
+                    if (previousTimestampValid
+                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
+                        timestamp = mPreviousTimestamp;
+                        mPreviousTimestampValid = true;
+                        return NO_ERROR;
+                    }
                     return WOULD_BLOCK;
                 }
+                if (deltaPositionByUs != 0) {
+                    mStartUs = 0; // don't check again, we got valid nonzero position.
+                }
+            } else {
+                mStartUs = 0; // don't check again, start time expired.
             }
-            mStartUs = 0; // no need to check again, start timestamp has either expired or unneeded.
+            mTimestampStartupGlitchReported = false;
         }
     } else {
         // Update the mapping between local consumed (mPosition) and server consumed (mServer)
@@ -2029,6 +2312,46 @@
         // IAudioTrack.  And timestamp.mPosition is initially in server's
         // point of view, so we need to apply the same fudge factor to it.
     }
+
+    // Prevent retrograde motion in timestamp.
+    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
+    if (status == NO_ERROR) {
+        if (previousTimestampValid) {
+#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * 1000000000 + time.tv_nsec)
+            const uint64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
+            const uint64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
+#undef TIME_TO_NANOS
+            if (currentTimeNanos < previousTimeNanos) {
+                ALOGW("retrograde timestamp time");
+                // FIXME Consider blocking this from propagating upwards.
+            }
+
+            // Looking at signed delta will work even when the timestamps
+            // are wrapping around.
+            int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
+                    - mPreviousTimestamp.mPosition);
+            // position can bobble slightly as an artifact; this hides the bobble
+            static const int32_t MINIMUM_POSITION_DELTA = 8;
+            if (deltaPosition < 0) {
+                // Only report once per position instead of spamming the log.
+                if (!mRetrogradeMotionReported) {
+                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
+                            deltaPosition,
+                            timestamp.mPosition,
+                            mPreviousTimestamp.mPosition);
+                    mRetrogradeMotionReported = true;
+                }
+            } else {
+                mRetrogradeMotionReported = false;
+            }
+            if (deltaPosition < MINIMUM_POSITION_DELTA) {
+                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
+            }
+        }
+        mPreviousTimestamp = timestamp;
+        mPreviousTimestampValid = true;
+    }
+
     return status;
 }
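
The retrograde check added above subtracts two free-running uint32_t positions and reads the result as int32_t, which keeps the sign of the delta meaningful even when the newer counter has wrapped past zero, as long as the two values are within 2^31 frames of each other. A standalone demonstration, not AudioTrack code:

    #include <cstdint>
    #include <cstdio>

    // Signed interpretation of an unsigned difference.
    static int32_t positionDelta(uint32_t current, uint32_t previous) {
        return (int32_t)(current - previous);
    }

    int main() {
        printf("%d\n", positionDelta(1000, 900));       // 100: normal forward progress
        printf("%d\n", positionDelta(5, 0xFFFFFFF0u));  // 21: forward across the 32-bit wrap
        // A retrograde report (e.g. an erratic ALSA position) yields a negative
        // delta, so getTimestamp() would hand back the previous timestamp instead.
        printf("%d\n", positionDelta(900, 1000));       // -100
        return 0;
    }
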
 
@@ -2075,7 +2398,8 @@
     snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
             mChannelCount, mFrameCount);
     result.append(buffer);
-    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
+    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
+            mSampleRate, mPlaybackRate.mSpeed, mStatus);
     result.append(buffer);
     snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
     result.append(buffer);
@@ -2089,6 +2413,48 @@
     return mProxy->getUnderrunFrames();
 }
 
+status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s adding NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback == callback) {
+        ALOGW("%s adding same callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    status_t status = NO_ERROR;
+    if (mOutput != AUDIO_IO_HANDLE_NONE) {
+        if (mDeviceCallback != 0) {
+            ALOGW("%s callback already present!", __FUNCTION__);
+            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+        }
+        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
+    }
+    mDeviceCallback = callback;
+    return status;
+}
+
+status_t AudioTrack::removeAudioDeviceCallback(
+        const sp<AudioSystem::AudioDeviceCallback>& callback)
+{
+    if (callback == 0) {
+        ALOGW("%s removing NULL callback!", __FUNCTION__);
+        return BAD_VALUE;
+    }
+    AutoMutex lock(mLock);
+    if (mDeviceCallback != callback) {
+        ALOGW("%s removing different callback!", __FUNCTION__);
+        return INVALID_OPERATION;
+    }
+    if (mOutput != AUDIO_IO_HANDLE_NONE) {
+        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
+    }
+    mDeviceCallback = 0;
+    return NO_ERROR;
+}
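
A hedged usage sketch for the registration API added above, assuming the M-era AudioSystem::AudioDeviceCallback interface whose single virtual is onAudioDeviceUpdate(); the RoutingLogger class and attachLogger() helper are hypothetical, not framework code:

    #define LOG_TAG "RoutingLogger"
    #include <utils/Log.h>
    #include <media/AudioSystem.h>
    #include <media/AudioTrack.h>

    using namespace android;

    // Hypothetical callback: logs whenever the track is rerouted to a new device.
    struct RoutingLogger : public AudioSystem::AudioDeviceCallback {
        virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
                                         audio_port_handle_t deviceId) {
            ALOGI("output %d now routed to device %d", audioIo, deviceId);
        }
    };

    static void attachLogger(const sp<AudioTrack>& track) {
        sp<RoutingLogger> logger = new RoutingLogger();
        // Returns BAD_VALUE for a null callback and INVALID_OPERATION if this
        // exact callback is already registered, per the implementation above.
        status_t status = track->addAudioDeviceCallback(logger);
        ALOGW_IF(status != NO_ERROR, "addAudioDeviceCallback failed: %d", status);
        // ... later, before the track is destroyed:
        track->removeAudioDeviceCallback(logger);
    }
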
+
 // =========================================================================
 
 void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
@@ -2148,8 +2514,8 @@
     case NS_NEVER:
         return false;
     case NS_WHENEVER:
-        // FIXME increase poll interval, or make event-driven
-        ns = 1000000000LL;
+        // Event driven: call wake() when callback notification conditions change.
+        ns = INT64_MAX;
         // fall through
     default:
         LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
@@ -2182,6 +2548,21 @@
     }
 }
 
+void AudioTrack::AudioTrackThread::wake()
+{
+    AutoMutex _l(mMyLock);
+    if (!mPaused) {
+        // wake() might be called while servicing a callback - ignore the next
+        // pause time and call processAudioBuffer.
+        mIgnoreNextPausedInt = true;
+        if (mPausedInt && mPausedNs > 0) {
+            // audio track is active and internally paused with timeout.
+            mPausedInt = false;
+            mMyCond.signal();
+        }
+    }
+}
+
 void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
 {
     AutoMutex _l(mMyLock);
@@ -2189,4 +2570,4 @@
     mPausedNs = ns;
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index ff24475..6a51a76 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -28,7 +28,21 @@
 // used to clamp a value to size_t.  TODO: move to another file.
 template <typename T>
 size_t clampToSize(T x) {
-    return x > SIZE_MAX ? SIZE_MAX : x < 0 ? 0 : (size_t) x;
+    return sizeof(T) > sizeof(size_t) && x > (T) SIZE_MAX ? SIZE_MAX : x < 0 ? 0 : (size_t) x;
+}
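
The revised clampToSize() only performs the SIZE_MAX comparison when T is actually wider than size_t, which avoids the pointless signed-to-unsigned comparison (and the compiler warning it tends to draw) for same-width types while preserving the clamp where it matters. A standalone copy with a few probe values:

    #include <cstdint>
    #include <cstdio>

    // Standalone copy of the clamp for illustration. The SIZE_MAX branch can only
    // be taken when T is wider than size_t (e.g. int64_t on a 32-bit build).
    template <typename T>
    static size_t clampToSize(T x) {
        return sizeof(T) > sizeof(size_t) && x > (T) SIZE_MAX ? SIZE_MAX : x < 0 ? 0 : (size_t) x;
    }

    int main() {
        printf("%zu\n", clampToSize<int64_t>(-5));        // 0: negative clamps to zero
        printf("%zu\n", clampToSize<int64_t>(4096));      // 4096: in range, passes through
        printf("%zu\n", clampToSize<int64_t>(INT64_MAX)); // SIZE_MAX on 32-bit, INT64_MAX on 64-bit
        return 0;
    }
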
+
+// incrementSequence is used to determine the next sequence value
+// for the loop and position sequence counters.  It should return
+// a value between "other" + 1 and "other" + INT32_MAX, the choice of
+// which needs to be the "least recently used" sequence value for "self".
+// In general, this means (new_self) returned is max(self, other) + 1.
+
+static uint32_t incrementSequence(uint32_t self, uint32_t other) {
+    int32_t diff = self - other;
+    if (diff >= 0 && diff < INT32_MAX) {
+        return self + 1; // we're already ahead of other.
+    }
+    return other + 1; // we're behind, so move just ahead of other.
 }
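
A small demonstration of the invariant described in the comment above: the returned value behaves like max(self, other) + 1 under wraparound-safe signed comparison, so whichever of the loop or position sequence was updated last always carries the larger sequence number. Standalone copy for illustration:

    #include <cstdint>
    #include <cstdio>

    // Standalone copy of incrementSequence() for illustration.
    static uint32_t incrementSequence(uint32_t self, uint32_t other) {
        int32_t diff = self - other;
        if (diff >= 0 && diff < INT32_MAX) {
            return self + 1; // we're already ahead of other.
        }
        return other + 1; // we're behind, so move just ahead of other.
    }

    int main() {
        printf("%u\n", incrementSequence(7, 3));            // 8: already ahead, keep counting
        printf("%u\n", incrementSequence(3, 7));            // 8: behind, jump just past other
        // Still correct when the counters straddle the 32-bit wrap.
        printf("%u\n", incrementSequence(0xFFFFFFFFu, 2));  // 3
        printf("%u\n", incrementSequence(2, 0xFFFFFFFFu));  // 3
        return 0;
    }
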
 
 audio_track_cblk_t::audio_track_cblk_t()
@@ -360,6 +374,9 @@
     size_t increment = mFrameCountP2 << 1;
     size_t mask = increment - 1;
     audio_track_cblk_t* cblk = mCblk;
+    // mFlush is 32 bits concatenated as [ flush_counter ] [ newfront_offset ]
+    // Should newFlush = cblk->u.mStreaming.mRear?  Only problem is
+    // if you want to flush twice to the same rear location after a 32 bit wrap.
     int32_t newFlush = (cblk->u.mStreaming.mRear & mask) |
                         ((cblk->u.mStreaming.mFlush & ~mask) + increment);
     android_atomic_release_store(newFlush, &cblk->u.mStreaming.mFlush);
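
The comment above describes mFlush as a packed 32-bit word: the low bits (below twice the power-of-two frame count) carry the requested new front offset and the remaining high bits act as a flush counter, so flushing twice to the same rear offset still produces a different mFlush value. A small demonstration of the packing, with toy numbers:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t frameCountP2 = 8;               // power-of-two frame count
        const uint32_t increment = frameCountP2 << 1;  // 16: one counter step
        const uint32_t mask = increment - 1;           // 0x0f: offset field

        uint32_t rear = 0x123;   // free-running rear position
        uint32_t flush = 0x20;   // previous mFlush word (counter = 2, offset = 0)

        // Same packing as above: offset field from rear, counter field bumped.
        uint32_t newFlush = (rear & mask) | ((flush & ~mask) + increment);
        printf("offset=%u counter=%u raw=%#x\n",
               newFlush & mask, (newFlush & ~mask) / increment, newFlush);

        // Flushing again at the same rear still changes mFlush, because the
        // counter bits differ even though the offset bits repeat.
        uint32_t newFlush2 = (rear & mask) | ((newFlush & ~mask) + increment);
        printf("raw2=%#x (differs from raw=%#x)\n", newFlush2, newFlush);
        return 0;
    }
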
@@ -409,7 +426,6 @@
             goto end;
         }
         // check for obtainBuffer interrupted by client
-        // check for obtainBuffer interrupted by client
         if (flags & CBLK_INTERRUPT) {
             ALOGV("waitStreamEndDone() interrupted by client");
             status = -EINTR;
@@ -485,8 +501,11 @@
 StaticAudioTrackClientProxy::StaticAudioTrackClientProxy(audio_track_cblk_t* cblk, void *buffers,
         size_t frameCount, size_t frameSize)
     : AudioTrackClientProxy(cblk, buffers, frameCount, frameSize),
-      mMutator(&cblk->u.mStatic.mSingleStateQueue), mBufferPosition(0)
+      mMutator(&cblk->u.mStatic.mSingleStateQueue),
+      mPosLoopObserver(&cblk->u.mStatic.mPosLoopQueue)
 {
+    memset(&mState, 0, sizeof(mState));
+    memset(&mPosLoop, 0, sizeof(mPosLoop));
 }
 
 void StaticAudioTrackClientProxy::flush()
@@ -501,30 +520,72 @@
         // FIXME Should return an error status
         return;
     }
-    StaticAudioTrackState newState;
-    newState.mLoopStart = (uint32_t) loopStart;
-    newState.mLoopEnd = (uint32_t) loopEnd;
-    newState.mLoopCount = loopCount;
-    size_t bufferPosition;
-    if (loopCount == 0 || (bufferPosition = getBufferPosition()) >= loopEnd) {
-        bufferPosition = loopStart;
+    mState.mLoopStart = (uint32_t) loopStart;
+    mState.mLoopEnd = (uint32_t) loopEnd;
+    mState.mLoopCount = loopCount;
+    mState.mLoopSequence = incrementSequence(mState.mLoopSequence, mState.mPositionSequence);
+    // set patch-up variables until the mState is acknowledged by the ServerProxy.
+    // observed buffer position and loop count will freeze until then to give the
+    // illusion of a synchronous change.
+    getBufferPositionAndLoopCount(NULL, NULL);
+    // preserve behavior to restart at mState.mLoopStart if position exceeds mState.mLoopEnd.
+    if (mState.mLoopCount != 0 && mPosLoop.mBufferPosition >= mState.mLoopEnd) {
+        mPosLoop.mBufferPosition = mState.mLoopStart;
     }
-    mBufferPosition = bufferPosition; // snapshot buffer position until loop is acknowledged.
-    (void) mMutator.push(newState);
+    mPosLoop.mLoopCount = mState.mLoopCount;
+    (void) mMutator.push(mState);
+}
+
+void StaticAudioTrackClientProxy::setBufferPosition(size_t position)
+{
+    // This can only happen on a 64-bit client
+    if (position > UINT32_MAX) {
+        // FIXME Should return an error status
+        return;
+    }
+    mState.mPosition = (uint32_t) position;
+    mState.mPositionSequence = incrementSequence(mState.mPositionSequence, mState.mLoopSequence);
+    // set patch-up variables until the mState is acknowledged by the ServerProxy.
+    // observed buffer position and loop count will freeze until then to give the
+    // illusion of a synchronous change.
+    if (mState.mLoopCount > 0) {  // only check if loop count is changing
+        getBufferPositionAndLoopCount(NULL, NULL); // get last position
+    }
+    mPosLoop.mBufferPosition = position;
+    if (position >= mState.mLoopEnd) {
+        // no ongoing loop is possible if position is greater than loopEnd.
+        mPosLoop.mLoopCount = 0;
+    }
+    (void) mMutator.push(mState);
+}
+
+void StaticAudioTrackClientProxy::setBufferPositionAndLoop(size_t position, size_t loopStart,
+        size_t loopEnd, int loopCount)
+{
+    setLoop(loopStart, loopEnd, loopCount);
+    setBufferPosition(position);
 }
 
 size_t StaticAudioTrackClientProxy::getBufferPosition()
 {
-    size_t bufferPosition;
-    if (mMutator.ack()) {
-        bufferPosition = (size_t) mCblk->u.mStatic.mBufferPosition;
-        if (bufferPosition > mFrameCount) {
-            bufferPosition = mFrameCount;
-        }
-    } else {
-        bufferPosition = mBufferPosition;
+    getBufferPositionAndLoopCount(NULL, NULL);
+    return mPosLoop.mBufferPosition;
+}
+
+void StaticAudioTrackClientProxy::getBufferPositionAndLoopCount(
+        size_t *position, int *loopCount)
+{
+    if (mMutator.ack() == StaticAudioTrackSingleStateQueue::SSQ_DONE) {
+         if (mPosLoopObserver.poll(mPosLoop)) {
+             ; // a valid mPosLoop should be available if ackDone is true.
+         }
     }
-    return bufferPosition;
+    if (position != NULL) {
+        *position = mPosLoop.mBufferPosition;
+    }
+    if (loopCount != NULL) {
+        *loopCount = mPosLoop.mLoopCount;
+    }
 }
 
 // ---------------------------------------------------------------------------
@@ -555,13 +616,24 @@
         front = cblk->u.mStreaming.mFront;
         if (flush != mFlush) {
             // effectively obtain then release whatever is in the buffer
-            size_t mask = (mFrameCountP2 << 1) - 1;
+            const size_t overflowBit = mFrameCountP2 << 1;
+            const size_t mask = overflowBit - 1;
             int32_t newFront = (front & ~mask) | (flush & mask);
             ssize_t filled = rear - newFront;
+            if (filled >= (ssize_t)overflowBit) {
+                // front and rear offsets span the overflow bit of the p2 mask
+                // so rebasing newFront on the front offset is off by the overflow bit.
+                // adjust newFront to match rear offset.
+                ALOGV("flush wrap: filled %zx >= overflowBit %zx", filled, overflowBit);
+                newFront += overflowBit;
+                filled -= overflowBit;
+            }
             // Rather than shutting down on a corrupt flush, just treat it as a full flush
             if (!(0 <= filled && (size_t) filled <= mFrameCount)) {
-                ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, filled %d=%#x",
-                        mFlush, flush, front, rear, mask, newFront, filled, filled);
+                ALOGE("mFlush %#x -> %#x, front %#x, rear %#x, mask %#x, newFront %#x, "
+                        "filled %zd=%#x",
+                        mFlush, flush, front, rear,
+                        (unsigned)mask, newFront, filled, (unsigned)filled);
                 newFront = rear;
             }
             mFlush = flush;
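
The overflow-bit adjustment above is needed because newFront is rebuilt from front's high bits and flush's low bits; when front and rear sit on opposite sides of a multiple of 2 * frameCountP2, that rebasing lands one window too low, the apparent fill level exceeds the window size, and the code bumps newFront by one window so the flush empties the buffer as intended. A numeric illustration with toy values, not framework code:

    #include <cstdint>
    #include <cstdio>
    #include <sys/types.h>

    int main() {
        const size_t frameCountP2 = 8;
        const size_t overflowBit = frameCountP2 << 1;   // 16
        const size_t mask = overflowBit - 1;            // 15

        int32_t front = 14;                 // server has consumed up to 14
        int32_t rear = 18;                  // client has written up to 18
        int32_t flushOffset = rear & (int32_t)mask;     // flush requested at the current rear

        // Rebase the flush offset onto front's window: 14 and 18 straddle 16,
        // so this lands a full window (16 frames) too low.
        int32_t newFront = (front & ~(int32_t)mask) | flushOffset;   // 2
        ssize_t filled = rear - newFront;                            // 16
        if (filled >= (ssize_t)overflowBit) {
            newFront += overflowBit;        // correct newFront into rear's window
            filled -= overflowBit;
        }
        printf("newFront=%d filled=%zd\n", newFront, filled);  // newFront=18 filled=0
        return 0;
    }
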
@@ -734,18 +806,23 @@
     (void) android_atomic_or(CBLK_UNDERRUN, &cblk->mFlags);
 }
 
+AudioPlaybackRate AudioTrackServerProxy::getPlaybackRate()
+{   // do not call from multiple threads without holding lock
+    mPlaybackRateObserver.poll(mPlaybackRate);
+    return mPlaybackRate;
+}
+
 // ---------------------------------------------------------------------------
 
 StaticAudioTrackServerProxy::StaticAudioTrackServerProxy(audio_track_cblk_t* cblk, void *buffers,
         size_t frameCount, size_t frameSize)
     : AudioTrackServerProxy(cblk, buffers, frameCount, frameSize),
-      mObserver(&cblk->u.mStatic.mSingleStateQueue), mPosition(0),
+      mObserver(&cblk->u.mStatic.mSingleStateQueue),
+      mPosLoopMutator(&cblk->u.mStatic.mPosLoopQueue),
       mFramesReadySafe(frameCount), mFramesReady(frameCount),
       mFramesReadyIsCalledByMultipleThreads(false)
 {
-    mState.mLoopStart = 0;
-    mState.mLoopEnd = 0;
-    mState.mLoopCount = 0;
+    memset(&mState, 0, sizeof(mState));
 }
 
 void StaticAudioTrackServerProxy::framesReadyIsCalledByMultipleThreads()
@@ -762,55 +839,97 @@
     return mFramesReadySafe;
 }
 
-ssize_t StaticAudioTrackServerProxy::pollPosition()
+status_t StaticAudioTrackServerProxy::updateStateWithLoop(
+        StaticAudioTrackState *localState, const StaticAudioTrackState &update) const
 {
-    size_t position = mPosition;
-    StaticAudioTrackState state;
-    if (mObserver.poll(state)) {
+    if (localState->mLoopSequence != update.mLoopSequence) {
         bool valid = false;
-        size_t loopStart = state.mLoopStart;
-        size_t loopEnd = state.mLoopEnd;
-        if (state.mLoopCount == 0) {
-            if (loopStart > mFrameCount) {
-                loopStart = mFrameCount;
-            }
-            // ignore loopEnd
-            mPosition = position = loopStart;
-            mFramesReady = mFrameCount - mPosition;
-            mState.mLoopCount = 0;
+        const size_t loopStart = update.mLoopStart;
+        const size_t loopEnd = update.mLoopEnd;
+        size_t position = localState->mPosition;
+        if (update.mLoopCount == 0) {
             valid = true;
-        } else if (state.mLoopCount >= -1) {
+        } else if (update.mLoopCount >= -1) {
             if (loopStart < loopEnd && loopEnd <= mFrameCount &&
                     loopEnd - loopStart >= MIN_LOOP) {
                 // If the current position is greater than the end of the loop
                 // we "wrap" to the loop start. This might cause an audible pop.
                 if (position >= loopEnd) {
-                    mPosition = position = loopStart;
+                    position = loopStart;
                 }
-                if (state.mLoopCount == -1) {
-                    mFramesReady = INT64_MAX;
-                } else {
-                    // mFramesReady is 64 bits to handle the effective number of frames
-                    // that the static audio track contains, including loops.
-                    // TODO: Later consider fixing overflow, but does not seem needed now
-                    // as will not overflow if loopStart and loopEnd are Java "ints".
-                    mFramesReady = int64_t(state.mLoopCount) * (loopEnd - loopStart)
-                            + mFrameCount - mPosition;
-                }
-                mState = state;
                 valid = true;
             }
         }
-        if (!valid || mPosition > mFrameCount) {
+        if (!valid || position > mFrameCount) {
+            return NO_INIT;
+        }
+        localState->mPosition = position;
+        localState->mLoopCount = update.mLoopCount;
+        localState->mLoopEnd = loopEnd;
+        localState->mLoopStart = loopStart;
+        localState->mLoopSequence = update.mLoopSequence;
+    }
+    return OK;
+}
+
+status_t StaticAudioTrackServerProxy::updateStateWithPosition(
+        StaticAudioTrackState *localState, const StaticAudioTrackState &update) const
+{
+    if (localState->mPositionSequence != update.mPositionSequence) {
+        if (update.mPosition > mFrameCount) {
+            return NO_INIT;
+        } else if (localState->mLoopCount != 0 && update.mPosition >= localState->mLoopEnd) {
+            localState->mLoopCount = 0; // disable loop count if position is beyond loop end.
+        }
+        localState->mPosition = update.mPosition;
+        localState->mPositionSequence = update.mPositionSequence;
+    }
+    return OK;
+}
+
+ssize_t StaticAudioTrackServerProxy::pollPosition()
+{
+    StaticAudioTrackState state;
+    if (mObserver.poll(state)) {
+        StaticAudioTrackState trystate = mState;
+        bool result;
+        const int32_t diffSeq = state.mLoopSequence - state.mPositionSequence;
+
+        if (diffSeq < 0) {
+            result = updateStateWithLoop(&trystate, state) == OK &&
+                    updateStateWithPosition(&trystate, state) == OK;
+        } else {
+            result = updateStateWithPosition(&trystate, state) == OK &&
+                    updateStateWithLoop(&trystate, state) == OK;
+        }
+        if (!result) {
+            mObserver.done();
+            // caution: no update occurs so server state will be inconsistent with client state.
             ALOGE("%s client pushed an invalid state, shutting down", __func__);
             mIsShutdown = true;
             return (ssize_t) NO_INIT;
         }
+        mState = trystate;
+        if (mState.mLoopCount == -1) {
+            mFramesReady = INT64_MAX;
+        } else if (mState.mLoopCount == 0) {
+            mFramesReady = mFrameCount - mState.mPosition;
+        } else if (mState.mLoopCount > 0) {
+            // TODO: Later consider fixing overflow, but does not seem needed now
+            // as will not overflow if loopStart and loopEnd are Java "ints".
+            mFramesReady = int64_t(mState.mLoopCount) * (mState.mLoopEnd - mState.mLoopStart)
+                    + mFrameCount - mState.mPosition;
+        }
         mFramesReadySafe = clampToSize(mFramesReady);
         // This may overflow, but client is not supposed to rely on it
-        mCblk->u.mStatic.mBufferPosition = (uint32_t) position;
+        StaticAudioTrackPosLoop posLoop;
+
+        posLoop.mLoopCount = (int32_t) mState.mLoopCount;
+        posLoop.mBufferPosition = (uint32_t) mState.mPosition;
+        mPosLoopMutator.push(posLoop);
+        mObserver.done(); // safe to read mStatic variables.
     }
-    return (ssize_t) position;
+    return (ssize_t) mState.mPosition;
 }
 
 status_t StaticAudioTrackServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush __unused)
@@ -849,7 +968,7 @@
     }
     // As mFramesReady is the total remaining frames in the static audio track,
     // it is always larger or equal to avail.
-    LOG_ALWAYS_FATAL_IF(mFramesReady < avail);
+    LOG_ALWAYS_FATAL_IF(mFramesReady < (int64_t) avail);
     buffer->mNonContig = mFramesReady == INT64_MAX ? SIZE_MAX : clampToSize(mFramesReady - avail);
     mUnreleased = avail;
     return NO_ERROR;
@@ -858,7 +977,7 @@
 void StaticAudioTrackServerProxy::releaseBuffer(Buffer* buffer)
 {
     size_t stepCount = buffer->mFrameCount;
-    LOG_ALWAYS_FATAL_IF(!(stepCount <= mFramesReady));
+    LOG_ALWAYS_FATAL_IF(!((int64_t) stepCount <= mFramesReady));
     LOG_ALWAYS_FATAL_IF(!(stepCount <= mUnreleased));
     if (stepCount == 0) {
         // prevent accidental re-use of buffer
@@ -868,11 +987,12 @@
     }
     mUnreleased -= stepCount;
     audio_track_cblk_t* cblk = mCblk;
-    size_t position = mPosition;
+    size_t position = mState.mPosition;
     size_t newPosition = position + stepCount;
     int32_t setFlags = 0;
     if (!(position <= newPosition && newPosition <= mFrameCount)) {
-        ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position, mFrameCount);
+        ALOGW("%s newPosition %zu outside [%zu, %zu]", __func__, newPosition, position,
+                mFrameCount);
         newPosition = mFrameCount;
     } else if (mState.mLoopCount != 0 && newPosition == mState.mLoopEnd) {
         newPosition = mState.mLoopStart;
@@ -885,7 +1005,7 @@
     if (newPosition == mFrameCount) {
         setFlags |= CBLK_BUFFER_END;
     }
-    mPosition = newPosition;
+    mState.mPosition = newPosition;
     if (mFramesReady != INT64_MAX) {
         mFramesReady -= stepCount;
     }
@@ -893,7 +1013,10 @@
 
     cblk->mServer += stepCount;
     // This may overflow, but client is not supposed to rely on it
-    cblk->u.mStatic.mBufferPosition = (uint32_t) newPosition;
+    StaticAudioTrackPosLoop posLoop;
+    posLoop.mBufferPosition = mState.mPosition;
+    posLoop.mLoopCount = mState.mLoopCount;
+    mPosLoopMutator.push(posLoop);
     if (setFlags != 0) {
         (void) android_atomic_or(setFlags, &cblk->mFlags);
         // this would be a good place to wake a futex
diff --git a/media/libmedia/CharacterEncodingDetector.cpp b/media/libmedia/CharacterEncodingDetector.cpp
index 41994dc..3020136 100644
--- a/media/libmedia/CharacterEncodingDetector.cpp
+++ b/media/libmedia/CharacterEncodingDetector.cpp
@@ -89,7 +89,6 @@
         // try combined detection of artist/album/title etc.
         char buf[1024];
         buf[0] = 0;
-        int idx;
         bool allprintable = true;
         for (int i = 0; i < size; i++) {
             const char *name = mNames.getEntry(i);
@@ -169,7 +168,6 @@
             const char *name = mNames.getEntry(i);
             uint8_t* src = (uint8_t *)mValues.getEntry(i);
             int len = strlen((char *)src);
-            uint8_t* dest = src;
 
             ALOGV("@@@ checking %s", name);
             const char *s = mValues.getEntry(i);
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 3e5c883..a3f014b 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -80,7 +80,8 @@
     RELEASE_AUDIO_PATCH,
     LIST_AUDIO_PATCHES,
     SET_AUDIO_PORT_CONFIG,
-    GET_AUDIO_HW_SYNC
+    GET_AUDIO_HW_SYNC,
+    SYSTEM_READY
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -174,9 +175,11 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
+                                const String16& opPackageName,
                                 size_t *pFrameCount,
                                 track_flags_t *flags,
                                 pid_t tid,
+                                int clientUid,
                                 int *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
@@ -190,11 +193,13 @@
         data.writeInt32(sampleRate);
         data.writeInt32(format);
         data.writeInt32(channelMask);
+        data.writeString16(opPackageName);
         size_t frameCount = pFrameCount != NULL ? *pFrameCount : 0;
         data.writeInt64(frameCount);
         track_flags_t lFlags = flags != NULL ? *flags : (track_flags_t) TRACK_DEFAULT;
         data.writeInt32(lFlags);
         data.writeInt32((int32_t) tid);
+        data.writeInt32((int32_t) clientUid);
         int lSessionId = AUDIO_SESSION_ALLOCATE;
         if (sessionId != NULL) {
             lSessionId = *sessionId;
@@ -702,6 +707,7 @@
                                     int32_t priority,
                                     audio_io_handle_t output,
                                     int sessionId,
+                                    const String16& opPackageName,
                                     status_t *status,
                                     int *id,
                                     int *enabled)
@@ -722,6 +728,7 @@
         data.writeInt32(priority);
         data.writeInt32((int32_t) output);
         data.writeInt32(sessionId);
+        data.writeString16(opPackageName);
 
         status_t lStatus = remote()->transact(CREATE_EFFECT, data, &reply);
         if (lStatus != NO_ERROR) {
@@ -897,6 +904,12 @@
         }
         return (audio_hw_sync_t)reply.readInt32();
     }
+    virtual status_t systemReady()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+        return remote()->transact(SYSTEM_READY, data, &reply, IBinder::FLAG_ONEWAY);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioFlinger, "android.media.IAudioFlinger");
@@ -950,18 +963,19 @@
             uint32_t sampleRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
+            const String16& opPackageName = data.readString16();
             size_t frameCount = data.readInt64();
             track_flags_t flags = (track_flags_t) data.readInt32();
             pid_t tid = (pid_t) data.readInt32();
+            int clientUid = data.readInt32();
             int sessionId = data.readInt32();
             size_t notificationFrames = data.readInt64();
             sp<IMemory> cblk;
             sp<IMemory> buffers;
             status_t status;
             sp<IAudioRecord> record = openRecord(input,
-                    sampleRate, format, channelMask, &frameCount, &flags, tid, &sessionId,
-                    &notificationFrames,
-                    cblk, buffers, &status);
+                    sampleRate, format, channelMask, opPackageName, &frameCount, &flags, tid,
+                    clientUid, &sessionId, &notificationFrames, cblk, buffers, &status);
             LOG_ALWAYS_FATAL_IF((record != 0) != (status == NO_ERROR));
             reply->writeInt64(frameCount);
             reply->writeInt32(flags);
@@ -1247,12 +1261,13 @@
             int32_t priority = data.readInt32();
             audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
             int sessionId = data.readInt32();
+            const String16 opPackageName = data.readString16();
             status_t status;
             int id;
             int enabled;
 
             sp<IEffect> effect = createEffect(&desc, client, priority, output, sessionId,
-                    &status, &id, &enabled);
+                    opPackageName, &status, &id, &enabled);
             reply->writeInt32(status);
             reply->writeInt32(id);
             reply->writeInt32(enabled);
@@ -1388,6 +1403,11 @@
             reply->writeInt32(getAudioHwSyncForSession((audio_session_t)data.readInt32()));
             return NO_ERROR;
         } break;
+        case SYSTEM_READY: {
+            CHECK_INTERFACE(IAudioFlinger, data, reply);
+            systemReady();
+            return NO_ERROR;
+        } break;
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
@@ -1395,4 +1415,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioFlingerClient.cpp b/media/libmedia/IAudioFlingerClient.cpp
index 1c299f7..3429d36 100644
--- a/media/libmedia/IAudioFlingerClient.cpp
+++ b/media/libmedia/IAudioFlingerClient.cpp
@@ -39,25 +39,18 @@
     {
     }
 
-    void ioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2)
+    void ioConfigChanged(audio_io_config_event event, const sp<AudioIoDescriptor>& ioDesc)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioFlingerClient::getInterfaceDescriptor());
         data.writeInt32(event);
-        data.writeInt32((int32_t) ioHandle);
-        if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
-            uint32_t stream = *(const uint32_t *)param2;
-            ALOGV("ioConfigChanged stream %d", stream);
-            data.writeInt32(stream);
-        } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
-            const AudioSystem::OutputDescriptor *desc =
-                    (const AudioSystem::OutputDescriptor *)param2;
-            data.writeInt32(desc->samplingRate);
-            data.writeInt32(desc->format);
-            data.writeInt32(desc->channelMask);
-            data.writeInt64(desc->frameCount);
-            data.writeInt32(desc->latency);
-        }
+        data.writeInt32((int32_t)ioDesc->mIoHandle);
+        data.write(&ioDesc->mPatch, sizeof(struct audio_patch));
+        data.writeInt32(ioDesc->mSamplingRate);
+        data.writeInt32(ioDesc->mFormat);
+        data.writeInt32(ioDesc->mChannelMask);
+        data.writeInt64(ioDesc->mFrameCount);
+        data.writeInt32(ioDesc->mLatency);
         remote()->transact(IO_CONFIG_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
     }
 };
@@ -72,24 +65,16 @@
     switch (code) {
     case IO_CONFIG_CHANGED: {
             CHECK_INTERFACE(IAudioFlingerClient, data, reply);
-            int event = data.readInt32();
-            audio_io_handle_t ioHandle = (audio_io_handle_t) data.readInt32();
-            const void *param2 = NULL;
-            AudioSystem::OutputDescriptor desc;
-            uint32_t stream;
-            if (event == AudioSystem::STREAM_CONFIG_CHANGED) {
-                stream = data.readInt32();
-                param2 = &stream;
-                ALOGV("STREAM_CONFIG_CHANGED stream %d", stream);
-            } else if (event != AudioSystem::OUTPUT_CLOSED && event != AudioSystem::INPUT_CLOSED) {
-                desc.samplingRate = data.readInt32();
-                desc.format = (audio_format_t) data.readInt32();
-                desc.channelMask = (audio_channel_mask_t) data.readInt32();
-                desc.frameCount = data.readInt64();
-                desc.latency = data.readInt32();
-                param2 = &desc;
-            }
-            ioConfigChanged(event, ioHandle, param2);
+            audio_io_config_event event = (audio_io_config_event)data.readInt32();
+            sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
+            ioDesc->mIoHandle = (audio_io_handle_t) data.readInt32();
+            data.read(&ioDesc->mPatch, sizeof(struct audio_patch));
+            ioDesc->mSamplingRate = data.readInt32();
+            ioDesc->mFormat = (audio_format_t) data.readInt32();
+            ioDesc->mChannelMask = (audio_channel_mask_t) data.readInt32();
+            ioDesc->mFrameCount = data.readInt64();
+            ioDesc->mLatency = data.readInt32();
+            ioConfigChanged(event, ioDesc);
             return NO_ERROR;
         } break;
         default:
@@ -99,4 +84,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
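
The rewritten ioConfigChanged() above marshals the whole AudioIoDescriptor, copying the embedded audio_patch with a raw Parcel::write()/read() pair of matching size, followed by the scalar fields; reader and writer must agree exactly on order and sizes. A minimal in-process sketch of that raw-struct round trip (the Payload struct is hypothetical, standing in for audio_patch):

    #include <binder/Parcel.h>
    #include <cstdint>
    #include <cstdio>

    using android::Parcel;

    // Hypothetical plain-old-data payload standing in for struct audio_patch.
    struct Payload {
        int32_t id;
        int32_t sessions;
    };

    int main() {
        Payload out = { 7, 3 };
        Parcel p;
        // Writer side: raw copy of the struct, then a scalar field.
        p.write(&out, sizeof(out));
        p.writeInt32(48000);            // e.g. mSamplingRate

        // Reader side consumes in exactly the same order and sizes.
        p.setDataPosition(0);
        Payload in;
        p.read(&in, sizeof(in));
        int32_t rate = p.readInt32();
        printf("id=%d sessions=%d rate=%d\n", in.id, in.sessions, rate);
        return 0;
    }
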
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index cfb28a9..3348441 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -71,6 +71,9 @@
     RELEASE_SOUNDTRIGGER_SESSION,
     GET_PHONE_STATE,
     REGISTER_POLICY_MIXES,
+    START_AUDIO_SOURCE,
+    STOP_AUDIO_SOURCE,
+    SET_AUDIO_PORT_CALLBACK_ENABLED,
 };
 
 #define MAX_ITEMS_PER_LIST 1024
@@ -86,13 +89,15 @@
     virtual status_t setDeviceConnectionState(
                                     audio_devices_t device,
                                     audio_policy_dev_state_t state,
-                                    const char *device_address)
+                                    const char *device_address,
+                                    const char *device_name)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
         data.writeInt32(static_cast <uint32_t>(device));
         data.writeInt32(static_cast <uint32_t>(state));
         data.writeCString(device_address);
+        data.writeCString(device_name);
         remote()->transact(SET_DEVICE_CONNECTION_STATE, data, &reply);
         return static_cast <status_t> (reply.readInt32());
     }
@@ -167,10 +172,12 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
                                         audio_output_flags_t flags,
+                                        audio_port_handle_t selectedDeviceId,
                                         const audio_offload_info_t *offloadInfo)
         {
             Parcel data, reply;
@@ -202,10 +209,12 @@
                 data.writeInt32(1);
                 data.writeInt32(*stream);
             }
+            data.writeInt32(uid);
             data.writeInt32(samplingRate);
             data.writeInt32(static_cast <uint32_t>(format));
             data.writeInt32(channelMask);
             data.writeInt32(static_cast <uint32_t>(flags));
+            data.writeInt32(selectedDeviceId);
             // hasOffloadInfo
             if (offloadInfo == NULL) {
                 data.writeInt32(0);
@@ -269,10 +278,12 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
-                                     audio_input_flags_t flags)
+                                     audio_input_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
@@ -286,10 +297,12 @@
         }
         data.write(attr, sizeof(audio_attributes_t));
         data.writeInt32(session);
+        data.writeInt32(uid);
         data.writeInt32(samplingRate);
         data.writeInt32(static_cast <uint32_t>(format));
         data.writeInt32(channelMask);
         data.writeInt32(flags);
+        data.writeInt32(selectedDeviceId);
         status_t status = remote()->transact(GET_INPUT_FOR_ATTR, data, &reply);
         if (status != NO_ERROR) {
             return status;
@@ -634,6 +647,14 @@
         remote()->transact(REGISTER_CLIENT, data, &reply);
     }
 
+    virtual void setAudioPortCallbacksEnabled(bool enabled)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(enabled ? 1 : 0);
+        remote()->transact(SET_AUDIO_PORT_CALLBACK_ENABLED, data, &reply);
+    }
+
     virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                             audio_io_handle_t *ioHandle,
                                             audio_devices_t *device)
@@ -710,6 +731,42 @@
         }
         return status;
     }
+
+    virtual status_t startAudioSource(const struct audio_port_config *source,
+                                      const audio_attributes_t *attributes,
+                                      audio_io_handle_t *handle)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        if (source == NULL || attributes == NULL || handle == NULL) {
+            return BAD_VALUE;
+        }
+        data.write(source, sizeof(struct audio_port_config));
+        data.write(attributes, sizeof(audio_attributes_t));
+        status_t status = remote()->transact(START_AUDIO_SOURCE, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        if (status != NO_ERROR) {
+            return status;
+        }
+        *handle = (audio_io_handle_t)reply.readInt32();
+        return status;
+    }
+
+    virtual status_t stopAudioSource(audio_io_handle_t handle)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+        data.writeInt32(handle);
+        status_t status = remote()->transact(STOP_AUDIO_SOURCE, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = (status_t)reply.readInt32();
+        return status;
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyService, "android.media.IAudioPolicyService");
@@ -728,9 +785,11 @@
             audio_policy_dev_state_t state =
                     static_cast <audio_policy_dev_state_t>(data.readInt32());
             const char *device_address = data.readCString();
+            const char *device_name = data.readCString();
             reply->writeInt32(static_cast<uint32_t> (setDeviceConnectionState(device,
                                                                               state,
-                                                                              device_address)));
+                                                                              device_address,
+                                                                              device_name)));
             return NO_ERROR;
         } break;
 
@@ -806,11 +865,13 @@
             if (hasStream) {
                 stream = (audio_stream_type_t)data.readInt32();
             }
+            uid_t uid = (uid_t)data.readInt32();
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             audio_output_flags_t flags =
                     static_cast <audio_output_flags_t>(data.readInt32());
+            audio_port_handle_t selectedDeviceId = data.readInt32();
             bool hasOffloadInfo = data.readInt32() != 0;
             audio_offload_info_t offloadInfo;
             if (hasOffloadInfo) {
@@ -818,9 +879,9 @@
             }
             audio_io_handle_t output;
             status_t status = getOutputForAttr(hasAttributes ? &attr : NULL,
-                    &output, session, &stream,
+                    &output, session, &stream, uid,
                     samplingRate, format, channelMask,
-                    flags, hasOffloadInfo ? &offloadInfo : NULL);
+                    flags, selectedDeviceId, hasOffloadInfo ? &offloadInfo : NULL);
             reply->writeInt32(status);
             reply->writeInt32(output);
             reply->writeInt32(stream);
@@ -865,14 +926,16 @@
             audio_attributes_t attr;
             data.read(&attr, sizeof(audio_attributes_t));
             audio_session_t session = (audio_session_t)data.readInt32();
+            uid_t uid = (uid_t)data.readInt32();
             uint32_t samplingRate = data.readInt32();
             audio_format_t format = (audio_format_t) data.readInt32();
             audio_channel_mask_t channelMask = data.readInt32();
             audio_input_flags_t flags = (audio_input_flags_t) data.readInt32();
+            audio_port_handle_t selectedDeviceId = (audio_port_handle_t) data.readInt32();
             audio_io_handle_t input;
-            status_t status = getInputForAttr(&attr, &input, session,
+            status_t status = getInputForAttr(&attr, &input, session, uid,
                                               samplingRate, format, channelMask,
-                                              flags);
+                                              flags, selectedDeviceId);
             reply->writeInt32(status);
             if (status == NO_ERROR) {
                 reply->writeInt32(input);
@@ -1165,6 +1228,12 @@
             return NO_ERROR;
         } break;
 
+        case SET_AUDIO_PORT_CALLBACK_ENABLED: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            setAudioPortCallbacksEnabled(data.readInt32() == 1);
+            return NO_ERROR;
+        } break;
+
         case ACQUIRE_SOUNDTRIGGER_SESSION: {
             CHECK_INTERFACE(IAudioPolicyService, data, reply);
             sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
@@ -1217,6 +1286,27 @@
             return NO_ERROR;
         } break;
 
+        case START_AUDIO_SOURCE: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            struct audio_port_config source;
+            data.read(&source, sizeof(struct audio_port_config));
+            audio_attributes_t attributes;
+            data.read(&attributes, sizeof(audio_attributes_t));
+            audio_io_handle_t handle;
+            status_t status = startAudioSource(&source, &attributes, &handle);
+            reply->writeInt32(status);
+            reply->writeInt32(handle);
+            return NO_ERROR;
+        } break;
+
+        case STOP_AUDIO_SOURCE: {
+            CHECK_INTERFACE(IAudioPolicyService, data, reply);
+            audio_io_handle_t handle = (audio_io_handle_t)data.readInt32();
+            status_t status = stopAudioSource(handle);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        } break;
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
@@ -1224,4 +1314,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioPolicyServiceClient.cpp b/media/libmedia/IAudioPolicyServiceClient.cpp
index e802277..65cc7d6 100644
--- a/media/libmedia/IAudioPolicyServiceClient.cpp
+++ b/media/libmedia/IAudioPolicyServiceClient.cpp
@@ -29,7 +29,8 @@
 
 enum {
     PORT_LIST_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
-    PATCH_LIST_UPDATE
+    PATCH_LIST_UPDATE,
+    MIX_STATE_UPDATE
 };
 
 class BpAudioPolicyServiceClient : public BpInterface<IAudioPolicyServiceClient>
@@ -53,6 +54,15 @@
         data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
         remote()->transact(PATCH_LIST_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
     }
+
+    void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
+        data.writeString8(regId);
+        data.writeInt32(state);
+        remote()->transact(MIX_STATE_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
+    }
 };
 
 IMPLEMENT_META_INTERFACE(AudioPolicyServiceClient, "android.media.IAudioPolicyServiceClient");
@@ -73,6 +83,13 @@
             onAudioPatchListUpdate();
             return NO_ERROR;
         } break;
+    case MIX_STATE_UPDATE: {
+            CHECK_INTERFACE(IAudioPolicyServiceClient, data, reply);
+            String8 regId = data.readString8();
+            int32_t state = data.readInt32();
+            onDynamicPolicyMixStateUpdate(regId, state);
+            return NO_ERROR;
+    }
     default:
         return BBinder::onTransact(code, data, reply, flags);
     }
@@ -80,4 +97,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 8a4a383..9d80753 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -91,4 +91,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IAudioTrack.cpp b/media/libmedia/IAudioTrack.cpp
index df209fd..651cb61 100644
--- a/media/libmedia/IAudioTrack.cpp
+++ b/media/libmedia/IAudioTrack.cpp
@@ -292,4 +292,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/ICrypto.cpp b/media/libmedia/ICrypto.cpp
index f7d8bc6..947294f 100644
--- a/media/libmedia/ICrypto.cpp
+++ b/media/libmedia/ICrypto.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 
 #include <binder/Parcel.h>
+#include <binder/IMemory.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -34,6 +35,7 @@
     REQUIRES_SECURE_COMPONENT,
     DECRYPT,
     NOTIFY_RESOLUTION,
+    SET_MEDIADRM_SESSION,
 };
 
 struct BpCrypto : public BpInterface<ICrypto> {
@@ -97,7 +99,7 @@
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
-            const void *srcPtr,
+            const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg) {
@@ -126,7 +128,8 @@
         }
 
         data.writeInt32(totalSize);
-        data.write(srcPtr, totalSize);
+        data.writeStrongBinder(IInterface::asBinder(sharedBuffer));
+        data.writeInt32(offset);
 
         data.writeInt32(numSubSamples);
         data.write(subSamples, sizeof(CryptoPlugin::SubSample) * numSubSamples);
@@ -139,7 +142,7 @@
 
         ssize_t result = reply.readInt32();
 
-        if (result >= ERROR_DRM_VENDOR_MIN && result <= ERROR_DRM_VENDOR_MAX) {
+        if (isCryptoError(result)) {
             errorDetailMsg->setTo(reply.readCString());
         }
 
@@ -159,7 +162,28 @@
         remote()->transact(NOTIFY_RESOLUTION, data, &reply);
     }
 
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) {
+        Parcel data, reply;
+        data.writeInterfaceToken(ICrypto::getInterfaceDescriptor());
+
+        writeVector(data, sessionId);
+        remote()->transact(SET_MEDIADRM_SESSION, data, &reply);
+
+        return reply.readInt32();
+    }
+
 private:
+    void readVector(Parcel &reply, Vector<uint8_t> &vector) const {
+        uint32_t size = reply.readInt32();
+        vector.insertAt((size_t)0, size);
+        reply.read(vector.editArray(), size);
+    }
+
+    void writeVector(Parcel &data, Vector<uint8_t> const &vector) const {
+        data.writeInt32(vector.size());
+        data.write(vector.array(), vector.size());
+    }
+
     DISALLOW_EVIL_CONSTRUCTORS(BpCrypto);
 };
 
@@ -167,6 +191,17 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
+void BnCrypto::readVector(const Parcel &data, Vector<uint8_t> &vector) const {
+    uint32_t size = data.readInt32();
+    vector.insertAt((size_t)0, size);
+    data.read(vector.editArray(), size);
+}
+
+void BnCrypto::writeVector(Parcel *reply, Vector<uint8_t> const &vector) const {
+    reply->writeInt32(vector.size());
+    reply->write(vector.array(), vector.size());
+}
+
 status_t BnCrypto::onTransact(
     uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags) {
     switch (code) {
@@ -245,8 +280,9 @@
             data.read(iv, sizeof(iv));
 
             size_t totalSize = data.readInt32();
-            void *srcData = malloc(totalSize);
-            data.read(srcData, totalSize);
+            sp<IMemory> sharedBuffer =
+                interface_cast<IMemory>(data.readStrongBinder());
+            int32_t offset = data.readInt32();
 
             int32_t numSubSamples = data.readInt32();
 
@@ -265,20 +301,25 @@
             }
 
             AString errorDetailMsg;
-            ssize_t result = decrypt(
+            ssize_t result;
+
+            if (offset + totalSize > sharedBuffer->size()) {
+                result = -EINVAL;
+            } else {
+                result = decrypt(
                     secure,
                     key,
                     iv,
                     mode,
-                    srcData,
+                    sharedBuffer, offset,
                     subSamples, numSubSamples,
                     secure ? secureBufferId : dstPtr,
                     &errorDetailMsg);
+            }
 
             reply->writeInt32(result);
 
-            if (result >= ERROR_DRM_VENDOR_MIN
-                && result <= ERROR_DRM_VENDOR_MAX) {
+            if (isCryptoError(result)) {
                 reply->writeCString(errorDetailMsg.c_str());
             }
 
@@ -294,9 +335,6 @@
             delete[] subSamples;
             subSamples = NULL;
 
-            free(srcData);
-            srcData = NULL;
-
             return OK;
         }
 
@@ -311,6 +349,15 @@
             return OK;
         }
 
+        case SET_MEDIADRM_SESSION:
+        {
+            CHECK_INTERFACE(ICrypto, data, reply);
+            Vector<uint8_t> sessionId;
+            readVector(data, sessionId);
+            reply->writeInt32(setMediaDrmSession(sessionId));
+            return OK;
+        }
+
         default:
             return BBinder::onTransact(code, data, reply, flags);
     }
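
With the change above, the decrypt source now travels in shared memory instead of being copied through the Parcel, and the stub rejects the call when offset + totalSize exceeds the region size. A hedged caller-side sketch, assuming the M-era MemoryDealer/IMemory helpers (allocate(), pointer(), size()) and the decrypt() signature visible in this hunk; all names below are illustrative:

    #include <binder/MemoryDealer.h>
    #include <media/ICrypto.h>
    #include <media/stagefright/foundation/AString.h>
    #include <cerrno>
    #include <cstdint>
    #include <cstring>

    using namespace android;

    // Illustrative only: hand one encrypted access unit to ICrypto via shared memory.
    static ssize_t decryptViaSharedMemory(const sp<ICrypto>& crypto,
                                          const uint8_t key[16], const uint8_t iv[16],
                                          const void* src, size_t size, void* dst) {
        // One-shot allocation; a real client would reuse a pool of buffers.
        sp<MemoryDealer> dealer = new MemoryDealer(size, "decrypt-src");
        sp<IMemory> buffer = dealer->allocate(size);
        if (buffer == NULL) {
            return -ENOMEM;
        }
        memcpy(buffer->pointer(), src, size);

        CryptoPlugin::SubSample sub;
        sub.mNumBytesOfClearData = 0;
        sub.mNumBytesOfEncryptedData = size;

        AString errorDetailMsg;
        // offset 0 here; BnCrypto fails the call if offset + size overruns buffer->size().
        return crypto->decrypt(false /* secure */, key, iv, CryptoPlugin::kMode_AES_CTR,
                               buffer, 0 /* offset */, &sub, 1 /* numSubSamples */,
                               dst, &errorDetailMsg);
    }
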
diff --git a/media/libmedia/IDataSource.cpp b/media/libmedia/IDataSource.cpp
new file mode 100644
index 0000000..76d1d68
--- /dev/null
+++ b/media/libmedia/IDataSource.cpp
@@ -0,0 +1,108 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IDataSource"
+#include <utils/Log.h>
+#include <utils/Timers.h>
+
+#include <media/IDataSource.h>
+
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+enum {
+    GET_IMEMORY = IBinder::FIRST_CALL_TRANSACTION,
+    READ_AT,
+    GET_SIZE,
+    CLOSE,
+};
+
+struct BpDataSource : public BpInterface<IDataSource> {
+    BpDataSource(const sp<IBinder>& impl) : BpInterface<IDataSource>(impl) {}
+
+    virtual sp<IMemory> getIMemory() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        remote()->transact(GET_IMEMORY, data, &reply);
+        sp<IBinder> binder = reply.readStrongBinder();
+        return interface_cast<IMemory>(binder);
+    }
+
+    virtual ssize_t readAt(off64_t offset, size_t size) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        data.writeInt64(offset);
+        data.writeInt64(size);
+        remote()->transact(READ_AT, data, &reply);
+        return reply.readInt64();
+    }
+
+    virtual status_t getSize(off64_t* size) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        remote()->transact(GET_SIZE, data, &reply);
+        status_t err = reply.readInt32();
+        *size = reply.readInt64();
+        return err;
+    }
+
+    virtual void close() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IDataSource::getInterfaceDescriptor());
+        remote()->transact(CLOSE, data, &reply);
+    }
+};
+
+IMPLEMENT_META_INTERFACE(DataSource, "android.media.IDataSource");
+
+status_t BnDataSource::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+    switch (code) {
+        case GET_IMEMORY: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            reply->writeStrongBinder(IInterface::asBinder(getIMemory()));
+            return NO_ERROR;
+        } break;
+        case READ_AT: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            off64_t offset = (off64_t) data.readInt64();
+            size_t size = (size_t) data.readInt64();
+            reply->writeInt64(readAt(offset, size));
+            return NO_ERROR;
+        } break;
+        case GET_SIZE: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            off64_t size;
+            status_t err = getSize(&size);
+            reply->writeInt32(err);
+            reply->writeInt64(size);
+            return NO_ERROR;
+        } break;
+        case CLOSE: {
+            CHECK_INTERFACE(IDataSource, data, reply);
+            close();
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+}  // namespace android
diff --git a/media/libmedia/IDrm.cpp b/media/libmedia/IDrm.cpp
index b08fa82..b1ad0c5 100644
--- a/media/libmedia/IDrm.cpp
+++ b/media/libmedia/IDrm.cpp
@@ -67,7 +67,10 @@
     virtual status_t initCheck() const {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
-        remote()->transact(INIT_CHECK, data, &reply);
+        status_t status = remote()->transact(INIT_CHECK, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -77,7 +80,11 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         data.write(uuid, 16);
         data.writeString8(mimeType);
-        remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply);
+        status_t status = remote()->transact(IS_CRYPTO_SUPPORTED, data, &reply);
+        if (status != OK) {
+            ALOGE("isCryptoSchemeSupported: binder call failed: %d", status);
+            return false;
+        }
 
         return reply.readInt32() != 0;
     }
@@ -87,7 +94,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         data.write(uuid, 16);
 
-        remote()->transact(CREATE_PLUGIN, data, &reply);
+        status_t status = remote()->transact(CREATE_PLUGIN, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -95,7 +105,10 @@
     virtual status_t destroyPlugin() {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
-        remote()->transact(DESTROY_PLUGIN, data, &reply);
+        status_t status = remote()->transact(DESTROY_PLUGIN, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -104,7 +117,10 @@
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
-        remote()->transact(OPEN_SESSION, data, &reply);
+        status_t status = remote()->transact(OPEN_SESSION, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         readVector(reply, sessionId);
 
         return reply.readInt32();
@@ -115,7 +131,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         writeVector(data, sessionId);
-        remote()->transact(CLOSE_SESSION, data, &reply);
+        status_t status = remote()->transact(CLOSE_SESSION, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -125,7 +144,8 @@
                       Vector<uint8_t> const &initData,
                       String8 const &mimeType, DrmPlugin::KeyType keyType,
                       KeyedVector<String8, String8> const &optionalParameters,
-                      Vector<uint8_t> &request, String8 &defaultUrl) {
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType) {
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
@@ -139,10 +159,15 @@
             data.writeString8(optionalParameters.keyAt(i));
             data.writeString8(optionalParameters.valueAt(i));
         }
-        remote()->transact(GET_KEY_REQUEST, data, &reply);
+
+        status_t status = remote()->transact(GET_KEY_REQUEST, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         readVector(reply, request);
         defaultUrl = reply.readString8();
+        *keyRequestType = static_cast<DrmPlugin::KeyRequestType>(reply.readInt32());
 
         return reply.readInt32();
     }
@@ -154,7 +179,12 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         writeVector(data, sessionId);
         writeVector(data, response);
-        remote()->transact(PROVIDE_KEY_RESPONSE, data, &reply);
+
+        status_t status = remote()->transact(PROVIDE_KEY_RESPONSE, data, &reply);
+        if (status != OK) {
+            return status;
+        }
+
         readVector(reply, keySetId);
 
         return reply.readInt32();
@@ -165,7 +195,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         writeVector(data, keySetId);
-        remote()->transact(REMOVE_KEYS, data, &reply);
+        status_t status = remote()->transact(REMOVE_KEYS, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -177,7 +210,10 @@
 
         writeVector(data, sessionId);
         writeVector(data, keySetId);
-        remote()->transact(RESTORE_KEYS, data, &reply);
+        status_t status = remote()->transact(RESTORE_KEYS, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -188,7 +224,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         writeVector(data, sessionId);
-        remote()->transact(QUERY_KEY_STATUS, data, &reply);
+        status_t status = remote()->transact(QUERY_KEY_STATUS, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         infoMap.clear();
         size_t count = reply.readInt32();
@@ -209,7 +248,10 @@
 
         data.writeString8(certType);
         data.writeString8(certAuthority);
-        remote()->transact(GET_PROVISION_REQUEST, data, &reply);
+        status_t status = remote()->transact(GET_PROVISION_REQUEST, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         readVector(reply, request);
         defaultUrl = reply.readString8();
@@ -224,7 +266,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         writeVector(data, response);
-        remote()->transact(PROVIDE_PROVISION_RESPONSE, data, &reply);
+        status_t status = remote()->transact(PROVIDE_PROVISION_RESPONSE, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         readVector(reply, certificate);
         readVector(reply, wrappedKey);
@@ -236,7 +281,10 @@
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
-        remote()->transact(UNPROVISION_DEVICE, data, &reply);
+        status_t status = remote()->transact(UNPROVISION_DEVICE, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -245,7 +293,10 @@
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
-        remote()->transact(GET_SECURE_STOPS, data, &reply);
+        status_t status = remote()->transact(GET_SECURE_STOPS, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         secureStops.clear();
         uint32_t count = reply.readInt32();
@@ -262,7 +313,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         writeVector(data, ssid);
-        remote()->transact(GET_SECURE_STOP, data, &reply);
+        status_t status = remote()->transact(GET_SECURE_STOP, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         readVector(reply, secureStop);
         return reply.readInt32();
@@ -273,7 +327,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         writeVector(data, ssRelease);
-        remote()->transact(RELEASE_SECURE_STOPS, data, &reply);
+        status_t status = remote()->transact(RELEASE_SECURE_STOPS, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -282,7 +339,10 @@
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
-        remote()->transact(RELEASE_ALL_SECURE_STOPS, data, &reply);
+        status_t status = remote()->transact(RELEASE_ALL_SECURE_STOPS, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -292,7 +352,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         data.writeString8(name);
-        remote()->transact(GET_PROPERTY_STRING, data, &reply);
+        status_t status = remote()->transact(GET_PROPERTY_STRING, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         value = reply.readString8();
         return reply.readInt32();
@@ -303,7 +366,10 @@
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
 
         data.writeString8(name);
-        remote()->transact(GET_PROPERTY_BYTE_ARRAY, data, &reply);
+        status_t status = remote()->transact(GET_PROPERTY_BYTE_ARRAY, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         readVector(reply, value);
         return reply.readInt32();
@@ -315,7 +381,10 @@
 
         data.writeString8(name);
         data.writeString8(value);
-        remote()->transact(SET_PROPERTY_STRING, data, &reply);
+        status_t status = remote()->transact(SET_PROPERTY_STRING, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -327,7 +396,10 @@
 
         data.writeString8(name);
         writeVector(data, value);
-        remote()->transact(SET_PROPERTY_BYTE_ARRAY, data, &reply);
+        status_t status = remote()->transact(SET_PROPERTY_BYTE_ARRAY, data, &reply);
+        if (status != OK) {
+            return status;
+        }
 
         return reply.readInt32();
     }
@@ -340,7 +412,10 @@
 
         writeVector(data, sessionId);
         data.writeString8(algorithm);
-        remote()->transact(SET_CIPHER_ALGORITHM, data, &reply);
+        status_t status = remote()->transact(SET_CIPHER_ALGORITHM, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         return reply.readInt32();
     }
 
@@ -351,7 +426,10 @@
 
         writeVector(data, sessionId);
         data.writeString8(algorithm);
-        remote()->transact(SET_MAC_ALGORITHM, data, &reply);
+        status_t status = remote()->transact(SET_MAC_ALGORITHM, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         return reply.readInt32();
     }
 
@@ -368,7 +446,10 @@
         writeVector(data, input);
         writeVector(data, iv);
 
-        remote()->transact(ENCRYPT, data, &reply);
+        status_t status = remote()->transact(ENCRYPT, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         readVector(reply, output);
 
         return reply.readInt32();
@@ -387,7 +468,10 @@
         writeVector(data, input);
         writeVector(data, iv);
 
-        remote()->transact(DECRYPT, data, &reply);
+        status_t status = remote()->transact(DECRYPT, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         readVector(reply, output);
 
         return reply.readInt32();
@@ -404,7 +488,10 @@
         writeVector(data, keyId);
         writeVector(data, message);
 
-        remote()->transact(SIGN, data, &reply);
+        status_t status = remote()->transact(SIGN, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         readVector(reply, signature);
 
         return reply.readInt32();
@@ -423,7 +510,10 @@
         writeVector(data, message);
         writeVector(data, signature);
 
-        remote()->transact(VERIFY, data, &reply);
+        status_t status = remote()->transact(VERIFY, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         match = (bool)reply.readInt32();
         return reply.readInt32();
     }
@@ -441,7 +531,10 @@
         writeVector(data, message);
         writeVector(data, wrappedKey);
 
-        remote()->transact(SIGN_RSA, data, &reply);
+        status_t status = remote()->transact(SIGN_RSA, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         readVector(reply, signature);
 
         return reply.readInt32();
@@ -451,7 +544,10 @@
         Parcel data, reply;
         data.writeInterfaceToken(IDrm::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(listener));
-        remote()->transact(SET_LISTENER, data, &reply);
+        status_t status = remote()->transact(SET_LISTENER, data, &reply);
+        if (status != OK) {
+            return status;
+        }
         return reply.readInt32();
     }
 
@@ -562,13 +658,15 @@
 
             Vector<uint8_t> request;
             String8 defaultUrl;
+            DrmPlugin::KeyRequestType keyRequestType;
 
-            status_t result = getKeyRequest(sessionId, initData,
-                                            mimeType, keyType,
-                                            optionalParameters,
-                                            request, defaultUrl);
+            status_t result = getKeyRequest(sessionId, initData, mimeType,
+                    keyType, optionalParameters, request, defaultUrl,
+                    &keyRequestType);
+
             writeVector(reply, request);
             reply->writeString8(defaultUrl);
+            reply->writeInt32(static_cast<int32_t>(keyRequestType));
             reply->writeInt32(result);
             return OK;
         }
diff --git a/media/libmedia/IDrmClient.cpp b/media/libmedia/IDrmClient.cpp
index f50715e..490c6ed 100644
--- a/media/libmedia/IDrmClient.cpp
+++ b/media/libmedia/IDrmClient.cpp
@@ -78,4 +78,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IEffect.cpp b/media/libmedia/IEffect.cpp
index c2fff78..eb4b098 100644
--- a/media/libmedia/IEffect.cpp
+++ b/media/libmedia/IEffect.cpp
@@ -201,4 +201,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IEffectClient.cpp b/media/libmedia/IEffectClient.cpp
index aef4371..1322e72 100644
--- a/media/libmedia/IEffectClient.cpp
+++ b/media/libmedia/IEffectClient.cpp
@@ -141,4 +141,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IHDCP.cpp b/media/libmedia/IHDCP.cpp
index 79944ee..f3a8902 100644
--- a/media/libmedia/IHDCP.cpp
+++ b/media/libmedia/IHDCP.cpp
@@ -284,11 +284,17 @@
             size_t offset = data.readInt32();
             size_t size = data.readInt32();
             uint32_t streamCTR = data.readInt32();
-            void *outData = malloc(size);
+            void *outData = NULL;
             uint64_t inputCTR;
 
-            status_t err = encryptNative(graphicBuffer, offset, size,
-                                         streamCTR, &inputCTR, outData);
+            status_t err = ERROR_OUT_OF_RANGE;
+
+            outData = malloc(size);
+
+            if (outData != NULL) {
+                err = encryptNative(graphicBuffer, offset, size,
+                                             streamCTR, &inputCTR, outData);
+            }
 
             reply->writeInt32(err);
 
diff --git a/media/libmedia/IMediaCodecList.cpp b/media/libmedia/IMediaCodecList.cpp
index bf7c5ca..e2df104 100644
--- a/media/libmedia/IMediaCodecList.cpp
+++ b/media/libmedia/IMediaCodecList.cpp
@@ -30,6 +30,7 @@
     CREATE = IBinder::FIRST_CALL_TRANSACTION,
     COUNT_CODECS,
     GET_CODEC_INFO,
+    GET_GLOBAL_SETTINGS,
     FIND_CODEC_BY_TYPE,
     FIND_CODEC_BY_NAME,
 };
@@ -64,6 +65,19 @@
         }
     }
 
+    virtual const sp<AMessage> getGlobalSettings() const
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaCodecList::getInterfaceDescriptor());
+        remote()->transact(GET_GLOBAL_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            return AMessage::FromParcel(reply);
+        } else {
+            return NULL;
+        }
+    }
+
     virtual ssize_t findCodecByType(
             const char *type, bool encoder, size_t startIndex = 0) const
     {
@@ -125,6 +139,20 @@
         }
         break;
 
+        case GET_GLOBAL_SETTINGS:
+        {
+            CHECK_INTERFACE(IMediaCodecList, data, reply);
+            const sp<AMessage> info = getGlobalSettings();
+            if (info != NULL) {
+                reply->writeInt32(OK);
+                info->writeToParcel(reply);
+            } else {
+                reply->writeInt32(-ERANGE);
+            }
+            return NO_ERROR;
+        }
+        break;
+
         case FIND_CODEC_BY_TYPE:
         {
             CHECK_INTERFACE(IMediaCodecList, data, reply);
@@ -160,4 +188,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaDeathNotifier.cpp b/media/libmedia/IMediaDeathNotifier.cpp
index 38e9ca0..d4360ea 100644
--- a/media/libmedia/IMediaDeathNotifier.cpp
+++ b/media/libmedia/IMediaDeathNotifier.cpp
@@ -108,4 +108,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaHTTPConnection.cpp b/media/libmedia/IMediaHTTPConnection.cpp
index a5a3714..0dda0be9 100644
--- a/media/libmedia/IMediaHTTPConnection.cpp
+++ b/media/libmedia/IMediaHTTPConnection.cpp
@@ -70,7 +70,7 @@
         int32_t exceptionCode = reply.readExceptionCode();
 
         if (exceptionCode) {
-            return UNKNOWN_ERROR;
+            return false;
         }
 
         sp<IBinder> binder = reply.readStrongBinder();
@@ -107,7 +107,14 @@
             return UNKNOWN_ERROR;
         }
 
-        size_t len = reply.readInt32();
+        int32_t lenOrErrorCode = reply.readInt32();
+
+        // Negative values are error codes
+        if (lenOrErrorCode < 0) {
+            return lenOrErrorCode;
+        }
+
+        size_t len = lenOrErrorCode;
 
         if (len > size) {
             ALOGE("requested %zu, got %zu", size, len);
@@ -186,5 +193,4 @@
 IMPLEMENT_META_INTERFACE(
         MediaHTTPConnection, "android.media.IMediaHTTPConnection");
 
-}  // namespace android
-
+} // namespace android
diff --git a/media/libmedia/IMediaHTTPService.cpp b/media/libmedia/IMediaHTTPService.cpp
index 1260582..0c16a2b 100644
--- a/media/libmedia/IMediaHTTPService.cpp
+++ b/media/libmedia/IMediaHTTPService.cpp
@@ -44,6 +44,7 @@
         status_t err = reply.readInt32();
 
         if (err != OK) {
+            ALOGE("Unable to make HTTP connection (err = %d)", err);
             return NULL;
         }
 
@@ -54,5 +55,4 @@
 IMPLEMENT_META_INTERFACE(
         MediaHTTPService, "android.media.IMediaHTTPService");
 
-}  // namespace android
-
+} // namespace android
diff --git a/media/libmedia/IMediaLogService.cpp b/media/libmedia/IMediaLogService.cpp
index a4af7b7..1536337 100644
--- a/media/libmedia/IMediaLogService.cpp
+++ b/media/libmedia/IMediaLogService.cpp
@@ -45,7 +45,7 @@
         data.writeStrongBinder(IInterface::asBinder(shared));
         data.writeInt64((int64_t) size);
         data.writeCString(name);
-        status_t status = remote()->transact(REGISTER_WRITER, data, &reply);
+        status_t status __unused = remote()->transact(REGISTER_WRITER, data, &reply);
         // FIXME ignores status
     }
 
@@ -53,7 +53,7 @@
         Parcel data, reply;
         data.writeInterfaceToken(IMediaLogService::getInterfaceDescriptor());
         data.writeStrongBinder(IInterface::asBinder(shared));
-        status_t status = remote()->transact(UNREGISTER_WRITER, data, &reply);
+        status_t status __unused = remote()->transact(UNREGISTER_WRITER, data, &reply);
         // FIXME ignores status
     }
 
@@ -91,4 +91,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index aa2665a..9765f0d 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -20,6 +20,7 @@
 #include <sys/types.h>
 
 #include <binder/Parcel.h>
+#include <media/IDataSource.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaMetadataRetriever.h>
 #include <utils/String8.h>
@@ -65,6 +66,7 @@
     DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
     SET_DATA_SOURCE_URL,
     SET_DATA_SOURCE_FD,
+    SET_DATA_SOURCE_CALLBACK,
     GET_FRAME_AT_TIME,
     EXTRACT_ALBUM_ART,
     EXTRACT_METADATA,
@@ -125,6 +127,15 @@
         return reply.readInt32();
     }
 
+    status_t setDataSource(const sp<IDataSource>& source)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+        data.writeStrongBinder(IInterface::asBinder(source));
+        remote()->transact(SET_DATA_SOURCE_CALLBACK, data, &reply);
+        return reply.readInt32();
+    }
+
     sp<IMemory> getFrameAtTime(int64_t timeUs, int option)
     {
         ALOGV("getTimeAtTime: time(%" PRId64 " us) and option(%d)", timeUs, option);
@@ -235,6 +246,13 @@
             reply->writeInt32(setDataSource(fd, offset, length));
             return NO_ERROR;
         } break;
+        case SET_DATA_SOURCE_CALLBACK: {
+            CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+            sp<IDataSource> source =
+                interface_cast<IDataSource>(data.readStrongBinder());
+            reply->writeInt32(setDataSource(source));
+            return NO_ERROR;
+        } break;
         case GET_FRAME_AT_TIME: {
             CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
             int64_t timeUs = data.readInt64();
@@ -297,4 +315,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaPlayer.cpp b/media/libmedia/IMediaPlayer.cpp
index 7f3e5cc..bde35f2 100644
--- a/media/libmedia/IMediaPlayer.cpp
+++ b/media/libmedia/IMediaPlayer.cpp
@@ -21,6 +21,10 @@
 
 #include <binder/Parcel.h>
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
+#include <media/IDataSource.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IMediaPlayer.h>
 #include <media/IStreamSource.h>
@@ -35,10 +39,15 @@
     SET_DATA_SOURCE_URL,
     SET_DATA_SOURCE_FD,
     SET_DATA_SOURCE_STREAM,
+    SET_DATA_SOURCE_CALLBACK,
     PREPARE_ASYNC,
     START,
     STOP,
     IS_PLAYING,
+    SET_PLAYBACK_SETTINGS,
+    GET_PLAYBACK_SETTINGS,
+    SET_SYNC_SETTINGS,
+    GET_SYNC_SETTINGS,
     PAUSE,
     SEEK_TO,
     GET_CURRENT_POSITION,
@@ -120,6 +129,14 @@
         return reply.readInt32();
     }
 
+    status_t setDataSource(const sp<IDataSource> &source) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        data.writeStrongBinder(IInterface::asBinder(source));
+        remote()->transact(SET_DATA_SOURCE_CALLBACK, data, &reply);
+        return reply.readInt32();
+    }
+
     // pass the buffered IGraphicBufferProducer to the media player service
     status_t setVideoSurfaceTexture(const sp<IGraphicBufferProducer>& bufferProducer)
     {
@@ -164,6 +181,63 @@
         return reply.readInt32();
     }
 
+    status_t setPlaybackSettings(const AudioPlaybackRate& rate)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        data.writeFloat(rate.mSpeed);
+        data.writeFloat(rate.mPitch);
+        data.writeInt32((int32_t)rate.mFallbackMode);
+        data.writeInt32((int32_t)rate.mStretchMode);
+        remote()->transact(SET_PLAYBACK_SETTINGS, data, &reply);
+        return reply.readInt32();
+    }
+
+    status_t getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        remote()->transact(GET_PLAYBACK_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            rate->mSpeed = reply.readFloat();
+            rate->mPitch = reply.readFloat();
+            rate->mFallbackMode = (AudioTimestretchFallbackMode)reply.readInt32();
+            rate->mStretchMode = (AudioTimestretchStretchMode)reply.readInt32();
+        }
+        return err;
+    }
+
+    status_t setSyncSettings(const AVSyncSettings& sync, float videoFpsHint)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        data.writeInt32((int32_t)sync.mSource);
+        data.writeInt32((int32_t)sync.mAudioAdjustMode);
+        data.writeFloat(sync.mTolerance);
+        data.writeFloat(videoFpsHint);
+        remote()->transact(SET_SYNC_SETTINGS, data, &reply);
+        return reply.readInt32();
+    }
+
+    status_t getSyncSettings(AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaPlayer::getInterfaceDescriptor());
+        remote()->transact(GET_SYNC_SETTINGS, data, &reply);
+        status_t err = reply.readInt32();
+        if (err == OK) {
+            AVSyncSettings settings;
+            settings.mSource = (AVSyncSource)reply.readInt32();
+            settings.mAudioAdjustMode = (AVSyncAudioAdjustMode)reply.readInt32();
+            settings.mTolerance = reply.readFloat();
+            *sync = settings;
+            *videoFps = reply.readFloat();
+        }
+        return err;
+    }
+
     status_t pause()
     {
         Parcel data, reply;
@@ -396,6 +470,13 @@
             reply->writeInt32(setDataSource(source));
             return NO_ERROR;
         }
+        case SET_DATA_SOURCE_CALLBACK: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            sp<IDataSource> source =
+                interface_cast<IDataSource>(data.readStrongBinder());
+            reply->writeInt32(setDataSource(source));
+            return NO_ERROR;
+        }
         case SET_VIDEO_SURFACETEXTURE: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             sp<IGraphicBufferProducer> bufferProducer =
@@ -426,6 +507,53 @@
             reply->writeInt32(ret);
             return NO_ERROR;
         } break;
+        case SET_PLAYBACK_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            rate.mSpeed = data.readFloat();
+            rate.mPitch = data.readFloat();
+            rate.mFallbackMode = (AudioTimestretchFallbackMode)data.readInt32();
+            rate.mStretchMode = (AudioTimestretchStretchMode)data.readInt32();
+            reply->writeInt32(setPlaybackSettings(rate));
+            return NO_ERROR;
+        } break;
+        case GET_PLAYBACK_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            status_t err = getPlaybackSettings(&rate);
+            reply->writeInt32(err);
+            if (err == OK) {
+                reply->writeFloat(rate.mSpeed);
+                reply->writeFloat(rate.mPitch);
+                reply->writeInt32((int32_t)rate.mFallbackMode);
+                reply->writeInt32((int32_t)rate.mStretchMode);
+            }
+            return NO_ERROR;
+        } break;
+        case SET_SYNC_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AVSyncSettings sync;
+            sync.mSource = (AVSyncSource)data.readInt32();
+            sync.mAudioAdjustMode = (AVSyncAudioAdjustMode)data.readInt32();
+            sync.mTolerance = data.readFloat();
+            float videoFpsHint = data.readFloat();
+            reply->writeInt32(setSyncSettings(sync, videoFpsHint));
+            return NO_ERROR;
+        } break;
+        case GET_SYNC_SETTINGS: {
+            CHECK_INTERFACE(IMediaPlayer, data, reply);
+            AVSyncSettings sync;
+            float videoFps;
+            status_t err = getSyncSettings(&sync, &videoFps);
+            reply->writeInt32(err);
+            if (err == OK) {
+                reply->writeInt32((int32_t)sync.mSource);
+                reply->writeInt32((int32_t)sync.mAudioAdjustMode);
+                reply->writeFloat(sync.mTolerance);
+                reply->writeFloat(videoFps);
+            }
+            return NO_ERROR;
+        } break;
         case PAUSE: {
             CHECK_INTERFACE(IMediaPlayer, data, reply);
             reply->writeInt32(pause());
@@ -559,4 +687,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaPlayerClient.cpp b/media/libmedia/IMediaPlayerClient.cpp
index a670c96..d608386 100644
--- a/media/libmedia/IMediaPlayerClient.cpp
+++ b/media/libmedia/IMediaPlayerClient.cpp
@@ -75,4 +75,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index feea267..05f8670 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -78,10 +78,11 @@
         return interface_cast<IMediaPlayer>(reply.readStrongBinder());
     }
 
-    virtual sp<IMediaRecorder> createMediaRecorder()
+    virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
         remote()->transact(CREATE_MEDIA_RECORDER, data, &reply);
         return interface_cast<IMediaRecorder>(reply.readStrongBinder());
     }
@@ -128,11 +129,12 @@
         return remote()->transact(PULL_BATTERY_DATA, data, reply);
     }
 
-    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
-            const String8& iface)
+    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
+            const sp<IRemoteDisplayClient>& client, const String8& iface)
     {
         Parcel data, reply;
         data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
+        data.writeString16(opPackageName);
         data.writeStrongBinder(IInterface::asBinder(client));
         data.writeString8(iface);
         remote()->transact(LISTEN_FOR_REMOTE_DISPLAY, data, &reply);
@@ -166,7 +168,8 @@
         } break;
         case CREATE_MEDIA_RECORDER: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
-            sp<IMediaRecorder> recorder = createMediaRecorder();
+            const String16 opPackageName = data.readString16();
+            sp<IMediaRecorder> recorder = createMediaRecorder(opPackageName);
             reply->writeStrongBinder(IInterface::asBinder(recorder));
             return NO_ERROR;
         } break;
@@ -214,10 +217,11 @@
         } break;
         case LISTEN_FOR_REMOTE_DISPLAY: {
             CHECK_INTERFACE(IMediaPlayerService, data, reply);
+            const String16 opPackageName = data.readString16();
             sp<IRemoteDisplayClient> client(
                     interface_cast<IRemoteDisplayClient>(data.readStrongBinder()));
             String8 iface(data.readString8());
-            sp<IRemoteDisplay> display(listenForRemoteDisplay(client, iface));
+            sp<IRemoteDisplay> display(listenForRemoteDisplay(opPackageName, client, iface));
             reply->writeStrongBinder(IInterface::asBinder(display));
             return NO_ERROR;
         } break;
@@ -234,4 +238,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index a733b68..ee3b584 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -35,6 +35,7 @@
     RELEASE = IBinder::FIRST_CALL_TRANSACTION,
     INIT,
     CLOSE,
+    SET_INPUT_SURFACE,
     QUERY_SURFACE_MEDIASOURCE,
     RESET,
     STOP,
@@ -46,7 +47,6 @@
     SET_OUTPUT_FORMAT,
     SET_VIDEO_ENCODER,
     SET_AUDIO_ENCODER,
-    SET_OUTPUT_FILE_PATH,
     SET_OUTPUT_FILE_FD,
     SET_VIDEO_SIZE,
     SET_VIDEO_FRAMERATE,
@@ -76,6 +76,16 @@
         return reply.readInt32();
     }
 
+    status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface)
+    {
+        ALOGV("setInputSurface(%p)", surface.get());
+        Parcel data, reply;
+        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+        data.writeStrongBinder(IInterface::asBinder(surface));
+        remote()->transact(SET_INPUT_SURFACE, data, &reply);
+        return reply.readInt32();
+    }
+
     sp<IGraphicBufferProducer> querySurfaceMediaSource()
     {
         ALOGV("Query SurfaceMediaSource");
@@ -158,16 +168,6 @@
         return reply.readInt32();
     }
 
-    status_t setOutputFile(const char* path)
-    {
-        ALOGV("setOutputFile(%s)", path);
-        Parcel data, reply;
-        data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
-        data.writeCString(path);
-        remote()->transact(SET_OUTPUT_FILE_PATH, data, &reply);
-        return reply.readInt32();
-    }
-
     status_t setOutputFile(int fd, int64_t offset, int64_t length) {
         ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
         Parcel data, reply;
@@ -300,7 +300,8 @@
 // ----------------------------------------------------------------------
 
 status_t BnMediaRecorder::onTransact(
-                                     uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+                                     uint32_t code, const Parcel& data, Parcel* reply,
+                                     uint32_t flags)
 {
     switch (code) {
         case RELEASE: {
@@ -390,13 +391,6 @@
             return NO_ERROR;
 
         } break;
-        case SET_OUTPUT_FILE_PATH: {
-            ALOGV("SET_OUTPUT_FILE_PATH");
-            CHECK_INTERFACE(IMediaRecorder, data, reply);
-            const char* path = data.readCString();
-            reply->writeInt32(setOutputFile(path));
-            return NO_ERROR;
-        } break;
         case SET_OUTPUT_FILE_FD: {
             ALOGV("SET_OUTPUT_FILE_FD");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
@@ -445,7 +439,8 @@
         case SET_PREVIEW_SURFACE: {
             ALOGV("SET_PREVIEW_SURFACE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
-            sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(data.readStrongBinder());
+            sp<IGraphicBufferProducer> surface = interface_cast<IGraphicBufferProducer>(
+                    data.readStrongBinder());
             reply->writeInt32(setPreviewSurface(surface));
             return NO_ERROR;
         } break;
@@ -458,6 +453,14 @@
             reply->writeInt32(setCamera(camera, proxy));
             return NO_ERROR;
         } break;
+        case SET_INPUT_SURFACE: {
+            ALOGV("SET_INPUT_SURFACE");
+            CHECK_INTERFACE(IMediaRecorder, data, reply);
+            sp<IGraphicBufferConsumer> surface = interface_cast<IGraphicBufferConsumer>(
+                    data.readStrongBinder());
+            reply->writeInt32(setInputSurface(surface));
+            return NO_ERROR;
+        } break;
         case QUERY_SURFACE_MEDIASOURCE: {
             ALOGV("QUERY_SURFACE_MEDIASOURCE");
             CHECK_INTERFACE(IMediaRecorder, data, reply);
@@ -479,4 +482,4 @@
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IMediaRecorderClient.cpp b/media/libmedia/IMediaRecorderClient.cpp
index e7907e3..6795d23 100644
--- a/media/libmedia/IMediaRecorderClient.cpp
+++ b/media/libmedia/IMediaRecorderClient.cpp
@@ -67,4 +67,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index e208df9..16da65e 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -41,6 +41,8 @@
     USE_BUFFER,
     USE_GRAPHIC_BUFFER,
     CREATE_INPUT_SURFACE,
+    CREATE_PERSISTENT_INPUT_SURFACE,
+    SET_INPUT_SURFACE,
     SIGNAL_END_OF_INPUT_STREAM,
     STORE_META_DATA_IN_BUFFERS,
     PREPARE_FOR_ADAPTIVE_PLAYBACK,
@@ -243,12 +245,13 @@
 
     virtual status_t useBuffer(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer) {
+            buffer_id *buffer, OMX_U32 allottedSize) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32(port_index);
         data.writeStrongBinder(IInterface::asBinder(params));
+        data.writeInt32(allottedSize);
         remote()->transact(USE_BUFFER, data, &reply);
 
         status_t err = reply.readInt32();
@@ -303,7 +306,7 @@
 
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
-            sp<IGraphicBufferProducer> *bufferProducer) {
+            sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
         Parcel data, reply;
         status_t err;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
@@ -315,6 +318,35 @@
             return err;
         }
 
+        // read type even if createInputSurface failed
+        int negotiatedType = reply.readInt32();
+        if (type != NULL) {
+            *type = (MetadataBufferType)negotiatedType;
+        }
+
+        err = reply.readInt32();
+        if (err != OK) {
+            return err;
+        }
+
+        *bufferProducer = IGraphicBufferProducer::asInterface(
+                reply.readStrongBinder());
+
+        return err;
+    }
+
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer) {
+        Parcel data, reply;
+        status_t err;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        err = remote()->transact(CREATE_PERSISTENT_INPUT_SURFACE, data, &reply);
+        if (err != OK) {
+            ALOGW("binder transaction failed: %d", err);
+            return err;
+        }
+
         err = reply.readInt32();
         if (err != OK) {
             return err;
@@ -322,10 +354,38 @@
 
         *bufferProducer = IGraphicBufferProducer::asInterface(
                 reply.readStrongBinder());
+        *bufferConsumer = IGraphicBufferConsumer::asInterface(
+                reply.readStrongBinder());
 
         return err;
     }
 
+    virtual status_t setInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
+        status_t err;
+        data.writeInt32((int32_t)node);
+        data.writeInt32(port_index);
+        data.writeStrongBinder(IInterface::asBinder(bufferConsumer));
+
+        err = remote()->transact(SET_INPUT_SURFACE, data, &reply);
+
+        if (err != OK) {
+            ALOGW("binder transaction failed: %d", err);
+            return err;
+        }
+
+        // read type even if setInputSurface failed
+        int negotiatedType = reply.readInt32();
+        if (type != NULL) {
+            *type = (MetadataBufferType)negotiatedType;
+        }
+
+        return reply.readInt32();
+    }
+
     virtual status_t signalEndOfInputStream(node_id node) {
         Parcel data, reply;
         status_t err;
@@ -341,7 +401,7 @@
     }
 
     virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable) {
+            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
@@ -349,8 +409,13 @@
         data.writeInt32((uint32_t)enable);
         remote()->transact(STORE_META_DATA_IN_BUFFERS, data, &reply);
 
-        status_t err = reply.readInt32();
-        return err;
+        // read type even if storeMetaDataInBuffers failed
+        int negotiatedType = reply.readInt32();
+        if (type != NULL) {
+            *type = (MetadataBufferType)negotiatedType;
+        }
+
+        return reply.readInt32();
     }
 
     virtual status_t prepareForAdaptivePlayback(
@@ -413,12 +478,13 @@
 
     virtual status_t allocateBufferWithBackup(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer) {
+            buffer_id *buffer, OMX_U32 allottedSize) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32(port_index);
         data.writeStrongBinder(IInterface::asBinder(params));
+        data.writeInt32(allottedSize);
         remote()->transact(ALLOC_BUFFER_WITH_BACKUP, data, &reply);
 
         status_t err = reply.readInt32();
@@ -445,11 +511,15 @@
         return reply.readInt32();
     }
 
-    virtual status_t fillBuffer(node_id node, buffer_id buffer) {
+    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
         data.writeInt32((int32_t)buffer);
+        data.writeInt32(fenceFd >= 0);
+        if (fenceFd >= 0) {
+            data.writeFileDescriptor(fenceFd, true /* takeOwnership */);
+        }
         remote()->transact(FILL_BUFFER, data, &reply);
 
         return reply.readInt32();
@@ -459,7 +529,7 @@
             node_id node,
             buffer_id buffer,
             OMX_U32 range_offset, OMX_U32 range_length,
-            OMX_U32 flags, OMX_TICKS timestamp) {
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
         Parcel data, reply;
         data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
         data.writeInt32((int32_t)node);
@@ -468,6 +538,10 @@
         data.writeInt32(range_length);
         data.writeInt32(flags);
         data.writeInt64(timestamp);
+        data.writeInt32(fenceFd >= 0);
+        if (fenceFd >= 0) {
+            data.writeFileDescriptor(fenceFd, true /* takeOwnership */);
+        }
         remote()->transact(EMPTY_BUFFER, data, &reply);
 
         return reply.readInt32();
@@ -711,9 +785,10 @@
             OMX_U32 port_index = data.readInt32();
             sp<IMemory> params =
                 interface_cast<IMemory>(data.readStrongBinder());
+            OMX_U32 allottedSize = data.readInt32();
 
             buffer_id buffer;
-            status_t err = useBuffer(node, port_index, params, &buffer);
+            status_t err = useBuffer(node, port_index, params, &buffer, allottedSize);
             reply->writeInt32(err);
 
             if (err == OK) {
@@ -769,9 +844,10 @@
             OMX_U32 port_index = data.readInt32();
 
             sp<IGraphicBufferProducer> bufferProducer;
-            status_t err = createInputSurface(node, port_index,
-                    &bufferProducer);
+            MetadataBufferType type;
+            status_t err = createInputSurface(node, port_index, &bufferProducer, &type);
 
+            reply->writeInt32(type);
             reply->writeInt32(err);
 
             if (err == OK) {
@@ -781,6 +857,43 @@
             return NO_ERROR;
         }
 
+        case CREATE_PERSISTENT_INPUT_SURFACE:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            sp<IGraphicBufferProducer> bufferProducer;
+            sp<IGraphicBufferConsumer> bufferConsumer;
+            status_t err = createPersistentInputSurface(
+                    &bufferProducer, &bufferConsumer);
+
+            reply->writeInt32(err);
+
+            if (err == OK) {
+                reply->writeStrongBinder(IInterface::asBinder(bufferProducer));
+                reply->writeStrongBinder(IInterface::asBinder(bufferConsumer));
+            }
+
+            return NO_ERROR;
+        }
+
+        case SET_INPUT_SURFACE:
+        {
+            CHECK_OMX_INTERFACE(IOMX, data, reply);
+
+            node_id node = (node_id)data.readInt32();
+            OMX_U32 port_index = data.readInt32();
+
+            sp<IGraphicBufferConsumer> bufferConsumer =
+                    interface_cast<IGraphicBufferConsumer>(data.readStrongBinder());
+
+            MetadataBufferType type;
+            status_t err = setInputSurface(node, port_index, bufferConsumer, &type);
+
+            reply->writeInt32(type);
+            reply->writeInt32(err);
+            return NO_ERROR;
+        }
+
         case SIGNAL_END_OF_INPUT_STREAM:
         {
             CHECK_OMX_INTERFACE(IOMX, data, reply);
@@ -801,7 +914,9 @@
             OMX_U32 port_index = data.readInt32();
             OMX_BOOL enable = (OMX_BOOL)data.readInt32();
 
-            status_t err = storeMetaDataInBuffers(node, port_index, enable);
+            MetadataBufferType type;
+            status_t err = storeMetaDataInBuffers(node, port_index, enable, &type);
+            reply->writeInt32(type);
             reply->writeInt32(err);
 
             return NO_ERROR;
@@ -872,10 +987,11 @@
             OMX_U32 port_index = data.readInt32();
             sp<IMemory> params =
                 interface_cast<IMemory>(data.readStrongBinder());
+            OMX_U32 allottedSize = data.readInt32();
 
             buffer_id buffer;
             status_t err = allocateBufferWithBackup(
-                    node, port_index, params, &buffer);
+                    node, port_index, params, &buffer, allottedSize);
 
             reply->writeInt32(err);
 
@@ -904,7 +1020,9 @@
 
             node_id node = (node_id)data.readInt32();
             buffer_id buffer = (buffer_id)data.readInt32();
-            reply->writeInt32(fillBuffer(node, buffer));
+            bool haveFence = data.readInt32();
+            int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+            reply->writeInt32(fillBuffer(node, buffer, fenceFd));
 
             return NO_ERROR;
         }
@@ -919,11 +1037,10 @@
             OMX_U32 range_length = data.readInt32();
             OMX_U32 flags = data.readInt32();
             OMX_TICKS timestamp = data.readInt64();
-
-            reply->writeInt32(
-                    emptyBuffer(
-                        node, buffer, range_offset, range_length,
-                        flags, timestamp));
+            bool haveFence = data.readInt32();
+            int fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+            reply->writeInt32(emptyBuffer(
+                    node, buffer, range_offset, range_length, flags, timestamp, fenceFd));
 
             return NO_ERROR;
         }
@@ -960,14 +1077,29 @@
         : BpInterface<IOMXObserver>(impl) {
     }
 
-    virtual void onMessage(const omx_message &msg) {
+    virtual void onMessages(const std::list<omx_message> &messages) {
         Parcel data, reply;
-        data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
-        data.write(&msg, sizeof(msg));
-
-        ALOGV("onMessage writing message %d, size %zu", msg.type, sizeof(msg));
-
-        remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
+        std::list<omx_message>::const_iterator it = messages.cbegin();
+        bool first = true;
+        while (it != messages.cend()) {
+            const omx_message &msg = *it++;
+            if (first) {
+                data.writeInterfaceToken(IOMXObserver::getInterfaceDescriptor());
+                data.writeInt32(msg.node);
+                first = false;
+            }
+            data.writeInt32(msg.fenceFd >= 0);
+            if (msg.fenceFd >= 0) {
+                data.writeFileDescriptor(msg.fenceFd, true /* takeOwnership */);
+            }
+            data.writeInt32(msg.type);
+            data.write(&msg.u, sizeof(msg.u));
+            ALOGV("onMessage writing message %d, size %zu", msg.type, sizeof(msg));
+        }
+        if (!first) {
+            data.writeInt32(-1); // mark end
+            remote()->transact(OBSERVER_ON_MSG, data, &reply, IBinder::FLAG_ONEWAY);
+        }
     }
 };
 
@@ -979,16 +1111,28 @@
         case OBSERVER_ON_MSG:
         {
             CHECK_OMX_INTERFACE(IOMXObserver, data, reply);
+            IOMX::node_id node = data.readInt32();
+            std::list<omx_message> messages;
+            status_t err = FAILED_TRANSACTION; // must receive at least one message
+            do {
+                int haveFence = data.readInt32();
+                if (haveFence < 0) { // we use -1 to mark end of messages
+                    break;
+                }
+                omx_message msg;
+                msg.node = node;
+                msg.fenceFd = haveFence ? ::dup(data.readFileDescriptor()) : -1;
+                msg.type = (typeof(msg.type))data.readInt32();
+                err = data.read(&msg.u, sizeof(msg.u));
+                ALOGV("onTransact reading message %d, size %zu", msg.type, sizeof(msg));
+                messages.push_back(msg);
+            } while (err == OK);
 
-            omx_message msg;
-            data.read(&msg, sizeof(msg));
+            if (err == OK) {
+                onMessages(messages);
+            }
 
-            ALOGV("onTransact reading message %d, size %zu", msg.type, sizeof(msg));
-
-            // XXX Could use readInplace maybe?
-            onMessage(msg);
-
-            return NO_ERROR;
+            return err;
         }
 
         default:
diff --git a/media/libmedia/IRemoteDisplay.cpp b/media/libmedia/IRemoteDisplay.cpp
index 1e15434..869d11a 100644
--- a/media/libmedia/IRemoteDisplay.cpp
+++ b/media/libmedia/IRemoteDisplay.cpp
@@ -91,4 +91,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IRemoteDisplayClient.cpp b/media/libmedia/IRemoteDisplayClient.cpp
index 9d63bc9..bedeb6c 100644
--- a/media/libmedia/IRemoteDisplayClient.cpp
+++ b/media/libmedia/IRemoteDisplayClient.cpp
@@ -101,4 +101,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/IResourceManagerClient.cpp b/media/libmedia/IResourceManagerClient.cpp
new file mode 100644
index 0000000..b3f56e8
--- /dev/null
+++ b/media/libmedia/IResourceManagerClient.cpp
@@ -0,0 +1,90 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <utils/RefBase.h>
+#include <binder/IInterface.h>
+#include <binder/Parcel.h>
+
+#include <media/IResourceManagerClient.h>
+
+namespace android {
+
+enum {
+    RECLAIM_RESOURCE = IBinder::FIRST_CALL_TRANSACTION,
+    GET_NAME,
+};
+
+class BpResourceManagerClient: public BpInterface<IResourceManagerClient>
+{
+public:
+    BpResourceManagerClient(const sp<IBinder> &impl)
+        : BpInterface<IResourceManagerClient>(impl)
+    {
+    }
+
+    virtual bool reclaimResource() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerClient::getInterfaceDescriptor());
+
+        bool ret = false;
+        status_t status = remote()->transact(RECLAIM_RESOURCE, data, &reply);
+        if (status == NO_ERROR) {
+            ret = (bool)reply.readInt32();
+        }
+        return ret;
+    }
+
+    virtual String8 getName() {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerClient::getInterfaceDescriptor());
+
+        String8 ret;
+        status_t status = remote()->transact(GET_NAME, data, &reply);
+        if (status == NO_ERROR) {
+            ret = reply.readString8();
+        }
+        return ret;
+    }
+
+};
+
+IMPLEMENT_META_INTERFACE(ResourceManagerClient, "android.media.IResourceManagerClient");
+
+// ----------------------------------------------------------------------
+
+status_t BnResourceManagerClient::onTransact(
+    uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags)
+{
+    switch (code) {
+        case RECLAIM_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerClient, data, reply);
+            bool ret = reclaimResource();
+            reply->writeInt32(ret);
+            return NO_ERROR;
+        } break;
+        case GET_NAME: {
+            CHECK_INTERFACE(IResourceManagerClient, data, reply);
+            String8 ret = getName();
+            reply->writeString8(ret);
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+}; // namespace android
diff --git a/media/libmedia/IResourceManagerService.cpp b/media/libmedia/IResourceManagerService.cpp
new file mode 100644
index 0000000..4598686
--- /dev/null
+++ b/media/libmedia/IResourceManagerService.cpp
@@ -0,0 +1,166 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IResourceManagerService"
+#include <utils/Log.h>
+
+#include "media/IResourceManagerService.h"
+
+#include <binder/Parcel.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+
+namespace android {
+
+enum {
+    CONFIG = IBinder::FIRST_CALL_TRANSACTION,
+    ADD_RESOURCE,
+    REMOVE_RESOURCE,
+    RECLAIM_RESOURCE,
+};
+
+template <typename T>
+static void writeToParcel(Parcel *data, const Vector<T> &items) {
+    size_t size = items.size();
+    // truncates size, but should be okay for this use case
+    data->writeUint32(static_cast<uint32_t>(size));
+    for (size_t i = 0; i < size; i++) {
+        items[i].writeToParcel(data);
+    }
+}
+
+template <typename T>
+static void readFromParcel(const Parcel &data, Vector<T> *items) {
+    size_t size = (size_t)data.readUint32();
+    for (size_t i = 0; i < size && data.dataAvail() > 0; i++) {
+        T item;
+        item.readFromParcel(data);
+        items->add(item);
+    }
+}
+
+class BpResourceManagerService : public BpInterface<IResourceManagerService>
+{
+public:
+    BpResourceManagerService(const sp<IBinder> &impl)
+        : BpInterface<IResourceManagerService>(impl)
+    {
+    }
+
+    virtual void config(const Vector<MediaResourcePolicy> &policies) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        writeToParcel(&data, policies);
+        remote()->transact(CONFIG, data, &reply);
+    }
+
+    virtual void addResource(
+            int pid,
+            int64_t clientId,
+            const sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(pid);
+        data.writeInt64(clientId);
+        data.writeStrongBinder(IInterface::asBinder(client));
+        writeToParcel(&data, resources);
+
+        remote()->transact(ADD_RESOURCE, data, &reply);
+    }
+
+    virtual void removeResource(int pid, int64_t clientId) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(pid);
+        data.writeInt64(clientId);
+
+        remote()->transact(REMOVE_RESOURCE, data, &reply);
+    }
+
+    virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources) {
+        Parcel data, reply;
+        data.writeInterfaceToken(IResourceManagerService::getInterfaceDescriptor());
+        data.writeInt32(callingPid);
+        writeToParcel(&data, resources);
+
+        bool ret = false;
+        status_t status = remote()->transact(RECLAIM_RESOURCE, data, &reply);
+        if (status == NO_ERROR) {
+            ret = (bool)reply.readInt32();
+        }
+        return ret;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(ResourceManagerService, "android.media.IResourceManagerService");
+
+// ----------------------------------------------------------------------
+
+
+status_t BnResourceManagerService::onTransact(
+    uint32_t code, const Parcel &data, Parcel *reply, uint32_t flags)
+{
+    switch (code) {
+        case CONFIG: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            Vector<MediaResourcePolicy> policies;
+            readFromParcel(data, &policies);
+            config(policies);
+            return NO_ERROR;
+        } break;
+
+        case ADD_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int pid = data.readInt32();
+            int64_t clientId = data.readInt64();
+            sp<IResourceManagerClient> client(
+                    interface_cast<IResourceManagerClient>(data.readStrongBinder()));
+            Vector<MediaResource> resources;
+            readFromParcel(data, &resources);
+            addResource(pid, clientId, client, resources);
+            return NO_ERROR;
+        } break;
+
+        case REMOVE_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int pid = data.readInt32();
+            int64_t clientId = data.readInt64();
+            removeResource(pid, clientId);
+            return NO_ERROR;
+        } break;
+
+        case RECLAIM_RESOURCE: {
+            CHECK_INTERFACE(IResourceManagerService, data, reply);
+            int callingPid = data.readInt32();
+            Vector<MediaResource> resources;
+            readFromParcel(data, &resources);
+            bool ret = reclaimResource(callingPid, resources);
+            reply->writeInt32(ret);
+            return NO_ERROR;
+        } break;
+
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
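
A caller-side sketch of the new service interface, illustrative only. The service name "media.resource_manager" is an assumption, and the "secure-codec" label is taken from the constants added in MediaResource.cpp further below.

    // Illustrative only: register one secure-codec resource for a client.
    #include <binder/IServiceManager.h>
    #include <media/IResourceManagerService.h>
    #include <media/MediaResource.h>
    #include <utils/String16.h>
    #include <utils/String8.h>

    using namespace android;

    static void addSecureCodecResource(
            int pid, int64_t clientId, const sp<IResourceManagerClient> &client) {
        // "media.resource_manager" is an assumed registration name.
        sp<IBinder> binder = defaultServiceManager()->getService(
                String16("media.resource_manager"));
        sp<IResourceManagerService> service =
                interface_cast<IResourceManagerService>(binder);
        if (service == NULL) {
            return;
        }
        Vector<MediaResource> resources;
        resources.add(MediaResource(String8("secure-codec"), 1));  // kResourceSecureCodec
        service->addResource(pid, clientId, client, resources);
    }
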
diff --git a/media/libmedia/IStreamSource.cpp b/media/libmedia/IStreamSource.cpp
index d480aef..840e453 100644
--- a/media/libmedia/IStreamSource.cpp
+++ b/media/libmedia/IStreamSource.cpp
@@ -35,6 +35,9 @@
 // static
 const char *const IStreamListener::kKeyMediaTimeUs = "media-time-us";
 
+// static
+const char *const IStreamListener::kKeyRecentMediaTimeUs = "recent-media-time-us";
+
 enum {
     // IStreamSource
     SET_LISTENER = IBinder::FIRST_CALL_TRANSACTION,
diff --git a/media/libmedia/JetPlayer.cpp b/media/libmedia/JetPlayer.cpp
index 721d8d7..34deb59 100644
--- a/media/libmedia/JetPlayer.cpp
+++ b/media/libmedia/JetPlayer.cpp
@@ -85,12 +85,18 @@
 
     // create the output AudioTrack
     mAudioTrack = new AudioTrack();
-    mAudioTrack->set(AUDIO_STREAM_MUSIC,  //TODO parameterize this
+    status_t status = mAudioTrack->set(AUDIO_STREAM_MUSIC,  //TODO parameterize this
             pLibConfig->sampleRate,
             AUDIO_FORMAT_PCM_16_BIT,
             audio_channel_out_mask_from_count(pLibConfig->numChannels),
             (size_t) mTrackBufferSize,
             AUDIO_OUTPUT_FLAG_NONE);
+    if (status != OK) {
+        ALOGE("JetPlayer::init(): Error initializing JET library; AudioTrack error %d", status);
+        mAudioTrack.clear();
+        mState = EAS_STATE_ERROR;
+        return EAS_FAILURE;
+    }
 
     // create render and playback thread
     {
@@ -408,7 +414,8 @@
     ALOGV("JetPlayer::queueSegment segmentNum=%d, libNum=%d, repeatCount=%d, transpose=%d",
         segmentNum, libNum, repeatCount, transpose);
     Mutex::Autolock lock(mMutex);
-    return JET_QueueSegment(mEasData, segmentNum, libNum, repeatCount, transpose, muteFlags, userID);
+    return JET_QueueSegment(mEasData, segmentNum, libNum, repeatCount, transpose, muteFlags,
+            userID);
 }
 
 //-------------------------------------------------------------------------------------------------
@@ -449,7 +456,8 @@
 void JetPlayer::dumpJetStatus(S_JET_STATUS* pJetStatus)
 {
     if (pJetStatus!=NULL)
-        ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d paused=%d",
+        ALOGV(">> current JET player status: userID=%d segmentRepeatCount=%d numQueuedSegments=%d "
+                "paused=%d",
                 pJetStatus->currentUserID, pJetStatus->segmentRepeatCount,
                 pJetStatus->numQueuedSegments, pJetStatus->paused);
     else
diff --git a/media/libmedia/MediaCodecInfo.cpp b/media/libmedia/MediaCodecInfo.cpp
index 7b4c4e2..8d3fa7b 100644
--- a/media/libmedia/MediaCodecInfo.cpp
+++ b/media/libmedia/MediaCodecInfo.cpp
@@ -206,6 +206,17 @@
     return OK;
 }
 
+status_t MediaCodecInfo::updateMime(const char *mime) {
+    ssize_t ix = getCapabilityIndex(mime);
+    if (ix < 0) {
+        ALOGE("updateMime mime not found %s", mime);
+        return -EINVAL;
+    }
+
+    mCurrentCaps = mCaps.valueAt(ix);
+    return OK;
+}
+
 void MediaCodecInfo::removeMime(const char *mime) {
     ssize_t ix = getCapabilityIndex(mime);
     if (ix >= 0) {
diff --git a/media/libmedia/MediaProfiles.cpp b/media/libmedia/MediaProfiles.cpp
index e2e6042..c5790fb 100644
--- a/media/libmedia/MediaProfiles.cpp
+++ b/media/libmedia/MediaProfiles.cpp
@@ -152,18 +152,9 @@
     ALOGV("codec = %d", cap.mCodec);
 }
 
-/*static*/ void
-MediaProfiles::logVideoEditorCap(const MediaProfiles::VideoEditorCap& cap UNUSED)
-{
-    ALOGV("videoeditor cap:");
-    ALOGV("mMaxInputFrameWidth = %d", cap.mMaxInputFrameWidth);
-    ALOGV("mMaxInputFrameHeight = %d", cap.mMaxInputFrameHeight);
-    ALOGV("mMaxOutputFrameWidth = %d", cap.mMaxOutputFrameWidth);
-    ALOGV("mMaxOutputFrameHeight = %d", cap.mMaxOutputFrameHeight);
-}
-
 /*static*/ int
-MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings, const char *name)
+MediaProfiles::findTagForName(const MediaProfiles::NameToTagMap *map, size_t nMappings,
+        const char *name)
 {
     int tag = -1;
     for (size_t i = 0; i < nMappings; ++i) {
@@ -295,9 +286,8 @@
     CHECK(codec != -1);
 
     MediaProfiles::AudioEncoderCap *cap =
-        new MediaProfiles::AudioEncoderCap(static_cast<audio_encoder>(codec), atoi(atts[5]), atoi(atts[7]),
-            atoi(atts[9]), atoi(atts[11]), atoi(atts[13]),
-            atoi(atts[15]));
+        new MediaProfiles::AudioEncoderCap(static_cast<audio_encoder>(codec), atoi(atts[5]),
+            atoi(atts[7]), atoi(atts[9]), atoi(atts[11]), atoi(atts[13]), atoi(atts[15]));
     logAudioEncoderCap(*cap);
     return cap;
 }
@@ -330,7 +320,8 @@
           !strcmp("fileFormat", atts[2]) &&
           !strcmp("duration",   atts[4]));
 
-    const size_t nProfileMappings = sizeof(sCamcorderQualityNameMap)/sizeof(sCamcorderQualityNameMap[0]);
+    const size_t nProfileMappings = sizeof(sCamcorderQualityNameMap)/
+            sizeof(sCamcorderQualityNameMap[0]);
     const int quality = findTagForName(sCamcorderQualityNameMap, nProfileMappings, atts[1]);
     CHECK(quality != -1);
 
@@ -397,42 +388,6 @@
     ALOGV("%s: cameraId=%d, offset=%d ms", __func__, cameraId, offsetTimeMs);
     mStartTimeOffsets.replaceValueFor(cameraId, offsetTimeMs);
 }
-/*static*/ MediaProfiles::ExportVideoProfile*
-MediaProfiles::createExportVideoProfile(const char **atts)
-{
-    CHECK(!strcmp("name", atts[0]) &&
-          !strcmp("profile", atts[2]) &&
-          !strcmp("level", atts[4]));
-
-    const size_t nMappings =
-        sizeof(sVideoEncoderNameMap)/sizeof(sVideoEncoderNameMap[0]);
-    const int codec = findTagForName(sVideoEncoderNameMap, nMappings, atts[1]);
-    CHECK(codec != -1);
-
-    MediaProfiles::ExportVideoProfile *profile =
-        new MediaProfiles::ExportVideoProfile(
-            codec, atoi(atts[3]), atoi(atts[5]));
-
-    return profile;
-}
-/*static*/ MediaProfiles::VideoEditorCap*
-MediaProfiles::createVideoEditorCap(const char **atts, MediaProfiles *profiles)
-{
-    CHECK(!strcmp("maxInputFrameWidth", atts[0]) &&
-          !strcmp("maxInputFrameHeight", atts[2])  &&
-          !strcmp("maxOutputFrameWidth", atts[4]) &&
-          !strcmp("maxOutputFrameHeight", atts[6]) &&
-          !strcmp("maxPrefetchYUVFrames", atts[8]));
-
-    MediaProfiles::VideoEditorCap *pVideoEditorCap =
-        new MediaProfiles::VideoEditorCap(atoi(atts[1]), atoi(atts[3]),
-                atoi(atts[5]), atoi(atts[7]), atoi(atts[9]));
-
-    logVideoEditorCap(*pVideoEditorCap);
-    profiles->mVideoEditorCap = pVideoEditorCap;
-
-    return pVideoEditorCap;
-}
 
 /*static*/ void
 MediaProfiles::startElementHandler(void *userData, const char *name, const char **atts)
@@ -464,10 +419,6 @@
             createCamcorderProfile(profiles->mCurrentCameraId, atts, profiles->mCameraIds));
     } else if (strcmp("ImageEncoding", name) == 0) {
         profiles->addImageEncodingQualityLevel(profiles->mCurrentCameraId, atts);
-    } else if (strcmp("VideoEditorCap", name) == 0) {
-        createVideoEditorCap(atts, profiles);
-    } else if (strcmp("ExportVideoProfile", name) == 0) {
-        profiles->mVideoEditorExportProfiles.add(createExportVideoProfile(atts));
     }
 }
 
@@ -531,7 +482,6 @@
         CHECK(refIndex != -1);
         RequiredProfileRefInfo *info;
         camcorder_quality refQuality;
-        VideoCodec *codec = NULL;
 
         // Check high and low from either camcorder profile, timelapse profile
         // or high speed profile, but not all of them. Default, check camcorder profile
@@ -722,16 +672,20 @@
 MediaProfiles::createDefaultCamcorderTimeLapseLowProfiles(
         MediaProfiles::CamcorderProfile **lowTimeLapseProfile,
         MediaProfiles::CamcorderProfile **lowSpecificTimeLapseProfile) {
-    *lowTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(CAMCORDER_QUALITY_TIME_LAPSE_LOW);
-    *lowSpecificTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(CAMCORDER_QUALITY_TIME_LAPSE_QCIF);
+    *lowTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(
+            CAMCORDER_QUALITY_TIME_LAPSE_LOW);
+    *lowSpecificTimeLapseProfile = createDefaultCamcorderTimeLapseQcifProfile(
+            CAMCORDER_QUALITY_TIME_LAPSE_QCIF);
 }
 
 /*static*/ void
 MediaProfiles::createDefaultCamcorderTimeLapseHighProfiles(
         MediaProfiles::CamcorderProfile **highTimeLapseProfile,
         MediaProfiles::CamcorderProfile **highSpecificTimeLapseProfile) {
-    *highTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(CAMCORDER_QUALITY_TIME_LAPSE_HIGH);
-    *highSpecificTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(CAMCORDER_QUALITY_TIME_LAPSE_480P);
+    *highTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(
+            CAMCORDER_QUALITY_TIME_LAPSE_HIGH);
+    *highSpecificTimeLapseProfile = createDefaultCamcorderTimeLapse480pProfile(
+            CAMCORDER_QUALITY_TIME_LAPSE_480P);
 }
 
 /*static*/ MediaProfiles::CamcorderProfile*
@@ -809,7 +763,8 @@
 
     // high camcorder time lapse profiles.
     MediaProfiles::CamcorderProfile *highTimeLapseProfile, *highSpecificTimeLapseProfile;
-    createDefaultCamcorderTimeLapseHighProfiles(&highTimeLapseProfile, &highSpecificTimeLapseProfile);
+    createDefaultCamcorderTimeLapseHighProfiles(&highTimeLapseProfile,
+            &highSpecificTimeLapseProfile);
     profiles->mCamcorderProfiles.add(highTimeLapseProfile);
     profiles->mCamcorderProfiles.add(highSpecificTimeLapseProfile);
 
@@ -868,32 +823,6 @@
     profiles->mImageEncodingQualityLevels.add(levels);
 }
 
-/*static*/ void
-MediaProfiles::createDefaultVideoEditorCap(MediaProfiles *profiles)
-{
-    profiles->mVideoEditorCap =
-        new MediaProfiles::VideoEditorCap(
-                VIDEOEDITOR_DEFAULT_MAX_INPUT_FRAME_WIDTH,
-                VIDEOEDITOR_DEFUALT_MAX_INPUT_FRAME_HEIGHT,
-                VIDEOEDITOR_DEFAULT_MAX_OUTPUT_FRAME_WIDTH,
-                VIDEOEDITOR_DEFUALT_MAX_OUTPUT_FRAME_HEIGHT,
-                VIDEOEDITOR_DEFAULT_MAX_PREFETCH_YUV_FRAMES);
-}
-/*static*/ void
-MediaProfiles::createDefaultExportVideoProfiles(MediaProfiles *profiles)
-{
-    // Create default video export profiles
-    profiles->mVideoEditorExportProfiles.add(
-        new ExportVideoProfile(VIDEO_ENCODER_H263,
-            OMX_VIDEO_H263ProfileBaseline, OMX_VIDEO_H263Level10));
-    profiles->mVideoEditorExportProfiles.add(
-        new ExportVideoProfile(VIDEO_ENCODER_MPEG_4_SP,
-            OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level1));
-    profiles->mVideoEditorExportProfiles.add(
-        new ExportVideoProfile(VIDEO_ENCODER_H264,
-            OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel13));
-}
-
 /*static*/ MediaProfiles*
 MediaProfiles::createDefaultInstance()
 {
@@ -905,8 +834,6 @@
     createDefaultAudioDecoders(profiles);
     createDefaultEncoderOutputFileFormats(profiles);
     createDefaultImageEncodingQualityLevels(profiles);
-    createDefaultVideoEditorCap(profiles);
-    createDefaultExportVideoProfiles(profiles);
     return profiles;
 }
 
@@ -1004,54 +931,6 @@
     ALOGE("The given video encoder param name %s is not found", name);
     return -1;
 }
-int MediaProfiles::getVideoEditorExportParamByName(
-    const char *name, int codec) const
-{
-    ALOGV("getVideoEditorExportParamByName: name %s codec %d", name, codec);
-    ExportVideoProfile *exportProfile = NULL;
-    int index = -1;
-    for (size_t i =0; i < mVideoEditorExportProfiles.size(); i++) {
-        exportProfile = mVideoEditorExportProfiles[i];
-        if (exportProfile->mCodec == codec) {
-            index = i;
-            break;
-        }
-    }
-    if (index == -1) {
-        ALOGE("The given video decoder %d is not found", codec);
-        return -1;
-    }
-    if (!strcmp("videoeditor.export.profile", name))
-        return exportProfile->mProfile;
-    if (!strcmp("videoeditor.export.level", name))
-        return exportProfile->mLevel;
-
-    ALOGE("The given video editor export param name %s is not found", name);
-    return -1;
-}
-int MediaProfiles::getVideoEditorCapParamByName(const char *name) const
-{
-    ALOGV("getVideoEditorCapParamByName: %s", name);
-
-    if (mVideoEditorCap == NULL) {
-        ALOGE("The mVideoEditorCap is not created, then create default cap.");
-        createDefaultVideoEditorCap(sInstance);
-    }
-
-    if (!strcmp("videoeditor.input.width.max", name))
-        return mVideoEditorCap->mMaxInputFrameWidth;
-    if (!strcmp("videoeditor.input.height.max", name))
-        return mVideoEditorCap->mMaxInputFrameHeight;
-    if (!strcmp("videoeditor.output.width.max", name))
-        return mVideoEditorCap->mMaxOutputFrameWidth;
-    if (!strcmp("videoeditor.output.height.max", name))
-        return mVideoEditorCap->mMaxOutputFrameHeight;
-    if (!strcmp("maxPrefetchYUVFrames", name))
-        return mVideoEditorCap->mMaxPrefetchYUVFrames;
-
-    ALOGE("The given video editor param name %s is not found", name);
-    return -1;
-}
 
 Vector<audio_encoder> MediaProfiles::getAudioEncoders() const
 {
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
new file mode 100644
index 0000000..40ec0cb
--- /dev/null
+++ b/media/libmedia/MediaResource.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaResource"
+#include <utils/Log.h>
+#include <media/MediaResource.h>
+
+namespace android {
+
+const char kResourceSecureCodec[] = "secure-codec";
+const char kResourceNonSecureCodec[] = "non-secure-codec";
+const char kResourceAudioCodec[] = "audio-codec";
+const char kResourceVideoCodec[] = "video-codec";
+const char kResourceGraphicMemory[] = "graphic-memory";
+
+MediaResource::MediaResource() : mValue(0) {}
+
+MediaResource::MediaResource(String8 type, uint64_t value)
+        : mType(type),
+          mValue(value) {}
+
+MediaResource::MediaResource(String8 type, String8 subType, uint64_t value)
+        : mType(type),
+          mSubType(subType),
+          mValue(value) {}
+
+void MediaResource::readFromParcel(const Parcel &parcel) {
+    mType = parcel.readString8();
+    mSubType = parcel.readString8();
+    mValue = parcel.readUint64();
+}
+
+void MediaResource::writeToParcel(Parcel *parcel) const {
+    parcel->writeString8(mType);
+    parcel->writeString8(mSubType);
+    parcel->writeUint64(mValue);
+}
+
+String8 MediaResource::toString() const {
+    String8 str;
+    str.appendFormat("%s/%s:%llu", mType.string(), mSubType.string(), (unsigned long long)mValue);
+    return str;
+}
+
+bool MediaResource::operator==(const MediaResource &other) const {
+    return (other.mType == mType) && (other.mSubType == mSubType) && (other.mValue == mValue);
+}
+
+bool MediaResource::operator!=(const MediaResource &other) const {
+    return !(*this == other);
+}
+
+}; // namespace android
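
A quick sketch of the value semantics added here: a MediaResource written to a Parcel and read back compares equal. Illustrative only; the type and subtype strings are placeholders.

    // Illustrative only: Parcel round trip for MediaResource.
    #define LOG_TAG "MediaResourceSketch"
    #include <utils/Log.h>
    #include <binder/Parcel.h>
    #include <media/MediaResource.h>

    using namespace android;

    static bool roundTripsIntact() {
        MediaResource in(String8("video-codec"), String8("non-secure"), 1);

        Parcel parcel;
        in.writeToParcel(&parcel);
        parcel.setDataPosition(0);  // rewind before reading back

        MediaResource out;
        out.readFromParcel(parcel);
        ALOGV("in=%s out=%s", in.toString().string(), out.toString().string());
        return in == out;  // true: type, subtype and value all survive
    }
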
diff --git a/media/libmedia/MediaResourcePolicy.cpp b/media/libmedia/MediaResourcePolicy.cpp
new file mode 100644
index 0000000..5210825
--- /dev/null
+++ b/media/libmedia/MediaResourcePolicy.cpp
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaResourcePolicy"
+#include <utils/Log.h>
+#include <media/MediaResourcePolicy.h>
+
+namespace android {
+
+const char kPolicySupportsMultipleSecureCodecs[] = "supports-multiple-secure-codecs";
+const char kPolicySupportsSecureWithNonSecureCodec[] = "supports-secure-with-non-secure-codec";
+
+MediaResourcePolicy::MediaResourcePolicy() {}
+
+MediaResourcePolicy::MediaResourcePolicy(String8 type, String8 value)
+        : mType(type),
+          mValue(value) {}
+
+void MediaResourcePolicy::readFromParcel(const Parcel &parcel) {
+    mType = parcel.readString8();
+    mValue = parcel.readString8();
+}
+
+void MediaResourcePolicy::writeToParcel(Parcel *parcel) const {
+    parcel->writeString8(mType);
+    parcel->writeString8(mValue);
+}
+
+String8 MediaResourcePolicy::toString() const {
+    String8 str;
+    str.appendFormat("%s:%s", mType.string(), mValue.string());
+    return str;
+}
+
+}; // namespace android
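
And the matching sketch for the policy side: the vector handed to IResourceManagerService::config(). Illustrative only; the keys are the two constants defined above and the "true" values are placeholders.

    // Illustrative only: a default policy set for config().
    #include <media/MediaResourcePolicy.h>
    #include <utils/Vector.h>

    using namespace android;

    static Vector<MediaResourcePolicy> defaultPolicies() {
        Vector<MediaResourcePolicy> policies;
        policies.add(MediaResourcePolicy(                // kPolicySupportsMultipleSecureCodecs
                String8("supports-multiple-secure-codecs"), String8("true")));
        policies.add(MediaResourcePolicy(                // kPolicySupportsSecureWithNonSecureCodec
                String8("supports-secure-with-non-secure-codec"), String8("true")));
        return policies;
    }
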
diff --git a/media/libmedia/MemoryLeakTrackUtil.cpp b/media/libmedia/MemoryLeakTrackUtil.cpp
index d31f721..554dbae 100644
--- a/media/libmedia/MemoryLeakTrackUtil.cpp
+++ b/media/libmedia/MemoryLeakTrackUtil.cpp
@@ -173,7 +173,7 @@
 
 #else
 // Does nothing
-void dumpMemoryAddresses(int fd) {}
+void dumpMemoryAddresses(int fd __unused) {}
 
 #endif
 }  // namespace android
diff --git a/media/libmedia/SingleStateQueue.cpp b/media/libmedia/SingleStateQueue.cpp
deleted file mode 100644
index c241184..0000000
--- a/media/libmedia/SingleStateQueue.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <new>
-#include <cutils/atomic.h>
-#include <media/SingleStateQueue.h>
-
-namespace android {
-
-template<typename T> SingleStateQueue<T>::Mutator::Mutator(Shared *shared)
-    : mSequence(0), mShared((Shared *) shared)
-{
-    // exactly one of Mutator and Observer must initialize, currently it is Observer
-    //shared->init();
-}
-
-template<typename T> int32_t SingleStateQueue<T>::Mutator::push(const T& value)
-{
-    Shared *shared = mShared;
-    int32_t sequence = mSequence;
-    sequence++;
-    android_atomic_acquire_store(sequence, &shared->mSequence);
-    shared->mValue = value;
-    sequence++;
-    android_atomic_release_store(sequence, &shared->mSequence);
-    mSequence = sequence;
-    // consider signalling a futex here, if we know that observer is waiting
-    return sequence;
-}
-
-template<typename T> bool SingleStateQueue<T>::Mutator::ack()
-{
-    return mShared->mAck - mSequence == 0;
-}
-
-template<typename T> bool SingleStateQueue<T>::Mutator::ack(int32_t sequence)
-{
-    // this relies on 2's complement rollover to detect an ancient sequence number
-    return mShared->mAck - sequence >= 0;
-}
-
-template<typename T> SingleStateQueue<T>::Observer::Observer(Shared *shared)
-    : mSequence(0), mSeed(1), mShared((Shared *) shared)
-{
-    // exactly one of Mutator and Observer must initialize, currently it is Observer
-    shared->init();
-}
-
-template<typename T> bool SingleStateQueue<T>::Observer::poll(T& value)
-{
-    Shared *shared = mShared;
-    int32_t before = shared->mSequence;
-    if (before == mSequence) {
-        return false;
-    }
-    for (int tries = 0; ; ) {
-        const int MAX_TRIES = 5;
-        if (before & 1) {
-            if (++tries >= MAX_TRIES) {
-                return false;
-            }
-            before = shared->mSequence;
-        } else {
-            android_memory_barrier();
-            T temp = shared->mValue;
-            int32_t after = android_atomic_release_load(&shared->mSequence);
-            if (after == before) {
-                value = temp;
-                shared->mAck = before;
-                mSequence = before;
-                return true;
-            }
-            if (++tries >= MAX_TRIES) {
-                return false;
-            }
-            before = after;
-        }
-    }
-}
-
-#if 0
-template<typename T> SingleStateQueue<T>::SingleStateQueue(void /*Shared*/ *shared)
-{
-    ((Shared *) shared)->init();
-}
-#endif
-
-}   // namespace android
-
-// hack for gcc
-#ifdef SINGLE_STATE_QUEUE_INSTANTIATIONS
-#include SINGLE_STATE_QUEUE_INSTANTIATIONS
-#endif
diff --git a/media/libmedia/SingleStateQueueInstantiations.cpp b/media/libmedia/SingleStateQueueInstantiations.cpp
deleted file mode 100644
index 0265c8c..0000000
--- a/media/libmedia/SingleStateQueueInstantiations.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/SingleStateQueue.h>
-#include <private/media/StaticAudioTrackState.h>
-#include <media/AudioTimestamp.h>
-
-// FIXME hack for gcc
-
-namespace android {
-
-template class SingleStateQueue<StaticAudioTrackState>; // typedef StaticAudioTrackSingleStateQueue
-template class SingleStateQueue<AudioTimestamp>;        // typedef AudioTimestampSingleStateQueue
-
-}
diff --git a/media/libmedia/StringArray.cpp b/media/libmedia/StringArray.cpp
index 5f5b57a..b2e5907 100644
--- a/media/libmedia/StringArray.cpp
+++ b/media/libmedia/StringArray.cpp
@@ -16,7 +16,7 @@
 
 //
 // Sortable array of strings.  STL-ish, but STL-free.
-//  
+//
 
 #include <stdlib.h>
 #include <string.h>
@@ -110,4 +110,4 @@
 }
 
 
-}; // namespace android
+} // namespace android
diff --git a/media/libmedia/ToneGenerator.cpp b/media/libmedia/ToneGenerator.cpp
index 2cc4685..6da5348 100644
--- a/media/libmedia/ToneGenerator.cpp
+++ b/media/libmedia/ToneGenerator.cpp
@@ -984,7 +984,6 @@
             if ((mStartTime.tv_sec != 0) && (clock_gettime(CLOCK_MONOTONIC, &stopTime) == 0)) {
                 time_t sec = stopTime.tv_sec - mStartTime.tv_sec;
                 long nsec = stopTime.tv_nsec - mStartTime.tv_nsec;
-                long durationMs;
                 if (nsec < 0) {
                     --sec;
                     nsec += 1000000000;
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index f91e3e4..f5c1b1f 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -34,11 +34,12 @@
 
 // ---------------------------------------------------------------------------
 
-Visualizer::Visualizer (int32_t priority,
+Visualizer::Visualizer (const String16& opPackageName,
+         int32_t priority,
          effect_callback_t cbf,
          void* user,
          int sessionId)
-    :   AudioEffect(SL_IID_VISUALIZATION, NULL, priority, cbf, user, sessionId),
+    :   AudioEffect(SL_IID_VISUALIZATION, opPackageName, NULL, priority, cbf, user, sessionId),
         mCaptureRate(CAPTURE_RATE_DEF),
         mCaptureSize(CAPTURE_SIZE_DEF),
         mSampleRate(44100000),
@@ -53,12 +54,8 @@
 Visualizer::~Visualizer()
 {
     ALOGV("Visualizer::~Visualizer()");
-    if (mCaptureThread != NULL) {
-        mCaptureThread->requestExitAndWait();
-        mCaptureThread.clear();
-    }
-    mCaptureCallBack = NULL;
-    mCaptureFlags = 0;
+    setEnabled(false);
+    setCaptureCallBack(NULL, NULL, 0, 0, true);
 }
 
 status_t Visualizer::setEnabled(bool enabled)
@@ -98,14 +95,14 @@
 }
 
 status_t Visualizer::setCaptureCallBack(capture_cbk_t cbk, void* user, uint32_t flags,
-        uint32_t rate)
+        uint32_t rate, bool force)
 {
     if (rate > CAPTURE_RATE_MAX) {
         return BAD_VALUE;
     }
     Mutex::Autolock _l(mCaptureLock);
 
-    if (mEnabled) {
+    if (force || mEnabled) {
         return INVALID_OPERATION;
     }
 
@@ -429,4 +426,4 @@
     return false;
 }
 
-}; // namespace android
+} // namespace android
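
A small sketch of the constructor change above: callers now pass the app's package name, which is forwarded to AudioEffect. Illustrative only; the package name is a placeholder.

    // Illustrative only: constructing a Visualizer after this change.
    #include <media/Visualizer.h>
    #include <utils/String16.h>

    using namespace android;

    static sp<Visualizer> makeVisualizer(int sessionId) {
        return new Visualizer(String16("com.example.app"),  // placeholder package
                              0 /* priority */,
                              NULL /* cbf */,
                              NULL /* user */,
                              sessionId);
    }
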
diff --git a/media/libmedia/docs/Makefile b/media/libmedia/docs/Makefile
new file mode 100644
index 0000000..bddbc9b
--- /dev/null
+++ b/media/libmedia/docs/Makefile
@@ -0,0 +1,2 @@
+paused.png : paused.dot
+	dot -Tpng < $< > $@
diff --git a/media/libmedia/docs/paused.dot b/media/libmedia/docs/paused.dot
new file mode 100644
index 0000000..11e1777
--- /dev/null
+++ b/media/libmedia/docs/paused.dot
@@ -0,0 +1,85 @@
+digraph paused {
+initial [label="INITIAL\n\
+mIgnoreNextPausedInt = false\n\
+mPaused = false\n\
+mPausedInt = false"];
+
+resume_body [label="mIgnoreNextPausedInt = true\nif (mPaused || mPausedInt)"];
+resume_paused [label="mPaused = false\nmPausedInt = false\nsignal()"];
+resume_paused -> resume_merged;
+resume_merged [label="return"];
+
+Application -> ATstop;
+ATstop [label="AudioTrack::stop()"];
+ATstop -> pause;
+Application -> ATpause;
+ATpause [label="AudioTrack::pause()"];
+ATpause -> pause;
+ATstart -> resume;
+ATstart [label="AudioTrack::start()"];
+destructor [label="~AudioTrack()"];
+destructor -> requestExit;
+requestExit [label="AudioTrackThread::requestExit()"];
+requestExit -> resume;
+Application -> ATsetMarkerPosition;
+ATsetMarkerPosition [label="AudioTrack::setMarkerPosition()\n[sets marker variables]"];
+ATsetMarkerPosition -> ATTwake;
+Application -> ATsetPositionUpdatePeriod;
+ATsetPositionUpdatePeriod [label="AudioTrack::setPositionUpdatePeriod()\n[sets update period variables]"];
+ATsetPositionUpdatePeriod -> ATTwake;
+Application -> ATstart;
+
+resume [label="AudioTrackThread::resume()"];
+resume -> resume_body;
+
+resume_body -> resume_paused [label="true"];
+resume_body -> resume_merged [label="false"];
+
+ATTwake [label="AudioTrackThread::wake()\nif (!mPaused && mPausedInt && mPausedNs > 0)"];
+ATTwake -> ATTWake_wakeable [label="true"];
+ATTWake_wakeable [label="mIgnoreNextPausedInt = true\nmPausedInt = false\nsignal()"];
+ATTwake -> ATTWake_cannotwake [label="false"];
+ATTWake_cannotwake [label="ignore"];
+
+pause [label="mPaused = true"];
+pause -> return;
+
+threadLoop [label="AudioTrackThread::threadLoop()\nENTRY"];
+threadLoop -> threadLoop_1;
+threadLoop_1 [label="if (mPaused)"];
+threadLoop_1 -> threadLoop_1_true [label="true"];
+threadLoop_1 -> threadLoop_2 [label="false"];
+threadLoop_1_true [label="wait()\nreturn true"];
+threadLoop_2 [label="if (mIgnoreNextPausedInt)"];
+threadLoop_2 -> threadLoop_2_true [label="true"];
+threadLoop_2 -> threadLoop_3 [label="false"];
+threadLoop_2_true [label="mIgnoreNextPausedInt = false\nmPausedInt = false"];
+threadLoop_2_true -> threadLoop_3;
+threadLoop_3 [label="if (mPausedInt)"];
+threadLoop_3 -> threadLoop_3_true [label="true"];
+threadLoop_3 -> threadLoop_4 [label="false"];
+threadLoop_3_true [label="wait()\nmPausedInt = false\nreturn true"];
+threadLoop_4 [label="if (exitPending)"];
+threadLoop_4 -> threadLoop_4_true [label="true"];
+threadLoop_4 -> threadLoop_5 [label="false"];
+threadLoop_4_true [label="return false"];
+threadLoop_5 [label="ns = processAudioBuffer()"];
+threadLoop_5 -> threadLoop_6;
+threadLoop_6 [label="case ns"];
+threadLoop_6 -> threadLoop_6_0 [label="0"];
+threadLoop_6 -> threadLoop_6_NS_INACTIVE [label="NS_INACTIVE"];
+threadLoop_6 -> threadLoop_6_NS_NEVER [label="NS_NEVER"];
+threadLoop_6 -> threadLoop_6_NS_WHENEVER [label="NS_WHENEVER"];
+threadLoop_6 -> threadLoop_6_default [label="default"];
+threadLoop_6_default [label="if (ns < 0)"];
+threadLoop_6_default -> threadLoop_6_default_true [label="true"];
+threadLoop_6_default -> threadLoop_6_default_false [label="false"];
+threadLoop_6_default_true [label="FATAL"];
+threadLoop_6_default_false [label="pauseInternal(ns) [wake()-able]\nmPausedInt = true\nmPausedNs = ns\nreturn true"];
+threadLoop_6_0 [label="return true"];
+threadLoop_6_NS_INACTIVE [label="pauseInternal()\nmPausedInt = true\nmPausedNs = 0\nreturn true"];
+threadLoop_6_NS_NEVER [label="return false"];
+threadLoop_6_NS_WHENEVER [label="ns = 1s"];
+threadLoop_6_NS_WHENEVER -> threadLoop_6_default_false;
+
+}
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 8e8a1ed..9a76f58 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -129,6 +129,18 @@
     return mRetriever->setDataSource(fd, offset, length);
 }
 
+status_t MediaMetadataRetriever::setDataSource(
+    const sp<IDataSource>& dataSource)
+{
+    ALOGV("setDataSource(IDataSource)");
+    Mutex::Autolock _l(mLock);
+    if (mRetriever == 0) {
+        ALOGE("retriever is not initialized");
+        return INVALID_OPERATION;
+    }
+    return mRetriever->setDataSource(dataSource);
+}
+
 sp<IMemory> MediaMetadataRetriever::getFrameAtTime(int64_t timeUs, int option)
 {
     ALOGV("getFrameAtTime: time(%" PRId64 " us) option(%d)", timeUs, option);
@@ -176,4 +188,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
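
A caller-side sketch of the new overload, illustrative only; how the sp<IDataSource> itself is obtained is out of scope here, and the frame option value is a placeholder.

    // Illustrative only: feeding the retriever an IDataSource.
    #include <binder/IMemory.h>
    #include <media/IDataSource.h>
    #include <media/mediametadataretriever.h>

    using namespace android;

    static sp<IMemory> firstFrame(const sp<IDataSource> &source) {
        sp<MediaMetadataRetriever> retriever = new MediaMetadataRetriever();
        if (retriever->setDataSource(source) != OK) {
            return NULL;
        }
        return retriever->getFrameAtTime(0 /* timeUs */, 0 /* option, placeholder */);
    }
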
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 05c89ed..502ab2d 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -32,7 +32,10 @@
 #include <gui/Surface.h>
 
 #include <media/mediaplayer.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/AudioSystem.h>
+#include <media/AVSyncSettings.h>
+#include <media/IDataSource.h>
 
 #include <binder/MemoryBase.h>
 
@@ -194,6 +197,22 @@
     return err;
 }
 
+status_t MediaPlayer::setDataSource(const sp<IDataSource> &source)
+{
+    ALOGV("setDataSource(IDataSource)");
+    status_t err = UNKNOWN_ERROR;
+    const sp<IMediaPlayerService>& service(getMediaPlayerService());
+    if (service != 0) {
+        sp<IMediaPlayer> player(service->create(this, mAudioSessionId));
+        if ((NO_ERROR != doSetRetransmitEndpoint(player)) ||
+            (NO_ERROR != player->setDataSource(source))) {
+            player.clear();
+        }
+        err = attachNewPlayer(player);
+    }
+    return err;
+}
+
 status_t MediaPlayer::invoke(const Parcel& request, Parcel *reply)
 {
     Mutex::Autolock _l(mLock);
@@ -240,10 +259,11 @@
 // must call with lock held
 status_t MediaPlayer::prepareAsync_l()
 {
-    if ( (mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
-        mPlayer->setAudioStreamType(mStreamType);
+    if ( (mPlayer != 0) && ( mCurrentState & (MEDIA_PLAYER_INITIALIZED | MEDIA_PLAYER_STOPPED) ) ) {
         if (mAudioAttributesParcel != NULL) {
             mPlayer->setParameter(KEY_PARAMETER_AUDIO_ATTRIBUTES, *mAudioAttributesParcel);
+        } else {
+            mPlayer->setAudioStreamType(mStreamType);
         }
         mCurrentState = MEDIA_PLAYER_PREPARING;
         return mPlayer->prepareAsync();
@@ -371,6 +391,9 @@
         if ((mCurrentState & MEDIA_PLAYER_STARTED) && ! temp) {
             ALOGE("internal/external state mismatch corrected");
             mCurrentState = MEDIA_PLAYER_PAUSED;
+        } else if ((mCurrentState & MEDIA_PLAYER_PAUSED) && temp) {
+            ALOGE("internal/external state mismatch corrected");
+            mCurrentState = MEDIA_PLAYER_STARTED;
         }
         return temp;
     }
@@ -378,6 +401,52 @@
     return false;
 }
 
+status_t MediaPlayer::setPlaybackSettings(const AudioPlaybackRate& rate)
+{
+    ALOGV("setPlaybackSettings: %f %f %d %d",
+            rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+    // A negative speed or pitch does not make sense. Further validation will
+    // be done by the respective mediaplayers.
+    if (rate.mSpeed < 0.f || rate.mPitch < 0.f) {
+        return BAD_VALUE;
+    }
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    status_t err = mPlayer->setPlaybackSettings(rate);
+    if (err == OK) {
+        if (rate.mSpeed == 0.f && mCurrentState == MEDIA_PLAYER_STARTED) {
+            mCurrentState = MEDIA_PLAYER_PAUSED;
+        } else if (rate.mSpeed != 0.f && mCurrentState == MEDIA_PLAYER_PAUSED) {
+            mCurrentState = MEDIA_PLAYER_STARTED;
+        }
+    }
+    return err;
+}
+
+status_t MediaPlayer::getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    return mPlayer->getPlaybackSettings(rate);
+}
+
+status_t MediaPlayer::setSyncSettings(const AVSyncSettings& sync, float videoFpsHint)
+{
+    ALOGV("setSyncSettings: %u %u %f %f",
+            sync.mSource, sync.mAudioAdjustMode, sync.mTolerance, videoFpsHint);
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    return mPlayer->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t MediaPlayer::getSyncSettings(
+        AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */)
+{
+    Mutex::Autolock _l(mLock);
+    if (mPlayer == 0) return INVALID_OPERATION;
+    return mPlayer->getSyncSettings(sync, videoFps);
+}
+
 status_t MediaPlayer::getVideoWidth(int *w)
 {
     ALOGV("getVideoWidth");
@@ -414,7 +483,8 @@
 status_t MediaPlayer::getDuration_l(int *msec)
 {
     ALOGV("getDuration_l");
-    bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE));
+    bool isValidState = (mCurrentState & (MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_STARTED |
+            MEDIA_PLAYER_PAUSED | MEDIA_PLAYER_STOPPED | MEDIA_PLAYER_PLAYBACK_COMPLETE));
     if (mPlayer != 0 && isValidState) {
         int durationMs;
         status_t ret = mPlayer->getDuration(&durationMs);
@@ -443,7 +513,8 @@
 status_t MediaPlayer::seekTo_l(int msec)
 {
     ALOGV("seekTo %d", msec);
-    if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED | MEDIA_PLAYER_PAUSED |  MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
+    if ((mPlayer != 0) && ( mCurrentState & ( MEDIA_PLAYER_STARTED | MEDIA_PLAYER_PREPARED |
+            MEDIA_PLAYER_PAUSED |  MEDIA_PLAYER_PLAYBACK_COMPLETE) ) ) {
         if ( msec < 0 ) {
             ALOGW("Attempt to seek to invalid position: %d", msec);
             msec = 0;
@@ -477,7 +548,8 @@
             return NO_ERROR;
         }
     }
-    ALOGE("Attempt to perform seekTo in wrong state: mPlayer=%p, mCurrentState=%u", mPlayer.get(), mCurrentState);
+    ALOGE("Attempt to perform seekTo in wrong state: mPlayer=%p, mCurrentState=%u", mPlayer.get(),
+            mCurrentState);
     return INVALID_OPERATION;
 }
 
@@ -502,6 +574,7 @@
             ALOGE("reset() failed with return code (%d)", ret);
             mCurrentState = MEDIA_PLAYER_STATE_ERROR;
         } else {
+            mPlayer->disconnect();
             mCurrentState = MEDIA_PLAYER_IDLE;
         }
         // setDataSource has to be called again to create a
@@ -663,24 +736,28 @@
 status_t MediaPlayer::setParameter(int key, const Parcel& request)
 {
     ALOGV("MediaPlayer::setParameter(%d)", key);
+    status_t status = INVALID_OPERATION;
     Mutex::Autolock _l(mLock);
     if (checkStateForKeySet_l(key) != OK) {
-        return INVALID_OPERATION;
-    }
-    if (mPlayer != NULL) {
-        return  mPlayer->setParameter(key, request);
+        return status;
     }
     switch (key) {
     case KEY_PARAMETER_AUDIO_ATTRIBUTES:
-        // no player, save the marshalled audio attributes
+        // save the marshalled audio attributes
         if (mAudioAttributesParcel != NULL) { delete mAudioAttributesParcel; };
         mAudioAttributesParcel = new Parcel();
         mAudioAttributesParcel->appendFrom(&request, 0, request.dataSize());
-        return OK;
+        status = OK;
+        break;
     default:
-        ALOGV("setParameter: no active player");
-        return INVALID_OPERATION;
+        ALOGV_IF(mPlayer == NULL, "setParameter: no active player");
+        break;
     }
+
+    if (mPlayer != NULL) {
+        status = mPlayer->setParameter(key, request);
+    }
+    return status;
 }
 
 status_t MediaPlayer::getParameter(int key, Parcel *reply)
@@ -818,6 +895,9 @@
     case MEDIA_SUBTITLE_DATA:
         ALOGV("Received subtitle data message");
         break;
+    case MEDIA_META_DATA:
+        ALOGV("Received timed metadata message");
+        break;
     default:
         ALOGV("unrecognized message: (%d, %d, %d)", msg, ext1, ext2);
         break;
@@ -855,4 +935,4 @@
     return mPlayer->setNextPlayer(next == NULL ? NULL : next->mPlayer);
 }
 
-}; // namespace android
+} // namespace android
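
A short sketch of the new rate API, illustrative only: adjust just the speed field of whatever settings the player currently has. Per setPlaybackSettings() above, a speed of 0 moves the client state to PAUSED and a non-zero speed back to STARTED.

    // Illustrative only: changing playback speed in place.
    #include <media/AudioResamplerPublic.h>
    #include <media/mediaplayer.h>

    using namespace android;

    static status_t setSpeed(const sp<MediaPlayer> &player, float speed) {
        AudioPlaybackRate rate;
        status_t err = player->getPlaybackSettings(&rate);
        if (err != OK) {
            return err;
        }
        rate.mSpeed = speed;  // pitch, fallback and stretch modes left as-is
        return player->setPlaybackSettings(rate);
    }
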
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index 1952b86..8bbd8f1 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -27,6 +27,7 @@
 #include <media/IMediaPlayerService.h>
 #include <media/IMediaRecorder.h>
 #include <media/mediaplayer.h>  // for MEDIA_ERROR_SERVER_DIED
+#include <media/stagefright/PersistentSurface.h>
 #include <gui/IGraphicBufferProducer.h>
 
 namespace android {
@@ -264,32 +265,6 @@
     return ret;
 }
 
-status_t MediaRecorder::setOutputFile(const char* path)
-{
-    ALOGV("setOutputFile(%s)", path);
-    if (mMediaRecorder == NULL) {
-        ALOGE("media recorder is not initialized yet");
-        return INVALID_OPERATION;
-    }
-    if (mIsOutputFileSet) {
-        ALOGE("output file has already been set");
-        return INVALID_OPERATION;
-    }
-    if (!(mCurrentState & MEDIA_RECORDER_DATASOURCE_CONFIGURED)) {
-        ALOGE("setOutputFile called in an invalid state(%d)", mCurrentState);
-        return INVALID_OPERATION;
-    }
-
-    status_t ret = mMediaRecorder->setOutputFile(path);
-    if (OK != ret) {
-        ALOGV("setOutputFile failed: %d", ret);
-        mCurrentState = MEDIA_RECORDER_ERROR;
-        return ret;
-    }
-    mIsOutputFileSet = true;
-    return ret;
-}
-
 status_t MediaRecorder::setOutputFile(int fd, int64_t offset, int64_t length)
 {
     ALOGV("setOutputFile(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
@@ -370,6 +345,24 @@
 
 
 
+status_t MediaRecorder::setInputSurface(const sp<PersistentSurface>& surface)
+{
+    ALOGV("setInputSurface");
+    if (mMediaRecorder == NULL) {
+        ALOGE("media recorder is not initialized yet");
+        return INVALID_OPERATION;
+    }
+    bool isInvalidState = (mCurrentState &
+                           (MEDIA_RECORDER_PREPARED |
+                            MEDIA_RECORDER_RECORDING));
+    if (isInvalidState) {
+        ALOGE("setInputSurface is called in an invalid state: %d", mCurrentState);
+        return INVALID_OPERATION;
+    }
+
+    return mMediaRecorder->setInputSurface(surface->getBufferConsumer());
+}
+
 status_t MediaRecorder::setVideoFrameRate(int frames_per_second)
 {
     ALOGV("setVideoFrameRate(%d)", frames_per_second);
@@ -620,13 +613,13 @@
     return INVALID_OPERATION;
 }
 
-MediaRecorder::MediaRecorder() : mSurfaceMediaSource(NULL)
+MediaRecorder::MediaRecorder(const String16& opPackageName) : mSurfaceMediaSource(NULL)
 {
     ALOGV("constructor");
 
     const sp<IMediaPlayerService>& service(getMediaPlayerService());
     if (service != NULL) {
-        mMediaRecorder = service->createMediaRecorder();
+        mMediaRecorder = service->createMediaRecorder(opPackageName);
     }
     if (mMediaRecorder != NULL) {
         mCurrentState = MEDIA_RECORDER_IDLE;
@@ -706,4 +699,4 @@
     notify(MEDIA_RECORDER_EVENT_ERROR, MEDIA_ERROR_SERVER_DIED, 0);
 }
 
-}; // namespace android
+} // namespace android
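
A sketch of the two recorder changes above, illustrative only: construction now carries the calling package name, and a persistent input surface must be attached before the recorder reaches the PREPARED or RECORDING state. The package name is a placeholder.

    // Illustrative only: attaching a persistent input surface.
    #include <media/mediarecorder.h>
    #include <media/stagefright/PersistentSurface.h>
    #include <utils/String16.h>

    using namespace android;

    static status_t recordFromSurface(const sp<PersistentSurface> &surface) {
        sp<MediaRecorder> recorder = new MediaRecorder(String16("com.example.app"));
        // Must be called before prepare(); PREPARED and RECORDING states are rejected.
        return recorder->setInputSurface(surface);
    }
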
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 9d8fe62..4d1b587 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -10,6 +10,7 @@
     ActivityManager.cpp         \
     Crypto.cpp                  \
     Drm.cpp                     \
+    DrmSessionManager.cpp       \
     HDCP.cpp                    \
     MediaPlayerFactory.cpp      \
     MediaPlayerService.cpp      \
@@ -20,7 +21,6 @@
     StagefrightPlayer.cpp       \
     StagefrightRecorder.cpp     \
     TestPlayerStub.cpp          \
-    VideoFrameScheduler.cpp     \
 
 LOCAL_SHARED_LIBRARIES :=       \
     libbinder                   \
@@ -32,6 +32,7 @@
     libdl                       \
     libgui                      \
     libmedia                    \
+    libmediautils               \
     libsonivox                  \
     libstagefright              \
     libstagefright_foundation   \
@@ -53,6 +54,9 @@
     $(TOP)/frameworks/native/include/media/openmax                  \
     $(TOP)/external/tremolo/Tremolo                                 \
 
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CLANG := true
+
 LOCAL_MODULE:= libmediaplayerservice
 
 LOCAL_32_BIT_ONLY := true
diff --git a/media/libmediaplayerservice/Crypto.cpp b/media/libmediaplayerservice/Crypto.cpp
index 8ee7c0b..147d35f 100644
--- a/media/libmediaplayerservice/Crypto.cpp
+++ b/media/libmediaplayerservice/Crypto.cpp
@@ -22,6 +22,7 @@
 
 #include "Crypto.h"
 
+#include <binder/IMemory.h>
 #include <media/hardware/CryptoAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
@@ -88,7 +89,7 @@
 
     // first check cache
     Vector<uint8_t> uuidVector;
-    uuidVector.appendArray(uuid, sizeof(uuid));
+    uuidVector.appendArray(uuid, sizeof(uuid[0]) * 16);
     ssize_t index = mUUIDToLibraryPathMap.indexOfKey(uuidVector);
     if (index >= 0) {
         if (loadLibraryForScheme(mUUIDToLibraryPathMap[index], uuid)) {
@@ -238,7 +239,7 @@
         const uint8_t key[16],
         const uint8_t iv[16],
         CryptoPlugin::Mode mode,
-        const void *srcPtr,
+        const sp<IMemory> &sharedBuffer, size_t offset,
         const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
         void *dstPtr,
         AString *errorDetailMsg) {
@@ -252,6 +253,8 @@
         return -EINVAL;
     }
 
+    const void *srcPtr = static_cast<uint8_t *>(sharedBuffer->pointer()) + offset;
+
     return mPlugin->decrypt(
             secure, key, iv, mode, srcPtr, subSamples, numSubSamples, dstPtr,
             errorDetailMsg);
@@ -265,4 +268,14 @@
     }
 }
 
+status_t Crypto::setMediaDrmSession(const Vector<uint8_t> &sessionId) {
+    Mutex::Autolock autoLock(mLock);
+
+    status_t result = NO_INIT;
+    if (mInitCheck == OK && mPlugin != NULL) {
+        result = mPlugin->setMediaDrmSession(sessionId);
+    }
+    return result;
+}
+
 }  // namespace android
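
A sketch of what the decrypt() signature change means for callers, assuming ICrypto mirrors the Bn-side signature shown above (an assumption, not stated in this hunk): source bytes now travel in a shared IMemory plus an offset rather than as a raw pointer. The single clear subsample keeps the example minimal.

    // Illustrative only: decrypting one clear subsample through shared memory.
    #include <errno.h>
    #include <string.h>
    #include <binder/MemoryDealer.h>
    #include <media/ICrypto.h>
    #include <media/hardware/CryptoAPI.h>
    #include <media/stagefright/foundation/AString.h>

    using namespace android;

    static ssize_t passThrough(const sp<ICrypto> &crypto,
                               const uint8_t key[16], const uint8_t iv[16],
                               const void *src, size_t size, void *dst) {
        sp<MemoryDealer> dealer = new MemoryDealer(size, "crypto-src");
        sp<IMemory> mem = dealer->allocate(size);
        if (mem == NULL) {
            return -ENOMEM;
        }
        memcpy(mem->pointer(), src, size);

        CryptoPlugin::SubSample sub;
        sub.mNumBytesOfClearData = static_cast<uint32_t>(size);
        sub.mNumBytesOfEncryptedData = 0;

        AString errorDetailMsg;
        return crypto->decrypt(false /* secure */, key, iv,
                               CryptoPlugin::kMode_Unencrypted,
                               mem, 0 /* offset */,
                               &sub, 1 /* numSubSamples */,
                               dst, &errorDetailMsg);
    }
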
diff --git a/media/libmediaplayerservice/Crypto.h b/media/libmediaplayerservice/Crypto.h
index 0037c2e..99ea95d 100644
--- a/media/libmediaplayerservice/Crypto.h
+++ b/media/libmediaplayerservice/Crypto.h
@@ -47,12 +47,14 @@
 
     virtual void notifyResolution(uint32_t width, uint32_t height);
 
+    virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+
     virtual ssize_t decrypt(
             bool secure,
             const uint8_t key[16],
             const uint8_t iv[16],
             CryptoPlugin::Mode mode,
-            const void *srcPtr,
+            const sp<IMemory> &sharedBuffer, size_t offset,
             const CryptoPlugin::SubSample *subSamples, size_t numSubSamples,
             void *dstPtr,
             AString *errorDetailMsg);
diff --git a/media/libmediaplayerservice/Drm.cpp b/media/libmediaplayerservice/Drm.cpp
index 9d5ba15..a7f6f8b 100644
--- a/media/libmediaplayerservice/Drm.cpp
+++ b/media/libmediaplayerservice/Drm.cpp
@@ -23,6 +23,8 @@
 
 #include "Drm.h"
 
+#include "DrmSessionClientInterface.h"
+#include "DrmSessionManager.h"
 #include <media/drm/DrmAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AString.h>
@@ -33,6 +35,10 @@
 
 namespace android {
 
+static inline int getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
 static bool checkPermission(const char* permissionString) {
 #ifndef HAVE_ANDROID_OS
     return true;
@@ -58,14 +64,41 @@
     return memcmp((void *)lhs.array(), (void *)rhs.array(), rhs.size()) < 0;
 }
 
+struct DrmSessionClient : public DrmSessionClientInterface {
+    DrmSessionClient(Drm* drm) : mDrm(drm) {}
+
+    virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
+        sp<Drm> drm = mDrm.promote();
+        if (drm == NULL) {
+            return true;
+        }
+        status_t err = drm->closeSession(sessionId);
+        if (err != OK) {
+            return false;
+        }
+        drm->sendEvent(DrmPlugin::kDrmPluginEventSessionReclaimed, 0, &sessionId, NULL);
+        return true;
+    }
+
+protected:
+    virtual ~DrmSessionClient() {}
+
+private:
+    wp<Drm> mDrm;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmSessionClient);
+};
+
 Drm::Drm()
     : mInitCheck(NO_INIT),
+      mDrmSessionClient(new DrmSessionClient(this)),
       mListener(NULL),
       mFactory(NULL),
       mPlugin(NULL) {
 }
 
 Drm::~Drm() {
+    DrmSessionManager::Instance()->removeDrm(mDrmSessionClient);
     delete mPlugin;
     mPlugin = NULL;
     closeFactory();
@@ -104,25 +137,57 @@
 
     if (listener != NULL) {
         Parcel obj;
-        if (sessionId && sessionId->size()) {
-            obj.writeInt32(sessionId->size());
-            obj.write(sessionId->array(), sessionId->size());
-        } else {
-            obj.writeInt32(0);
-        }
-
-        if (data && data->size()) {
-            obj.writeInt32(data->size());
-            obj.write(data->array(), data->size());
-        } else {
-            obj.writeInt32(0);
-        }
+        writeByteArray(obj, sessionId);
+        writeByteArray(obj, data);
 
         Mutex::Autolock lock(mNotifyLock);
         listener->notify(eventType, extra, &obj);
     }
 }
 
+void Drm::sendExpirationUpdate(Vector<uint8_t> const *sessionId,
+                               int64_t expiryTimeInMS)
+{
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+        obj.writeInt64(expiryTimeInMS);
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->notify(DrmPlugin::kDrmPluginEventExpirationUpdate, 0, &obj);
+    }
+}
+
+void Drm::sendKeysChange(Vector<uint8_t> const *sessionId,
+                         Vector<DrmPlugin::KeyStatus> const *keyStatusList,
+                         bool hasNewUsableKey)
+{
+    mEventLock.lock();
+    sp<IDrmClient> listener = mListener;
+    mEventLock.unlock();
+
+    if (listener != NULL) {
+        Parcel obj;
+        writeByteArray(obj, sessionId);
+
+        size_t nkeys = keyStatusList->size();
+        obj.writeInt32(keyStatusList->size());
+        for (size_t i = 0; i < nkeys; ++i) {
+            const DrmPlugin::KeyStatus *keyStatus = &keyStatusList->itemAt(i);
+            writeByteArray(obj, &keyStatus->mKeyId);
+            obj.writeInt32(keyStatus->mType);
+        }
+        obj.writeInt32(hasNewUsableKey);
+
+        Mutex::Autolock lock(mNotifyLock);
+        listener->notify(DrmPlugin::kDrmPluginEventKeysChange, 0, &obj);
+    }
+}
+
 /*
  * Search the plugins directory for a plugin that supports the scheme
  * specified by uuid
@@ -145,7 +210,7 @@
 
     // first check cache
     Vector<uint8_t> uuidVector;
-    uuidVector.appendArray(uuid, sizeof(uuid));
+    uuidVector.appendArray(uuid, sizeof(uuid[0]) * 16);
     ssize_t index = mUUIDToLibraryPathMap.indexOfKey(uuidVector);
     if (index >= 0) {
         if (loadLibraryForScheme(mUUIDToLibraryPathMap[index], uuid)) {
@@ -290,7 +355,29 @@
         return -EINVAL;
     }
 
-    return mPlugin->openSession(sessionId);
+    status_t err = mPlugin->openSession(sessionId);
+    if (err == ERROR_DRM_RESOURCE_BUSY) {
+        bool retry = false;
+        mLock.unlock();
+        // reclaimSession may call back into closeSession; since mLock is shared between Drm
+        // instances, we must unlock here to avoid a deadlock.
+        retry = DrmSessionManager::Instance()->reclaimSession(getCallingPid());
+        mLock.lock();
+        if (mInitCheck != OK) {
+            return mInitCheck;
+        }
+
+        if (mPlugin == NULL) {
+            return -EINVAL;
+        }
+        if (retry) {
+            err = mPlugin->openSession(sessionId);
+        }
+    }
+    if (err == OK) {
+        DrmSessionManager::Instance()->addSession(getCallingPid(), mDrmSessionClient, sessionId);
+    }
+    return err;
 }
 
 status_t Drm::closeSession(Vector<uint8_t> const &sessionId) {
@@ -304,14 +391,19 @@
         return -EINVAL;
     }
 
-    return mPlugin->closeSession(sessionId);
+    status_t err = mPlugin->closeSession(sessionId);
+    if (err == OK) {
+        DrmSessionManager::Instance()->removeSession(sessionId);
+    }
+    return err;
 }
 
 status_t Drm::getKeyRequest(Vector<uint8_t> const &sessionId,
                             Vector<uint8_t> const &initData,
                             String8 const &mimeType, DrmPlugin::KeyType keyType,
                             KeyedVector<String8, String8> const &optionalParameters,
-                            Vector<uint8_t> &request, String8 &defaultUrl) {
+                            Vector<uint8_t> &request, String8 &defaultUrl,
+                            DrmPlugin::KeyRequestType *keyRequestType) {
     Mutex::Autolock autoLock(mLock);
 
     if (mInitCheck != OK) {
@@ -322,8 +414,11 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->getKeyRequest(sessionId, initData, mimeType, keyType,
-                                  optionalParameters, request, defaultUrl);
+                                  optionalParameters, request, defaultUrl,
+                                  keyRequestType);
 }
 
 status_t Drm::provideKeyResponse(Vector<uint8_t> const &sessionId,
@@ -339,6 +434,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->provideKeyResponse(sessionId, response, keySetId);
 }
 
@@ -368,6 +465,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->restoreKeys(sessionId, keySetId);
 }
 
@@ -383,6 +482,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->queryKeyStatus(sessionId, infoMap);
 }
 
@@ -562,6 +663,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->setCipherAlgorithm(sessionId, algorithm);
 }
 
@@ -577,6 +680,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->setMacAlgorithm(sessionId, algorithm);
 }
 
@@ -595,6 +700,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->encrypt(sessionId, keyId, input, iv, output);
 }
 
@@ -613,6 +720,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->decrypt(sessionId, keyId, input, iv, output);
 }
 
@@ -630,6 +739,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->sign(sessionId, keyId, message, signature);
 }
 
@@ -648,6 +759,8 @@
         return -EINVAL;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->verify(sessionId, keyId, message, signature, match);
 }
 
@@ -670,10 +783,12 @@
         return -EPERM;
     }
 
+    DrmSessionManager::Instance()->useSession(sessionId);
+
     return mPlugin->signRSA(sessionId, algorithm, message, wrappedKey, signature);
 }
 
-void Drm::binderDied(const wp<IBinder> &the_late_who)
+void Drm::binderDied(const wp<IBinder> &the_late_who __unused)
 {
     mEventLock.lock();
     mListener.clear();
@@ -685,4 +800,14 @@
     closeFactory();
 }
 
+void Drm::writeByteArray(Parcel &obj, Vector<uint8_t> const *array)
+{
+    if (array && array->size()) {
+        obj.writeInt32(array->size());
+        obj.write(array->array(), array->size());
+    } else {
+        obj.writeInt32(0);
+    }
+}
+
 }  // namespace android
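The new writeByteArray() helper length-prefixes a byte vector into a Parcel (a null or empty vector is encoded as a zero length). For illustration only, a receiving side could unpack it with a counterpart like the sketch below; the name readByteArray and its placement are assumptions, not part of the hunks shown here.

    static void readByteArray(const Parcel &obj, Vector<uint8_t> *array) {
        // Read the length prefix written by Drm::writeByteArray().
        int32_t size = obj.readInt32();
        array->clear();
        if (size > 0) {
            // Grow the vector, then copy the payload bytes out of the Parcel.
            array->insertAt((size_t)0, (size_t)size);
            obj.read(array->editArray(), (size_t)size);
        }
    }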
diff --git a/media/libmediaplayerservice/Drm.h b/media/libmediaplayerservice/Drm.h
index 2997da1..056723c 100644
--- a/media/libmediaplayerservice/Drm.h
+++ b/media/libmediaplayerservice/Drm.h
@@ -26,8 +26,9 @@
 
 namespace android {
 
-struct DrmFactory;
-struct DrmPlugin;
+class DrmFactory;
+class DrmPlugin;
+struct DrmSessionClientInterface;
 
 struct Drm : public BnDrm,
              public IBinder::DeathRecipient,
@@ -52,7 +53,8 @@
                       Vector<uint8_t> const &initData,
                       String8 const &mimeType, DrmPlugin::KeyType keyType,
                       KeyedVector<String8, String8> const &optionalParameters,
-                      Vector<uint8_t> &request, String8 &defaultUrl);
+                      Vector<uint8_t> &request, String8 &defaultUrl,
+                      DrmPlugin::KeyRequestType *keyRequestType);
 
     virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
                                         Vector<uint8_t> const &response,
@@ -131,6 +133,13 @@
                            Vector<uint8_t> const *sessionId,
                            Vector<uint8_t> const *data);
 
+    virtual void sendExpirationUpdate(Vector<uint8_t> const *sessionId,
+                                      int64_t expiryTimeInMS);
+
+    virtual void sendKeysChange(Vector<uint8_t> const *sessionId,
+                                Vector<DrmPlugin::KeyStatus> const *keyStatusList,
+                                bool hasNewUsableKey);
+
     virtual void binderDied(const wp<IBinder> &the_late_who);
 
 private:
@@ -138,6 +147,8 @@
 
     status_t mInitCheck;
 
+    sp<DrmSessionClientInterface> mDrmSessionClient;
+
     sp<IDrmClient> mListener;
     mutable Mutex mEventLock;
     mutable Mutex mNotifyLock;
@@ -153,7 +164,7 @@
     void findFactoryForScheme(const uint8_t uuid[16]);
     bool loadLibraryForScheme(const String8 &path, const uint8_t uuid[16]);
     void closeFactory();
-
+    void writeByteArray(Parcel &obj, Vector<uint8_t> const *array);
 
     DISALLOW_EVIL_CONSTRUCTORS(Drm);
 };
diff --git a/media/libmediaplayerservice/DrmSessionClientInterface.h b/media/libmediaplayerservice/DrmSessionClientInterface.h
new file mode 100644
index 0000000..17faf08
--- /dev/null
+++ b/media/libmediaplayerservice/DrmSessionClientInterface.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_PROXY_INTERFACE_H_
+#define DRM_PROXY_INTERFACE_H_
+
+#include <utils/RefBase.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+struct DrmSessionClientInterface : public RefBase {
+    virtual bool reclaimSession(const Vector<uint8_t>& sessionId) = 0;
+
+protected:
+    virtual ~DrmSessionClientInterface() {}
+};
+
+}  // namespace android
+
+#endif  // DRM_PROXY_INTERFACE_H_
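DrmSessionClientInterface is the hook through which DrmSessionManager asks a DRM instance to give a session back. A minimal sketch of a concrete client (names are illustrative; the real client wired up as mDrmSessionClient is not shown in this excerpt) would simply close the session on the owning Drm object:

    struct ExampleDrmSessionClient : public DrmSessionClientInterface {
        explicit ExampleDrmSessionClient(const wp<Drm>& drm) : mDrm(drm) {}

        virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
            sp<Drm> drm = mDrm.promote();
            if (drm == NULL) {
                return true;  // owner is gone, nothing left to release
            }
            // Drm::closeSession() also removes the entry from DrmSessionManager.
            return drm->closeSession(sessionId) == OK;
        }

    protected:
        virtual ~ExampleDrmSessionClient() {}

    private:
        wp<Drm> mDrm;
    };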
diff --git a/media/libmediaplayerservice/DrmSessionManager.cpp b/media/libmediaplayerservice/DrmSessionManager.cpp
new file mode 100644
index 0000000..641f881
--- /dev/null
+++ b/media/libmediaplayerservice/DrmSessionManager.cpp
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmSessionManager"
+#include <utils/Log.h>
+
+#include "DrmSessionManager.h"
+
+#include "DrmSessionClientInterface.h"
+#include <binder/IPCThreadState.h>
+#include <binder/IProcessInfoService.h>
+#include <binder/IServiceManager.h>
+#include <media/stagefright/ProcessInfo.h>
+#include <unistd.h>
+#include <utils/String8.h>
+
+namespace android {
+
+static String8 GetSessionIdString(const Vector<uint8_t> &sessionId) {
+    String8 sessionIdStr;
+    for (size_t i = 0; i < sessionId.size(); ++i) {
+        sessionIdStr.appendFormat("%u ", sessionId[i]);
+    }
+    return sessionIdStr;
+}
+
+bool isEqualSessionId(const Vector<uint8_t> &sessionId1, const Vector<uint8_t> &sessionId2) {
+    if (sessionId1.size() != sessionId2.size()) {
+        return false;
+    }
+    for (size_t i = 0; i < sessionId1.size(); ++i) {
+        if (sessionId1[i] != sessionId2[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+sp<DrmSessionManager> DrmSessionManager::Instance() {
+    static sp<DrmSessionManager> drmSessionManager = new DrmSessionManager();
+    return drmSessionManager;
+}
+
+DrmSessionManager::DrmSessionManager()
+    : mProcessInfo(new ProcessInfo()),
+      mTime(0) {}
+
+DrmSessionManager::DrmSessionManager(sp<ProcessInfoInterface> processInfo)
+    : mProcessInfo(processInfo),
+      mTime(0) {}
+
+DrmSessionManager::~DrmSessionManager() {}
+
+void DrmSessionManager::addSession(
+        int pid, sp<DrmSessionClientInterface> drm, const Vector<uint8_t> &sessionId) {
+    ALOGV("addSession(pid %d, drm %p, sessionId %s)", pid, drm.get(),
+            GetSessionIdString(sessionId).string());
+
+    Mutex::Autolock lock(mLock);
+    SessionInfo info;
+    info.drm = drm;
+    info.sessionId = sessionId;
+    info.timeStamp = getTime_l();
+    ssize_t index = mSessionMap.indexOfKey(pid);
+    if (index < 0) {
+        // new pid
+        SessionInfos infosForPid;
+        infosForPid.push_back(info);
+        mSessionMap.add(pid, infosForPid);
+    } else {
+        mSessionMap.editValueAt(index).push_back(info);
+    }
+}
+
+void DrmSessionManager::useSession(const Vector<uint8_t> &sessionId) {
+    ALOGV("useSession(%s)", GetSessionIdString(sessionId).string());
+
+    Mutex::Autolock lock(mLock);
+    for (size_t i = 0; i < mSessionMap.size(); ++i) {
+        SessionInfos& infos = mSessionMap.editValueAt(i);
+        for (size_t j = 0; j < infos.size(); ++j) {
+            SessionInfo& info = infos.editItemAt(j);
+            if (isEqualSessionId(sessionId, info.sessionId)) {
+                info.timeStamp = getTime_l();
+                return;
+            }
+        }
+    }
+}
+
+void DrmSessionManager::removeSession(const Vector<uint8_t> &sessionId) {
+    ALOGV("removeSession(%s)", GetSessionIdString(sessionId).string());
+
+    Mutex::Autolock lock(mLock);
+    for (size_t i = 0; i < mSessionMap.size(); ++i) {
+        SessionInfos& infos = mSessionMap.editValueAt(i);
+        for (size_t j = 0; j < infos.size(); ++j) {
+            if (isEqualSessionId(sessionId, infos[j].sessionId)) {
+                infos.removeAt(j);
+                return;
+            }
+        }
+    }
+}
+
+void DrmSessionManager::removeDrm(sp<DrmSessionClientInterface> drm) {
+    ALOGV("removeDrm(%p)", drm.get());
+
+    Mutex::Autolock lock(mLock);
+    bool found = false;
+    for (size_t i = 0; i < mSessionMap.size(); ++i) {
+        SessionInfos& infos = mSessionMap.editValueAt(i);
+        for (size_t j = 0; j < infos.size();) {
+            if (infos[j].drm == drm) {
+                ALOGV("removed session (%s)", GetSessionIdString(infos[j].sessionId).string());
+                j = infos.removeAt(j);
+                found = true;
+            } else {
+                ++j;
+            }
+        }
+        if (found) {
+            break;
+        }
+    }
+}
+
+bool DrmSessionManager::reclaimSession(int callingPid) {
+    ALOGV("reclaimSession(%d)", callingPid);
+
+    sp<DrmSessionClientInterface> drm;
+    Vector<uint8_t> sessionId;
+    int lowestPriorityPid;
+    int lowestPriority;
+    {
+        Mutex::Autolock lock(mLock);
+        int callingPriority;
+        if (!mProcessInfo->getPriority(callingPid, &callingPriority)) {
+            return false;
+        }
+        if (!getLowestPriority_l(&lowestPriorityPid, &lowestPriority)) {
+            return false;
+        }
+        if (lowestPriority <= callingPriority) {
+            return false;
+        }
+
+        if (!getLeastUsedSession_l(lowestPriorityPid, &drm, &sessionId)) {
+            return false;
+        }
+    }
+
+    if (drm == NULL) {
+        return false;
+    }
+
+    ALOGV("reclaim session(%s) opened by pid %d",
+            GetSessionIdString(sessionId).string(), lowestPriorityPid);
+
+    return drm->reclaimSession(sessionId);
+}
+
+int64_t DrmSessionManager::getTime_l() {
+    return mTime++;
+}
+
+bool DrmSessionManager::getLowestPriority_l(int* lowestPriorityPid, int* lowestPriority) {
+    int pid = -1;
+    int priority = -1;
+    for (size_t i = 0; i < mSessionMap.size(); ++i) {
+        if (mSessionMap.valueAt(i).size() == 0) {
+            // no opened session by this process.
+            continue;
+        }
+        int tempPid = mSessionMap.keyAt(i);
+        int tempPriority;
+        if (!mProcessInfo->getPriority(tempPid, &tempPriority)) {
+            // shouldn't happen.
+            return false;
+        }
+        if (pid == -1) {
+            pid = tempPid;
+            priority = tempPriority;
+        } else {
+            if (tempPriority > priority) {
+                pid = tempPid;
+                priority = tempPriority;
+            }
+        }
+    }
+    if (pid != -1) {
+        *lowestPriorityPid = pid;
+        *lowestPriority = priority;
+    }
+    return (pid != -1);
+}
+
+bool DrmSessionManager::getLeastUsedSession_l(
+        int pid, sp<DrmSessionClientInterface>* drm, Vector<uint8_t>* sessionId) {
+    ssize_t index = mSessionMap.indexOfKey(pid);
+    if (index < 0) {
+        return false;
+    }
+
+    int leastUsedIndex = -1;
+    int64_t minTs = LLONG_MAX;
+    const SessionInfos& infos = mSessionMap.valueAt(index);
+    for (size_t j = 0; j < infos.size(); ++j) {
+        if (leastUsedIndex == -1) {
+            leastUsedIndex = j;
+            minTs = infos[j].timeStamp;
+        } else {
+            if (infos[j].timeStamp < minTs) {
+                leastUsedIndex = j;
+                minTs = infos[j].timeStamp;
+            }
+        }
+    }
+    if (leastUsedIndex != -1) {
+        *drm = infos[leastUsedIndex].drm;
+        *sessionId = infos[leastUsedIndex].sessionId;
+    }
+    return (leastUsedIndex != -1);
+}
+
+}  // namespace android
diff --git a/media/libmediaplayerservice/DrmSessionManager.h b/media/libmediaplayerservice/DrmSessionManager.h
new file mode 100644
index 0000000..ba5c268
--- /dev/null
+++ b/media/libmediaplayerservice/DrmSessionManager.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_SESSION_MANAGER_H_
+
+#define DRM_SESSION_MANAGER_H_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <utils/RefBase.h>
+#include <utils/KeyedVector.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+class DrmSessionManagerTest;
+struct DrmSessionClientInterface;
+struct ProcessInfoInterface;
+
+bool isEqualSessionId(const Vector<uint8_t> &sessionId1, const Vector<uint8_t> &sessionId2);
+
+struct SessionInfo {
+    sp<DrmSessionClientInterface> drm;
+    Vector<uint8_t> sessionId;
+    int64_t timeStamp;
+};
+
+typedef Vector<SessionInfo > SessionInfos;
+typedef KeyedVector<int, SessionInfos > PidSessionInfosMap;
+
+struct DrmSessionManager : public RefBase {
+    static sp<DrmSessionManager> Instance();
+
+    DrmSessionManager();
+    DrmSessionManager(sp<ProcessInfoInterface> processInfo);
+
+    void addSession(int pid, sp<DrmSessionClientInterface> drm, const Vector<uint8_t>& sessionId);
+    void useSession(const Vector<uint8_t>& sessionId);
+    void removeSession(const Vector<uint8_t>& sessionId);
+    void removeDrm(sp<DrmSessionClientInterface> drm);
+    bool reclaimSession(int callingPid);
+
+protected:
+    virtual ~DrmSessionManager();
+
+private:
+    friend class DrmSessionManagerTest;
+
+    int64_t getTime_l();
+    bool getLowestPriority_l(int* lowestPriorityPid, int* lowestPriority);
+    bool getLeastUsedSession_l(
+            int pid, sp<DrmSessionClientInterface>* drm, Vector<uint8_t>* sessionId);
+
+    sp<ProcessInfoInterface> mProcessInfo;
+    mutable Mutex mLock;
+    PidSessionInfosMap mSessionMap;
+    int64_t mTime;
+
+    DISALLOW_EVIL_CONSTRUCTORS(DrmSessionManager);
+};
+
+}  // namespace android
+
+#endif  // DRM_SESSION_MANAGER_H_
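Taken together, addSession()/useSession()/removeSession() keep a per-pid LRU list, and reclaimSession() evicts the least-recently-used session of the lowest-priority process, but only when that process is strictly less important than the caller. The sketch below shows that flow with an assumed test double for ProcessInfoInterface (larger priority value means a less important process, matching getLowestPriority_l()); the pids and values are illustrative only.

    static const int kBackgroundPid = 1001;  // illustrative pid only

    struct StubProcessInfo : public ProcessInfoInterface {
        virtual bool getPriority(int pid, int* priority) {
            // Larger value == less important process.
            *priority = (pid == kBackgroundPid) ? 10 : 1;
            return true;
        }
    };

    bool exampleReclaim(const sp<DrmSessionClientInterface>& backgroundDrm) {
        sp<DrmSessionManager> mgr = new DrmSessionManager(new StubProcessInfo());

        Vector<uint8_t> sessionId;
        sessionId.push_back(0x01);
        mgr->addSession(kBackgroundPid, backgroundDrm, sessionId);
        mgr->useSession(sessionId);  // refreshes the LRU timestamp

        // A more important caller (priority 1 < 10) may evict the session;
        // this ends up invoking backgroundDrm->reclaimSession(sessionId).
        return mgr->reclaimSession(2002 /* foreground pid */);
    }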
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.cpp b/media/libmediaplayerservice/MediaPlayerFactory.cpp
index 48884b9..d5d12f7 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.cpp
+++ b/media/libmediaplayerservice/MediaPlayerFactory.cpp
@@ -70,12 +70,6 @@
         return STAGEFRIGHT_PLAYER;
     }
 
-    // TODO: remove this EXPERIMENTAL developer settings property
-    if (property_get("persist.sys.media.use-awesome", value, NULL)
-            && !strcasecmp("true", value)) {
-        return STAGEFRIGHT_PLAYER;
-    }
-
     return NU_PLAYER;
 }
 
@@ -131,12 +125,18 @@
     GET_PLAYER_TYPE_IMPL(client, source);
 }
 
+player_type MediaPlayerFactory::getPlayerType(const sp<IMediaPlayer>& client,
+                                              const sp<DataSource> &source) {
+    GET_PLAYER_TYPE_IMPL(client, source);
+}
+
 #undef GET_PLAYER_TYPE_IMPL
 
 sp<MediaPlayerBase> MediaPlayerFactory::createPlayer(
         player_type playerType,
         void* cookie,
-        notify_callback_f notifyFunc) {
+        notify_callback_f notifyFunc,
+        pid_t pid) {
     sp<MediaPlayerBase> p;
     IFactory* factory;
     status_t init_result;
@@ -150,7 +150,7 @@
 
     factory = sFactoryMap.valueFor(playerType);
     CHECK(NULL != factory);
-    p = factory->createPlayer();
+    p = factory->createPlayer(pid);
 
     if (p == NULL) {
         ALOGE("Failed to create player object of type %d, create failed",
@@ -218,7 +218,7 @@
         return 0.0;
     }
 
-    virtual sp<MediaPlayerBase> createPlayer() {
+    virtual sp<MediaPlayerBase> createPlayer(pid_t /* pid */) {
         ALOGV(" create StagefrightPlayer");
         return new StagefrightPlayer();
     }
@@ -273,9 +273,16 @@
         return 1.0;
     }
 
-    virtual sp<MediaPlayerBase> createPlayer() {
+    virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
+                               const sp<DataSource>& /*source*/,
+                               float /*curScore*/) {
+        // Only NuPlayer supports setting a DataSource directly.
+        return 1.0;
+    }
+
+    virtual sp<MediaPlayerBase> createPlayer(pid_t pid) {
         ALOGV(" create NuPlayer");
-        return new NuPlayerDriver;
+        return new NuPlayerDriver(pid);
     }
 };
 
@@ -291,7 +298,7 @@
         return 0.0;
     }
 
-    virtual sp<MediaPlayerBase> createPlayer() {
+    virtual sp<MediaPlayerBase> createPlayer(pid_t /* pid */) {
         ALOGV("Create Test Player stub");
         return new TestPlayerStub();
     }
diff --git a/media/libmediaplayerservice/MediaPlayerFactory.h b/media/libmediaplayerservice/MediaPlayerFactory.h
index 55ff918..e22a56f 100644
--- a/media/libmediaplayerservice/MediaPlayerFactory.h
+++ b/media/libmediaplayerservice/MediaPlayerFactory.h
@@ -43,7 +43,11 @@
                                    const sp<IStreamSource> &/*source*/,
                                    float /*curScore*/) { return 0.0; }
 
-        virtual sp<MediaPlayerBase> createPlayer() = 0;
+        virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
+                                   const sp<DataSource> &/*source*/,
+                                   float /*curScore*/) { return 0.0; }
+
+        virtual sp<MediaPlayerBase> createPlayer(pid_t pid) = 0;
     };
 
     static status_t registerFactory(IFactory* factory,
@@ -57,10 +61,13 @@
                                      int64_t length);
     static player_type getPlayerType(const sp<IMediaPlayer>& client,
                                      const sp<IStreamSource> &source);
+    static player_type getPlayerType(const sp<IMediaPlayer>& client,
+                                     const sp<DataSource> &source);
 
     static sp<MediaPlayerBase> createPlayer(player_type playerType,
                                             void* cookie,
-                                            notify_callback_f notifyFunc);
+                                            notify_callback_f notifyFunc,
+                                            pid_t pid);
 
     static void registerBuiltinFactories();
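With these changes every IFactory must accept the client pid, and DataSource-based playback can be scored through the new scoreFactory() overload. A hedged sketch of an out-of-tree factory follows; ExampleFactory and ExamplePlayer are hypothetical, and registration details are omitted.

    class ExampleFactory : public MediaPlayerFactory::IFactory {
    public:
        virtual float scoreFactory(const sp<IMediaPlayer>& /*client*/,
                                   const sp<DataSource>& /*source*/,
                                   float /*curScore*/) {
            // NuPlayer returns 1.0 for DataSource playback, so a competing
            // factory must return a higher score to win the selection.
            return 0.0;
        }

        virtual sp<MediaPlayerBase> createPlayer(pid_t pid) {
            ALOGV("create ExamplePlayer for pid %d", pid);
            // The pid lets the created player attribute its resources to the
            // requesting process (as NuPlayerDriver(pid) does above).
            return new ExamplePlayer(pid);  // hypothetical MediaPlayerBase subclass
        }
    };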
 
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 694f1a4..56521a2 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -34,7 +34,6 @@
 
 #include <utils/misc.h>
 
-#include <binder/IBatteryStats.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/MemoryHeapBase.h>
@@ -46,6 +45,7 @@
 #include <utils/Timers.h>
 #include <utils/Vector.h>
 
+#include <media/AudioPolicyHelper.h>
 #include <media/IMediaHTTPService.h>
 #include <media/IRemoteDisplay.h>
 #include <media/IRemoteDisplayClient.h>
@@ -60,6 +60,7 @@
 #include <media/stagefright/AudioPlayer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/ALooperRoster.h>
+#include <mediautils/BatteryNotifier.h>
 
 #include <system/audio.h>
 
@@ -287,16 +288,9 @@
     // reset battery stats
     // if the mediaserver has crashed, battery stats could be left
     // in bad state, reset the state upon service start.
-    const sp<IServiceManager> sm(defaultServiceManager());
-    if (sm != NULL) {
-        const String16 name("batterystats");
-        sp<IBatteryStats> batteryStats =
-                interface_cast<IBatteryStats>(sm->getService(name));
-        if (batteryStats != NULL) {
-            batteryStats->noteResetVideo();
-            batteryStats->noteResetAudio();
-        }
-    }
+    BatteryNotifier& notifier(BatteryNotifier::getInstance());
+    notifier.noteResetVideo();
+    notifier.noteResetAudio();
 
     MediaPlayerFactory::registerBuiltinFactories();
 }
@@ -306,10 +300,10 @@
     ALOGV("MediaPlayerService destroyed");
 }
 
-sp<IMediaRecorder> MediaPlayerService::createMediaRecorder()
+sp<IMediaRecorder> MediaPlayerService::createMediaRecorder(const String16 &opPackageName)
 {
     pid_t pid = IPCThreadState::self()->getCallingPid();
-    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid);
+    sp<MediaRecorderClient> recorder = new MediaRecorderClient(this, pid, opPackageName);
     wp<MediaRecorderClient> w = recorder;
     Mutex::Autolock lock(mLock);
     mMediaRecorderClients.add(w);
@@ -380,12 +374,13 @@
 }
 
 sp<IRemoteDisplay> MediaPlayerService::listenForRemoteDisplay(
+        const String16 &opPackageName,
         const sp<IRemoteDisplayClient>& client, const String8& iface) {
     if (!checkPermission("android.permission.CONTROL_WIFI_DISPLAY")) {
         return NULL;
     }
 
-    return new RemoteDisplay(client, iface.string());
+    return new RemoteDisplay(opPackageName, client, iface.string());
 }
 
 status_t MediaPlayerService::AudioOutput::dump(int fd, const Vector<String16>& args) const
@@ -412,7 +407,7 @@
     return NO_ERROR;
 }
 
-status_t MediaPlayerService::Client::dump(int fd, const Vector<String16>& args) const
+status_t MediaPlayerService::Client::dump(int fd, const Vector<String16>& args)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -441,6 +436,9 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
     String8 result;
+    SortedVector< sp<Client> > clients; // to serialise the mutex unlock & client destruction.
+    SortedVector< sp<MediaRecorderClient> > mediaRecorderClients;
+
     if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
         snprintf(buffer, SIZE, "Permission Denial: "
                 "can't dump MediaPlayerService from pid=%d, uid=%d\n",
@@ -452,6 +450,7 @@
         for (int i = 0, n = mClients.size(); i < n; ++i) {
             sp<Client> c = mClients[i].promote();
             if (c != 0) c->dump(fd, args);
+            clients.add(c);
         }
         if (mMediaRecorderClients.size() == 0) {
                 result.append(" No media recorder client\n\n");
@@ -464,6 +463,7 @@
                     write(fd, result.string(), result.size());
                     result = "\n";
                     c->dump(fd, args);
+                    mediaRecorderClients.add(c);
                 }
             }
         }
@@ -633,7 +633,7 @@
         p.clear();
     }
     if (p == NULL) {
-        p = MediaPlayerFactory::createPlayer(playerType, this, notify);
+        p = MediaPlayerFactory::createPlayer(playerType, this, notify, mPid);
     }
 
     if (p != NULL) {
@@ -655,6 +655,7 @@
     }
 
     if (!p->hardwareOutput()) {
+        Mutex::Autolock l(mLock);
         mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
                 mPid, mAudioAttributes);
         static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
@@ -740,7 +741,7 @@
         return UNKNOWN_ERROR;
     }
 
-    ALOGV("st_dev  = %llu", sb.st_dev);
+    ALOGV("st_dev  = %llu", static_cast<uint64_t>(sb.st_dev));
     ALOGV("st_mode = %u", sb.st_mode);
     ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
     ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
@@ -784,6 +785,19 @@
     return mStatus;
 }
 
+status_t MediaPlayerService::Client::setDataSource(
+        const sp<IDataSource> &source) {
+    sp<DataSource> dataSource = DataSource::CreateFromIDataSource(source);
+    player_type playerType = MediaPlayerFactory::getPlayerType(this, dataSource);
+    sp<MediaPlayerBase> p = setDataSource_pre(playerType);
+    if (p == NULL) {
+        return NO_INIT;
+    }
+    // now set data source
+    setDataSource_post(p, p->setDataSource(dataSource));
+    return mStatus;
+}
+
 void MediaPlayerService::Client::disconnectNativeWindow() {
     if (mConnectedWindow != NULL) {
         status_t err = native_window_api_disconnect(mConnectedWindow.get(),
@@ -961,6 +975,54 @@
     return NO_ERROR;
 }
 
+status_t MediaPlayerService::Client::setPlaybackSettings(const AudioPlaybackRate& rate)
+{
+    ALOGV("[%d] setPlaybackSettings(%f, %f, %d, %d)",
+            mConnId, rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    return p->setPlaybackSettings(rate);
+}
+
+status_t MediaPlayerService::Client::getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */)
+{
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    status_t ret = p->getPlaybackSettings(rate);
+    if (ret == NO_ERROR) {
+        ALOGV("[%d] getPlaybackSettings(%f, %f, %d, %d)",
+                mConnId, rate->mSpeed, rate->mPitch, rate->mFallbackMode, rate->mStretchMode);
+    } else {
+        ALOGV("[%d] getPlaybackSettings returned %d", mConnId, ret);
+    }
+    return ret;
+}
+
+status_t MediaPlayerService::Client::setSyncSettings(
+        const AVSyncSettings& sync, float videoFpsHint)
+{
+    ALOGV("[%d] setSyncSettings(%u, %u, %f, %f)",
+            mConnId, sync.mSource, sync.mAudioAdjustMode, sync.mTolerance, videoFpsHint);
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    return p->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t MediaPlayerService::Client::getSyncSettings(
+        AVSyncSettings* sync /* nonnull */, float* videoFps /* nonnull */)
+{
+    sp<MediaPlayerBase> p = getPlayer();
+    if (p == 0) return UNKNOWN_ERROR;
+    status_t ret = p->getSyncSettings(sync, videoFps);
+    if (ret == NO_ERROR) {
+        ALOGV("[%d] getSyncSettings(%u, %u, %f, %f)",
+                mConnId, sync->mSource, sync->mAudioAdjustMode, sync->mTolerance, *videoFps);
+    } else {
+        ALOGV("[%d] getSyncSettings returned %d", mConnId, ret);
+    }
+    return ret;
+}
+
 status_t MediaPlayerService::Client::getCurrentPosition(int *msec)
 {
     ALOGV("getCurrentPosition");
@@ -1040,6 +1102,9 @@
 {
     if (mAudioAttributes != NULL) { free(mAudioAttributes); }
     mAudioAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+    if (mAudioAttributes == NULL) {
+        return NO_MEMORY;
+    }
     unmarshallAudioAttributes(parcel, mAudioAttributes);
 
     ALOGV("setAudioAttributes_l() usage=%d content=%d flags=0x%x tags=%s",
@@ -1275,29 +1340,42 @@
       mCallbackCookie(NULL),
       mCallbackData(NULL),
       mBytesWritten(0),
+      mStreamType(AUDIO_STREAM_MUSIC),
+      mLeftVolume(1.0),
+      mRightVolume(1.0),
+      mPlaybackRate(AUDIO_PLAYBACK_RATE_DEFAULT),
+      mSampleRateHz(0),
+      mMsecsPerFrame(0),
+      mFrameSize(0),
       mSessionId(sessionId),
       mUid(uid),
       mPid(pid),
-      mFlags(AUDIO_OUTPUT_FLAG_NONE) {
+      mSendLevel(0.0),
+      mAuxEffectId(0),
+      mFlags(AUDIO_OUTPUT_FLAG_NONE)
+{
     ALOGV("AudioOutput(%d)", sessionId);
-    mStreamType = AUDIO_STREAM_MUSIC;
-    mLeftVolume = 1.0;
-    mRightVolume = 1.0;
-    mPlaybackRatePermille = 1000;
-    mSampleRateHz = 0;
-    mMsecsPerFrame = 0;
-    mAuxEffectId = 0;
-    mSendLevel = 0.0;
+    if (attr != NULL) {
+        mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+        if (mAttributes != NULL) {
+            memcpy(mAttributes, attr, sizeof(audio_attributes_t));
+            mStreamType = audio_attributes_to_stream_type(attr);
+        }
+    } else {
+        mAttributes = NULL;
+    }
+
     setMinBufferCount();
-    mAttributes = attr;
 }
 
 MediaPlayerService::AudioOutput::~AudioOutput()
 {
     close();
+    free(mAttributes);
     delete mCallbackData;
 }
 
+// static
 void MediaPlayerService::AudioOutput::setMinBufferCount()
 {
     char value[PROPERTY_VALUE_MAX];
@@ -1307,92 +1385,123 @@
     }
 }
 
+// static
 bool MediaPlayerService::AudioOutput::isOnEmulator()
 {
-    setMinBufferCount();
+    setMinBufferCount(); // benign race wrt other threads
     return mIsOnEmulator;
 }
 
+// static
 int MediaPlayerService::AudioOutput::getMinBufferCount()
 {
-    setMinBufferCount();
+    setMinBufferCount(); // benign race wrt other threads
     return mMinBufferCount;
 }
 
 ssize_t MediaPlayerService::AudioOutput::bufferSize() const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
-    return mTrack->frameCount() * frameSize();
+    return mTrack->frameCount() * mFrameSize;
 }
 
 ssize_t MediaPlayerService::AudioOutput::frameCount() const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
     return mTrack->frameCount();
 }
 
 ssize_t MediaPlayerService::AudioOutput::channelCount() const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
     return mTrack->channelCount();
 }
 
 ssize_t MediaPlayerService::AudioOutput::frameSize() const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
-    return mTrack->frameSize();
+    return mFrameSize;
 }
 
 uint32_t MediaPlayerService::AudioOutput::latency () const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return 0;
     return mTrack->latency();
 }
 
 float MediaPlayerService::AudioOutput::msecsPerFrame() const
 {
+    Mutex::Autolock lock(mLock);
     return mMsecsPerFrame;
 }
 
 status_t MediaPlayerService::AudioOutput::getPosition(uint32_t *position) const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
     return mTrack->getPosition(position);
 }
 
 status_t MediaPlayerService::AudioOutput::getTimestamp(AudioTimestamp &ts) const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
     return mTrack->getTimestamp(ts);
 }
 
 status_t MediaPlayerService::AudioOutput::getFramesWritten(uint32_t *frameswritten) const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
-    *frameswritten = mBytesWritten / frameSize();
+    *frameswritten = mBytesWritten / mFrameSize;
     return OK;
 }
 
 status_t MediaPlayerService::AudioOutput::setParameters(const String8& keyValuePairs)
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return NO_INIT;
     return mTrack->setParameters(keyValuePairs);
 }
 
 String8  MediaPlayerService::AudioOutput::getParameters(const String8& keys)
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return String8::empty();
     return mTrack->getParameters(keys);
 }
 
 void MediaPlayerService::AudioOutput::setAudioAttributes(const audio_attributes_t * attributes) {
-    mAttributes = attributes;
+    Mutex::Autolock lock(mLock);
+    if (attributes == NULL) {
+        free(mAttributes);
+        mAttributes = NULL;
+    } else {
+        if (mAttributes == NULL) {
+            mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
+        }
+        memcpy(mAttributes, attributes, sizeof(audio_attributes_t));
+        mStreamType = audio_attributes_to_stream_type(attributes);
+    }
 }
 
-void MediaPlayerService::AudioOutput::deleteRecycledTrack()
+void MediaPlayerService::AudioOutput::setAudioStreamType(audio_stream_type_t streamType)
 {
-    ALOGV("deleteRecycledTrack");
+    Mutex::Autolock lock(mLock);
+    // do not allow direct stream type modification if attributes have been set
+    if (mAttributes == NULL) {
+        mStreamType = streamType;
+    }
+}
 
+void MediaPlayerService::AudioOutput::deleteRecycledTrack_l()
+{
+    ALOGV("deleteRecycledTrack_l");
     if (mRecycledTrack != 0) {
 
         if (mCallbackData != NULL) {
@@ -1410,33 +1519,28 @@
         // AudioFlinger to drain the track.
 
         mRecycledTrack.clear();
+        close_l();
         delete mCallbackData;
         mCallbackData = NULL;
-        close();
     }
 }
 
+void MediaPlayerService::AudioOutput::close_l()
+{
+    mTrack.clear();
+}
+
 status_t MediaPlayerService::AudioOutput::open(
         uint32_t sampleRate, int channelCount, audio_channel_mask_t channelMask,
         audio_format_t format, int bufferCount,
         AudioCallback cb, void *cookie,
         audio_output_flags_t flags,
-        const audio_offload_info_t *offloadInfo)
+        const audio_offload_info_t *offloadInfo,
+        bool doNotReconnect,
+        uint32_t suggestedFrameCount)
 {
-    mCallback = cb;
-    mCallbackCookie = cookie;
-
-    // Check argument "bufferCount" against the mininum buffer count
-    if (bufferCount < mMinBufferCount) {
-        ALOGD("bufferCount (%d) is too small and increased to %d", bufferCount, mMinBufferCount);
-        bufferCount = mMinBufferCount;
-
-    }
     ALOGV("open(%u, %d, 0x%x, 0x%x, %d, %d 0x%x)", sampleRate, channelCount, channelMask,
                 format, bufferCount, mSessionId, flags);
-    uint32_t afSampleRate;
-    size_t afFrameCount;
-    size_t frameCount;
 
     // offloading is only supported in callback mode for now.
     // offloadInfo must be present if offload flag is set
@@ -1445,20 +1549,36 @@
         return BAD_VALUE;
     }
 
+    // compute frame count for the AudioTrack internal buffer
+    size_t frameCount;
     if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
         frameCount = 0; // AudioTrack will get frame count from AudioFlinger
     } else {
+        // try to estimate the buffer processing fetch size from AudioFlinger.
+        // framesPerBuffer is an estimate of the mixer period at this sample rate and may not be exact.
         uint32_t afSampleRate;
         size_t afFrameCount;
-
         if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
             return NO_INIT;
         }
         if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
             return NO_INIT;
         }
+        const size_t framesPerBuffer =
+                (unsigned long long)sampleRate * afFrameCount / afSampleRate;
 
-        frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
+        if (bufferCount == 0) {
+            // use suggestedFrameCount
+            bufferCount = (suggestedFrameCount + framesPerBuffer - 1) / framesPerBuffer;
+        }
+        // Check argument bufferCount against the minimum buffer count
+        if (bufferCount != 0 && bufferCount < mMinBufferCount) {
+            ALOGV("bufferCount (%d) increased to %d", bufferCount, mMinBufferCount);
+            bufferCount = mMinBufferCount;
+        }
+        // if frameCount is 0, then AudioTrack will get frame count from AudioFlinger
+        // which will be the minimum size permitted.
+        frameCount = bufferCount * framesPerBuffer;
     }
 
     if (channelMask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
@@ -1469,6 +1589,10 @@
         }
     }
 
+    Mutex::Autolock lock(mLock);
+    mCallback = cb;
+    mCallbackCookie = cookie;
+
     // Check whether we can recycle the track
     bool reuse = false;
     bool bothOffloaded = false;
@@ -1508,7 +1632,7 @@
     // we must close the previous output before opening a new one
     if (bothOffloaded && !reuse) {
         ALOGV("both offloaded and not recycling");
-        deleteRecycledTrack();
+        deleteRecycledTrack_l();
     }
 
     sp<AudioTrack> t;
@@ -1539,7 +1663,8 @@
                     offloadInfo,
                     mUid,
                     mPid,
-                    mAttributes);
+                    mAttributes,
+                    doNotReconnect);
         } else {
             t = new AudioTrack(
                     mStreamType,
@@ -1556,12 +1681,14 @@
                     NULL, // offload info
                     mUid,
                     mPid,
-                    mAttributes);
+                    mAttributes,
+                    doNotReconnect);
         }
 
         if ((t == 0) || (t->initCheck() != NO_ERROR)) {
             ALOGE("Unable to create audio track");
             delete newcbd;
+            // t goes out of scope, so reference count drops to zero
             return NO_INIT;
         } else {
             // successful AudioTrack initialization implies a legacy stream type was generated
@@ -1583,7 +1710,7 @@
 
         if (reuse) {
             ALOGV("chaining to next output and recycling track");
-            close();
+            close_l();
             mTrack = mRecycledTrack;
             mRecycledTrack.clear();
             if (mCallbackData != NULL) {
@@ -1597,7 +1724,7 @@
     // we're not going to reuse the track, unblock and flush it
     // this was done earlier if both tracks are offloaded
     if (!bothOffloaded) {
-        deleteRecycledTrack();
+        deleteRecycledTrack_l();
     }
 
     CHECK((t != NULL) && ((mCallback == NULL) || (newcbd != NULL)));
@@ -1607,17 +1734,20 @@
     t->setVolume(mLeftVolume, mRightVolume);
 
     mSampleRateHz = sampleRate;
-    mFlags = flags;
-    mMsecsPerFrame = mPlaybackRatePermille / (float) sampleRate;
+    mFlags = t->getFlags(); // we suggest the flags above, but new AudioTrack() may not grant it.
+    mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
+    mFrameSize = t->frameSize();
     uint32_t pos;
     if (t->getPosition(&pos) == OK) {
-        mBytesWritten = uint64_t(pos) * t->frameSize();
+        mBytesWritten = uint64_t(pos) * mFrameSize;
     }
     mTrack = t;
 
     status_t res = NO_ERROR;
-    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) {
-        res = t->setSampleRate(mPlaybackRatePermille * mSampleRateHz / 1000);
+    // Note some output devices may give us a direct track even though we don't specify it.
+    // Example: Line application b/17459982.
+    if ((mFlags & (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0) {
+        res = t->setPlaybackRate(mPlaybackRate);
         if (res == NO_ERROR) {
             t->setAuxEffectSendLevel(mSendLevel);
             res = t->attachAuxEffect(mAuxEffectId);
@@ -1630,6 +1760,7 @@
 status_t MediaPlayerService::AudioOutput::start()
 {
     ALOGV("start");
+    Mutex::Autolock lock(mLock);
     if (mCallbackData != NULL) {
         mCallbackData->endTrackSwitch();
     }
@@ -1642,35 +1773,93 @@
 }
 
 void MediaPlayerService::AudioOutput::setNextOutput(const sp<AudioOutput>& nextOutput) {
+    Mutex::Autolock lock(mLock);
     mNextOutput = nextOutput;
 }
 
-
 void MediaPlayerService::AudioOutput::switchToNextOutput() {
     ALOGV("switchToNextOutput");
-    if (mNextOutput != NULL) {
-        if (mCallbackData != NULL) {
-            mCallbackData->beginTrackSwitch();
+
+    // Try to acquire the callback lock before moving track (without incurring deadlock).
+    const unsigned kMaxSwitchTries = 100;
+    Mutex::Autolock lock(mLock);
+    for (unsigned tries = 0;;) {
+        if (mTrack == 0) {
+            return;
         }
-        delete mNextOutput->mCallbackData;
-        mNextOutput->mCallbackData = mCallbackData;
-        mCallbackData = NULL;
-        mNextOutput->mRecycledTrack = mTrack;
-        mTrack.clear();
-        mNextOutput->mSampleRateHz = mSampleRateHz;
-        mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
-        mNextOutput->mBytesWritten = mBytesWritten;
-        mNextOutput->mFlags = mFlags;
+        if (mNextOutput != NULL && mNextOutput != this) {
+            if (mCallbackData != NULL) {
+                // two alternative approaches
+#if 1
+                CallbackData *callbackData = mCallbackData;
+                mLock.unlock();
+                // proper acquisition sequence
+                callbackData->lock();
+                mLock.lock();
+                // Rare but possible: the callback data or switch target may have changed while mLock was released
+                if (callbackData != mCallbackData || mNextOutput == NULL || mNextOutput == this) {
+                    // fatal if we are starved out.
+                    LOG_ALWAYS_FATAL_IF(++tries > kMaxSwitchTries,
+                            "switchToNextOutput() cannot obtain correct lock sequence");
+                    callbackData->unlock();
+                    continue;
+                }
+                callbackData->mSwitching = true; // begin track switch
+#else
+                // tryBeginTrackSwitch() returns false if the callback has the lock.
+                if (!mCallbackData->tryBeginTrackSwitch()) {
+                    // fatal if we are starved out.
+                    LOG_ALWAYS_FATAL_IF(++tries > kMaxSwitchTries,
+                            "switchToNextOutput() cannot obtain callback lock");
+                    mLock.unlock();
+                    usleep(5 * 1000 /* usec */); // allow callback to use AudioOutput
+                    mLock.lock();
+                    continue;
+                }
+#endif
+            }
+
+            Mutex::Autolock nextLock(mNextOutput->mLock);
+
+            // If the next output track is not NULL, then it has been
+            // opened already for playback.
+            // This is possible even without the next player being started,
+            // for example, the next player could be prepared and seeked.
+            //
+            // Presuming it isn't advisable to force the track over.
+            if (mNextOutput->mTrack == NULL) {
+                ALOGD("Recycling track for gapless playback");
+                delete mNextOutput->mCallbackData;
+                mNextOutput->mCallbackData = mCallbackData;
+                mNextOutput->mRecycledTrack = mTrack;
+                mNextOutput->mSampleRateHz = mSampleRateHz;
+                mNextOutput->mMsecsPerFrame = mMsecsPerFrame;
+                mNextOutput->mBytesWritten = mBytesWritten;
+                mNextOutput->mFlags = mFlags;
+                mNextOutput->mFrameSize = mFrameSize;
+                close_l();
+                mCallbackData = NULL;  // destruction handled by mNextOutput
+            } else {
+                ALOGW("Ignoring gapless playback because next player has already started");
+                // remove the track in case its resources are needed by future players.
+                if (mCallbackData != NULL) {
+                    mCallbackData->endTrackSwitch();  // release lock for callbacks before close.
+                }
+                close_l();
+            }
+        }
+        break;
     }
 }
 
-ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size)
+ssize_t MediaPlayerService::AudioOutput::write(const void* buffer, size_t size, bool blocking)
 {
+    Mutex::Autolock lock(mLock);
     LOG_ALWAYS_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
 
     //ALOGV("write(%p, %u)", buffer, size);
     if (mTrack != 0) {
-        ssize_t ret = mTrack->write(buffer, size);
+        ssize_t ret = mTrack->write(buffer, size, blocking);
         if (ret >= 0) {
             mBytesWritten += ret;
         }
@@ -1682,30 +1871,37 @@
 void MediaPlayerService::AudioOutput::stop()
 {
     ALOGV("stop");
+    Mutex::Autolock lock(mLock);
+    mBytesWritten = 0;
     if (mTrack != 0) mTrack->stop();
 }
 
 void MediaPlayerService::AudioOutput::flush()
 {
     ALOGV("flush");
+    Mutex::Autolock lock(mLock);
+    mBytesWritten = 0;
     if (mTrack != 0) mTrack->flush();
 }
 
 void MediaPlayerService::AudioOutput::pause()
 {
     ALOGV("pause");
+    Mutex::Autolock lock(mLock);
     if (mTrack != 0) mTrack->pause();
 }
 
 void MediaPlayerService::AudioOutput::close()
 {
     ALOGV("close");
-    mTrack.clear();
+    Mutex::Autolock lock(mLock);
+    close_l();
 }
 
 void MediaPlayerService::AudioOutput::setVolume(float left, float right)
 {
     ALOGV("setVolume(%f, %f)", left, right);
+    Mutex::Autolock lock(mLock);
     mLeftVolume = left;
     mRightVolume = right;
     if (mTrack != 0) {
@@ -1713,25 +1909,44 @@
     }
 }
 
-status_t MediaPlayerService::AudioOutput::setPlaybackRatePermille(int32_t ratePermille)
+status_t MediaPlayerService::AudioOutput::setPlaybackRate(const AudioPlaybackRate &rate)
 {
-    ALOGV("setPlaybackRatePermille(%d)", ratePermille);
-    status_t res = NO_ERROR;
-    if (mTrack != 0) {
-        res = mTrack->setSampleRate(ratePermille * mSampleRateHz / 1000);
-    } else {
-        res = NO_INIT;
+    ALOGV("setPlaybackRate(%f %f %d %d)",
+                rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
+    Mutex::Autolock lock(mLock);
+    if (mTrack == 0) {
+        // remember rate so that we can set it when the track is opened
+        mPlaybackRate = rate;
+        return OK;
     }
-    mPlaybackRatePermille = ratePermille;
+    status_t res = mTrack->setPlaybackRate(rate);
+    if (res != NO_ERROR) {
+        return res;
+    }
+    // rate.mSpeed is always greater than 0 if setPlaybackRate succeeded
+    CHECK_GT(rate.mSpeed, 0.f);
+    mPlaybackRate = rate;
     if (mSampleRateHz != 0) {
-        mMsecsPerFrame = mPlaybackRatePermille / (float) mSampleRateHz;
+        mMsecsPerFrame = 1E3f / (rate.mSpeed * mSampleRateHz);
     }
     return res;
 }
 
+status_t MediaPlayerService::AudioOutput::getPlaybackRate(AudioPlaybackRate *rate)
+{
+    ALOGV("setPlaybackRate");
+    Mutex::Autolock lock(mLock);
+    if (mTrack == 0) {
+        return NO_INIT;
+    }
+    *rate = mTrack->getPlaybackRate();
+    return NO_ERROR;
+}
+
 status_t MediaPlayerService::AudioOutput::setAuxEffectSendLevel(float level)
 {
     ALOGV("setAuxEffectSendLevel(%f)", level);
+    Mutex::Autolock lock(mLock);
     mSendLevel = level;
     if (mTrack != 0) {
         return mTrack->setAuxEffectSendLevel(level);
@@ -1742,6 +1957,7 @@
 status_t MediaPlayerService::AudioOutput::attachAuxEffect(int effectId)
 {
     ALOGV("attachAuxEffect(%d)", effectId);
+    Mutex::Autolock lock(mLock);
     mAuxEffectId = effectId;
     if (mTrack != 0) {
         return mTrack->attachAuxEffect(effectId);
@@ -1754,6 +1970,7 @@
         int event, void *cookie, void *info) {
     //ALOGV("callbackwrapper");
     CallbackData *data = (CallbackData*)cookie;
+    // lock to ensure we aren't caught in the middle of a track switch.
     data->lock();
     AudioOutput *me = data->getOutput();
     AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
@@ -1773,20 +1990,23 @@
                 me, buffer->raw, buffer->size, me->mCallbackCookie,
                 CB_EVENT_FILL_BUFFER);
 
-        if ((me->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0 &&
-            actualSize == 0 && buffer->size > 0 && me->mNextOutput == NULL) {
-            // We've reached EOS but the audio track is not stopped yet,
-            // keep playing silence.
+        // Log when no data is returned from the callback.
+        // (1) We may have no data (especially with network streaming sources).
+        // (2) We may have reached the EOS and the audio track is not stopped yet.
+        // Note that AwesomePlayer/AudioPlayer will only return zero size when it reaches the EOS.
+        // NuPlayerRenderer will return zero when it doesn't have data (it doesn't block to fill).
+        //
+        // This is a benign busy-wait, with the next data request generated 10 ms or more later;
+        // nevertheless for power reasons, we don't want to see too many of these.
 
-            memset(buffer->raw, 0, buffer->size);
-            actualSize = buffer->size;
-        }
+        ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
 
+        me->mBytesWritten += actualSize;  // benign race with reader.
         buffer->size = actualSize;
         } break;
 
-
     case AudioTrack::EVENT_STREAM_END:
+        // currently only occurs for offloaded callbacks
         ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
         (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
                 me->mCallbackCookie, CB_EVENT_STREAM_END);
@@ -1798,6 +2018,19 @@
                 me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
         break;
 
+    case AudioTrack::EVENT_UNDERRUN:
+        // This occurs when there is no data available, typically
+        // when there is a failure to supply data to the AudioTrack.  It can also
+        // occur in non-offloaded mode when the audio device comes out of standby.
+        //
+        // If an AudioTrack underruns it outputs silence. Since this happens suddenly
+        // it may sound like an audible pop or glitch.
+        //
+        // The underrun event is sent once per track underrun; the condition is reset
+        // when more data is sent to the AudioTrack.
+        ALOGI("callbackwrapper: EVENT_UNDERRUN (discarded)");
+        break;
+
     default:
         ALOGE("received unknown event type: %d inside CallbackWrapper !", event);
     }
@@ -1807,11 +2040,13 @@
 
 int MediaPlayerService::AudioOutput::getSessionId() const
 {
+    Mutex::Autolock lock(mLock);
     return mSessionId;
 }
 
 uint32_t MediaPlayerService::AudioOutput::getSampleRate() const
 {
+    Mutex::Autolock lock(mLock);
     if (mTrack == 0) return 0;
     return mTrack->getSampleRate();
 }
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index fad3447..60d4617 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -34,7 +34,10 @@
 
 namespace android {
 
+struct AudioPlaybackRate;
 class AudioTrack;
+struct AVSyncSettings;
+class IDataSource;
 class IMediaRecorder;
 class IMediaMetadataRetriever;
 class IOMX;
@@ -94,21 +97,24 @@
                 audio_format_t format, int bufferCount,
                 AudioCallback cb, void *cookie,
                 audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
-                const audio_offload_info_t *offloadInfo = NULL);
+                const audio_offload_info_t *offloadInfo = NULL,
+                bool doNotReconnect = false,
+                uint32_t suggestedFrameCount = 0);
 
         virtual status_t        start();
-        virtual ssize_t         write(const void* buffer, size_t size);
+        virtual ssize_t         write(const void* buffer, size_t size, bool blocking = true);
         virtual void            stop();
         virtual void            flush();
         virtual void            pause();
         virtual void            close();
-                void            setAudioStreamType(audio_stream_type_t streamType) {
-                                                                        mStreamType = streamType; }
+                void            setAudioStreamType(audio_stream_type_t streamType);
         virtual audio_stream_type_t getAudioStreamType() const { return mStreamType; }
                 void            setAudioAttributes(const audio_attributes_t * attributes);
 
                 void            setVolume(float left, float right);
-        virtual status_t        setPlaybackRatePermille(int32_t ratePermille);
+        virtual status_t        setPlaybackRate(const AudioPlaybackRate& rate);
+        virtual status_t        getPlaybackRate(AudioPlaybackRate* rate /* nonnull */);
+
                 status_t        setAuxEffectSendLevel(float level);
                 status_t        attachAuxEffect(int effectId);
         virtual status_t        dump(int fd, const Vector<String16>& args) const;
@@ -125,7 +131,8 @@
         static void             setMinBufferCount();
         static void             CallbackWrapper(
                 int event, void *me, void *info);
-               void             deleteRecycledTrack();
+               void             deleteRecycledTrack_l();
+               void             close_l();
 
         sp<AudioTrack>          mTrack;
         sp<AudioTrack>          mRecycledTrack;
@@ -135,38 +142,53 @@
         CallbackData *          mCallbackData;
         uint64_t                mBytesWritten;
         audio_stream_type_t     mStreamType;
-        const audio_attributes_t *mAttributes;
+        audio_attributes_t *    mAttributes;
         float                   mLeftVolume;
         float                   mRightVolume;
-        int32_t                 mPlaybackRatePermille;
+        AudioPlaybackRate       mPlaybackRate;
         uint32_t                mSampleRateHz; // sample rate of the content, as set in open()
         float                   mMsecsPerFrame;
+        size_t                  mFrameSize;
         int                     mSessionId;
         int                     mUid;
         int                     mPid;
         float                   mSendLevel;
         int                     mAuxEffectId;
+        audio_output_flags_t    mFlags;
+        mutable Mutex           mLock;
+
+        // static variables below not protected by mutex
         static bool             mIsOnEmulator;
         static int              mMinBufferCount;  // 12 for emulator; otherwise 4
-        audio_output_flags_t    mFlags;
 
         // CallbackData is what is passed to the AudioTrack as the "user" data.
         // We need to be able to target this to a different Output on the fly,
         // so we can't use the Output itself for this.
         class CallbackData {
+            friend AudioOutput;
         public:
             CallbackData(AudioOutput *cookie) {
                 mData = cookie;
                 mSwitching = false;
             }
-            AudioOutput *   getOutput() { return mData;}
+            AudioOutput *   getOutput() const { return mData; }
             void            setOutput(AudioOutput* newcookie) { mData = newcookie; }
             // lock/unlock are used by the callback before accessing the payload of this object
-            void            lock() { mLock.lock(); }
-            void            unlock() { mLock.unlock(); }
-            // beginTrackSwitch/endTrackSwitch are used when this object is being handed over
+            void            lock() const { mLock.lock(); }
+            void            unlock() const { mLock.unlock(); }
+
+            // tryBeginTrackSwitch/endTrackSwitch are used when the CallbackData is handed over
             // to the next sink.
-            void            beginTrackSwitch() { mLock.lock(); mSwitching = true; }
+
+            // tryBeginTrackSwitch() returns true only if it obtains the lock.
+            bool            tryBeginTrackSwitch() {
+                LOG_ALWAYS_FATAL_IF(mSwitching, "tryBeginTrackSwitch() already called");
+                if (mLock.tryLock() != OK) {
+                    return false;
+                }
+                mSwitching = true;
+                return true;
+            }
             void            endTrackSwitch() {
                 if (mSwitching) {
                     mLock.unlock();
@@ -175,7 +197,7 @@
             }
         private:
             AudioOutput *   mData;
-            mutable Mutex   mLock;
+            mutable Mutex   mLock; // a recursive mutex might make this unnecessary.
             bool            mSwitching;
             DISALLOW_EVIL_CONSTRUCTORS(CallbackData);
         };
@@ -187,7 +209,7 @@
     static  void                instantiate();
 
     // IMediaPlayerService interface
-    virtual sp<IMediaRecorder>  createMediaRecorder();
+    virtual sp<IMediaRecorder>  createMediaRecorder(const String16 &opPackageName);
     void    removeMediaRecorderClient(wp<MediaRecorderClient> client);
     virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
 
@@ -199,8 +221,8 @@
     virtual sp<IDrm>            makeDrm();
     virtual sp<IHDCP>           makeHDCP(bool createEncryptionModule);
 
-    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const sp<IRemoteDisplayClient>& client,
-            const String8& iface);
+    virtual sp<IRemoteDisplay> listenForRemoteDisplay(const String16 &opPackageName,
+            const sp<IRemoteDisplayClient>& client, const String8& iface);
     virtual status_t            dump(int fd, const Vector<String16>& args);
 
             void                removeClient(wp<Client> client);
@@ -261,6 +283,11 @@
         virtual status_t        stop();
         virtual status_t        pause();
         virtual status_t        isPlaying(bool* state);
+        virtual status_t        setPlaybackSettings(const AudioPlaybackRate& rate);
+        virtual status_t        getPlaybackSettings(AudioPlaybackRate* rate /* nonnull */);
+        virtual status_t        setSyncSettings(const AVSyncSettings& rate, float videoFpsHint);
+        virtual status_t        getSyncSettings(AVSyncSettings* rate /* nonnull */,
+                                                float* videoFps /* nonnull */);
         virtual status_t        seekTo(int msec);
         virtual status_t        getCurrentPosition(int* msec);
         virtual status_t        getDuration(int* msec);
@@ -291,6 +318,8 @@
         virtual status_t        setDataSource(int fd, int64_t offset, int64_t length);
 
         virtual status_t        setDataSource(const sp<IStreamSource> &source);
+        virtual status_t        setDataSource(const sp<IDataSource> &source);
+
 
         sp<MediaPlayerBase>     setDataSource_pre(player_type playerType);
         void                    setDataSource_post(const sp<MediaPlayerBase>& p,
@@ -300,7 +329,7 @@
                                        int ext1, int ext2, const Parcel *obj);
 
                 pid_t           pid() const { return mPid; }
-        virtual status_t        dump(int fd, const Vector<String16>& args) const;
+        virtual status_t        dump(int fd, const Vector<String16>& args);
 
                 int             getAudioSessionId() { return mAudioSessionId; }
 
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 194abbb..f761dec 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -55,6 +55,16 @@
     return ok;
 }
 
+status_t MediaRecorderClient::setInputSurface(const sp<IGraphicBufferConsumer>& surface)
+{
+    ALOGV("setInputSurface");
+    Mutex::Autolock lock(mLock);
+    if (mRecorder == NULL) {
+        ALOGE("recorder is not initialized");
+        return NO_INIT;
+    }
+    return mRecorder->setInputSurface(surface);
+}
 
 sp<IGraphicBufferProducer> MediaRecorderClient::querySurfaceMediaSource()
 {
@@ -154,17 +164,6 @@
     return mRecorder->setAudioEncoder((audio_encoder)ae);
 }
 
-status_t MediaRecorderClient::setOutputFile(const char* path)
-{
-    ALOGV("setOutputFile(%s)", path);
-    Mutex::Autolock lock(mLock);
-    if (mRecorder == NULL) {
-        ALOGE("recorder is not initialized");
-        return NO_INIT;
-    }
-    return mRecorder->setOutputFile(path);
-}
-
 status_t MediaRecorderClient::setOutputFile(int fd, int64_t offset, int64_t length)
 {
     ALOGV("setOutputFile(%d, %lld, %lld)", fd, offset, length);
@@ -301,11 +300,12 @@
     return NO_ERROR;
 }
 
-MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid)
+MediaRecorderClient::MediaRecorderClient(const sp<MediaPlayerService>& service, pid_t pid,
+        const String16& opPackageName)
 {
     ALOGV("Client constructor");
     mPid = pid;
-    mRecorder = new StagefrightRecorder;
+    mRecorder = new StagefrightRecorder(opPackageName);
     mMediaPlayerService = service;
 }
 
@@ -336,7 +336,7 @@
     return mRecorder->setClientName(clientName);
 }
 
-status_t MediaRecorderClient::dump(int fd, const Vector<String16>& args) const {
+status_t MediaRecorderClient::dump(int fd, const Vector<String16>& args) {
     if (mRecorder != NULL) {
         return mRecorder->dump(fd, args);
     }
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index a65ec9f..05130d4 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -38,7 +38,6 @@
     virtual     status_t   setOutputFormat(int of);
     virtual     status_t   setVideoEncoder(int ve);
     virtual     status_t   setAudioEncoder(int ae);
-    virtual     status_t   setOutputFile(const char* path);
     virtual     status_t   setOutputFile(int fd, int64_t offset,
                                                   int64_t length);
     virtual     status_t   setVideoSize(int width, int height);
@@ -55,7 +54,8 @@
     virtual     status_t   init();
     virtual     status_t   close();
     virtual     status_t   release();
-    virtual     status_t   dump(int fd, const Vector<String16>& args) const;
+    virtual     status_t   dump(int fd, const Vector<String16>& args);
+    virtual     status_t   setInputSurface(const sp<IGraphicBufferConsumer>& surface);
     virtual     sp<IGraphicBufferProducer> querySurfaceMediaSource();
 
 private:
@@ -63,7 +63,8 @@
 
                            MediaRecorderClient(
                                    const sp<MediaPlayerService>& service,
-                                                               pid_t pid);
+                                                               pid_t pid,
+                                                               const String16& opPackageName);
     virtual                ~MediaRecorderClient();
 
     pid_t                  mPid;
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 715cc0c..a5a1fa5 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -34,6 +34,7 @@
 #include <media/IMediaHTTPService.h>
 #include <media/MediaMetadataRetrieverInterface.h>
 #include <media/MediaPlayerInterface.h>
+#include <media/stagefright/DataSource.h>
 #include <private/media/VideoFrame.h>
 #include "MetadataRetrieverClient.h"
 #include "StagefrightMetadataRetriever.h"
@@ -56,7 +57,7 @@
     disconnect();
 }
 
-status_t MetadataRetrieverClient::dump(int fd, const Vector<String16>& /*args*/) const
+status_t MetadataRetrieverClient::dump(int fd, const Vector<String16>& /*args*/)
 {
     const size_t SIZE = 256;
     char buffer[SIZE];
@@ -140,7 +141,7 @@
         ALOGE("fstat(%d) failed: %d, %s", fd, ret, strerror(errno));
         return BAD_VALUE;
     }
-    ALOGV("st_dev  = %llu", sb.st_dev);
+    ALOGV("st_dev  = %llu", static_cast<uint64_t>(sb.st_dev));
     ALOGV("st_mode = %u", sb.st_mode);
     ALOGV("st_uid  = %lu", static_cast<unsigned long>(sb.st_uid));
     ALOGV("st_gid  = %lu", static_cast<unsigned long>(sb.st_gid));
@@ -173,10 +174,30 @@
     return status;
 }
 
+status_t MetadataRetrieverClient::setDataSource(
+        const sp<IDataSource>& source)
+{
+    ALOGV("setDataSource(IDataSource)");
+    Mutex::Autolock lock(mLock);
+
+    sp<DataSource> dataSource = DataSource::CreateFromIDataSource(source);
+    player_type playerType =
+        MediaPlayerFactory::getPlayerType(NULL /* client */, dataSource);
+    ALOGV("player type = %d", playerType);
+    sp<MediaMetadataRetrieverBase> p = createRetriever(playerType);
+    if (p == NULL) return NO_INIT;
+    status_t ret = p->setDataSource(dataSource);
+    if (ret == NO_ERROR) mRetriever = p;
+    return ret;
+}
+
+Mutex MetadataRetrieverClient::sLock;
+
 sp<IMemory> MetadataRetrieverClient::getFrameAtTime(int64_t timeUs, int option)
 {
     ALOGV("getFrameAtTime: time(%lld us) option(%d)", timeUs, option);
     Mutex::Autolock lock(mLock);
+    Mutex::Autolock glock(sLock);
     mThumbnail.clear();
     if (mRetriever == NULL) {
         ALOGE("retriever is not initialized");
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index 9d3fbe9..fe7547c 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -49,11 +49,12 @@
             const KeyedVector<String8, String8> *headers);
 
     virtual status_t                setDataSource(int fd, int64_t offset, int64_t length);
+    virtual status_t                setDataSource(const sp<IDataSource>& source);
     virtual sp<IMemory>             getFrameAtTime(int64_t timeUs, int option);
     virtual sp<IMemory>             extractAlbumArt();
     virtual const char*             extractMetadata(int keyCode);
 
-    virtual status_t                dump(int fd, const Vector<String16>& args) const;
+    virtual status_t                dump(int fd, const Vector<String16>& args);
 
 private:
     friend class MediaPlayerService;
@@ -62,6 +63,7 @@
     virtual ~MetadataRetrieverClient();
 
     mutable Mutex                          mLock;
+    static  Mutex                          sLock;
     sp<MediaMetadataRetrieverBase>         mRetriever;
     pid_t                                  mPid;
 
diff --git a/media/libmediaplayerservice/RemoteDisplay.cpp b/media/libmediaplayerservice/RemoteDisplay.cpp
index eb959b4..0eb4b5d 100644
--- a/media/libmediaplayerservice/RemoteDisplay.cpp
+++ b/media/libmediaplayerservice/RemoteDisplay.cpp
@@ -26,13 +26,14 @@
 namespace android {
 
 RemoteDisplay::RemoteDisplay(
+        const String16 &opPackageName,
         const sp<IRemoteDisplayClient> &client,
         const char *iface)
     : mLooper(new ALooper),
       mNetSession(new ANetworkSession) {
     mLooper->setName("wfd_looper");
 
-    mSource = new WifiDisplaySource(mNetSession, client);
+    mSource = new WifiDisplaySource(opPackageName, mNetSession, client);
     mLooper->registerHandler(mSource);
 
     mNetSession->start();
diff --git a/media/libmediaplayerservice/RemoteDisplay.h b/media/libmediaplayerservice/RemoteDisplay.h
index 82a0116..d4573e9 100644
--- a/media/libmediaplayerservice/RemoteDisplay.h
+++ b/media/libmediaplayerservice/RemoteDisplay.h
@@ -28,11 +28,12 @@
 
 struct ALooper;
 struct ANetworkSession;
-struct IRemoteDisplayClient;
+class IRemoteDisplayClient;
 struct WifiDisplaySource;
 
 struct RemoteDisplay : public BnRemoteDisplay {
     RemoteDisplay(
+            const String16 &opPackageName,
             const sp<IRemoteDisplayClient> &client,
             const char *iface);
 
diff --git a/media/libmediaplayerservice/StagefrightPlayer.cpp b/media/libmediaplayerservice/StagefrightPlayer.cpp
index b37aee3..3fedd9b 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.cpp
+++ b/media/libmediaplayerservice/StagefrightPlayer.cpp
@@ -188,6 +188,14 @@
     return mPlayer->getParameter(key, reply);
 }
 
+status_t StagefrightPlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    return mPlayer->setPlaybackSettings(rate);
+}
+
+status_t StagefrightPlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    return mPlayer->getPlaybackSettings(rate);
+}
+
 status_t StagefrightPlayer::getMetadata(
         const media::Metadata::Filter& /* ids */, Parcel *records) {
     using media::Metadata;
diff --git a/media/libmediaplayerservice/StagefrightPlayer.h b/media/libmediaplayerservice/StagefrightPlayer.h
index e6c30ff..96013df 100644
--- a/media/libmediaplayerservice/StagefrightPlayer.h
+++ b/media/libmediaplayerservice/StagefrightPlayer.h
@@ -60,6 +60,8 @@
     virtual void setAudioSink(const sp<AudioSink> &audioSink);
     virtual status_t setParameter(int key, const Parcel &request);
     virtual status_t getParameter(int key, Parcel *reply);
+    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
 
     virtual status_t getMetadata(
             const media::Metadata::Filter& ids, Parcel *records);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 86639cb..e521fae 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -69,12 +69,12 @@
 }
 
 
-StagefrightRecorder::StagefrightRecorder()
-    : mWriter(NULL),
+StagefrightRecorder::StagefrightRecorder(const String16 &opPackageName)
+    : MediaRecorderBase(opPackageName),
+      mWriter(NULL),
       mOutputFd(-1),
       mAudioSource(AUDIO_SOURCE_CNT),
       mVideoSource(VIDEO_SOURCE_LIST_END),
-      mCaptureTimeLapse(false),
       mStarted(false) {
 
     ALOGV("Constructor");
@@ -206,7 +206,7 @@
 status_t StagefrightRecorder::setVideoFrameRate(int frames_per_second) {
     ALOGV("setVideoFrameRate: %d", frames_per_second);
     if ((frames_per_second <= 0 && frames_per_second != -1) ||
-        frames_per_second > 120) {
+        frames_per_second > kMaxHighSpeedFps) {
         ALOGE("Invalid video frame rate: %d", frames_per_second);
         return BAD_VALUE;
     }
@@ -241,12 +241,11 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setOutputFile(const char * /* path */) {
-    ALOGE("setOutputFile(const char*) must not be called");
-    // We don't actually support this at all, as the media_server process
-    // no longer has permissions to create files.
+status_t StagefrightRecorder::setInputSurface(
+        const sp<IGraphicBufferConsumer>& surface) {
+    mPersistentSurface = surface;
 
-    return -EPERM;
+    return OK;
 }
 
 status_t StagefrightRecorder::setOutputFile(int fd, int64_t offset, int64_t length) {
@@ -271,6 +270,31 @@
     return OK;
 }
 
+// Attempt to parse a float literal optionally surrounded by whitespace,
+// returns true on success, false otherwise.
+static bool safe_strtof(const char *s, float *val) {
+    char *end;
+
+    // It is lame, but according to the man page, we have to set errno to 0
+    // before calling strtof().
+    errno = 0;
+    *val = strtof(s, &end);
+
+    if (end == s || errno == ERANGE) {
+        return false;
+    }
+
+    // Skip trailing whitespace
+    while (isspace(*end)) {
+        ++end;
+    }
+
+    // For a successful return, the string must contain nothing but a valid
+    // float literal optionally surrounded by whitespace.
+
+    return *end == '\0';
+}
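+// e.g. safe_strtof("  2.5 ", &v) returns true with v == 2.5f, while
+// safe_strtof("2.5x", &v) returns false because a non-space character follows.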
+
 // Attempt to parse an int64 literal optionally surrounded by whitespace,
 // returns true on success, false otherwise.
 static bool safe_strtoi64(const char *s, int64_t *val) {
@@ -541,29 +565,32 @@
     return OK;
 }
 
-status_t StagefrightRecorder::setParamTimeLapseEnable(int32_t timeLapseEnable) {
-    ALOGV("setParamTimeLapseEnable: %d", timeLapseEnable);
+status_t StagefrightRecorder::setParamCaptureFpsEnable(int32_t captureFpsEnable) {
+    ALOGV("setParamCaptureFpsEnable: %d", captureFpsEnable);
 
-    if(timeLapseEnable == 0) {
-        mCaptureTimeLapse = false;
-    } else if (timeLapseEnable == 1) {
-        mCaptureTimeLapse = true;
+    if(captureFpsEnable == 0) {
+        mCaptureFpsEnable = false;
+    } else if (captureFpsEnable == 1) {
+        mCaptureFpsEnable = true;
     } else {
         return BAD_VALUE;
     }
     return OK;
 }
 
-status_t StagefrightRecorder::setParamTimeBetweenTimeLapseFrameCapture(int64_t timeUs) {
-    ALOGV("setParamTimeBetweenTimeLapseFrameCapture: %lld us", timeUs);
+status_t StagefrightRecorder::setParamCaptureFps(float fps) {
+    ALOGV("setParamCaptureFps: %.2f", fps);
+
+    int64_t timeUs = (int64_t) (1000000.0 / fps + 0.5f);
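+    // e.g. fps = 0.1 (one frame every 10 seconds) gives timeUs = 10000000,
+    // while fps = 240 gives timeUs = 4167.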
 
     // Not allowing time more than a day
     if (timeUs <= 0 || timeUs > 86400*1E6) {
-        ALOGE("Time between time lapse frame capture (%lld) is out of range [0, 1 Day]", timeUs);
+        ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", timeUs);
         return BAD_VALUE;
     }
 
-    mTimeBetweenTimeLapseFrameCaptureUs = timeUs;
+    mCaptureFps = fps;
+    mTimeBetweenCaptureUs = timeUs;
     return OK;
 }
 
@@ -686,15 +713,14 @@
             return setParamVideoTimeScale(timeScale);
         }
     } else if (key == "time-lapse-enable") {
-        int32_t timeLapseEnable;
-        if (safe_strtoi32(value.string(), &timeLapseEnable)) {
-            return setParamTimeLapseEnable(timeLapseEnable);
+        int32_t captureFpsEnable;
+        if (safe_strtoi32(value.string(), &captureFpsEnable)) {
+            return setParamCaptureFpsEnable(captureFpsEnable);
         }
-    } else if (key == "time-between-time-lapse-frame-capture") {
-        int64_t timeBetweenTimeLapseFrameCaptureUs;
-        if (safe_strtoi64(value.string(), &timeBetweenTimeLapseFrameCaptureUs)) {
-            return setParamTimeBetweenTimeLapseFrameCapture(
-                    timeBetweenTimeLapseFrameCaptureUs);
+    } else if (key == "time-lapse-fps") {
+        float fps;
+        if (safe_strtof(value.string(), &fps)) {
+            return setParamCaptureFps(fps);
         }
     } else {
         ALOGE("setParameter: failed to find key %s", key.string());
@@ -882,11 +908,32 @@
 }
 
 sp<MediaSource> StagefrightRecorder::createAudioSource() {
+    int32_t sourceSampleRate = mSampleRate;
+
+    if (mCaptureFpsEnable && mCaptureFps >= mFrameRate) {
+        // Upscale the sample rate for slow motion recording.
+        // Fail audio source creation if the source sample rate is too high, as it
+        // could cause out-of-memory due to the large input buffer size. Audio
+        // recording probably doesn't make sense in that scenario anyway, since the
+        // slow-down factor is huge (e.g. mSampleRate=48K, mCaptureFps=240, mFrameRate=1).
+        const static int32_t SAMPLE_RATE_HZ_MAX = 192000;
+        sourceSampleRate =
+                (mSampleRate * mCaptureFps + mFrameRate / 2) / mFrameRate;
+        if (sourceSampleRate < mSampleRate || sourceSampleRate > SAMPLE_RATE_HZ_MAX) {
+            ALOGE("source sample rate out of range! "
+                    "(mSampleRate %d, mCaptureFps %.2f, mFrameRate %d)",
+                    mSampleRate, mCaptureFps, mFrameRate);
+            return NULL;
+        }
+    }
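+    // e.g. mSampleRate=48000, mFrameRate=30, mCaptureFps=120 yields
+    // sourceSampleRate=192000 (accepted); mCaptureFps=240 would yield 384000,
+    // which exceeds SAMPLE_RATE_HZ_MAX and fails audio source creation.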
+
     sp<AudioSource> audioSource =
         new AudioSource(
                 mAudioSource,
-                mSampleRate,
-                mAudioChannels);
+                mOpPackageName,
+                sourceSampleRate,
+                mAudioChannels,
+                mSampleRate);
 
     status_t err = audioSource->initCheck();
 
@@ -896,7 +943,6 @@
     }
 
     sp<AMessage> format = new AMessage;
-    const char *mime;
     switch (mAudioEncoder) {
         case AUDIO_ENCODER_AMR_NB:
         case AUDIO_ENCODER_DEFAULT:
@@ -934,6 +980,7 @@
     if (mAudioTimeScale > 0) {
         format->setInt32("time-scale", mAudioTimeScale);
     }
+    format->setInt32("priority", 0 /* realtime */);
 
     sp<MediaSource> audioEncoder =
             MediaCodecSource::Create(mLooper, format, audioSource);
@@ -1165,8 +1212,7 @@
     }
 }
 
-status_t StagefrightRecorder::checkVideoEncoderCapabilities(
-        bool *supportsCameraSourceMetaDataMode) {
+status_t StagefrightRecorder::checkVideoEncoderCapabilities() {
     /* hardware codecs must support camera source meta data mode */
     Vector<CodecCapabilities> codecs;
     OMXClient client;
@@ -1178,11 +1224,8 @@
              mVideoEncoder == VIDEO_ENCODER_VP8 ? MEDIA_MIMETYPE_VIDEO_VP8 :
              mVideoEncoder == VIDEO_ENCODER_H264 ? MEDIA_MIMETYPE_VIDEO_AVC : ""),
             false /* decoder */, true /* hwCodec */, &codecs);
-    *supportsCameraSourceMetaDataMode = codecs.size() > 0;
-    ALOGV("encoder %s camera source meta-data mode",
-            *supportsCameraSourceMetaDataMode ? "supports" : "DOES NOT SUPPORT");
 
-    if (!mCaptureTimeLapse) {
+    if (!mCaptureFpsEnable) {
         // Don't clip for time lapse capture, as the encoder will have enough
         // time to encode because of the slow capture rate of time lapse.
         clipVideoBitRate();
@@ -1389,32 +1432,29 @@
 status_t StagefrightRecorder::setupCameraSource(
         sp<CameraSource> *cameraSource) {
     status_t err = OK;
-    bool encoderSupportsCameraSourceMetaDataMode;
-    if ((err = checkVideoEncoderCapabilities(
-                &encoderSupportsCameraSourceMetaDataMode)) != OK) {
+    if ((err = checkVideoEncoderCapabilities()) != OK) {
         return err;
     }
     Size videoSize;
     videoSize.width = mVideoWidth;
     videoSize.height = mVideoHeight;
-    if (mCaptureTimeLapse) {
-        if (mTimeBetweenTimeLapseFrameCaptureUs < 0) {
+    if (mCaptureFpsEnable) {
+        if (mTimeBetweenCaptureUs < 0) {
             ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
-                mTimeBetweenTimeLapseFrameCaptureUs);
+                mTimeBetweenCaptureUs);
             return BAD_VALUE;
         }
 
         mCameraSourceTimeLapse = CameraSourceTimeLapse::CreateFromCamera(
                 mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
                 videoSize, mFrameRate, mPreviewSurface,
-                mTimeBetweenTimeLapseFrameCaptureUs,
-                encoderSupportsCameraSourceMetaDataMode);
+                mTimeBetweenCaptureUs);
         *cameraSource = mCameraSourceTimeLapse;
     } else {
         *cameraSource = CameraSource::CreateFromCamera(
                 mCamera, mCameraProxy, mCameraId, mClientName, mClientUid,
                 videoSize, mFrameRate,
-                mPreviewSurface, encoderSupportsCameraSourceMetaDataMode);
+                mPreviewSurface);
     }
     mCamera.clear();
     mCameraProxy.clear();
@@ -1499,14 +1539,13 @@
         format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
 
         // set up time lapse/slow motion for surface source
-        if (mCaptureTimeLapse) {
-            if (mTimeBetweenTimeLapseFrameCaptureUs <= 0) {
-                ALOGE("Invalid mTimeBetweenTimeLapseFrameCaptureUs value: %lld",
-                    mTimeBetweenTimeLapseFrameCaptureUs);
+        if (mCaptureFpsEnable) {
+            if (mTimeBetweenCaptureUs <= 0) {
+                ALOGE("Invalid mTimeBetweenCaptureUs value: %lld",
+                        mTimeBetweenCaptureUs);
                 return BAD_VALUE;
             }
-            format->setInt64("time-lapse",
-                    mTimeBetweenTimeLapseFrameCaptureUs);
+            format->setInt64("time-lapse", mTimeBetweenCaptureUs);
         }
     }
 
@@ -1524,6 +1563,11 @@
         format->setInt32("level", mVideoEncoderLevel);
     }
 
+    format->setInt32("priority", 0 /* realtime */);
+    if (mCaptureFpsEnable) {
+        format->setFloat("operating-rate", mCaptureFps);
+    }
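+    // "operating-rate" hints to the codec how fast frames will actually arrive
+    // (e.g. 120 or 240 fps for high-speed capture), independent of the nominal
+    // frame rate in the format.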
+
     uint32_t flags = 0;
     if (mIsMetaDataStoredInVideoBuffers) {
         flags |= MediaCodecSource::FLAG_USE_METADATA_INPUT;
@@ -1533,8 +1577,8 @@
         flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
     }
 
-    sp<MediaCodecSource> encoder =
-            MediaCodecSource::Create(mLooper, format, cameraSource, flags);
+    sp<MediaCodecSource> encoder = MediaCodecSource::Create(
+            mLooper, format, cameraSource, mPersistentSurface, flags);
     if (encoder == NULL) {
         ALOGE("Failed to create video encoder");
         // When the encoder fails to be created, we need
@@ -1589,10 +1633,11 @@
 
     status_t err = OK;
     sp<MediaWriter> writer;
+    sp<MPEG4Writer> mp4writer;
     if (mOutputFormat == OUTPUT_FORMAT_WEBM) {
         writer = new WebmWriter(mOutputFd);
     } else {
-        writer = new MPEG4Writer(mOutputFd);
+        writer = mp4writer = new MPEG4Writer(mOutputFd);
     }
 
     if (mVideoSource < VIDEO_SOURCE_LIST_END) {
@@ -1619,19 +1664,23 @@
         // This helps make sure that the "recording" sound is suppressed for
         // camcorder applications in the recorded files.
         // TODO Audio source is currently unsupported for webm output; vorbis encoder needed.
-        if (!mCaptureTimeLapse && (mAudioSource != AUDIO_SOURCE_CNT)) {
+        // disable audio for time lapse recording
+        bool disableAudio = mCaptureFpsEnable && mCaptureFps < mFrameRate;
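+        // mCaptureFps < mFrameRate is the time-lapse case (no audio); slow motion
+        // (mCaptureFps >= mFrameRate) keeps audio, resampled in createAudioSource().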
+        if (!disableAudio && mAudioSource != AUDIO_SOURCE_CNT) {
             err = setupAudioEncoder(writer);
             if (err != OK) return err;
             mTotalBitRate += mAudioBitRate;
         }
 
+        if (mCaptureFpsEnable) {
+            mp4writer->setCaptureRate(mCaptureFps);
+        }
+
         if (mInterleaveDurationUs > 0) {
-            reinterpret_cast<MPEG4Writer *>(writer.get())->
-                setInterleaveDuration(mInterleaveDurationUs);
+            mp4writer->setInterleaveDuration(mInterleaveDurationUs);
         }
         if (mLongitudex10000 > -3600000 && mLatitudex10000 > -3600000) {
-            reinterpret_cast<MPEG4Writer *>(writer.get())->
-                setGeoData(mLatitudex10000, mLongitudex10000);
+            mp4writer->setGeoData(mLatitudex10000, mLongitudex10000);
         }
     }
     if (mMaxFileDurationUs != 0) {
@@ -1704,7 +1753,7 @@
     ALOGV("stop");
     status_t err = OK;
 
-    if (mCaptureTimeLapse && mCameraSourceTimeLapse != NULL) {
+    if (mCaptureFpsEnable && mCameraSourceTimeLapse != NULL) {
         mCameraSourceTimeLapse->startQuickReadReturns();
         mCameraSourceTimeLapse = NULL;
     }
@@ -1715,6 +1764,7 @@
     }
 
     mGraphicBufferProducer.clear();
+    mPersistentSurface.clear();
 
     if (mOutputFd >= 0) {
         ::close(mOutputFd);
@@ -1778,8 +1828,9 @@
     mMaxFileDurationUs = 0;
     mMaxFileSizeBytes = 0;
     mTrackEveryTimeDurationUs = 0;
-    mCaptureTimeLapse = false;
-    mTimeBetweenTimeLapseFrameCaptureUs = -1;
+    mCaptureFpsEnable = false;
+    mCaptureFps = 0.0f;
+    mTimeBetweenCaptureUs = -1;
     mCameraSourceTimeLapse = NULL;
     mIsMetaDataStoredInVideoBuffers = false;
     mEncoderProfiles = MediaProfiles::getInstance();
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 54c38d3..da00bc7 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -35,12 +35,13 @@
 class MetaData;
 struct AudioSource;
 class MediaProfiles;
+class IGraphicBufferConsumer;
 class IGraphicBufferProducer;
 class SurfaceMediaSource;
-class ALooper;
+struct ALooper;
 
 struct StagefrightRecorder : public MediaRecorderBase {
-    StagefrightRecorder();
+    StagefrightRecorder(const String16 &opPackageName);
     virtual ~StagefrightRecorder();
 
     virtual status_t init();
@@ -53,7 +54,7 @@
     virtual status_t setVideoFrameRate(int frames_per_second);
     virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
     virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
-    virtual status_t setOutputFile(const char *path);
+    virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface);
     virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
     virtual status_t setParameters(const String8& params);
     virtual status_t setListener(const sp<IMediaRecorderClient>& listener);
@@ -73,6 +74,7 @@
     sp<ICamera> mCamera;
     sp<ICameraRecordingProxy> mCameraProxy;
     sp<IGraphicBufferProducer> mPreviewSurface;
+    sp<IGraphicBufferConsumer> mPersistentSurface;
     sp<IMediaRecorderClient> mListener;
     String16 mClientName;
     uid_t mClientUid;
@@ -109,11 +111,11 @@
     int32_t mStartTimeOffsetMs;
     int32_t mTotalBitRate;
 
-    bool mCaptureTimeLapse;
-    int64_t mTimeBetweenTimeLapseFrameCaptureUs;
+    bool mCaptureFpsEnable;
+    float mCaptureFps;
+    int64_t mTimeBetweenCaptureUs;
     sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
 
-
     String8 mParams;
 
     bool mIsMetaDataStoredInVideoBuffers;
@@ -127,6 +129,8 @@
     sp<IGraphicBufferProducer> mGraphicBufferProducer;
     sp<ALooper> mLooper;
 
+    static const int kMaxHighSpeedFps = 1000;
+
     status_t prepareInternal();
     status_t setupMPEG4orWEBMRecording();
     void setupMPEG4orWEBMMetaData(sp<MetaData> *meta);
@@ -136,8 +140,7 @@
     status_t setupRTPRecording();
     status_t setupMPEG2TSRecording();
     sp<MediaSource> createAudioSource();
-    status_t checkVideoEncoderCapabilities(
-            bool *supportsCameraSourceMetaDataMode);
+    status_t checkVideoEncoderCapabilities();
     status_t checkAudioEncoderCapabilities();
     // Generic MediaSource set-up. Returns the appropriate
     // source (CameraSource or SurfaceMediaSource)
@@ -153,8 +156,8 @@
     status_t setParamAudioNumberOfChannels(int32_t channles);
     status_t setParamAudioSamplingRate(int32_t sampleRate);
     status_t setParamAudioTimeScale(int32_t timeScale);
-    status_t setParamTimeLapseEnable(int32_t timeLapseEnable);
-    status_t setParamTimeBetweenTimeLapseFrameCapture(int64_t timeUs);
+    status_t setParamCaptureFpsEnable(int32_t timeLapseEnable);
+    status_t setParamCaptureFps(float fps);
     status_t setParamVideoEncodingBitRate(int32_t bitRate);
     status_t setParamVideoIFramesInterval(int32_t seconds);
     status_t setParamVideoEncoderProfile(int32_t profile);
diff --git a/media/libmediaplayerservice/nuplayer/Android.mk b/media/libmediaplayerservice/nuplayer/Android.mk
index 6609874..cd20837 100644
--- a/media/libmediaplayerservice/nuplayer/Android.mk
+++ b/media/libmediaplayerservice/nuplayer/Android.mk
@@ -16,6 +16,7 @@
         StreamingSource.cpp             \
 
 LOCAL_C_INCLUDES := \
+	$(TOP)/frameworks/av/media/libstagefright                     \
 	$(TOP)/frameworks/av/media/libstagefright/httplive            \
 	$(TOP)/frameworks/av/media/libstagefright/include             \
 	$(TOP)/frameworks/av/media/libstagefright/mpeg2ts             \
@@ -24,6 +25,15 @@
 	$(TOP)/frameworks/av/media/libmediaplayerservice              \
 	$(TOP)/frameworks/native/include/media/openmax
 
+LOCAL_CFLAGS += -Werror -Wall
+
+# enable experiments only in userdebug and eng builds
+ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
+endif
+
+LOCAL_CLANG := true
+
 LOCAL_MODULE:= libstagefright_nuplayer
 
 LOCAL_MODULE_TAGS := eng
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 1b2fc5e..7dc9be7 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -56,7 +56,7 @@
       mVideoLastDequeueTimeUs(0),
       mFetchSubtitleDataGeneration(0),
       mFetchTimedTextDataGeneration(0),
-      mDurationUs(0ll),
+      mDurationUs(-1ll),
       mAudioIsVorbis(false),
       mIsWidevine(false),
       mIsSecure(false),
@@ -65,12 +65,12 @@
       mUID(uid),
       mFd(-1),
       mDrmManagerClient(NULL),
-      mMetaDataSize(-1ll),
       mBitrate(-1ll),
       mPollBufferingGeneration(0),
       mPendingReadBufferTypes(0),
       mBuffering(false),
-      mPrepareBuffering(false) {
+      mPrepareBuffering(false),
+      mPrevBufferPercentage(-1) {
     resetDataSource();
     DataSource::RegisterDefaultSniffers();
 }
@@ -124,29 +124,46 @@
     return OK;
 }
 
+status_t NuPlayer::GenericSource::setDataSource(const sp<DataSource>& source) {
+    resetDataSource();
+    mDataSource = source;
+    return OK;
+}
+
 sp<MetaData> NuPlayer::GenericSource::getFileFormatMeta() const {
     return mFileMeta;
 }
 
 status_t NuPlayer::GenericSource::initFromDataSource() {
     sp<MediaExtractor> extractor;
+    String8 mimeType;
+    float confidence;
+    sp<AMessage> dummy;
+    bool isWidevineStreaming = false;
 
     CHECK(mDataSource != NULL);
 
     if (mIsWidevine) {
-        String8 mimeType;
-        float confidence;
-        sp<AMessage> dummy;
-        bool success;
-
-        success = SniffWVM(mDataSource, &mimeType, &confidence, &dummy);
-        if (!success
-                || strcasecmp(
+        isWidevineStreaming = SniffWVM(
+                mDataSource, &mimeType, &confidence, &dummy);
+        if (!isWidevineStreaming ||
+                strcasecmp(
                     mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM)) {
             ALOGE("unsupported widevine mime: %s", mimeType.string());
             return UNKNOWN_ERROR;
         }
+    } else if (mIsStreaming) {
+        if (!mDataSource->sniff(&mimeType, &confidence, &dummy)) {
+            return UNKNOWN_ERROR;
+        }
+        isWidevineStreaming = !strcasecmp(
+                mimeType.string(), MEDIA_MIMETYPE_CONTAINER_WVM);
+    }
 
+    if (isWidevineStreaming) {
+        // we don't want cached source for widevine streaming.
+        mCachedSource.clear();
+        mDataSource = mHttpSource;
         mWVMExtractor = new WVMExtractor(mDataSource);
         mWVMExtractor->setAdaptiveStreamingMode(true);
         if (mUIDValid) {
@@ -155,7 +172,7 @@
         extractor = mWVMExtractor;
     } else {
         extractor = MediaExtractor::Create(mDataSource,
-                mSniffedMIME.empty() ? NULL: mSniffedMIME.c_str());
+                mimeType.isEmpty() ? NULL : mimeType.string());
     }
 
     if (extractor == NULL) {
@@ -181,14 +198,6 @@
             if (mFileMeta->findCString(kKeyMIMEType, &fileMime)
                     && !strncasecmp(fileMime, "video/wvm", 9)) {
                 mIsWidevine = true;
-                if (!mUri.empty()) {
-                  // streaming, but the app forgot to specify widevine:// url
-                  mWVMExtractor = static_cast<WVMExtractor *>(extractor.get());
-                  mWVMExtractor->setAdaptiveStreamingMode(true);
-                  if (mUIDValid) {
-                    mWVMExtractor->setUID(mUID);
-                  }
-                }
             }
         }
     }
@@ -315,6 +324,10 @@
     return INVALID_OPERATION;
 }
 
+bool NuPlayer::GenericSource::isStreaming() const {
+    return mIsStreaming;
+}
+
 NuPlayer::GenericSource::~GenericSource() {
     if (mLooper != NULL) {
         mLooper->unregisterHandler(id());
@@ -332,7 +345,7 @@
         mLooper->registerHandler(this);
     }
 
-    sp<AMessage> msg = new AMessage(kWhatPrepareAsync, id());
+    sp<AMessage> msg = new AMessage(kWhatPrepareAsync, this);
     msg->post();
 }
 
@@ -345,6 +358,7 @@
 
         if (!mUri.empty()) {
             const char* uri = mUri.c_str();
+            String8 contentType;
             mIsWidevine = !strncasecmp(uri, "widevine://", 11);
 
             if (!strncasecmp("http://", uri, 7)
@@ -359,7 +373,7 @@
             }
 
             mDataSource = DataSource::CreateFromURI(
-                   mHTTPService, uri, &mUriHeaders, &mContentType,
+                   mHTTPService, uri, &mUriHeaders, &contentType,
                    static_cast<HTTPBase *>(mHttpSource.get()));
         } else {
             mIsWidevine = false;
@@ -373,34 +387,22 @@
             notifyPreparedAndCleanup(UNKNOWN_ERROR);
             return;
         }
-
-        if (mDataSource->flags() & DataSource::kIsCachingDataSource) {
-            mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
-        }
-
-        // For widevine or other cached streaming cases, we need to wait for
-        // enough buffering before reporting prepared.
-        // Note that even when URL doesn't start with widevine://, mIsWidevine
-        // could still be set to true later, if the streaming or file source
-        // is sniffed to be widevine. We don't want to buffer for file source
-        // in that case, so must check the flag now.
-        mIsStreaming = (mIsWidevine || mCachedSource != NULL);
     }
 
-    // check initial caching status
-    status_t err = prefillCacheIfNecessary();
-    if (err != OK) {
-        if (err == -EAGAIN) {
-            (new AMessage(kWhatPrepareAsync, id()))->post(200000);
-        } else {
-            ALOGE("Failed to prefill data cache!");
-            notifyPreparedAndCleanup(UNKNOWN_ERROR);
-        }
-        return;
+    if (mDataSource->flags() & DataSource::kIsCachingDataSource) {
+        mCachedSource = static_cast<NuCachedSource2 *>(mDataSource.get());
     }
 
-    // init extrator from data source
-    err = initFromDataSource();
+    // For widevine or other cached streaming cases, we need to wait for
+    // enough buffering before reporting prepared.
+    // Note that even when URL doesn't start with widevine://, mIsWidevine
+    // could still be set to true later, if the streaming or file source
+    // is sniffed to be widevine. We don't want to buffer for file source
+    // in that case, so must check the flag now.
+    mIsStreaming = (mIsWidevine || mCachedSource != NULL);
+
+    // init extractor from data source
+    status_t err = initFromDataSource();
 
     if (err != OK) {
         ALOGE("Failed to init from data source!");
@@ -429,7 +431,7 @@
 
     if (mIsSecure) {
         // secure decoders must be instantiated before starting widevine source
-        sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, id());
+        sp<AMessage> reply = new AMessage(kWhatSecureDecodersInstantiated, this);
         notifyInstantiateSecureDecoders(reply);
     } else {
         finishPrepareAsync();
@@ -465,9 +467,6 @@
 
 void NuPlayer::GenericSource::notifyPreparedAndCleanup(status_t err) {
     if (err != OK) {
-        mMetaDataSize = -1ll;
-        mContentType = "";
-        mSniffedMIME = "";
         mDataSource.clear();
         mCachedSource.clear();
         mHttpSource.clear();
@@ -478,76 +477,6 @@
     notifyPrepared(err);
 }
 
-status_t NuPlayer::GenericSource::prefillCacheIfNecessary() {
-    CHECK(mDataSource != NULL);
-
-    if (mCachedSource == NULL) {
-        // no prefill if the data source is not cached
-        return OK;
-    }
-
-    // We're not doing this for streams that appear to be audio-only
-    // streams to ensure that even low bandwidth streams start
-    // playing back fairly instantly.
-    if (!strncasecmp(mContentType.string(), "audio/", 6)) {
-        return OK;
-    }
-
-    // We're going to prefill the cache before trying to instantiate
-    // the extractor below, as the latter is an operation that otherwise
-    // could block on the datasource for a significant amount of time.
-    // During that time we'd be unable to abort the preparation phase
-    // without this prefill.
-
-    // Initially make sure we have at least 192 KB for the sniff
-    // to complete without blocking.
-    static const size_t kMinBytesForSniffing = 192 * 1024;
-    static const size_t kDefaultMetaSize = 200000;
-
-    status_t finalStatus;
-
-    size_t cachedDataRemaining =
-            mCachedSource->approxDataRemaining(&finalStatus);
-
-    if (finalStatus != OK || (mMetaDataSize >= 0
-            && (off64_t)cachedDataRemaining >= mMetaDataSize)) {
-        ALOGV("stop caching, status %d, "
-                "metaDataSize %lld, cachedDataRemaining %zu",
-                finalStatus, mMetaDataSize, cachedDataRemaining);
-        return OK;
-    }
-
-    ALOGV("now cached %zu bytes of data", cachedDataRemaining);
-
-    if (mMetaDataSize < 0
-            && cachedDataRemaining >= kMinBytesForSniffing) {
-        String8 tmp;
-        float confidence;
-        sp<AMessage> meta;
-        if (!mCachedSource->sniff(&tmp, &confidence, &meta)) {
-            return UNKNOWN_ERROR;
-        }
-
-        // We successfully identified the file's extractor to
-        // be, remember this mime type so we don't have to
-        // sniff it again when we call MediaExtractor::Create()
-        mSniffedMIME = tmp.string();
-
-        if (meta == NULL
-                || !meta->findInt64("meta-data-size",
-                        reinterpret_cast<int64_t*>(&mMetaDataSize))) {
-            mMetaDataSize = kDefaultMetaSize;
-        }
-
-        if (mMetaDataSize < 0ll) {
-            ALOGE("invalid metaDataSize = %lld bytes", mMetaDataSize);
-            return UNKNOWN_ERROR;
-        }
-    }
-
-    return -EAGAIN;
-}
-
 void NuPlayer::GenericSource::start() {
     ALOGI("start");
 
@@ -563,7 +492,7 @@
     setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
     mStarted = true;
 
-    (new AMessage(kWhatStart, id()))->post();
+    (new AMessage(kWhatStart, this))->post();
 }
 
 void NuPlayer::GenericSource::stop() {
@@ -572,7 +501,7 @@
     mStarted = false;
     if (mIsWidevine || mIsSecure) {
         // For widevine or secure sources we need to prevent any further reads.
-        sp<AMessage> msg = new AMessage(kWhatStopWidevine, id());
+        sp<AMessage> msg = new AMessage(kWhatStopWidevine, this);
         sp<AMessage> response;
         (void) msg->postAndAwaitResponse(&response);
     }
@@ -589,7 +518,7 @@
     setDrmPlaybackStatusIfNeeded(Playback::START, getLastReadPosition() / 1000);
     mStarted = true;
 
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 }
 
 void NuPlayer::GenericSource::disconnect() {
@@ -616,7 +545,7 @@
 }
 
 void NuPlayer::GenericSource::schedulePollBuffering() {
-    sp<AMessage> msg = new AMessage(kWhatPollBuffering, id());
+    sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
     msg->setInt32("generation", mPollBufferingGeneration);
     msg->post(1000000ll);
 }
@@ -624,6 +553,7 @@
 void NuPlayer::GenericSource::cancelPollBuffering() {
     mBuffering = false;
     ++mPollBufferingGeneration;
+    mPrevBufferPercentage = -1;
 }
 
 void NuPlayer::GenericSource::restartPollBuffering() {
@@ -633,7 +563,19 @@
     }
 }
 
-void NuPlayer::GenericSource::notifyBufferingUpdate(int percentage) {
+void NuPlayer::GenericSource::notifyBufferingUpdate(int32_t percentage) {
+    // Buffering percent could go backward as it's estimated from remaining
+    // data and last access time. This could cause the buffering position
+    // drawn on media control to jitter slightly. Remember previously reported
+    // percentage and don't allow it to go backward.
+    if (percentage < mPrevBufferPercentage) {
+        percentage = mPrevBufferPercentage;
+    } else if (percentage > 100) {
+        percentage = 100;
+    }
+
+    mPrevBufferPercentage = percentage;
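+    // e.g. if the estimate drops from 37% to 35%, 37% is reported again so the
+    // buffered position shown to the app never moves backward.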
+
     ALOGV("notifyBufferingUpdate: buffering %d%%", percentage);
 
     sp<AMessage> msg = dupNotify();
@@ -687,10 +629,10 @@
     int32_t kbps = 0;
     status_t err = UNKNOWN_ERROR;
 
-    if (mCachedSource != NULL) {
-        err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
-    } else if (mWVMExtractor != NULL) {
+    if (mWVMExtractor != NULL) {
         err = mWVMExtractor->getEstimatedBandwidthKbps(&kbps);
+    } else if (mCachedSource != NULL) {
+        err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
     }
 
     if (err == OK) {
@@ -712,7 +654,13 @@
     int64_t cachedDurationUs = -1ll;
     ssize_t cachedDataRemaining = -1;
 
-    if (mCachedSource != NULL) {
+    ALOGW_IF(mWVMExtractor != NULL && mCachedSource != NULL,
+            "WVMExtractor and NuCachedSource both present");
+
+    if (mWVMExtractor != NULL) {
+        cachedDurationUs =
+                mWVMExtractor->getCachedDurationUs(&finalStatus);
+    } else if (mCachedSource != NULL) {
         cachedDataRemaining =
                 mCachedSource->approxDataRemaining(&finalStatus);
 
@@ -728,9 +676,6 @@
                 cachedDurationUs = cachedDataRemaining * 8000000ll / bitrate;
             }
         }
-    } else if (mWVMExtractor != NULL) {
-        cachedDurationUs
-            = mWVMExtractor->getCachedDurationUs(&finalStatus);
     }
 
     if (finalStatus != OK) {
@@ -762,7 +707,7 @@
             stopBufferingIfNecessary();
         }
     } else if (cachedDataRemaining >= 0) {
-        ALOGV("onPollBuffering: cachedDataRemaining %d bytes",
+        ALOGV("onPollBuffering: cachedDataRemaining %zd bytes",
                 cachedDataRemaining);
 
         if (cachedDataRemaining < kLowWaterMarkBytes) {
@@ -849,7 +794,7 @@
           }
           readBuffer(trackType, timeUs, &actualTimeUs, formatChange);
           readBuffer(counterpartType, -1, NULL, formatChange);
-          ALOGV("timeUs %lld actualTimeUs %lld", timeUs, actualTimeUs);
+          ALOGV("timeUs %lld actualTimeUs %lld", (long long)timeUs, (long long)actualTimeUs);
 
           break;
       }
@@ -918,7 +863,7 @@
               mVideoTrack.mPackets->clear();
           }
           sp<AMessage> response = new AMessage;
-          uint32_t replyID;
+          sp<AReplyToken> replyID;
           CHECK(msg->senderAwaitsResponse(&replyID));
           response->postReply(replyID);
           break;
@@ -958,7 +903,7 @@
         const int64_t oneSecUs = 1000000ll;
         delayUs -= oneSecUs;
     }
-    sp<AMessage> msg2 = new AMessage(sendWhat, id());
+    sp<AMessage> msg2 = new AMessage(sendWhat, this);
     msg2->setInt32("generation", msgGeneration);
     msg2->post(delayUs < 0 ? 0 : delayUs);
 }
@@ -998,7 +943,7 @@
 }
 
 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
-    sp<AMessage> msg = new AMessage(kWhatGetFormat, id());
+    sp<AMessage> msg = new AMessage(kWhatGetFormat, this);
     msg->setInt32("audio", audio);
 
     sp<AMessage> response;
@@ -1020,7 +965,7 @@
     sp<MetaData> format = doGetFormatMeta(audio);
     response->setPointer("format", format.get());
 
-    uint32_t replyID;
+    sp<AReplyToken> replyID;
     CHECK(msg->senderAwaitsResponse(&replyID));
     response->postReply(replyID);
 }
@@ -1060,7 +1005,9 @@
 
     status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
 
-    if (!track->mPackets->hasBufferAvailable(&finalResult)) {
+    // start pulling in more buffers if we only have one (or no) buffer left
+    // so that decoder has less chance of being starved
+    if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
         postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
     }
 
@@ -1087,7 +1034,7 @@
 
     if (mSubtitleTrack.mSource != NULL
             && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
-        sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, id());
+        sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
         msg->setInt64("timeUs", timeUs);
         msg->setInt32("generation", mFetchSubtitleDataGeneration);
         msg->post();
@@ -1095,7 +1042,7 @@
 
     if (mTimedTextTrack.mSource != NULL
             && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
-        sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, id());
+        sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
         msg->setInt64("timeUs", timeUs);
         msg->setInt32("generation", mFetchTimedTextDataGeneration);
         msg->post();
@@ -1124,6 +1071,7 @@
 
     const char *mime;
     CHECK(meta->findCString(kKeyMIMEType, &mime));
+    format->setString("mime", mime);
 
     int32_t trackType;
     if (!strncasecmp(mime, "video/", 6)) {
@@ -1144,8 +1092,6 @@
     format->setString("language", lang);
 
     if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
-        format->setString("mime", mime);
-
         int32_t isAutoselect = 1, isDefault = 0, isForced = 0;
         meta->findInt32(kKeyTrackIsAutoselect, &isAutoselect);
         meta->findInt32(kKeyTrackIsDefault, &isDefault);
@@ -1160,7 +1106,7 @@
 }
 
 ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
-    sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, id());
+    sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
     msg->setInt32("type", type);
 
     sp<AMessage> response;
@@ -1183,7 +1129,7 @@
     ssize_t index = doGetSelectedTrack(type);
     response->setInt32("index", index);
 
-    uint32_t replyID;
+    sp<AReplyToken> replyID;
     CHECK(msg->senderAwaitsResponse(&replyID));
     response->postReply(replyID);
 }
@@ -1216,7 +1162,7 @@
 
 status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
     ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
-    sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
+    sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
     msg->setInt32("trackIndex", trackIndex);
     msg->setInt32("select", select);
     msg->setInt64("timeUs", timeUs);
@@ -1241,7 +1187,7 @@
     status_t err = doSelectTrack(trackIndex, select, timeUs);
     response->setInt32("err", err);
 
-    uint32_t replyID;
+    sp<AReplyToken> replyID;
     CHECK(msg->senderAwaitsResponse(&replyID));
     response->postReply(replyID);
 }
@@ -1302,7 +1248,7 @@
         status_t eosResult; // ignored
         if (mSubtitleTrack.mSource != NULL
                 && !mSubtitleTrack.mPackets->hasBufferAvailable(&eosResult)) {
-            sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, id());
+            sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, this);
             msg->setInt64("timeUs", timeUs);
             msg->setInt32("generation", mFetchSubtitleDataGeneration);
             msg->post();
@@ -1310,7 +1256,7 @@
 
         if (mTimedTextTrack.mSource != NULL
                 && !mTimedTextTrack.mPackets->hasBufferAvailable(&eosResult)) {
-            sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, id());
+            sp<AMessage> msg = new AMessage(kWhatFetchTimedTextData, this);
             msg->setInt64("timeUs", timeUs);
             msg->setInt32("generation", mFetchTimedTextDataGeneration);
             msg->post();
@@ -1324,7 +1270,7 @@
             return OK;
         }
 
-        sp<AMessage> msg = new AMessage(kWhatChangeAVSource, id());
+        sp<AMessage> msg = new AMessage(kWhatChangeAVSource, this);
         msg->setInt32("trackIndex", trackIndex);
         msg->post();
         return OK;
@@ -1334,7 +1280,7 @@
 }
 
 status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("seekTimeUs", seekTimeUs);
 
     sp<AMessage> response;
@@ -1354,7 +1300,7 @@
     status_t err = doSeek(seekTimeUs);
     response->setInt32("err", err);
 
-    uint32_t replyID;
+    sp<AReplyToken> replyID;
     CHECK(msg->senderAwaitsResponse(&replyID));
     response->postReply(replyID);
 }
@@ -1459,6 +1405,14 @@
         meta->setInt32("trackIndex", mSubtitleTrack.mIndex);
     }
 
+    uint32_t dataType; // unused
+    const void *seiData;
+    size_t seiLength;
+    if (mb->meta_data()->findData(kKeySEI, &dataType, &seiData, &seiLength)) {
+        sp<ABuffer> sei = ABuffer::CreateAsCopy(seiData, seiLength);
+        meta->setBuffer("sei", sei);
+    }
+
     if (actualTimeUs) {
         *actualTimeUs = timeUs;
     }
@@ -1474,7 +1428,7 @@
 
     if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
         mPendingReadBufferTypes |= (1 << trackType);
-        sp<AMessage> msg = new AMessage(kWhatReadBuffer, id());
+        sp<AMessage> msg = new AMessage(kWhatReadBuffer, this);
         msg->setInt32("trackType", trackType);
         msg->post();
     }
@@ -1506,6 +1460,8 @@
             track = &mVideoTrack;
             if (mIsWidevine) {
                 maxBuffers = 2;
+            } else {
+                maxBuffers = 4;
             }
             break;
         case MEDIA_TRACK_TYPE_AUDIO:
@@ -1562,17 +1518,7 @@
                 mVideoTimeUs = timeUs;
             }
 
-            // formatChange && seeking: track whose source is changed during selection
-            // formatChange && !seeking: track whose source is not changed during selection
-            // !formatChange: normal seek
-            if ((seeking || formatChange)
-                    && (trackType == MEDIA_TRACK_TYPE_AUDIO
-                    || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
-                ATSParser::DiscontinuityType type = (formatChange && seeking)
-                        ? ATSParser::DISCONTINUITY_FORMATCHANGE
-                        : ATSParser::DISCONTINUITY_NONE;
-                track->mPackets->queueDiscontinuity( type, NULL, true /* discard */);
-            }
+            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
 
             sp<ABuffer> buffer = mediaBufferToABuffer(
                     mbuf, trackType, seekTimeUs, actualTimeUs);
@@ -1590,10 +1536,26 @@
                     false /* discard */);
 #endif
         } else {
+            queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
             track->mPackets->signalEOS(err);
             break;
         }
     }
 }
 
+void NuPlayer::GenericSource::queueDiscontinuityIfNeeded(
+        bool seeking, bool formatChange, media_track_type trackType, Track *track) {
+    // formatChange && seeking: track whose source is changed during selection
+    // formatChange && !seeking: track whose source is not changed during selection
+    // !formatChange: normal seek
+    if ((seeking || formatChange)
+            && (trackType == MEDIA_TRACK_TYPE_AUDIO
+            || trackType == MEDIA_TRACK_TYPE_VIDEO)) {
+        ATSParser::DiscontinuityType type = (formatChange && seeking)
+                ? ATSParser::DISCONTINUITY_FORMATCHANGE
+                : ATSParser::DISCONTINUITY_NONE;
+        track->mPackets->queueDiscontinuity(type, NULL /* extra */, true /* discard */);
+    }
+}
+
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 2d73ea9..dc85d2d 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -31,12 +31,13 @@
 class DrmManagerClient;
 struct AnotherPacketSource;
 struct ARTSPController;
-struct DataSource;
+class DataSource;
+class IDataSource;
 struct IMediaHTTPService;
 struct MediaSource;
 class MediaBuffer;
 struct NuCachedSource2;
-struct WVMExtractor;
+class WVMExtractor;
 
 struct NuPlayer::GenericSource : public NuPlayer::Source {
     GenericSource(const sp<AMessage> &notify, bool uidValid, uid_t uid);
@@ -48,6 +49,8 @@
 
     status_t setDataSource(int fd, int64_t offset, int64_t length);
 
+    status_t setDataSource(const sp<DataSource>& dataSource);
+
     virtual void prepareAsync();
 
     virtual void start();
@@ -72,6 +75,8 @@
 
     virtual status_t setBuffers(bool audio, Vector<MediaBuffer *> &buffers);
 
+    virtual bool isStreaming() const;
+
 protected:
     virtual ~GenericSource();
 
@@ -140,14 +145,13 @@
     sp<DecryptHandle> mDecryptHandle;
     bool mStarted;
     bool mStopRead;
-    String8 mContentType;
-    AString mSniffedMIME;
-    off64_t mMetaDataSize;
     int64_t mBitrate;
     int32_t mPollBufferingGeneration;
     uint32_t mPendingReadBufferTypes;
     bool mBuffering;
     bool mPrepareBuffering;
+    int32_t mPrevBufferPercentage;
+
     mutable Mutex mReadBufferLock;
 
     sp<ALooper> mLooper;
@@ -159,8 +163,6 @@
     int64_t getLastReadPosition();
     void setDrmPlaybackStatusIfNeeded(int playbackStatus, int64_t position);
 
-    status_t prefillCacheIfNecessary();
-
     void notifyPreparedAndCleanup(status_t err);
     void onSecureDecodersInstantiated(status_t err);
     void finishPrepareAsync();
@@ -200,11 +202,14 @@
             media_track_type trackType,
             int64_t seekTimeUs = -1ll, int64_t *actualTimeUs = NULL, bool formatChange = false);
 
+    void queueDiscontinuityIfNeeded(
+            bool seeking, bool formatChange, media_track_type trackType, Track *track);
+
     void schedulePollBuffering();
     void cancelPollBuffering();
     void restartPollBuffering();
     void onPollBuffering();
-    void notifyBufferingUpdate(int percentage);
+    void notifyBufferingUpdate(int32_t percentage);
     void startBufferingIfNecessary();
     void stopBufferingIfNecessary();
     void sendCacheStats();
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
index a26ef9e..126625a 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.cpp
@@ -22,7 +22,6 @@
 
 #include "AnotherPacketSource.h"
 #include "LiveDataSource.h"
-#include "LiveSession.h"
 
 #include <media/IMediaHTTPService.h>
 #include <media/stagefright/foundation/ABuffer.h>
@@ -30,6 +29,7 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaDefs.h>
 
 namespace android {
 
@@ -44,7 +44,10 @@
       mFlags(0),
       mFinalResult(OK),
       mOffset(0),
-      mFetchSubtitleDataGeneration(0) {
+      mFetchSubtitleDataGeneration(0),
+      mFetchMetaDataGeneration(0),
+      mHasMetadata(false),
+      mMetadataSelected(false) {
     if (headers) {
         mExtraHeaders = *headers;
 
@@ -81,7 +84,7 @@
         mLiveLooper->registerHandler(this);
     }
 
-    sp<AMessage> notify = new AMessage(kWhatSessionNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatSessionNotify, this);
 
     mLiveSession = new LiveSession(
             notify,
@@ -142,19 +145,47 @@
 ssize_t NuPlayer::HTTPLiveSource::getSelectedTrack(media_track_type type) const {
     if (mLiveSession == NULL) {
         return -1;
+    } else if (type == MEDIA_TRACK_TYPE_METADATA) {
+        // MEDIA_TRACK_TYPE_METADATA is always last track
+        // mMetadataSelected can only be true when mHasMetadata is true
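+        // e.g. with one audio, one video and one metadata track, getTrackCount()
+        // returns 3 and the metadata track is index 2.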
+        return mMetadataSelected ? (mLiveSession->getTrackCount() - 1) : -1;
     } else {
         return mLiveSession->getSelectedTrack(type);
     }
 }
 
 status_t NuPlayer::HTTPLiveSource::selectTrack(size_t trackIndex, bool select, int64_t /*timeUs*/) {
-    status_t err = mLiveSession->selectTrack(trackIndex, select);
+    if (mLiveSession == NULL) {
+        return INVALID_OPERATION;
+    }
+
+    status_t err = INVALID_OPERATION;
+    bool postFetchMsg = false, isSub = false;
+    if (!mHasMetadata || trackIndex != mLiveSession->getTrackCount() - 1) {
+        err = mLiveSession->selectTrack(trackIndex, select);
+        postFetchMsg = select;
+        isSub = true;
+    } else {
+        // metadata track; i.e. (mHasMetadata && trackIndex == mLiveSession->getTrackCount() - 1)
+        if (mMetadataSelected && !select) {
+            err = OK;
+        } else if (!mMetadataSelected && select) {
+            postFetchMsg = true;
+            err = OK;
+        } else {
+            err = BAD_VALUE; // behave as LiveSession::selectTrack
+        }
+
+        mMetadataSelected = select;
+    }
 
     if (err == OK) {
-        mFetchSubtitleDataGeneration++;
-        if (select) {
-            sp<AMessage> msg = new AMessage(kWhatFetchSubtitleData, id());
-            msg->setInt32("generation", mFetchSubtitleDataGeneration);
+        int32_t &generation = isSub ? mFetchSubtitleDataGeneration : mFetchMetaDataGeneration;
+        generation++;
+        if (postFetchMsg) {
+            int32_t what = isSub ? kWhatFetchSubtitleData : kWhatFetchMetaData;
+            sp<AMessage> msg = new AMessage(what, this);
+            msg->setInt32("generation", generation);
             msg->post();
         }
     }
@@ -169,6 +200,49 @@
     return mLiveSession->seekTo(seekTimeUs);
 }
 
+void NuPlayer::HTTPLiveSource::pollForRawData(
+        const sp<AMessage> &msg, int32_t currentGeneration,
+        LiveSession::StreamType fetchType, int32_t pushWhat) {
+
+    int32_t generation;
+    CHECK(msg->findInt32("generation", &generation));
+
+    if (generation != currentGeneration) {
+        return;
+    }
+
+    sp<ABuffer> buffer;
+    while (mLiveSession->dequeueAccessUnit(fetchType, &buffer) == OK) {
+
+        sp<AMessage> notify = dupNotify();
+        notify->setInt32("what", pushWhat);
+        notify->setBuffer("buffer", buffer);
+
+        int64_t timeUs, baseUs, delayUs;
+        CHECK(buffer->meta()->findInt64("baseUs", &baseUs));
+        CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+        delayUs = baseUs + timeUs - ALooper::GetNowUs();
+
+        if (fetchType == LiveSession::STREAMTYPE_SUBTITLES) {
+            notify->post();
+            msg->post(delayUs > 0ll ? delayUs : 0ll);
+            return;
+        } else if (fetchType == LiveSession::STREAMTYPE_METADATA) {
+            if (delayUs < -1000000ll) { // 1 second
+                continue;
+            }
+            notify->post();
+            // push all currently available metadata buffers in each invocation of pollForRawData
+            // continue;
+        } else {
+            TRESPASS();
+        }
+    }
+
+    // try again in 1 second
+    msg->post(1000000ll);
+}
+
 void NuPlayer::HTTPLiveSource::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatSessionNotify:
@@ -179,33 +253,24 @@
 
         case kWhatFetchSubtitleData:
         {
-            int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
+            pollForRawData(
+                    msg, mFetchSubtitleDataGeneration,
+                    /* fetch */ LiveSession::STREAMTYPE_SUBTITLES,
+                    /* push */ kWhatSubtitleData);
 
-            if (generation != mFetchSubtitleDataGeneration) {
-                // stale
+            break;
+        }
+
+        case kWhatFetchMetaData:
+        {
+            if (!mMetadataSelected) {
                 break;
             }
 
-            sp<ABuffer> buffer;
-            if (mLiveSession->dequeueAccessUnit(
-                    LiveSession::STREAMTYPE_SUBTITLES, &buffer) == OK) {
-                sp<AMessage> notify = dupNotify();
-                notify->setInt32("what", kWhatSubtitleData);
-                notify->setBuffer("buffer", buffer);
-                notify->post();
-
-                int64_t timeUs, baseUs, durationUs, delayUs;
-                CHECK(buffer->meta()->findInt64("baseUs", &baseUs));
-                CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
-                CHECK(buffer->meta()->findInt64("durationUs", &durationUs));
-                delayUs = baseUs + timeUs - ALooper::GetNowUs();
-
-                msg->post(delayUs > 0ll ? delayUs : 0ll);
-            } else {
-                // try again in 1 second
-                msg->post(1000000ll);
-            }
+            pollForRawData(
+                    msg, mFetchMetaDataGeneration,
+                    /* fetch */ LiveSession::STREAMTYPE_METADATA,
+                    /* push */ kWhatTimedMetaData);
 
             break;
         }
@@ -281,6 +346,47 @@
             break;
         }
 
+        case LiveSession::kWhatBufferingStart:
+        {
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", kWhatPauseOnBufferingStart);
+            notify->post();
+            break;
+        }
+
+        case LiveSession::kWhatBufferingEnd:
+        {
+            sp<AMessage> notify = dupNotify();
+            notify->setInt32("what", kWhatResumeOnBufferingEnd);
+            notify->post();
+            break;
+        }
+
+
+        case LiveSession::kWhatBufferingUpdate:
+        {
+            sp<AMessage> notify = dupNotify();
+            int32_t percentage;
+            CHECK(msg->findInt32("percentage", &percentage));
+            notify->setInt32("what", kWhatBufferingUpdate);
+            notify->setInt32("percentage", percentage);
+            notify->post();
+            break;
+        }
+
+        case LiveSession::kWhatMetadataDetected:
+        {
+            if (!mHasMetadata) {
+                mHasMetadata = true;
+
+                sp<AMessage> notify = dupNotify();
+                // notification without buffer triggers MEDIA_INFO_METADATA_UPDATE
+                notify->setInt32("what", kWhatTimedMetaData);
+                notify->post();
+            }
+            break;
+        }
+
         case LiveSession::kWhatError:
         {
             break;
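
The refactored subtitle/metadata polling above rests on two small conventions: a per-track generation counter that invalidates polls scheduled before a reselection, and re-posting the poll message with a delay (the buffer's timestamp delta, or one second when nothing is queued). Below is a minimal standalone sketch of the generation-counter part only, in plain C++ rather than the AMessage/ALooper types from the patch; the names are illustrative, not AOSP API.

#include <cstdint>
#include <cstdio>

// Poll message as scheduled by selectTrack(); only the generation matters here.
struct PollMessage {
    int32_t generation;
};

struct FetchState {
    int32_t currentGeneration = 0;

    // selectTrack() bumps the generation, invalidating polls already in flight.
    void onTrackReselected() { ++currentGeneration; }

    // A delivered poll is stale if a reselection happened after it was scheduled.
    bool isStale(const PollMessage &msg) const {
        return msg.generation != currentGeneration;
    }
};

int main() {
    FetchState state;
    PollMessage scheduled{state.currentGeneration};  // poll queued for the current selection

    state.onTrackReselected();                       // track switched before the poll fired

    std::printf("stale: %s\n", state.isStale(scheduled) ? "yes" : "no");  // prints "stale: yes"
    return 0;
}
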
diff --git a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
index bbb8981..9e0ec2f 100644
--- a/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
+++ b/media/libmediaplayerservice/nuplayer/HTTPLiveSource.h
@@ -21,6 +21,8 @@
 #include "NuPlayer.h"
 #include "NuPlayerSource.h"
 
+#include "LiveSession.h"
+
 namespace android {
 
 struct LiveSession;
@@ -60,6 +62,7 @@
     enum {
         kWhatSessionNotify,
         kWhatFetchSubtitleData,
+        kWhatFetchMetaData,
     };
 
     sp<IMediaHTTPService> mHTTPService;
@@ -71,8 +74,14 @@
     sp<ALooper> mLiveLooper;
     sp<LiveSession> mLiveSession;
     int32_t mFetchSubtitleDataGeneration;
+    int32_t mFetchMetaDataGeneration;
+    bool mHasMetadata;
+    bool mMetadataSelected;
 
     void onSessionNotify(const sp<AMessage> &msg);
+    void pollForRawData(
+            const sp<AMessage> &msg, int32_t currentGeneration,
+            LiveSession::StreamType fetchType, int32_t pushWhat);
 
     DISALLOW_EVIL_CONSTRUCTORS(HTTPLiveSource);
 };
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index aeea204..77b9799 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -37,6 +37,9 @@
 
 #include <cutils/properties.h>
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
 #include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -45,7 +48,9 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
+
 #include <gui/IGraphicBufferProducer.h>
+#include <gui/Surface.h>
 
 #include "avc_utils.h"
 
@@ -64,18 +69,16 @@
 };
 
 struct NuPlayer::SeekAction : public Action {
-    SeekAction(int64_t seekTimeUs, bool needNotify)
-        : mSeekTimeUs(seekTimeUs),
-          mNeedNotify(needNotify) {
+    SeekAction(int64_t seekTimeUs)
+        : mSeekTimeUs(seekTimeUs) {
     }
 
     virtual void execute(NuPlayer *player) {
-        player->performSeek(mSeekTimeUs, mNeedNotify);
+        player->performSeek(mSeekTimeUs);
     }
 
 private:
     int64_t mSeekTimeUs;
-    bool mNeedNotify;
 
     DISALLOW_EVIL_CONSTRUCTORS(SeekAction);
 };
@@ -96,16 +99,16 @@
 };
 
 struct NuPlayer::SetSurfaceAction : public Action {
-    SetSurfaceAction(const sp<NativeWindowWrapper> &wrapper)
-        : mWrapper(wrapper) {
+    SetSurfaceAction(const sp<Surface> &surface)
+        : mSurface(surface) {
     }
 
     virtual void execute(NuPlayer *player) {
-        player->performSetSurface(mWrapper);
+        player->performSetSurface(mSurface);
     }
 
 private:
-    sp<NativeWindowWrapper> mWrapper;
+    sp<Surface> mSurface;
 
     DISALLOW_EVIL_CONSTRUCTORS(SetSurfaceAction);
 };
@@ -163,13 +166,15 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-NuPlayer::NuPlayer()
+NuPlayer::NuPlayer(pid_t pid)
     : mUIDValid(false),
+      mPID(pid),
       mSourceFlags(0),
       mOffloadAudio(false),
       mAudioDecoderGeneration(0),
       mVideoDecoderGeneration(0),
       mRendererGeneration(0),
+      mPreviousSeekTimeUs(0),
       mAudioEOS(false),
       mVideoEOS(false),
       mScanSourcesPending(false),
@@ -180,9 +185,13 @@
       mFlushingVideo(NONE),
       mResumePending(false),
       mVideoScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW),
+      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
+      mVideoFpsHint(-1.f),
       mStarted(false),
+      mSourceStarted(false),
       mPaused(false),
-      mPausedByClient(false) {
+      mPausedByClient(false),
+      mPausedForBuffering(false) {
     clearFlushComplete();
 }
 
@@ -199,9 +208,9 @@
 }
 
 void NuPlayer::setDataSourceAsync(const sp<IStreamSource> &source) {
-    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
+    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
 
-    sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
 
     msg->setObject("source", new StreamingSource(notify, source));
     msg->post();
@@ -229,10 +238,10 @@
         const char *url,
         const KeyedVector<String8, String8> *headers) {
 
-    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
+    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
     size_t len = strlen(url);
 
-    sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
 
     sp<Source> source;
     if (IsHTTPLiveURL(url)) {
@@ -266,9 +275,9 @@
 }
 
 void NuPlayer::setDataSourceAsync(int fd, int64_t offset, int64_t length) {
-    sp<AMessage> msg = new AMessage(kWhatSetDataSource, id());
+    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
 
-    sp<AMessage> notify = new AMessage(kWhatSourceNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
 
     sp<GenericSource> source =
             new GenericSource(notify, mUIDValid, mUID);
@@ -284,38 +293,108 @@
     msg->post();
 }
 
+void NuPlayer::setDataSourceAsync(const sp<DataSource> &dataSource) {
+    sp<AMessage> msg = new AMessage(kWhatSetDataSource, this);
+    sp<AMessage> notify = new AMessage(kWhatSourceNotify, this);
+
+    sp<GenericSource> source = new GenericSource(notify, mUIDValid, mUID);
+    status_t err = source->setDataSource(dataSource);
+
+    if (err != OK) {
+        ALOGE("Failed to set data source!");
+        source = NULL;
+    }
+
+    msg->setObject("source", source);
+    msg->post();
+}
+
 void NuPlayer::prepareAsync() {
-    (new AMessage(kWhatPrepare, id()))->post();
+    (new AMessage(kWhatPrepare, this))->post();
 }
 
 void NuPlayer::setVideoSurfaceTextureAsync(
         const sp<IGraphicBufferProducer> &bufferProducer) {
-    sp<AMessage> msg = new AMessage(kWhatSetVideoNativeWindow, id());
+    sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
 
     if (bufferProducer == NULL) {
-        msg->setObject("native-window", NULL);
+        msg->setObject("surface", NULL);
     } else {
-        msg->setObject(
-                "native-window",
-                new NativeWindowWrapper(
-                    new Surface(bufferProducer, true /* controlledByApp */)));
+        msg->setObject("surface", new Surface(bufferProducer, true /* controlledByApp */));
     }
 
     msg->post();
 }
 
 void NuPlayer::setAudioSink(const sp<MediaPlayerBase::AudioSink> &sink) {
-    sp<AMessage> msg = new AMessage(kWhatSetAudioSink, id());
+    sp<AMessage> msg = new AMessage(kWhatSetAudioSink, this);
     msg->setObject("sink", sink);
     msg->post();
 }
 
 void NuPlayer::start() {
-    (new AMessage(kWhatStart, id()))->post();
+    (new AMessage(kWhatStart, this))->post();
+}
+
+status_t NuPlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    // do some cursory validation of the settings here. audio modes are
+    // only validated when set on the audiosink.
+     if ((rate.mSpeed != 0.f && rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN)
+            || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
+            || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
+            || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
+        return BAD_VALUE;
+    }
+    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
+    writeToAMessage(msg, rate);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, rate);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
+    writeToAMessage(msg, sync, videoFpsHint);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::getSyncSettings(
+        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, sync, videoFps);
+        }
+    }
+    return err;
 }
 
 void NuPlayer::pause() {
-    (new AMessage(kWhatPause, id()))->post();
+    (new AMessage(kWhatPause, this))->post();
 }
 
 void NuPlayer::resetAsync() {
@@ -329,11 +408,11 @@
         mSource->disconnect();
     }
 
-    (new AMessage(kWhatReset, id()))->post();
+    (new AMessage(kWhatReset, this))->post();
 }
 
 void NuPlayer::seekToAsync(int64_t seekTimeUs, bool needNotify) {
-    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("seekTimeUs", seekTimeUs);
     msg->setInt32("needNotify", needNotify);
     msg->post();
@@ -345,23 +424,35 @@
     int32_t trackType;
     CHECK(format->findInt32("type", &trackType));
 
+    AString mime;
+    if (!format->findString("mime", &mime)) {
+        // Java MediaPlayer only uses mimetype for subtitle and timedtext tracks.
+        // If we can't find the mimetype here it means that we wouldn't be needing
+        // the mimetype on the Java end. We still write a placeholder mime to keep the
+        // (de)serialization logic simple.
+        if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
+            mime = "audio/";
+        } else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
+            mime = "video/";
+        } else {
+            TRESPASS();
+        }
+    }
+
     AString lang;
     CHECK(format->findString("language", &lang));
 
     reply->writeInt32(2); // write something non-zero
     reply->writeInt32(trackType);
+    reply->writeString16(String16(mime.c_str()));
     reply->writeString16(String16(lang.c_str()));
 
     if (trackType == MEDIA_TRACK_TYPE_SUBTITLE) {
-        AString mime;
-        CHECK(format->findString("mime", &mime));
-
         int32_t isAuto, isDefault, isForced;
         CHECK(format->findInt32("auto", &isAuto));
         CHECK(format->findInt32("default", &isDefault));
         CHECK(format->findInt32("forced", &isForced));
 
-        reply->writeString16(String16(mime.c_str()));
         reply->writeInt32(isAuto);
         reply->writeInt32(isDefault);
         reply->writeInt32(isForced);
@@ -401,7 +492,7 @@
 
         case kWhatGetTrackInfo:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             Parcel* reply;
@@ -454,7 +545,7 @@
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
             response->postReply(replyID);
             break;
@@ -462,7 +553,7 @@
 
         case kWhatSelectTrack:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             size_t trackIndex;
@@ -532,15 +623,25 @@
             break;
         }
 
-        case kWhatSetVideoNativeWindow:
+        case kWhatSetVideoSurface:
         {
-            ALOGV("kWhatSetVideoNativeWindow");
 
             sp<RefBase> obj;
-            CHECK(msg->findObject("native-window", &obj));
+            CHECK(msg->findObject("surface", &obj));
+            sp<Surface> surface = static_cast<Surface *>(obj.get());
 
-            if (mSource == NULL || mSource->getFormat(false /* audio */) == NULL) {
-                performSetSurface(static_cast<NativeWindowWrapper *>(obj.get()));
+            ALOGD("onSetVideoSurface(%p, %s video decoder)",
+                    surface.get(),
+                    (mSource != NULL && mStarted && mSource->getFormat(false /* audio */) != NULL
+                            && mVideoDecoder != NULL) ? "have" : "no");
+
+            // Need to check mStarted before calling mSource->getFormat because NuPlayer might
+            // be in preparing state and it could take long time.
+            // When mStarted is true, mSource must have been set.
+            if (mSource == NULL || !mStarted || mSource->getFormat(false /* audio */) == NULL
+                    // NOTE: mVideoDecoder's mSurface is always non-null
+                    || (mVideoDecoder != NULL && mVideoDecoder->setVideoSurface(surface) == OK)) {
+                performSetSurface(surface);
                 break;
             }
 
@@ -548,11 +649,9 @@
                     new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
                                            FLUSH_CMD_SHUTDOWN /* video */));
 
-            mDeferredActions.push_back(
-                    new SetSurfaceAction(
-                        static_cast<NativeWindowWrapper *>(obj.get())));
+            mDeferredActions.push_back(new SetSurfaceAction(surface));
 
-            if (obj != NULL) {
+            if (obj != NULL || mAudioDecoder != NULL) {
                 if (mStarted) {
                     // Issue a seek to refresh the video screen only if started otherwise
                     // the extractor may not yet be started and will assert.
@@ -561,7 +660,7 @@
                     int64_t currentPositionUs = 0;
                     if (getCurrentPosition(&currentPositionUs) == OK) {
                         mDeferredActions.push_back(
-                                new SeekAction(currentPositionUs, false /* needNotify */));
+                                new SeekAction(currentPositionUs));
                     }
                 }
 
@@ -596,7 +695,10 @@
         {
             ALOGV("kWhatStart");
             if (mStarted) {
-                onResume();
+                // do not resume yet if the source is still buffering
+                if (!mPausedForBuffering) {
+                    onResume();
+                }
             } else {
                 onStart();
             }
@@ -604,6 +706,117 @@
             break;
         }
 
+        case kWhatConfigPlayback:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate /* sanitized */;
+            readFromAMessage(msg, &rate);
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->setPlaybackSettings(rate);
+            }
+            if (err == OK) {
+                if (rate.mSpeed == 0.f) {
+                    onPause();
+                    // save all other settings (using non-paused speed)
+                    // so we can restore them on start
+                    AudioPlaybackRate newRate = rate;
+                    newRate.mSpeed = mPlaybackSettings.mSpeed;
+                    mPlaybackSettings = newRate;
+                } else { /* rate.mSpeed != 0.f */
+                    onResume();
+                    mPlaybackSettings = rate;
+                }
+            }
+
+            if (mVideoDecoder != NULL) {
+                float rate = getFrameRate();
+                if (rate > 0) {
+                    sp<AMessage> params = new AMessage();
+                    params->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
+                    mVideoDecoder->setParameters(params);
+                }
+            }
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetPlaybackSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate = mPlaybackSettings;
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->getPlaybackSettings(&rate);
+            }
+            if (err == OK) {
+                // get playback settings used by renderer, as it may be
+                // slightly off due to audiosink not taking small changes.
+                mPlaybackSettings = rate;
+                if (mPaused) {
+                    rate.mSpeed = 0.f;
+                }
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, rate);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatConfigSync:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatConfigSync");
+            AVSyncSettings sync;
+            float videoFpsHint;
+            readFromAMessage(msg, &sync, &videoFpsHint);
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->setSyncSettings(sync, videoFpsHint);
+            }
+            if (err == OK) {
+                mSyncSettings = sync;
+                mVideoFpsHint = videoFpsHint;
+            }
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetSyncSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AVSyncSettings sync = mSyncSettings;
+            float videoFps = mVideoFpsHint;
+            status_t err = OK;
+            if (mRenderer != NULL) {
+                err = mRenderer->getSyncSettings(&sync, &videoFps);
+                if (err == OK) {
+                    mSyncSettings = sync;
+                    mVideoFpsHint = videoFps;
+                }
+            }
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, sync, videoFps);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         case kWhatScanSources:
         {
             int32_t generation;
@@ -623,26 +836,12 @@
 
             // initialize video before audio because successful initialization of
             // video may change deep buffer mode of audio.
-            if (mNativeWindow != NULL) {
+            if (mSurface != NULL) {
                 instantiateDecoder(false, &mVideoDecoder);
             }
 
             // Don't try to re-open audio sink if there's an existing decoder.
             if (mAudioSink != NULL && mAudioDecoder == NULL) {
-                sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
-                sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
-                audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
-                const bool hasVideo = (videoFormat != NULL);
-                const bool canOffload = canOffloadStream(
-                        audioMeta, hasVideo, true /* is_streaming */, streamType);
-                if (canOffload) {
-                    if (!mOffloadAudio) {
-                        mRenderer->signalEnableOffloadAudio();
-                    }
-                    // open audio sink early under offload mode.
-                    sp<AMessage> format = mSource->getFormat(true /*audio*/);
-                    tryOpenAudioSinkForOffload(format, hasVideo);
-                }
                 instantiateDecoder(true, &mAudioDecoder);
             }
 
@@ -671,7 +870,7 @@
             }
 
             if ((mAudioDecoder == NULL && mAudioSink != NULL)
-                    || (mVideoDecoder == NULL && mNativeWindow != NULL)) {
+                    || (mVideoDecoder == NULL && mSurface != NULL)) {
                 msg->post(100000ll);
                 mScanSourcesPending = true;
             }
@@ -810,6 +1009,7 @@
                         // Widevine source reads must stop before releasing the video decoder.
                         if (!audio && mSource != NULL && mSourceFlags & Source::FLAG_SECURE) {
                             mSource->stop();
+                            mSourceStarted = false;
                         }
                         getDecoder(audio)->initiateShutdown(); // In the middle of a seek.
                         *flushing = SHUTTING_DOWN_DECODER;     // Shut down.
@@ -876,6 +1076,11 @@
                 CHECK(msg->findInt32("audio", &audio));
 
                 ALOGV("renderer %s flush completed.", audio ? "audio" : "video");
+                if (audio && (mFlushingAudio == NONE || mFlushingAudio == FLUSHED
+                        || mFlushingAudio == SHUT_DOWN)) {
+                    // Flush has been handled by tear down.
+                    break;
+                }
                 handleFlushComplete(audio, false /* isDecoder */);
                 finishFlushIfPossible();
             } else if (what == Renderer::kWhatVideoRenderingStart) {
@@ -883,15 +1088,28 @@
             } else if (what == Renderer::kWhatMediaRenderingStart) {
                 ALOGV("media rendering started");
                 notifyListener(MEDIA_STARTED, 0, 0);
-            } else if (what == Renderer::kWhatAudioOffloadTearDown) {
-                ALOGV("Tear down audio offload, fall back to s/w path if due to error.");
-                int64_t positionUs;
-                CHECK(msg->findInt64("positionUs", &positionUs));
+            } else if (what == Renderer::kWhatAudioTearDown) {
                 int32_t reason;
                 CHECK(msg->findInt32("reason", &reason));
-                closeAudioSink();
+                ALOGV("Tear down audio with reason %d.", reason);
                 mAudioDecoder.clear();
                 ++mAudioDecoderGeneration;
+                bool needsToCreateAudioDecoder = true;
+                if (mFlushingAudio == FLUSHING_DECODER) {
+                    mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
+                    mFlushingAudio = FLUSHED;
+                    finishFlushIfPossible();
+                } else if (mFlushingAudio == FLUSHING_DECODER_SHUTDOWN
+                        || mFlushingAudio == SHUTTING_DOWN_DECODER) {
+                    mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
+                    mFlushingAudio = SHUT_DOWN;
+                    finishFlushIfPossible();
+                    needsToCreateAudioDecoder = false;
+                }
+                if (mRenderer == NULL) {
+                    break;
+                }
+                closeAudioSink();
                 mRenderer->flush(
                         true /* audio */, false /* notifyComplete */);
                 if (mVideoDecoder != NULL) {
@@ -899,10 +1117,13 @@
                             false /* audio */, false /* notifyComplete */);
                 }
 
-                performSeek(positionUs, false /* needNotify */);
-                if (reason == Renderer::kDueToError) {
-                    mRenderer->signalDisableOffloadAudio();
-                    mOffloadAudio = false;
+                int64_t positionUs;
+                if (!msg->findInt64("positionUs", &positionUs)) {
+                    positionUs = mPreviousSeekTimeUs;
+                }
+                performSeek(positionUs);
+
+                if (reason == Renderer::kDueToError && needsToCreateAudioDecoder) {
                     instantiateDecoder(true /* audio */, &mAudioDecoder);
                 }
             }
@@ -938,14 +1159,31 @@
             CHECK(msg->findInt32("needNotify", &needNotify));
 
             ALOGV("kWhatSeek seekTimeUs=%lld us, needNotify=%d",
-                    seekTimeUs, needNotify);
+                    (long long)seekTimeUs, needNotify);
+
+            if (!mStarted) {
+                // Seek before the player is started. In order to preview video,
+                // need to start the player and pause it. This branch is called
+                // only once if needed. After the player is started, any seek
+                // operation will go through normal path.
+                // Audio-only cases are handled separately.
+                onStart(seekTimeUs);
+                if (mStarted) {
+                    onPause();
+                    mPausedByClient = true;
+                }
+                if (needNotify) {
+                    notifyDriverSeekComplete();
+                }
+                break;
+            }
 
             mDeferredActions.push_back(
                     new FlushDecoderAction(FLUSH_CMD_FLUSH /* audio */,
                                            FLUSH_CMD_FLUSH /* video */));
 
             mDeferredActions.push_back(
-                    new SeekAction(seekTimeUs, needNotify));
+                    new SeekAction(seekTimeUs));
 
             // After a flush without shutdown, decoder is paused.
             // Don't resume it until source seek is done, otherwise it could
@@ -1017,7 +1255,7 @@
 
     // TRICKY: We rely on mRenderer being null, so that decoder does not start requesting
     // data on instantiation.
-    if (mNativeWindow != NULL) {
+    if (mSurface != NULL) {
         err = instantiateDecoder(false, &mVideoDecoder);
         if (err != OK) {
             return err;
@@ -1033,14 +1271,23 @@
     return OK;
 }
 
-void NuPlayer::onStart() {
+void NuPlayer::onStart(int64_t startPositionUs) {
+    if (!mSourceStarted) {
+        mSourceStarted = true;
+        mSource->start();
+    }
+    if (startPositionUs > 0) {
+        performSeek(startPositionUs);
+        if (mSource->getFormat(false /* audio */) == NULL) {
+            return;
+        }
+    }
+
     mOffloadAudio = false;
     mAudioEOS = false;
     mVideoEOS = false;
     mStarted = true;
 
-    mSource->start();
-
     uint32_t flags = 0;
 
     if (mSource->isRealTime()) {
@@ -1056,26 +1303,30 @@
     sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
 
     mOffloadAudio =
-        canOffloadStream(audioMeta, (videoFormat != NULL),
-                         true /* is_streaming */, streamType);
+        canOffloadStream(audioMeta, (videoFormat != NULL), mSource->isStreaming(), streamType);
     if (mOffloadAudio) {
         flags |= Renderer::FLAG_OFFLOAD_AUDIO;
     }
 
-    sp<AMessage> notify = new AMessage(kWhatRendererNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatRendererNotify, this);
     ++mRendererGeneration;
     notify->setInt32("generation", mRendererGeneration);
     mRenderer = new Renderer(mAudioSink, notify, flags);
-
     mRendererLooper = new ALooper;
     mRendererLooper->setName("NuPlayerRenderer");
     mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
     mRendererLooper->registerHandler(mRenderer);
 
-    sp<MetaData> meta = getFileMeta();
-    int32_t rate;
-    if (meta != NULL
-            && meta->findInt32(kKeyFrameRate, &rate) && rate > 0) {
+    status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings);
+    if (err != OK) {
+        mSource->stop();
+        mSourceStarted = false;
+        notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+        return;
+    }
+
+    float rate = getFrameRate();
+    if (rate > 0) {
         mRenderer->setVideoFrameRate(rate);
     }
 
@@ -1137,6 +1388,7 @@
                 // Widevine source reads must stop before releasing the video decoder.
                 if (mSource != NULL && mSourceFlags & Source::FLAG_SECURE) {
                     mSource->stop();
+                    mSourceStarted = false;
                 }
             }
             getDecoder(audio)->initiateShutdown();
@@ -1176,7 +1428,7 @@
         return;
     }
 
-    sp<AMessage> msg = new AMessage(kWhatScanSources, id());
+    sp<AMessage> msg = new AMessage(kWhatScanSources, this);
     msg->setInt32("generation", mScanSourcesGeneration);
     msg->post();
 
@@ -1203,6 +1455,38 @@
     mRenderer->closeAudioSink();
 }
 
+void NuPlayer::determineAudioModeChange() {
+    if (mSource == NULL || mAudioSink == NULL) {
+        return;
+    }
+
+    if (mRenderer == NULL) {
+        ALOGW("No renderer can be used to determine audio mode. Use non-offload for safety.");
+        mOffloadAudio = false;
+        return;
+    }
+
+    sp<MetaData> audioMeta = mSource->getFormatMeta(true /* audio */);
+    sp<AMessage> videoFormat = mSource->getFormat(false /* audio */);
+    audio_stream_type_t streamType = mAudioSink->getAudioStreamType();
+    const bool hasVideo = (videoFormat != NULL);
+    const bool canOffload = canOffloadStream(
+            audioMeta, hasVideo, mSource->isStreaming(), streamType);
+    if (canOffload) {
+        if (!mOffloadAudio) {
+            mRenderer->signalEnableOffloadAudio();
+        }
+        // open audio sink early under offload mode.
+        sp<AMessage> format = mSource->getFormat(true /*audio*/);
+        tryOpenAudioSinkForOffload(format, hasVideo);
+    } else {
+        if (mOffloadAudio) {
+            mRenderer->signalDisableOffloadAudio();
+            mOffloadAudio = false;
+        }
+    }
+}
+
 status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
     if (*decoder != NULL) {
         return OK;
@@ -1214,11 +1498,13 @@
         return -EWOULDBLOCK;
     }
 
+    format->setInt32("priority", 0 /* realtime */);
+
     if (!audio) {
         AString mime;
         CHECK(format->findString("mime", &mime));
 
-        sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, id());
+        sp<AMessage> ccNotify = new AMessage(kWhatClosedCaptionNotify, this);
         if (mCCDecoder == NULL) {
             mCCDecoder = new CCDecoder(ccNotify);
         }
@@ -1230,28 +1516,36 @@
         if (mSourceFlags & Source::FLAG_PROTECTED) {
             format->setInt32("protected", true);
         }
+
+        float rate = getFrameRate();
+        if (rate > 0) {
+            format->setFloat("operating-rate", rate * mPlaybackSettings.mSpeed);
+        }
     }
 
     if (audio) {
-        sp<AMessage> notify = new AMessage(kWhatAudioNotify, id());
+        sp<AMessage> notify = new AMessage(kWhatAudioNotify, this);
         ++mAudioDecoderGeneration;
         notify->setInt32("generation", mAudioDecoderGeneration);
 
+        determineAudioModeChange();
         if (mOffloadAudio) {
+            const bool hasVideo = (mSource->getFormat(false /*audio */) != NULL);
+            format->setInt32("has-video", hasVideo);
             *decoder = new DecoderPassThrough(notify, mSource, mRenderer);
         } else {
-            *decoder = new Decoder(notify, mSource, mRenderer);
+            *decoder = new Decoder(notify, mSource, mPID, mRenderer);
         }
     } else {
-        sp<AMessage> notify = new AMessage(kWhatVideoNotify, id());
+        sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
         ++mVideoDecoderGeneration;
         notify->setInt32("generation", mVideoDecoderGeneration);
 
         *decoder = new Decoder(
-                notify, mSource, mRenderer, mNativeWindow, mCCDecoder);
+                notify, mSource, mPID, mRenderer, mSurface, mCCDecoder);
 
         // enable FRC if high-quality AV sync is requested, even if not
-        // queuing to native window, as this will even improve textureview
+        // directly queuing to display, as this will even improve textureview
         // playback.
         {
             char value[PROPERTY_VALUE_MAX];
@@ -1299,8 +1593,6 @@
     }
 
     int32_t displayWidth, displayHeight;
-    int32_t cropLeft, cropTop, cropRight, cropBottom;
-
     if (outputFormat != NULL) {
         int32_t width, height;
         CHECK(outputFormat->findInt32("width", &width));
@@ -1382,7 +1674,11 @@
 
     // Make sure we don't continue to scan sources until we finish flushing.
     ++mScanSourcesGeneration;
-    mScanSourcesPending = false;
+    if (mScanSourcesPending) {
+        mDeferredActions.push_back(
+                new SimpleAction(&NuPlayer::performScanSources));
+        mScanSourcesPending = false;
+    }
 
     decoder->signalFlush();
 
@@ -1421,9 +1717,8 @@
 
 status_t NuPlayer::setVideoScalingMode(int32_t mode) {
     mVideoScalingMode = mode;
-    if (mNativeWindow != NULL) {
-        status_t ret = native_window_set_scaling_mode(
-                mNativeWindow->getNativeWindow().get(), mVideoScalingMode);
+    if (mSurface != NULL) {
+        status_t ret = native_window_set_scaling_mode(mSurface.get(), mVideoScalingMode);
         if (ret != OK) {
             ALOGE("Failed to set scaling mode (%d): %s",
                 -ret, strerror(-ret));
@@ -1434,7 +1729,7 @@
 }
 
 status_t NuPlayer::getTrackInfo(Parcel* reply) const {
-    sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, id());
+    sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
     msg->setPointer("reply", reply);
 
     sp<AMessage> response;
@@ -1443,7 +1738,7 @@
 }
 
 status_t NuPlayer::getSelectedTrack(int32_t type, Parcel* reply) const {
-    sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, id());
+    sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
     msg->setPointer("reply", reply);
     msg->setInt32("type", type);
 
@@ -1456,7 +1751,7 @@
 }
 
 status_t NuPlayer::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSelectTrack, id());
+    sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
     msg->setSize("trackIndex", trackIndex);
     msg->setInt32("select", select);
     msg->setInt64("timeUs", timeUs);
@@ -1484,13 +1779,15 @@
     return renderer->getCurrentPosition(mediaUs);
 }
 
-void NuPlayer::getStats(int64_t *numFramesTotal, int64_t *numFramesDropped) {
-    sp<DecoderBase> decoder = getDecoder(false /* audio */);
-    if (decoder != NULL) {
-        decoder->getStats(numFramesTotal, numFramesDropped);
-    } else {
-        *numFramesTotal = 0;
-        *numFramesDropped = 0;
+void NuPlayer::getStats(Vector<sp<AMessage> > *mTrackStats) {
+    CHECK(mTrackStats != NULL);
+
+    mTrackStats->clear();
+    if (mVideoDecoder != NULL) {
+        mTrackStats->push_back(mVideoDecoder->getStats());
+    }
+    if (mAudioDecoder != NULL) {
+        mTrackStats->push_back(mAudioDecoder->getStats());
     }
 }
 
@@ -1498,8 +1795,30 @@
     return mSource->getFileFormatMeta();
 }
 
+float NuPlayer::getFrameRate() {
+    sp<MetaData> meta = mSource->getFormatMeta(false /* audio */);
+    if (meta == NULL) {
+        return 0;
+    }
+    int32_t rate;
+    if (!meta->findInt32(kKeyFrameRate, &rate)) {
+        // fall back to try file meta
+        sp<MetaData> fileMeta = getFileMeta();
+        if (fileMeta == NULL) {
+            ALOGW("source has video meta but not file meta");
+            return -1;
+        }
+        int32_t fileMetaRate;
+        if (!fileMeta->findInt32(kKeyFrameRate, &fileMetaRate)) {
+            return -1;
+        }
+        return fileMetaRate;
+    }
+    return rate;
+}
+
 void NuPlayer::schedulePollDuration() {
-    sp<AMessage> msg = new AMessage(kWhatPollDuration, id());
+    sp<AMessage> msg = new AMessage(kWhatPollDuration, this);
     msg->setInt32("generation", mPollDurationGeneration);
     msg->post();
 }
@@ -1531,11 +1850,10 @@
     }
 }
 
-void NuPlayer::performSeek(int64_t seekTimeUs, bool needNotify) {
-    ALOGV("performSeek seekTimeUs=%lld us (%.2f secs), needNotify(%d)",
-          seekTimeUs,
-          seekTimeUs / 1E6,
-          needNotify);
+void NuPlayer::performSeek(int64_t seekTimeUs) {
+    ALOGV("performSeek seekTimeUs=%lld us (%.2f secs)",
+          (long long)seekTimeUs,
+          seekTimeUs / 1E6);
 
     if (mSource == NULL) {
         // This happens when reset occurs right before the loop mode
@@ -1545,6 +1863,7 @@
                 mAudioDecoder.get(), mVideoDecoder.get());
         return;
     }
+    mPreviousSeekTimeUs = seekTimeUs;
     mSource->seekTo(seekTimeUs);
     ++mTimedTextGeneration;
 
@@ -1603,6 +1922,7 @@
     }
 
     mStarted = false;
+    mSourceStarted = false;
 }
 
 void NuPlayer::performScanSources() {
@@ -1617,10 +1937,10 @@
     }
 }
 
-void NuPlayer::performSetSurface(const sp<NativeWindowWrapper> &wrapper) {
+void NuPlayer::performSetSurface(const sp<Surface> &surface) {
     ALOGV("performSetSurface");
 
-    mNativeWindow = wrapper;
+    mSurface = surface;
 
     // XXX - ignore error from setVideoScalingMode for now
     setVideoScalingMode(mVideoScalingMode);
@@ -1660,11 +1980,15 @@
 void NuPlayer::finishResume() {
     if (mResumePending) {
         mResumePending = false;
-        if (mDriver != NULL) {
-            sp<NuPlayerDriver> driver = mDriver.promote();
-            if (driver != NULL) {
-                driver->notifySeekComplete();
-            }
+        notifyDriverSeekComplete();
+    }
+}
+
+void NuPlayer::notifyDriverSeekComplete() {
+    if (mDriver != NULL) {
+        sp<NuPlayerDriver> driver = mDriver.promote();
+        if (driver != NULL) {
+            driver->notifySeekComplete();
         }
     }
 }
@@ -1773,9 +2097,10 @@
         case Source::kWhatPauseOnBufferingStart:
         {
             // ignore if not playing
-            if (mStarted && !mPausedByClient) {
+            if (mStarted) {
                 ALOGI("buffer low, pausing...");
 
+                mPausedForBuffering = true;
                 onPause();
             }
             // fall-thru
@@ -1790,10 +2115,15 @@
         case Source::kWhatResumeOnBufferingEnd:
         {
             // ignore if not playing
-            if (mStarted && !mPausedByClient) {
+            if (mStarted) {
                 ALOGI("buffer ready, resuming...");
 
-                onResume();
+                mPausedForBuffering = false;
+
+                // do not resume yet if client didn't unpause
+                if (!mPausedByClient) {
+                    onResume();
+                }
             }
             // fall-thru
         }
@@ -1822,6 +2152,17 @@
             break;
         }
 
+        case Source::kWhatTimedMetaData:
+        {
+            sp<ABuffer> buffer;
+            if (!msg->findBuffer("buffer", &buffer)) {
+                notifyListener(MEDIA_INFO, MEDIA_INFO_METADATA_UPDATE, 0);
+            } else {
+                sendTimedMetaData(buffer);
+            }
+            break;
+        }
+
         case Source::kWhatTimedTextData:
         {
             int32_t generation;
@@ -1930,6 +2271,19 @@
     notifyListener(MEDIA_SUBTITLE_DATA, 0, 0, &in);
 }
 
+void NuPlayer::sendTimedMetaData(const sp<ABuffer> &buffer) {
+    int64_t timeUs;
+    CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+
+    Parcel in;
+    in.writeInt64(timeUs);
+    in.writeInt32(buffer->size());
+    in.writeInt32(buffer->size());
+    in.write(buffer->data(), buffer->size());
+
+    notifyListener(MEDIA_META_DATA, 0, 0, &in);
+}
+
 void NuPlayer::sendTimedTextData(const sp<ABuffer> &buffer) {
     const void *data;
     size_t size = 0;
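
For reference, sendTimedMetaData() in the hunk above writes each timed metadata sample into the listener Parcel as: the 64-bit presentation time in microseconds, the payload size twice as 32-bit values, then the raw bytes. A standalone sketch of that field order follows, using a plain byte vector in host byte order instead of android::Parcel; why the size appears twice is not stated in the patch, so the sketch just mirrors the order shown.

#include <cstddef>
#include <cstdint>
#include <vector>

static void append(std::vector<uint8_t> &out, const void *p, size_t n) {
    const uint8_t *b = static_cast<const uint8_t *>(p);
    out.insert(out.end(), b, b + n);
}

// Field order mirrored from sendTimedMetaData(): timeUs, size, size again, payload bytes.
std::vector<uint8_t> packTimedMetaData(int64_t timeUs, const std::vector<uint8_t> &payload) {
    std::vector<uint8_t> out;
    const int32_t size = static_cast<int32_t>(payload.size());
    append(out, &timeUs, sizeof(timeUs));
    append(out, &size, sizeof(size));
    append(out, &size, sizeof(size));
    if (!payload.empty()) {
        append(out, payload.data(), payload.size());
    }
    return out;
}

int main() {
    const std::vector<uint8_t> payload = {0x49, 0x44, 0x33};  // arbitrary example bytes
    return packTimedMetaData(123456789ll, payload).size() == 8 + 4 + 4 + payload.size() ? 0 : 1;
}
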
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 30ede1a..c9f0bbd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -18,19 +18,22 @@
 
 #define NU_PLAYER_H_
 
+#include <media/AudioResamplerPublic.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/foundation/AHandler.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 
 namespace android {
 
 struct ABuffer;
 struct AMessage;
+struct AudioPlaybackRate;
+struct AVSyncSettings;
+class IDataSource;
 class MetaData;
 struct NuPlayerDriver;
 
 struct NuPlayer : public AHandler {
-    NuPlayer();
+    NuPlayer(pid_t pid);
 
     void setUID(uid_t uid);
 
@@ -45,12 +48,19 @@
 
     void setDataSourceAsync(int fd, int64_t offset, int64_t length);
 
+    void setDataSourceAsync(const sp<DataSource> &source);
+
     void prepareAsync();
 
     void setVideoSurfaceTextureAsync(
             const sp<IGraphicBufferProducer> &bufferProducer);
 
     void setAudioSink(const sp<MediaPlayerBase::AudioSink> &sink);
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+    status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+    status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
     void start();
 
     void pause();
@@ -67,9 +77,10 @@
     status_t getSelectedTrack(int32_t type, Parcel* reply) const;
     status_t selectTrack(size_t trackIndex, bool select, int64_t timeUs);
     status_t getCurrentPosition(int64_t *mediaUs);
-    void getStats(int64_t *mNumFramesTotal, int64_t *mNumFramesDropped);
+    void getStats(Vector<sp<AMessage> > *mTrackStats);
 
     sp<MetaData> getFileMeta();
+    float getFrameRate();
 
 protected:
     virtual ~NuPlayer();
@@ -101,9 +112,13 @@
     enum {
         kWhatSetDataSource              = '=DaS',
         kWhatPrepare                    = 'prep',
-        kWhatSetVideoNativeWindow       = '=NaW',
+        kWhatSetVideoSurface            = '=VSu',
         kWhatSetAudioSink               = '=AuS',
         kWhatMoreDataQueued             = 'more',
+        kWhatConfigPlayback             = 'cfPB',
+        kWhatConfigSync                 = 'cfSy',
+        kWhatGetPlaybackSettings        = 'gPbS',
+        kWhatGetSyncSettings            = 'gSyS',
         kWhatStart                      = 'strt',
         kWhatScanSources                = 'scan',
         kWhatVideoNotify                = 'vidN',
@@ -124,9 +139,10 @@
     wp<NuPlayerDriver> mDriver;
     bool mUIDValid;
     uid_t mUID;
+    pid_t mPID;
     sp<Source> mSource;
     uint32_t mSourceFlags;
-    sp<NativeWindowWrapper> mNativeWindow;
+    sp<Surface> mSurface;
     sp<MediaPlayerBase::AudioSink> mAudioSink;
     sp<DecoderBase> mVideoDecoder;
     bool mOffloadAudio;
@@ -138,6 +154,8 @@
     int32_t mVideoDecoderGeneration;
     int32_t mRendererGeneration;
 
+    int64_t mPreviousSeekTimeUs;
+
     List<sp<Action> > mDeferredActions;
 
     bool mAudioEOS;
@@ -175,7 +193,11 @@
 
     int32_t mVideoScalingMode;
 
+    AudioPlaybackRate mPlaybackSettings;
+    AVSyncSettings mSyncSettings;
+    float mVideoFpsHint;
     bool mStarted;
+    bool mSourceStarted;
 
     // Actual pause state, either as requested by client or due to buffering.
     bool mPaused;
@@ -185,6 +207,9 @@
     // still become true, when we pause internally due to buffering.
     bool mPausedByClient;
 
+    // Pause state as requested by source (internally) due to buffering
+    bool mPausedForBuffering;
+
     inline const sp<DecoderBase> &getDecoder(bool audio) {
         return audio ? mAudioDecoder : mVideoDecoder;
     }
@@ -198,6 +223,7 @@
 
     void tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo);
     void closeAudioSink();
+    void determineAudioModeChange();
 
     status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
 
@@ -212,7 +238,7 @@
     void handleFlushComplete(bool audio, bool isDecoder);
     void finishFlushIfPossible();
 
-    void onStart();
+    void onStart(int64_t startPositionUs = -1);
     void onResume();
     void onPause();
 
@@ -221,6 +247,7 @@
     void flushDecoder(bool audio, bool needShutdown);
 
     void finishResume();
+    void notifyDriverSeekComplete();
 
     void postScanSources();
 
@@ -229,11 +256,11 @@
 
     void processDeferredActions();
 
-    void performSeek(int64_t seekTimeUs, bool needNotify);
+    void performSeek(int64_t seekTimeUs);
     void performDecoderFlush(FlushCommand audio, FlushCommand video);
     void performReset();
     void performScanSources();
-    void performSetSurface(const sp<NativeWindowWrapper> &wrapper);
+    void performSetSurface(const sp<Surface> &wrapper);
     void performResumeDecoders(bool needNotify);
 
     void onSourceNotify(const sp<AMessage> &msg);
@@ -243,6 +270,7 @@
             bool audio, bool video, const sp<AMessage> &reply);
 
     void sendSubtitleData(const sp<ABuffer> &buffer, int32_t baseIndex);
+    void sendTimedMetaData(const sp<ABuffer> &buffer);
     void sendTimedTextData(const sp<ABuffer> &buffer);
 
     void writeTrackInfo(Parcel* reply, const sp<AMessage> format) const;
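
setPlaybackSettings() in the NuPlayer.cpp hunk earlier accepts a speed of exactly 0 (later mapped to pause, with the previous non-zero speed remembered) and otherwise requires speed and pitch to stay inside the timestretch bounds. A simplified standalone version of that check is sketched below; the numeric bounds are placeholders, not the real AUDIO_TIMESTRETCH_SPEED_MIN/MAX values.

#include <cstdio>

struct PlaybackRate {
    float speed;
    float pitch;
};

// Placeholder bounds for illustration only, not the real AUDIO_TIMESTRETCH_* constants.
constexpr float kSpeedMin = 0.01f;
constexpr float kSpeedMax = 20.0f;

bool isValidPlaybackRate(const PlaybackRate &rate) {
    if (rate.speed != 0.f && rate.speed < kSpeedMin) return false;  // 0 means "pause", accepted
    if (rate.speed > kSpeedMax) return false;
    if (rate.pitch < kSpeedMin || rate.pitch > kSpeedMax) return false;
    return true;
}

int main() {
    std::printf("%d %d\n",
            isValidPlaybackRate({0.f, 1.f}),     // 1: pause request with an in-range pitch
            isValidPlaybackRate({1.f, 50.f}));   // 0: pitch out of range
    return 0;
}
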
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
index 9229704..ac3c6b6 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.cpp
@@ -19,6 +19,7 @@
 #include <utils/Log.h>
 #include <inttypes.h>
 
+#include "avc_utils.h"
 #include "NuPlayerCCDecoder.h"
 
 #include <media/stagefright/foundation/ABitReader.h>
@@ -50,6 +51,7 @@
     return cc->mData1 < 0x10 && cc->mData2 < 0x10;
 }
 
+static void dumpBytePair(const sp<ABuffer> &ccBuf) __attribute__ ((unused));
 static void dumpBytePair(const sp<ABuffer> &ccBuf) {
     size_t offset = 0;
     AString out;
@@ -185,17 +187,38 @@
 
 // returns true if a new CC track is found
 bool NuPlayer::CCDecoder::extractFromSEI(const sp<ABuffer> &accessUnit) {
-    int64_t timeUs;
-    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-
     sp<ABuffer> sei;
     if (!accessUnit->meta()->findBuffer("sei", &sei) || sei == NULL) {
         return false;
     }
 
+    int64_t timeUs;
+    CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
+
     bool trackAdded = false;
 
-    NALBitReader br(sei->data() + 1, sei->size() - 1);
+    const NALPosition *nal = (NALPosition *) sei->data();
+
+    for (size_t i = 0; i < sei->size() / sizeof(NALPosition); ++i, ++nal) {
+        trackAdded |= parseSEINalUnit(
+                timeUs, accessUnit->data() + nal->nalOffset, nal->nalSize);
+    }
+
+    return trackAdded;
+}
+
+// returns true if a new CC track is found
+bool NuPlayer::CCDecoder::parseSEINalUnit(
+        int64_t timeUs, const uint8_t *nalStart, size_t nalSize) {
+    unsigned nalType = nalStart[0] & 0x1f;
+
+    // the buffer should only have SEI in it
+    if (nalType != 6) {
+        return false;
+    }
+
+    bool trackAdded = false;
+    NALBitReader br(nalStart + 1, nalSize - 1);
     // sei_message()
     while (br.atLeastNumBitsLeft(16)) { // at least 16-bit for sei_message()
         uint32_t payload_type = 0;
@@ -214,20 +237,25 @@
 
         // sei_payload()
         if (payload_type == 4) {
-            // user_data_registered_itu_t_t35()
+            bool isCC = false;
+            if (payload_size > 1 + 2 + 4 + 1) {
+                // user_data_registered_itu_t_t35()
 
-            // ATSC A/72: 6.4.2
-            uint8_t itu_t_t35_country_code = br.getBits(8);
-            uint16_t itu_t_t35_provider_code = br.getBits(16);
-            uint32_t user_identifier = br.getBits(32);
-            uint8_t user_data_type_code = br.getBits(8);
+                // ATSC A/72: 6.4.2
+                uint8_t itu_t_t35_country_code = br.getBits(8);
+                uint16_t itu_t_t35_provider_code = br.getBits(16);
+                uint32_t user_identifier = br.getBits(32);
+                uint8_t user_data_type_code = br.getBits(8);
 
-            payload_size -= 1 + 2 + 4 + 1;
+                payload_size -= 1 + 2 + 4 + 1;
 
-            if (itu_t_t35_country_code == 0xB5
-                    && itu_t_t35_provider_code == 0x0031
-                    && user_identifier == 'GA94'
-                    && user_data_type_code == 0x3) {
+                isCC = itu_t_t35_country_code == 0xB5
+                        && itu_t_t35_provider_code == 0x0031
+                        && user_identifier == 'GA94'
+                        && user_data_type_code == 0x3;
+            }
+
+            if (isCC && payload_size > 2) {
                 // MPEG_cc_data()
                 // ATSC A/53 Part 4: 6.2.3.1
                 br.skipBits(1); //process_em_data_flag
@@ -243,7 +271,7 @@
                     sp<ABuffer> ccBuf = new ABuffer(cc_count * sizeof(CCData));
                     ccBuf->setRange(0, 0);
 
-                    for (size_t i = 0; i < cc_count; i++) {
+                    for (size_t i = 0; i < cc_count && payload_size >= 3; i++) {
                         uint8_t marker = br.getBits(5);
                         CHECK_EQ(marker, 0x1f);
 
@@ -253,6 +281,8 @@
                         uint8_t cc_data_1 = br.getBits(8) & 0x7f;
                         uint8_t cc_data_2 = br.getBits(8) & 0x7f;
 
+                        payload_size -= 3;
+
                         if (cc_valid
                                 && (cc_type == 0 || cc_type == 1)) {
                             CCData cc(cc_type, cc_data_1, cc_data_2);
@@ -269,7 +299,6 @@
                             }
                         }
                     }
-                    payload_size -= cc_count * 3;
 
                     mCCMap.add(timeUs, ccBuf);
                     break;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
index 5e06f4e..77fb0fe 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerCCDecoder.h
@@ -49,6 +49,7 @@
     bool isTrackValid(size_t index) const;
     int32_t getTrackIndex(size_t channel) const;
     bool extractFromSEI(const sp<ABuffer> &accessUnit);
+    bool parseSEINalUnit(int64_t timeUs, const uint8_t *nalStart, size_t nalSize);
     sp<ABuffer> filterCCBuf(const sp<ABuffer> &ccBuf, size_t index);
 
     DISALLOW_EVIL_CONSTRUCTORS(CCDecoder);
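
extractFromSEI() in the NuPlayerCCDecoder.cpp hunk above now treats the "sei" side buffer as an array of NALPosition records, each giving the offset and size of one SEI NAL unit inside the access unit, and hands every record to parseSEINalUnit() independently. A plain-C++ model of that iteration, with illustrative names and a stubbed parser:

#include <cstddef>
#include <cstdint>
#include <vector>

// Stand-in for the NALPosition records carried in the "sei" side buffer.
struct NalPosition {
    size_t nalOffset;
    size_t nalSize;
};

// Visits each SEI NAL inside the access unit; returns true if any visit found a new CC track.
template <typename Parse>
bool forEachSeiNal(const uint8_t *accessUnit,
                   const std::vector<NalPosition> &positions,
                   Parse parse) {
    bool trackAdded = false;
    for (const NalPosition &nal : positions) {
        trackAdded |= parse(accessUnit + nal.nalOffset, nal.nalSize);
    }
    return trackAdded;
}

int main() {
    const uint8_t accessUnit[] = {0x06, 0x04, 0x00};            // fake bytes, illustration only
    const std::vector<NalPosition> positions = {{0, sizeof(accessUnit)}};
    (void)forEachSeiNal(accessUnit, positions,
            [](const uint8_t *, size_t) { return false; });     // parser stub
    return 0;
}
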
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 5d98d98..3646828 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -24,6 +24,7 @@
 #include "NuPlayerRenderer.h"
 #include "NuPlayerSource.h"
 
+#include <cutils/properties.h>
 #include <media/ICrypto.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
@@ -33,29 +34,41 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 
+#include <gui/Surface.h>
+
 #include "avc_utils.h"
 #include "ATSParser.h"
 
 namespace android {
 
+static inline bool getAudioDeepBufferSetting() {
+    return property_get_bool("media.stagefright.audio.deep", false /* default_value */);
+}
+
 NuPlayer::Decoder::Decoder(
         const sp<AMessage> &notify,
         const sp<Source> &source,
+        pid_t pid,
         const sp<Renderer> &renderer,
-        const sp<NativeWindowWrapper> &nativeWindow,
+        const sp<Surface> &surface,
         const sp<CCDecoder> &ccDecoder)
     : DecoderBase(notify),
-      mNativeWindow(nativeWindow),
+      mSurface(surface),
       mSource(source),
       mRenderer(renderer),
       mCCDecoder(ccDecoder),
+      mPid(pid),
       mSkipRenderingUntilMediaTimeUs(-1ll),
       mNumFramesTotal(0ll),
-      mNumFramesDropped(0ll),
+      mNumInputFramesDropped(0ll),
+      mNumOutputFramesDropped(0ll),
+      mVideoWidth(0),
+      mVideoHeight(0),
       mIsAudio(true),
       mIsVideoAVC(false),
       mIsSecure(false),
       mFormatChangePending(false),
+      mTimeChangePending(false),
       mPaused(true),
       mResumePending(false),
       mComponentName("decoder") {
@@ -65,14 +78,31 @@
 }
 
 NuPlayer::Decoder::~Decoder() {
+    mCodec->release();
     releaseAndResetMediaBuffers();
 }
 
-void NuPlayer::Decoder::getStats(
-        int64_t *numFramesTotal,
-        int64_t *numFramesDropped) const {
-    *numFramesTotal = mNumFramesTotal;
-    *numFramesDropped = mNumFramesDropped;
+sp<AMessage> NuPlayer::Decoder::getStats() const {
+    mStats->setInt64("frames-total", mNumFramesTotal);
+    mStats->setInt64("frames-dropped-input", mNumInputFramesDropped);
+    mStats->setInt64("frames-dropped-output", mNumOutputFramesDropped);
+    return mStats;
+}
+
+status_t NuPlayer::Decoder::setVideoSurface(const sp<Surface> &surface) {
+    if (surface == NULL || ADebug::isExperimentEnabled("legacy-setsurface")) {
+        return BAD_VALUE;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatSetVideoSurface, this);
+
+    msg->setObject("surface", surface);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
 }
 
 void NuPlayer::Decoder::onMessageReceived(const sp<AMessage> &msg) {
@@ -81,25 +111,71 @@
     switch (msg->what()) {
         case kWhatCodecNotify:
         {
-            if (!isStaleReply(msg)) {
-                int32_t numInput, numOutput;
+            int32_t cbID;
+            CHECK(msg->findInt32("callbackID", &cbID));
 
-                if (!msg->findInt32("input-buffers", &numInput)) {
-                    numInput = INT32_MAX;
-                }
+            ALOGV("[%s] kWhatCodecNotify: cbID = %d, paused = %d",
+                    mIsAudio ? "audio" : "video", cbID, mPaused);
 
-                if (!msg->findInt32("output-buffers", &numOutput)) {
-                    numOutput = INT32_MAX;
-                }
-
-                if (!mPaused) {
-                    while (numInput-- > 0 && handleAnInputBuffer()) {}
-                }
-
-                while (numOutput-- > 0 && handleAnOutputBuffer()) {}
+            if (mPaused) {
+                break;
             }
 
-            requestCodecNotification();
+            switch (cbID) {
+                case MediaCodec::CB_INPUT_AVAILABLE:
+                {
+                    int32_t index;
+                    CHECK(msg->findInt32("index", &index));
+
+                    handleAnInputBuffer(index);
+                    break;
+                }
+
+                case MediaCodec::CB_OUTPUT_AVAILABLE:
+                {
+                    int32_t index;
+                    size_t offset;
+                    size_t size;
+                    int64_t timeUs;
+                    int32_t flags;
+
+                    CHECK(msg->findInt32("index", &index));
+                    CHECK(msg->findSize("offset", &offset));
+                    CHECK(msg->findSize("size", &size));
+                    CHECK(msg->findInt64("timeUs", &timeUs));
+                    CHECK(msg->findInt32("flags", &flags));
+
+                    handleAnOutputBuffer(index, offset, size, timeUs, flags);
+                    break;
+                }
+
+                case MediaCodec::CB_OUTPUT_FORMAT_CHANGED:
+                {
+                    sp<AMessage> format;
+                    CHECK(msg->findMessage("format", &format));
+
+                    handleOutputFormatChange(format);
+                    break;
+                }
+
+                case MediaCodec::CB_ERROR:
+                {
+                    status_t err;
+                    CHECK(msg->findInt32("err", &err));
+                    ALOGE("Decoder (%s) reported error : 0x%x",
+                            mIsAudio ? "audio" : "video", err);
+
+                    handleError(err);
+                    break;
+                }
+
+                default:
+                {
+                    TRESPASS();
+                    break;
+                }
+            }
+
             break;
         }
 
@@ -111,6 +187,46 @@
             break;
         }
 
+        case kWhatSetVideoSurface:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<RefBase> obj;
+            CHECK(msg->findObject("surface", &obj));
+            sp<Surface> surface = static_cast<Surface *>(obj.get()); // non-null
+            int32_t err = INVALID_OPERATION;
+            // NOTE: in practice mSurface is always non-null, but checking here for completeness
+            if (mCodec != NULL && mSurface != NULL) {
+                // TODO: once AwesomePlayer is removed, remove this automatic connecting
+                // to the surface by MediaPlayerService.
+                //
+                // at this point MediaPlayerService::client has already connected to the
+                // surface, which MediaCodec does not expect
+                err = native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+                if (err == OK) {
+                    err = mCodec->setSurface(surface);
+                    ALOGI_IF(err, "codec setSurface returned: %d", err);
+                    if (err == OK) {
+                        // reconnect to the old surface as MPS::Client will expect to
+                        // be able to disconnect from it.
+                        (void)native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+                        mSurface = surface;
+                    }
+                }
+                if (err != OK) {
+                    // reconnect to the new surface on error as MPS::Client will expect to
+                    // be able to disconnect from it.
+                    (void)native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+                }
+            }
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         default:
             DecoderBase::onMessageReceived(msg);
             break;
@@ -121,6 +237,7 @@
     CHECK(mCodec == NULL);
 
     mFormatChangePending = false;
+    mTimeChangePending = false;
 
     ++mBufferGeneration;
 
@@ -130,16 +247,12 @@
     mIsAudio = !strncasecmp("audio/", mime.c_str(), 6);
     mIsVideoAVC = !strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime.c_str());
 
-    sp<Surface> surface = NULL;
-    if (mNativeWindow != NULL) {
-        surface = mNativeWindow->getSurfaceTextureClient();
-    }
-
     mComponentName = mime;
     mComponentName.append(" decoder");
-    ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), surface.get());
+    ALOGV("[%s] onConfigure (surface=%p)", mComponentName.c_str(), mSurface.get());
 
-    mCodec = MediaCodec::CreateByType(mCodecLooper, mime.c_str(), false /* encoder */);
+    mCodec = MediaCodec::CreateByType(
+            mCodecLooper, mime.c_str(), false /* encoder */, NULL /* err */, mPid);
     int32_t secure = 0;
     if (format->findInt32("secure", &secure) && secure != 0) {
         if (mCodec != NULL) {
@@ -148,7 +261,7 @@
             mCodec->release();
             ALOGI("[%s] creating", mComponentName.c_str());
             mCodec = MediaCodec::CreateByComponentName(
-                    mCodecLooper, mComponentName.c_str());
+                    mCodecLooper, mComponentName.c_str(), NULL /* err */, mPid);
         }
     }
     if (mCodec == NULL) {
@@ -162,17 +275,17 @@
     mCodec->getName(&mComponentName);
 
     status_t err;
-    if (mNativeWindow != NULL) {
+    if (mSurface != NULL) {
         // disconnect from surface as MediaCodec will reconnect
         err = native_window_api_disconnect(
-                surface.get(), NATIVE_WINDOW_API_MEDIA);
+                mSurface.get(), NATIVE_WINDOW_API_MEDIA);
         // We treat this as a warning, as this is a preparatory step.
         // Codec will try to connect to the surface, which is where
         // any error signaling will occur.
         ALOGW_IF(err != OK, "failed to disconnect from surface: %d", err);
     }
     err = mCodec->configure(
-            format, surface, NULL /* crypto */, 0 /* flags */);
+            format, mSurface, NULL /* crypto */, 0 /* flags */);
     if (err != OK) {
         ALOGE("Failed to configure %s decoder (err=%d)", mComponentName.c_str(), err);
         mCodec->release();
@@ -186,6 +299,21 @@
     CHECK_EQ((status_t)OK, mCodec->getOutputFormat(&mOutputFormat));
     CHECK_EQ((status_t)OK, mCodec->getInputFormat(&mInputFormat));
 
+    mStats->setString("mime", mime.c_str());
+    mStats->setString("component-name", mComponentName.c_str());
+
+    if (!mIsAudio) {
+        int32_t width, height;
+        if (mOutputFormat->findInt32("width", &width)
+                && mOutputFormat->findInt32("height", &height)) {
+            mStats->setInt32("width", width);
+            mStats->setInt32("height", height);
+        }
+    }
+
+    sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
+    mCodec->setCallback(reply);
+
     err = mCodec->start();
     if (err != OK) {
         ALOGE("Failed to start %s decoder (err=%d)", mComponentName.c_str(), err);
@@ -195,36 +323,32 @@
         return;
     }
 
-    // the following should work after start
-    CHECK_EQ((status_t)OK, mCodec->getInputBuffers(&mInputBuffers));
     releaseAndResetMediaBuffers();
-    CHECK_EQ((status_t)OK, mCodec->getOutputBuffers(&mOutputBuffers));
-    ALOGV("[%s] got %zu input and %zu output buffers",
-            mComponentName.c_str(),
-            mInputBuffers.size(),
-            mOutputBuffers.size());
 
-    if (mRenderer != NULL) {
-        requestCodecNotification();
-    }
     mPaused = false;
     mResumePending = false;
 }
 
+void NuPlayer::Decoder::onSetParameters(const sp<AMessage> &params) {
+    if (mCodec == NULL) {
+        ALOGW("onSetParameters called before codec is created.");
+        return;
+    }
+    mCodec->setParameters(params);
+}
+
 void NuPlayer::Decoder::onSetRenderer(const sp<Renderer> &renderer) {
     bool hadNoRenderer = (mRenderer == NULL);
     mRenderer = renderer;
     if (hadNoRenderer && mRenderer != NULL) {
-        requestCodecNotification();
+        // this means that the widevine legacy source is ready
+        onRequestInputBuffers();
     }
 }
 
 void NuPlayer::Decoder::onGetInputBuffers(
         Vector<sp<ABuffer> > *dstBuffers) {
-    dstBuffers->clear();
-    for (size_t i = 0; i < mInputBuffers.size(); i++) {
-        dstBuffers->push(mInputBuffers[i]);
-    }
+    CHECK_EQ((status_t)OK, mCodec->getWidevineLegacyBuffers(dstBuffers));
 }
 
 void NuPlayer::Decoder::onResume(bool notifyComplete) {
@@ -233,9 +357,10 @@
     if (notifyComplete) {
         mResumePending = true;
     }
+    mCodec->start();
 }
 
-void NuPlayer::Decoder::onFlush(bool notifyComplete) {
+void NuPlayer::Decoder::doFlush(bool notifyComplete) {
     if (mCCDecoder != NULL) {
         mCCDecoder->flush();
     }
@@ -259,13 +384,23 @@
         // we attempt to release the buffers even if flush fails.
     }
     releaseAndResetMediaBuffers();
+    mPaused = true;
+}
 
-    if (notifyComplete) {
-        sp<AMessage> notify = mNotify->dup();
-        notify->setInt32("what", kWhatFlushCompleted);
-        notify->post();
-        mPaused = true;
+
+void NuPlayer::Decoder::onFlush() {
+    doFlush(true);
+
+    if (isDiscontinuityPending()) {
+        // This could happen if the client starts a seek/shutdown
+        // after we queued an EOS for a discontinuity.
+        // We can consider the discontinuity handled.

+        finishHandleDiscontinuity(false /* flushOnTimeChange */);
     }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatFlushCompleted);
+    notify->post();
 }
 
 void NuPlayer::Decoder::onShutdown(bool notifyComplete) {
@@ -279,12 +414,10 @@
         mCodec = NULL;
         ++mBufferGeneration;
 
-        if (mNativeWindow != NULL) {
+        if (mSurface != NULL) {
             // reconnect to surface as MediaCodec disconnected from it
             status_t error =
-                    native_window_api_connect(
-                            mNativeWindow->getNativeWindow().get(),
-                            NATIVE_WINDOW_API_MEDIA);
+                    native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
             ALOGW_IF(error != NO_ERROR,
                     "[%s] failed to connect to native window, error=%d",
                     mComponentName.c_str(), error);
@@ -308,17 +441,23 @@
     }
 }
 
-void NuPlayer::Decoder::doRequestBuffers() {
-    if (mFormatChangePending) {
-        return;
+/*
+ * returns true if we should request more data
+ */
+bool NuPlayer::Decoder::doRequestBuffers() {
+    // mRenderer is only NULL if we have a legacy widevine source that
+    // is not yet ready. In this case we must not fetch input.
+    if (isDiscontinuityPending() || mRenderer == NULL) {
+        return false;
     }
     status_t err = OK;
-    while (!mDequeuedInputBuffers.empty()) {
+    while (err == OK && !mDequeuedInputBuffers.empty()) {
         size_t bufferIx = *mDequeuedInputBuffers.begin();
         sp<AMessage> msg = new AMessage();
         msg->setSize("buffer-ix", bufferIx);
         err = fetchInputData(msg);
-        if (err != OK) {
+        if (err != OK && err != ERROR_END_OF_STREAM) {
+            // if EOS, need to queue EOS buffer
             break;
         }
         mDequeuedInputBuffers.erase(mDequeuedInputBuffers.begin());
@@ -329,40 +468,59 @@
         }
     }
 
-    if (err == -EWOULDBLOCK
-            && mSource->feedMoreTSData() == OK) {
-        scheduleRequestBuffers();
-    }
+    return err == -EWOULDBLOCK
+            && mSource->feedMoreTSData() == OK;
 }
 
-bool NuPlayer::Decoder::handleAnInputBuffer() {
-    if (mFormatChangePending) {
+void NuPlayer::Decoder::handleError(int32_t err)
+{
+    // We cannot immediately release the codec due to buffers still outstanding
+    // in the renderer. We signal the error to the player so it can shut down/release
+    // the decoder after flushing, and we increment the generation to discard stale messages.
+
+    ++mBufferGeneration;
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
+bool NuPlayer::Decoder::handleAnInputBuffer(size_t index) {
+    if (isDiscontinuityPending()) {
         return false;
     }
-    size_t bufferIx = -1;
-    status_t res = mCodec->dequeueInputBuffer(&bufferIx);
-    ALOGV("[%s] dequeued input: %d",
-            mComponentName.c_str(), res == OK ? (int)bufferIx : res);
-    if (res != OK) {
-        if (res != -EAGAIN) {
-            ALOGE("Failed to dequeue input buffer for %s (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
+
+    sp<ABuffer> buffer;
+    mCodec->getInputBuffer(index, &buffer);
+
+    if (buffer == NULL) {
+        handleError(UNKNOWN_ERROR);
+        return false;
+    }
+
+    if (index >= mInputBuffers.size()) {
+        for (size_t i = mInputBuffers.size(); i <= index; ++i) {
+            mInputBuffers.add();
+            mMediaBuffers.add();
+            mInputBufferIsDequeued.add();
+            mMediaBuffers.editItemAt(i) = NULL;
+            mInputBufferIsDequeued.editItemAt(i) = false;
         }
-        return false;
     }
+    mInputBuffers.editItemAt(index) = buffer;
 
-    CHECK_LT(bufferIx, mInputBuffers.size());
+    //CHECK_LT(bufferIx, mInputBuffers.size());
 
-    if (mMediaBuffers[bufferIx] != NULL) {
-        mMediaBuffers[bufferIx]->release();
-        mMediaBuffers.editItemAt(bufferIx) = NULL;
+    if (mMediaBuffers[index] != NULL) {
+        mMediaBuffers[index]->release();
+        mMediaBuffers.editItemAt(index) = NULL;
     }
-    mInputBufferIsDequeued.editItemAt(bufferIx) = true;
+    mInputBufferIsDequeued.editItemAt(index) = true;
 
     if (!mCSDsToSubmit.isEmpty()) {
         sp<AMessage> msg = new AMessage();
-        msg->setSize("buffer-ix", bufferIx);
+        msg->setSize("buffer-ix", index);
 
         sp<ABuffer> buffer = mCSDsToSubmit.itemAt(0);
         ALOGI("[%s] resubmitting CSD", mComponentName.c_str());
@@ -380,111 +538,51 @@
         mPendingInputMessages.erase(mPendingInputMessages.begin());
     }
 
-    if (!mInputBufferIsDequeued.editItemAt(bufferIx)) {
+    if (!mInputBufferIsDequeued.editItemAt(index)) {
         return true;
     }
 
-    mDequeuedInputBuffers.push_back(bufferIx);
+    mDequeuedInputBuffers.push_back(index);
 
     onRequestInputBuffers();
     return true;
 }
 
-bool NuPlayer::Decoder::handleAnOutputBuffer() {
-    if (mFormatChangePending) {
-        return false;
-    }
-    size_t bufferIx = -1;
-    size_t offset;
-    size_t size;
-    int64_t timeUs;
-    uint32_t flags;
-    status_t res = mCodec->dequeueOutputBuffer(
-            &bufferIx, &offset, &size, &timeUs, &flags);
+bool NuPlayer::Decoder::handleAnOutputBuffer(
+        size_t index,
+        size_t offset,
+        size_t size,
+        int64_t timeUs,
+        int32_t flags) {
+//    CHECK_LT(bufferIx, mOutputBuffers.size());
+    sp<ABuffer> buffer;
+    mCodec->getOutputBuffer(index, &buffer);
 
-    if (res != OK) {
-        ALOGV("[%s] dequeued output: %d", mComponentName.c_str(), res);
-    } else {
-        ALOGV("[%s] dequeued output: %d (time=%lld flags=%" PRIu32 ")",
-                mComponentName.c_str(), (int)bufferIx, timeUs, flags);
+    if (index >= mOutputBuffers.size()) {
+        for (size_t i = mOutputBuffers.size(); i <= index; ++i) {
+            mOutputBuffers.add();
+        }
     }
 
-    if (res == INFO_OUTPUT_BUFFERS_CHANGED) {
-        res = mCodec->getOutputBuffers(&mOutputBuffers);
-        if (res != OK) {
-            ALOGE("Failed to get output buffers for %s after INFO event (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
-            return false;
-        }
-        // NuPlayer ignores this
-        return true;
-    } else if (res == INFO_FORMAT_CHANGED) {
-        sp<AMessage> format = new AMessage();
-        res = mCodec->getOutputFormat(&format);
-        if (res != OK) {
-            ALOGE("Failed to get output format for %s after INFO event (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
-            return false;
-        }
+    mOutputBuffers.editItemAt(index) = buffer;
 
-        if (!mIsAudio) {
-            sp<AMessage> notify = mNotify->dup();
-            notify->setInt32("what", kWhatVideoSizeChanged);
-            notify->setMessage("format", format);
-            notify->post();
-        } else if (mRenderer != NULL) {
-            uint32_t flags;
-            int64_t durationUs;
-            bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
-            if (!hasVideo &&
-                    mSource->getDuration(&durationUs) == OK &&
-                    durationUs
-                        > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US) {
-                flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
-            } else {
-                flags = AUDIO_OUTPUT_FLAG_NONE;
-            }
-
-            res = mRenderer->openAudioSink(
-                    format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaded */);
-            if (res != OK) {
-                ALOGE("Failed to open AudioSink on format change for %s (err=%d)",
-                        mComponentName.c_str(), res);
-                handleError(res);
-                return false;
-            }
-        }
-        return true;
-    } else if (res == INFO_DISCONTINUITY) {
-        // nothing to do
-        return true;
-    } else if (res != OK) {
-        if (res != -EAGAIN) {
-            ALOGE("Failed to dequeue output buffer for %s (err=%d)",
-                    mComponentName.c_str(), res);
-            handleError(res);
-        }
-        return false;
-    }
-
-    CHECK_LT(bufferIx, mOutputBuffers.size());
-    sp<ABuffer> buffer = mOutputBuffers[bufferIx];
     buffer->setRange(offset, size);
     buffer->meta()->clear();
     buffer->meta()->setInt64("timeUs", timeUs);
-    if (flags & MediaCodec::BUFFER_FLAG_EOS) {
-        buffer->meta()->setInt32("eos", true);
-        notifyResumeCompleteIfNecessary();
-    }
+
+    bool eos = flags & MediaCodec::BUFFER_FLAG_EOS;
     // we do not expect CODECCONFIG or SYNCFRAME for decoder
 
-    sp<AMessage> reply = new AMessage(kWhatRenderBuffer, id());
-    reply->setSize("buffer-ix", bufferIx);
+    sp<AMessage> reply = new AMessage(kWhatRenderBuffer, this);
+    reply->setSize("buffer-ix", index);
     reply->setInt32("generation", mBufferGeneration);
 
-    if (mSkipRenderingUntilMediaTimeUs >= 0) {
+    if (eos) {
+        ALOGI("[%s] saw output EOS", mIsAudio ? "audio" : "video");
+
+        buffer->meta()->setInt32("eos", true);
+        reply->setInt32("eos", true);
+    } else if (mSkipRenderingUntilMediaTimeUs >= 0) {
         if (timeUs < mSkipRenderingUntilMediaTimeUs) {
             ALOGV("[%s] dropping buffer at time %lld as requested.",
                      mComponentName.c_str(), (long long)timeUs);
@@ -496,13 +594,15 @@
         mSkipRenderingUntilMediaTimeUs = -1;
     }
 
+    mNumFramesTotal += !mIsAudio;
+
     // wait until 1st frame comes out to signal resume complete
     notifyResumeCompleteIfNecessary();
 
     if (mRenderer != NULL) {
         // send the buffer to renderer.
         mRenderer->queueBuffer(mIsAudio, buffer, reply);
-        if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+        if (eos && !isDiscontinuityPending()) {
             mRenderer->queueEOS(mIsAudio, ERROR_END_OF_STREAM);
         }
     }
@@ -510,6 +610,36 @@
     return true;
 }
 
+void NuPlayer::Decoder::handleOutputFormatChange(const sp<AMessage> &format) {
+    if (!mIsAudio) {
+        int32_t width, height;
+        if (format->findInt32("width", &width)
+                && format->findInt32("height", &height)) {
+            mStats->setInt32("width", width);
+            mStats->setInt32("height", height);
+        }
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatVideoSizeChanged);
+        notify->setMessage("format", format);
+        notify->post();
+    } else if (mRenderer != NULL) {
+        uint32_t flags;
+        int64_t durationUs;
+        bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
+        if (getAudioDeepBufferSetting() // override regardless of source duration
+                || (!hasVideo
+                        && mSource->getDuration(&durationUs) == OK
+                        && durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
+            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+        } else {
+            flags = AUDIO_OUTPUT_FLAG_NONE;
+        }
+
+        mRenderer->openAudioSink(
+                format, false /* offloadOnly */, hasVideo, flags, NULL /* isOffloaded */);
+    }
+}
+
 void NuPlayer::Decoder::releaseAndResetMediaBuffers() {
     for (size_t i = 0; i < mMediaBuffers.size(); i++) {
         if (mMediaBuffers[i] != NULL) {
@@ -533,11 +663,8 @@
 }
 
 void NuPlayer::Decoder::requestCodecNotification() {
-    if (mFormatChangePending) {
-        return;
-    }
     if (mCodec != NULL) {
-        sp<AMessage> reply = new AMessage(kWhatCodecNotify, id());
+        sp<AMessage> reply = new AMessage(kWhatCodecNotify, this);
         reply->setInt32("generation", mBufferGeneration);
         mCodec->requestActivityNotification(reply);
     }
@@ -582,43 +709,31 @@
                     formatChange = !seamlessFormatChange;
                 }
 
-                if (formatChange || timeChange) {
-                    sp<AMessage> msg = mNotify->dup();
-                    msg->setInt32("what", kWhatInputDiscontinuity);
-                    msg->setInt32("formatChange", formatChange);
-                    msg->post();
-                }
-
+                // For format or time change, return EOS to queue EOS input,
+                // then wait for EOS on output.
                 if (formatChange /* not seamless */) {
-                    // must change decoder
-                    // return EOS and wait to be killed
                     mFormatChangePending = true;
-                    return ERROR_END_OF_STREAM;
+                    err = ERROR_END_OF_STREAM;
                 } else if (timeChange) {
-                    // need to flush
-                    // TODO: Ideally we shouldn't need a flush upon time
-                    // discontinuity, flushing will cause loss of frames.
-                    // We probably should queue a time change marker to the
-                    // output queue, and handles it in renderer instead.
                     rememberCodecSpecificData(newFormat);
-                    onFlush(false /* notifyComplete */);
-                    err = OK;
+                    mTimeChangePending = true;
+                    err = ERROR_END_OF_STREAM;
                 } else if (seamlessFormatChange) {
                     // reuse existing decoder and don't flush
                     rememberCodecSpecificData(newFormat);
-                    err = OK;
+                    continue;
                 } else {
                     // This stream is unaffected by the discontinuity
                     return -EWOULDBLOCK;
                 }
             }
 
-            reply->setInt32("err", err);
-            return OK;
-        }
+            // reply should only be returned without a buffer set
+            // when there is an error (including EOS)
+            CHECK(err != OK);
 
-        if (!mIsAudio) {
-            ++mNumFramesTotal;
+            reply->setInt32("err", err);
+            return ERROR_END_OF_STREAM;
         }
 
         dropAccessUnit = false;
@@ -628,7 +743,7 @@
                 && mIsVideoAVC
                 && !IsAVCReferenceFrame(accessUnit)) {
             dropAccessUnit = true;
-            ++mNumFramesDropped;
+            ++mNumInputFramesDropped;
         }
     } while (dropAccessUnit);
 
@@ -636,7 +751,7 @@
 #if 0
     int64_t mediaTimeUs;
     CHECK(accessUnit->meta()->findInt64("timeUs", &mediaTimeUs));
-    ALOGV("feeding %s input buffer at media time %.2f secs",
+    ALOGV("[%s] feeding input buffer at media time %.3f",
          mIsAudio ? "audio" : "video",
          mediaTimeUs / 1E6);
 #endif
@@ -696,10 +811,7 @@
         int32_t streamErr = ERROR_END_OF_STREAM;
         CHECK(msg->findInt32("err", &streamErr) || !hasBuffer);
 
-        if (streamErr == OK) {
-            /* buffers are returned to hold on to */
-            return true;
-        }
+        CHECK(streamErr != OK);
 
         // attempt to queue EOS
         status_t err = mCodec->queueInputBuffer(
@@ -781,6 +893,7 @@
     status_t err;
     int32_t render;
     size_t bufferIx;
+    int32_t eos;
     CHECK(msg->findSize("buffer-ix", &bufferIx));
 
     if (!mIsAudio) {
@@ -798,6 +911,7 @@
         CHECK(msg->findInt64("timestampNs", &timestampNs));
         err = mCodec->renderOutputBufferAndRelease(bufferIx, timestampNs);
     } else {
+        mNumOutputFramesDropped += !mIsAudio;
         err = mCodec->releaseOutputBuffer(bufferIx);
     }
     if (err != OK) {
@@ -805,6 +919,40 @@
                 mComponentName.c_str(), err);
         handleError(err);
     }
+    if (msg->findInt32("eos", &eos) && eos
+            && isDiscontinuityPending()) {
+        finishHandleDiscontinuity(true /* flushOnTimeChange */);
+    }
+}
+
+bool NuPlayer::Decoder::isDiscontinuityPending() const {
+    return mFormatChangePending || mTimeChangePending;
+}
+
+void NuPlayer::Decoder::finishHandleDiscontinuity(bool flushOnTimeChange) {
+    ALOGV("finishHandleDiscontinuity: format %d, time %d, flush %d",
+            mFormatChangePending, mTimeChangePending, flushOnTimeChange);
+
+    // If we have format change, pause and wait to be killed;
+    // If we have time change only, flush and restart fetching.
+
+    if (mFormatChangePending) {
+        mPaused = true;
+    } else if (mTimeChangePending) {
+        if (flushOnTimeChange) {
+            doFlush(false /* notifyComplete */);
+            signalResume(false /* notifyComplete */);
+        }
+    }
+
+    // Notify NuPlayer to either shutdown decoder, or rescan sources
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatInputDiscontinuity);
+    msg->setInt32("formatChange", mFormatChangePending);
+    msg->post();
+
+    mFormatChangePending = false;
+    mTimeChangePending = false;
 }
 
 bool NuPlayer::Decoder::supportsSeamlessAudioFormatChange(
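Decoder::setVideoSurface() above is implemented as a blocking message round-trip: the request is posted to the decoder's looper and the caller waits on postAndAwaitResponse() for the resulting status. A rough standalone model of that control flow using a promise/future pair (an illustration, not the AMessage API):

    #include <future>
    #include <thread>

    // Caller-side view of post-and-await-response: block until the handler
    // thread publishes a status code for the request it was handed.
    static int postAndAwait(bool haveSurface) {
        std::promise<int> reply;
        std::future<int> response = reply.get_future();
        std::thread handler([&reply, haveSurface] {
            // ... the handler thread would switch the codec's surface here ...
            reply.set_value(haveSurface ? 0 /* OK */ : -1 /* error */);
        });
        int err = response.get();   // caller blocks here, like postAndAwaitResponse()
        handler.join();
        return err;
    }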
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
index 1bfa94f..eeb4af4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.h
@@ -26,13 +26,15 @@
 struct NuPlayer::Decoder : public DecoderBase {
     Decoder(const sp<AMessage> &notify,
             const sp<Source> &source,
+            pid_t pid,
             const sp<Renderer> &renderer = NULL,
-            const sp<NativeWindowWrapper> &nativeWindow = NULL,
+            const sp<Surface> &surface = NULL,
             const sp<CCDecoder> &ccDecoder = NULL);
 
-    virtual void getStats(
-            int64_t *mNumFramesTotal,
-            int64_t *mNumFramesDropped) const;
+    virtual sp<AMessage> getStats() const;
+
+    // sets the output surface of video decoders.
+    virtual status_t setVideoSurface(const sp<Surface> &surface);
 
 protected:
     virtual ~Decoder();
@@ -40,20 +42,22 @@
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
     virtual void onConfigure(const sp<AMessage> &format);
+    virtual void onSetParameters(const sp<AMessage> &params);
     virtual void onSetRenderer(const sp<Renderer> &renderer);
     virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
     virtual void onResume(bool notifyComplete);
-    virtual void onFlush(bool notifyComplete);
+    virtual void onFlush();
     virtual void onShutdown(bool notifyComplete);
-    virtual void doRequestBuffers();
+    virtual bool doRequestBuffers();
 
 private:
     enum {
         kWhatCodecNotify         = 'cdcN',
         kWhatRenderBuffer        = 'rndr',
+        kWhatSetVideoSurface     = 'sSur'
     };
 
-    sp<NativeWindowWrapper> mNativeWindow;
+    sp<Surface> mSurface;
 
     sp<Source> mSource;
     sp<Renderer> mRenderer;
@@ -74,25 +78,38 @@
     Vector<MediaBuffer *> mMediaBuffers;
     Vector<size_t> mDequeuedInputBuffers;
 
+    const pid_t mPid;
     int64_t mSkipRenderingUntilMediaTimeUs;
     int64_t mNumFramesTotal;
-    int64_t mNumFramesDropped;
+    int64_t mNumInputFramesDropped;
+    int64_t mNumOutputFramesDropped;
+    int32_t mVideoWidth;
+    int32_t mVideoHeight;
     bool mIsAudio;
     bool mIsVideoAVC;
     bool mIsSecure;
     bool mFormatChangePending;
+    bool mTimeChangePending;
 
     bool mPaused;
     bool mResumePending;
     AString mComponentName;
 
-    bool handleAnInputBuffer();
-    bool handleAnOutputBuffer();
+    void handleError(int32_t err);
+    bool handleAnInputBuffer(size_t index);
+    bool handleAnOutputBuffer(
+            size_t index,
+            size_t offset,
+            size_t size,
+            int64_t timeUs,
+            int32_t flags);
+    void handleOutputFormatChange(const sp<AMessage> &format);
 
     void releaseAndResetMediaBuffers();
     void requestCodecNotification();
     bool isStaleReply(const sp<AMessage> &msg);
 
+    void doFlush(bool notifyComplete);
     status_t fetchInputData(sp<AMessage> &reply);
     bool onInputBufferFetched(const sp<AMessage> &msg);
     void onRenderBuffer(const sp<AMessage> &msg);
@@ -100,6 +117,8 @@
     bool supportsSeamlessFormatChange(const sp<AMessage> &to) const;
     bool supportsSeamlessAudioFormatChange(const sp<AMessage> &targetFormat) const;
     void rememberCodecSpecificData(const sp<AMessage> &format);
+    bool isDiscontinuityPending() const;
+    void finishHandleDiscontinuity(bool flushOnTimeChange);
 
     void notifyResumeCompleteIfNecessary();
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
index d56fc4d..7e76842 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.cpp
@@ -31,6 +31,7 @@
 NuPlayer::DecoderBase::DecoderBase(const sp<AMessage> &notify)
     :  mNotify(notify),
        mBufferGeneration(0),
+       mStats(new AMessage),
        mRequestInputBuffersPending(false) {
     // Every decoder has its own looper because MediaCodec operations
     // are blocking, but NuPlayer needs asynchronous operations.
@@ -61,7 +62,7 @@
 }
 
 void NuPlayer::DecoderBase::configure(const sp<AMessage> &format) {
-    sp<AMessage> msg = new AMessage(kWhatConfigure, id());
+    sp<AMessage> msg = new AMessage(kWhatConfigure, this);
     msg->setMessage("format", format);
     msg->post();
 }
@@ -70,14 +71,20 @@
     mDecoderLooper->registerHandler(this);
 }
 
+void NuPlayer::DecoderBase::setParameters(const sp<AMessage> &params) {
+    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
+    msg->setMessage("params", params);
+    msg->post();
+}
+
 void NuPlayer::DecoderBase::setRenderer(const sp<Renderer> &renderer) {
-    sp<AMessage> msg = new AMessage(kWhatSetRenderer, id());
+    sp<AMessage> msg = new AMessage(kWhatSetRenderer, this);
     msg->setObject("renderer", renderer);
     msg->post();
 }
 
 status_t NuPlayer::DecoderBase::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
-    sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, id());
+    sp<AMessage> msg = new AMessage(kWhatGetInputBuffers, this);
     msg->setPointer("buffers", buffers);
 
     sp<AMessage> response;
@@ -85,17 +92,17 @@
 }
 
 void NuPlayer::DecoderBase::signalFlush() {
-    (new AMessage(kWhatFlush, id()))->post();
+    (new AMessage(kWhatFlush, this))->post();
 }
 
 void NuPlayer::DecoderBase::signalResume(bool notifyComplete) {
-    sp<AMessage> msg = new AMessage(kWhatResume, id());
+    sp<AMessage> msg = new AMessage(kWhatResume, this);
     msg->setInt32("notifyComplete", notifyComplete);
     msg->post();
 }
 
 void NuPlayer::DecoderBase::initiateShutdown() {
-    (new AMessage(kWhatShutdown, id()))->post();
+    (new AMessage(kWhatShutdown, this))->post();
 }
 
 void NuPlayer::DecoderBase::onRequestInputBuffers() {
@@ -103,16 +110,13 @@
         return;
     }
 
-    doRequestBuffers();
-}
+    // doRequestBuffers() returns true if we should request more data
+    if (doRequestBuffers()) {
+        mRequestInputBuffersPending = true;
 
-void NuPlayer::DecoderBase::scheduleRequestBuffers() {
-    if (mRequestInputBuffersPending) {
-        return;
+        sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, this);
+        msg->post(10 * 1000ll);
     }
-    mRequestInputBuffersPending = true;
-    sp<AMessage> msg = new AMessage(kWhatRequestInputBuffers, id());
-    msg->post(10 * 1000ll);
 }
 
 void NuPlayer::DecoderBase::onMessageReceived(const sp<AMessage> &msg) {
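With this change the base class owns the rescheduling: a subclass's doRequestBuffers() reports whether input ran dry while the source can still feed data (-EWOULDBLOCK), and the base re-posts kWhatRequestInputBuffers after 10 ms. A simplified synchronous model of that contract (generic names, not the NuPlayer types):

    #include <chrono>
    #include <thread>

    // Re-poll the subclass hook every 10 ms for as long as it reports
    // "source still has data but none was available right now".
    template <typename F>
    void requestInputBuffersLoop(F doRequestBuffers) {
        while (doRequestBuffers()) {
            std::this_thread::sleep_for(std::chrono::milliseconds(10));
        }
    }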
@@ -126,6 +130,14 @@
             break;
         }
 
+        case kWhatSetParameters:
+        {
+            sp<AMessage> params;
+            CHECK(msg->findMessage("params", &params));
+            onSetParameters(params);
+            break;
+        }
+
         case kWhatSetRenderer:
         {
             sp<RefBase> obj;
@@ -136,7 +148,7 @@
 
         case kWhatGetInputBuffers:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             Vector<sp<ABuffer> > *dstBuffers;
@@ -157,7 +169,7 @@
 
         case kWhatFlush:
         {
-            onFlush(true);
+            onFlush();
             break;
         }
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
index 6732ff4..b0dc01d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderBase.h
@@ -26,24 +26,27 @@
 
 struct ABuffer;
 struct MediaCodec;
-struct MediaBuffer;
+class MediaBuffer;
+class Surface;
 
 struct NuPlayer::DecoderBase : public AHandler {
     DecoderBase(const sp<AMessage> &notify);
 
     void configure(const sp<AMessage> &format);
     void init();
+    void setParameters(const sp<AMessage> &params);
 
     void setRenderer(const sp<Renderer> &renderer);
+    virtual status_t setVideoSurface(const sp<Surface> &) { return INVALID_OPERATION; }
 
     status_t getInputBuffers(Vector<sp<ABuffer> > *dstBuffers) const;
     void signalFlush();
     void signalResume(bool notifyComplete);
     void initiateShutdown();
 
-    virtual void getStats(
-            int64_t *mNumFramesTotal,
-            int64_t *mNumFramesDropped) const = 0;
+    virtual sp<AMessage> getStats() const {
+        return mStats;
+    }
 
     enum {
         kWhatInputDiscontinuity  = 'inDi',
@@ -62,23 +65,25 @@
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
     virtual void onConfigure(const sp<AMessage> &format) = 0;
+    virtual void onSetParameters(const sp<AMessage> &params) = 0;
     virtual void onSetRenderer(const sp<Renderer> &renderer) = 0;
     virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers) = 0;
     virtual void onResume(bool notifyComplete) = 0;
-    virtual void onFlush(bool notifyComplete) = 0;
+    virtual void onFlush() = 0;
     virtual void onShutdown(bool notifyComplete) = 0;
 
     void onRequestInputBuffers();
-    void scheduleRequestBuffers();
-    virtual void doRequestBuffers() = 0;
+    virtual bool doRequestBuffers() = 0;
     virtual void handleError(int32_t err);
 
     sp<AMessage> mNotify;
     int32_t mBufferGeneration;
+    sp<AMessage> mStats;
 
 private:
     enum {
         kWhatConfigure           = 'conf',
+        kWhatSetParameters       = 'setP',
         kWhatSetRenderer         = 'setR',
         kWhatGetInputBuffers     = 'gInB',
         kWhatRequestInputBuffers = 'reqB',
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index 9f7f09a..30146c4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -59,12 +59,6 @@
 NuPlayer::DecoderPassThrough::~DecoderPassThrough() {
 }
 
-void NuPlayer::DecoderPassThrough::getStats(
-        int64_t *numFramesTotal, int64_t *numFramesDropped) const {
-    *numFramesTotal = 0;
-    *numFramesDropped = 0;
-}
-
 void NuPlayer::DecoderPassThrough::onConfigure(const sp<AMessage> &format) {
     ALOGV("[%s] onConfigure", mComponentName.c_str());
     mCachedBytes = 0;
@@ -74,17 +68,24 @@
 
     onRequestInputBuffers();
 
+    int32_t hasVideo = 0;
+    format->findInt32("has-video", &hasVideo);
+
     // The audio sink is already opened before the PassThrough decoder is created.
     // Opening again might be relevant if decoder is instantiated after shutdown and
     // format is different.
     status_t err = mRenderer->openAudioSink(
-            format, true /* offloadOnly */, false /* hasVideo */,
+            format, true /* offloadOnly */, hasVideo,
             AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */);
     if (err != OK) {
         handleError(err);
     }
 }
 
+void NuPlayer::DecoderPassThrough::onSetParameters(const sp<AMessage> &/*params*/) {
+    ALOGW("onSetParameters() called unexpectedly");
+}
+
 void NuPlayer::DecoderPassThrough::onSetRenderer(
         const sp<Renderer> &renderer) {
     // renderer can't be changed during offloading
@@ -110,7 +111,10 @@
     return mCachedBytes >= kMaxCachedBytes || mReachedEOS || mPaused;
 }
 
-void NuPlayer::DecoderPassThrough::doRequestBuffers() {
+/*
+ * returns true if we should request more data
+ */
+bool NuPlayer::DecoderPassThrough::doRequestBuffers() {
     status_t err = OK;
     while (!isDoneFetching()) {
         sp<AMessage> msg = new AMessage();
@@ -123,10 +127,8 @@
         onInputBufferFetched(msg);
     }
 
-    if (err == -EWOULDBLOCK
-            && mSource->feedMoreTSData() == OK) {
-        scheduleRequestBuffers();
-    }
+    return err == -EWOULDBLOCK
+            && mSource->feedMoreTSData() == OK;
 }
 
 status_t NuPlayer::DecoderPassThrough::dequeueAccessUnit(sp<ABuffer> *accessUnit) {
@@ -247,7 +249,7 @@
                 }
 
                 if (timeChange) {
-                    onFlush(false /* notifyComplete */);
+                    doFlush(false /* notifyComplete */);
                     err = OK;
                 } else if (formatChange) {
                     // do seamless format change
@@ -333,7 +335,7 @@
         return;
     }
 
-    sp<AMessage> reply = new AMessage(kWhatBufferConsumed, id());
+    sp<AMessage> reply = new AMessage(kWhatBufferConsumed, this);
     reply->setInt32("generation", mBufferGeneration);
     reply->setInt32("size", bufferSize);
 
@@ -364,7 +366,7 @@
     }
 }
 
-void NuPlayer::DecoderPassThrough::onFlush(bool notifyComplete) {
+void NuPlayer::DecoderPassThrough::doFlush(bool notifyComplete) {
     ++mBufferGeneration;
     mSkipRenderingUntilMediaTimeUs = -1;
     mPendingAudioAccessUnit.clear();
@@ -376,18 +378,21 @@
         mRenderer->signalTimeDiscontinuity();
     }
 
-    if (notifyComplete) {
-        mPaused = true;
-        sp<AMessage> notify = mNotify->dup();
-        notify->setInt32("what", kWhatFlushCompleted);
-        notify->post();
-    }
-
     mPendingBuffersToDrain = 0;
     mCachedBytes = 0;
     mReachedEOS = false;
 }
 
+void NuPlayer::DecoderPassThrough::onFlush() {
+    doFlush(true /* notifyComplete */);
+
+    mPaused = true;
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatFlushCompleted);
+    notify->post();
+}
+
 void NuPlayer::DecoderPassThrough::onShutdown(bool notifyComplete) {
     ++mBufferGeneration;
     mSkipRenderingUntilMediaTimeUs = -1;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
index a6e1faf..db33e87 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.h
@@ -29,10 +29,6 @@
                        const sp<Source> &source,
                        const sp<Renderer> &renderer);
 
-    virtual void getStats(
-            int64_t *mNumFramesTotal,
-            int64_t *mNumFramesDropped) const;
-
 protected:
 
     virtual ~DecoderPassThrough();
@@ -40,12 +36,13 @@
     virtual void onMessageReceived(const sp<AMessage> &msg);
 
     virtual void onConfigure(const sp<AMessage> &format);
+    virtual void onSetParameters(const sp<AMessage> &params);
     virtual void onSetRenderer(const sp<Renderer> &renderer);
     virtual void onGetInputBuffers(Vector<sp<ABuffer> > *dstBuffers);
     virtual void onResume(bool notifyComplete);
-    virtual void onFlush(bool notifyComplete);
+    virtual void onFlush();
     virtual void onShutdown(bool notifyComplete);
-    virtual void doRequestBuffers();
+    virtual bool doRequestBuffers();
 
 private:
     enum {
@@ -77,6 +74,7 @@
     status_t dequeueAccessUnit(sp<ABuffer> *accessUnit);
     sp<ABuffer> aggregateBuffer(const sp<ABuffer> &accessUnit);
     status_t fetchInputData(sp<AMessage> &reply);
+    void doFlush(bool notifyComplete);
 
     void onInputBufferFetched(const sp<AMessage> &msg);
     void onBufferConsumed(int32_t size);
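Both decoders now separate the flush work from its notification: doFlush() can be invoked internally (for example on a time discontinuity) without telling the player, while the onFlush() message handler always reports completion. A compact model of that split (illustrative type and method names):

    // doFlush() does the work; onFlush() wraps it for the external
    // kWhatFlush request and always reports kWhatFlushCompleted.
    struct FlushModel {
        bool paused = false;
        void doFlush() { /* drop queued buffers, reset EOS/cache state */ }
        void onFlush() {
            doFlush();
            paused = true;
            notifyFlushCompleted();
        }
        void notifyFlushCompleted() { /* post kWhatFlushCompleted to the player */ }
    };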
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index bc79fdb..7370224 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -18,6 +18,7 @@
 #define LOG_TAG "NuPlayerDriver"
 #include <inttypes.h>
 #include <utils/Log.h>
+#include <cutils/properties.h>
 
 #include "NuPlayerDriver.h"
 
@@ -32,7 +33,7 @@
 
 namespace android {
 
-NuPlayerDriver::NuPlayerDriver()
+NuPlayerDriver::NuPlayerDriver(pid_t pid)
     : mState(STATE_IDLE),
       mIsAsyncPrepare(false),
       mAsyncResult(UNKNOWN_ERROR),
@@ -54,7 +55,7 @@
             true,  /* canCallJava */
             PRIORITY_AUDIO);
 
-    mPlayer = new NuPlayer;
+    mPlayer = new NuPlayer(pid);
     mLooper->registerHandler(mPlayer);
 
     mPlayer->setDriver(this);
@@ -135,6 +136,25 @@
     return mAsyncResult;
 }
 
+status_t NuPlayerDriver::setDataSource(const sp<DataSource> &source) {
+    ALOGV("setDataSource(%p) callback source", this);
+    Mutex::Autolock autoLock(mLock);
+
+    if (mState != STATE_IDLE) {
+        return INVALID_OPERATION;
+    }
+
+    mState = STATE_SET_DATASOURCE_PENDING;
+
+    mPlayer->setDataSourceAsync(source);
+
+    while (mState == STATE_SET_DATASOURCE_PENDING) {
+        mCondition.wait(mLock);
+    }
+
+    return mAsyncResult;
+}
+
 status_t NuPlayerDriver::setVideoSurfaceTexture(
         const sp<IGraphicBufferProducer> &bufferProducer) {
     ALOGV("setVideoSurfaceTexture(%p)", this);
@@ -222,7 +242,7 @@
 }
 
 status_t NuPlayerDriver::start() {
-    ALOGD("start(%p)", this);
+    ALOGD("start(%p), state is %d, eos is %d", this, mState, mAtEOS);
     Mutex::Autolock autoLock(mLock);
 
     switch (mState) {
@@ -341,6 +361,34 @@
     return mState == STATE_RUNNING && !mAtEOS;
 }
 
+status_t NuPlayerDriver::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    Mutex::Autolock autoLock(mLock);
+    status_t err = mPlayer->setPlaybackSettings(rate);
+    if (err == OK) {
+        if (rate.mSpeed == 0.f && mState == STATE_RUNNING) {
+            mState = STATE_PAUSED;
+            // try to update position
+            (void)mPlayer->getCurrentPosition(&mPositionUs);
+            notifyListener_l(MEDIA_PAUSED);
+        } else if (rate.mSpeed != 0.f && mState == STATE_PAUSED) {
+            mState = STATE_RUNNING;
+        }
+    }
+    return err;
+}
+
+status_t NuPlayerDriver::getPlaybackSettings(AudioPlaybackRate *rate) {
+    return mPlayer->getPlaybackSettings(rate);
+}
+
+status_t NuPlayerDriver::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+    return mPlayer->setSyncSettings(sync, videoFpsHint);
+}
+
+status_t NuPlayerDriver::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
+    return mPlayer->getSyncSettings(sync, videoFps);
+}
+
 status_t NuPlayerDriver::seekTo(int msec) {
     ALOGD("seekTo(%p) %d ms", this, msec);
     Mutex::Autolock autoLock(mLock);
@@ -350,17 +398,10 @@
     switch (mState) {
         case STATE_PREPARED:
         case STATE_STOPPED_AND_PREPARED:
-        {
-            mStartupSeekTimeUs = seekTimeUs;
-            // pretend that the seek completed. It will actually happen when starting playback.
-            // TODO: actually perform the seek here, so the player is ready to go at the new
-            // location
-            notifySeekComplete_l();
-            break;
-        }
-
-        case STATE_RUNNING:
         case STATE_PAUSED:
+            mStartupSeekTimeUs = seekTimeUs;
+            // fall through.
+        case STATE_RUNNING:
         {
             mAtEOS = false;
             mSeekInProgress = true;
@@ -444,6 +485,13 @@
         notifyListener_l(MEDIA_STOPPED);
     }
 
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("persist.debug.sf.stats", value, NULL) &&
+            (!strcmp("1", value) || !strcasecmp("true", value))) {
+        Vector<String16> args;
+        dump(-1, args);
+    }
+
     mState = STATE_RESET_IN_PROGRESS;
     mPlayer->resetAsync();
 
@@ -612,22 +660,59 @@
 
 status_t NuPlayerDriver::dump(
         int fd, const Vector<String16> & /* args */) const {
-    int64_t numFramesTotal;
-    int64_t numFramesDropped;
-    mPlayer->getStats(&numFramesTotal, &numFramesDropped);
 
-    FILE *out = fdopen(dup(fd), "w");
+    Vector<sp<AMessage> > trackStats;
+    mPlayer->getStats(&trackStats);
 
-    fprintf(out, " NuPlayer\n");
-    fprintf(out, "  numFramesTotal(%" PRId64 "), numFramesDropped(%" PRId64 "), "
-                 "percentageDropped(%.2f)\n",
-                 numFramesTotal,
-                 numFramesDropped,
-                 numFramesTotal == 0
-                    ? 0.0 : (double)numFramesDropped / numFramesTotal);
+    AString logString(" NuPlayer\n");
+    char buf[256] = {0};
 
-    fclose(out);
-    out = NULL;
+    for (size_t i = 0; i < trackStats.size(); ++i) {
+        const sp<AMessage> &stats = trackStats.itemAt(i);
+
+        AString mime;
+        if (stats->findString("mime", &mime)) {
+            snprintf(buf, sizeof(buf), "  mime(%s)\n", mime.c_str());
+            logString.append(buf);
+        }
+
+        AString name;
+        if (stats->findString("component-name", &name)) {
+            snprintf(buf, sizeof(buf), "    decoder(%s)\n", name.c_str());
+            logString.append(buf);
+        }
+
+        if (mime.startsWith("video/")) {
+            int32_t width, height;
+            if (stats->findInt32("width", &width)
+                    && stats->findInt32("height", &height)) {
+                snprintf(buf, sizeof(buf), "    resolution(%d x %d)\n", width, height);
+                logString.append(buf);
+            }
+
+            int64_t numFramesTotal = 0;
+            int64_t numFramesDropped = 0;
+
+            stats->findInt64("frames-total", &numFramesTotal);
+            stats->findInt64("frames-dropped-output", &numFramesDropped);
+            snprintf(buf, sizeof(buf), "    numFramesTotal(%lld), numFramesDropped(%lld), "
+                     "percentageDropped(%.2f%%)\n",
+                     (long long)numFramesTotal,
+                     (long long)numFramesDropped,
+                     numFramesTotal == 0
+                            ? 0.0 : (double)(numFramesDropped * 100) / numFramesTotal);
+            logString.append(buf);
+        }
+    }
+
+    ALOGI("%s", logString.c_str());
+
+    if (fd >= 0) {
+        FILE *out = fdopen(dup(fd), "w");
+        fprintf(out, "%s", logString.c_str());
+        fclose(out);
+        out = NULL;
+    }
 
     return OK;
 }
@@ -640,6 +725,7 @@
 
 void NuPlayerDriver::notifyListener_l(
         int msg, int ext1, int ext2, const Parcel *in) {
+    ALOGD("notifyListener_l(%p), (%d, %d, %d)", this, msg, ext1, ext2);
     switch (msg) {
         case MEDIA_PLAYBACK_COMPLETE:
         {
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index 5cba7d9..d009fd7 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -24,7 +24,7 @@
 struct NuPlayer;
 
 struct NuPlayerDriver : public MediaPlayerInterface {
-    NuPlayerDriver();
+    NuPlayerDriver(pid_t pid);
 
     virtual status_t initCheck();
 
@@ -39,6 +39,8 @@
 
     virtual status_t setDataSource(const sp<IStreamSource> &source);
 
+    virtual status_t setDataSource(const sp<DataSource>& dataSource);
+
     virtual status_t setVideoSurfaceTexture(
             const sp<IGraphicBufferProducer> &bufferProducer);
     virtual status_t prepare();
@@ -47,6 +49,10 @@
     virtual status_t stop();
     virtual status_t pause();
     virtual bool isPlaying();
+    virtual status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    virtual status_t getPlaybackSettings(AudioPlaybackRate *rate);
+    virtual status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+    virtual status_t getSyncSettings(AVSyncSettings *sync, float *videoFps);
     virtual status_t seekTo(int msec);
     virtual status_t getCurrentPosition(int *msec);
     virtual status_t getDuration(int *msec);
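NuPlayerDriver::setPlaybackSettings() above maps the rate to player state: a speed of 0 pauses a running player, a non-zero speed resumes a paused one, and only non-zero speeds are remembered. A standalone sketch of that convention (illustrative type, not the driver itself):

    // Speed 0 == pause; any non-zero speed == run at that speed.
    struct RateModel {
        enum State { RUNNING, PAUSED };
        State state = RUNNING;
        float storedSpeed = 1.0f;

        void setSpeed(float speed) {
            if (speed == 0.f) {
                if (state == RUNNING) state = PAUSED;   // keep the last non-zero speed
            } else {
                state = RUNNING;
                storedSpeed = speed;
            }
        }
    };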
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 25225a8..04a46f4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -19,22 +19,52 @@
 #include <utils/Log.h>
 
 #include "NuPlayerRenderer.h"
-
+#include <cutils/properties.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/foundation/AUtils.h>
 #include <media/stagefright/foundation/AWakeLock.h>
+#include <media/stagefright/MediaClock.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
-
-#include <VideoFrameScheduler.h>
+#include <media/stagefright/VideoFrameScheduler.h>
 
 #include <inttypes.h>
 
 namespace android {
 
+/*
+ * Example of common configuration settings in shell script form
+
+   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
+   adb shell setprop audio.offload.disable 1
+
+   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
+   adb shell setprop audio.offload.video 1
+
+   #Use audio callbacks for PCM data
+   adb shell setprop media.stagefright.audio.cbk 1
+
+   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
+   adb shell setprop media.stagefright.audio.deep 1
+
+   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
+   adb shell setprop media.stagefright.audio.sink 1000
+
+ * These configurations take effect for the next track played (not the current track).
+ */
+
+static inline bool getUseAudioCallbackSetting() {
+    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
+}
+
+static inline int32_t getAudioSinkPcmMsSetting() {
+    return property_get_int32(
+            "media.stagefright.audio.sink", 500 /* default_value */);
+}
+
 // Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
 // is closed to allow the audio DSP to power down.
 static const int64_t kOffloadPauseMaxUs = 10000000ll;
@@ -63,34 +93,34 @@
       mDrainVideoQueuePending(false),
       mAudioQueueGeneration(0),
       mVideoQueueGeneration(0),
+      mAudioDrainGeneration(0),
+      mVideoDrainGeneration(0),
+      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
       mAudioFirstAnchorTimeMediaUs(-1),
       mAnchorTimeMediaUs(-1),
-      mAnchorTimeRealUs(-1),
       mAnchorNumFramesWritten(-1),
-      mAnchorMaxMediaUs(-1),
       mVideoLateByUs(0ll),
       mHasAudio(false),
       mHasVideo(false),
-      mPauseStartedTimeRealUs(-1),
-      mFlushingAudio(false),
-      mFlushingVideo(false),
       mNotifyCompleteAudio(false),
       mNotifyCompleteVideo(false),
       mSyncQueues(false),
       mPaused(false),
-      mPausePositionMediaTimeUs(-1),
       mVideoSampleReceived(false),
       mVideoRenderingStarted(false),
       mVideoRenderingStartGeneration(0),
       mAudioRenderingStartGeneration(0),
       mAudioOffloadPauseTimeoutGeneration(0),
-      mAudioOffloadTornDown(false),
+      mAudioTornDown(false),
       mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
       mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
       mTotalBuffersQueued(0),
       mLastAudioBufferDrained(0),
+      mUseAudioCallback(false),
       mWakeLock(new AWakeLock()) {
-
+    mMediaClock = new MediaClock;
+    mPlaybackRate = mPlaybackSettings.mSpeed;
+    mMediaClock->setPlaybackRate(mPlaybackRate);
 }
 
 NuPlayer::Renderer::~Renderer() {
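The renderer constructor above drops its hand-rolled anchor bookkeeping (mAnchorTimeRealUs, mPauseStartedTimeRealUs, ...) in favor of a shared MediaClock that is told the playback rate up front. Conceptually the clock maps media timestamps to real-time deadlines from a single anchor; a rough illustration of that mapping (this models the idea, not the MediaClock API):

    #include <cstdint>

    // realTime(media) = anchorReal + (media - anchorMedia) / rate
    struct MediaClockModel {
        int64_t anchorMediaUs = 0;
        int64_t anchorRealUs = 0;
        float rate = 1.0f;

        void updateAnchor(int64_t mediaUs, int64_t realUs) {
            anchorMediaUs = mediaUs;
            anchorRealUs = realUs;
        }
        int64_t realTimeForMedia(int64_t mediaUs) const {
            return anchorRealUs + (int64_t)((mediaUs - anchorMediaUs) / rate);
        }
    };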
@@ -105,7 +135,8 @@
         bool audio,
         const sp<ABuffer> &buffer,
         const sp<AMessage> &notifyConsumed) {
-    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
+    msg->setInt32("queueGeneration", getQueueGeneration(audio));
     msg->setInt32("audio", static_cast<int32_t>(audio));
     msg->setBuffer("buffer", buffer);
     msg->setMessage("notifyConsumed", notifyConsumed);
@@ -115,199 +146,209 @@
 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
     CHECK_NE(finalResult, (status_t)OK);
 
-    sp<AMessage> msg = new AMessage(kWhatQueueEOS, id());
+    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
+    msg->setInt32("queueGeneration", getQueueGeneration(audio));
     msg->setInt32("audio", static_cast<int32_t>(audio));
     msg->setInt32("finalResult", finalResult);
     msg->post();
 }
 
-void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
-    {
-        Mutex::Autolock autoLock(mFlushLock);
-        if (audio) {
-            mNotifyCompleteAudio |= notifyComplete;
-            if (mFlushingAudio) {
-                return;
-            }
-            mFlushingAudio = true;
-        } else {
-            mNotifyCompleteVideo |= notifyComplete;
-            if (mFlushingVideo) {
-                return;
-            }
-            mFlushingVideo = true;
-        }
+status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
+    writeToAMessage(msg, rate);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
+    if (rate.mSpeed == 0.f) {
+        onPause();
+        // Don't call the audiosink's setPlaybackRate() when pausing, as the pitch does not
+        // have to correspond to any non-zero speed (e.g., the old speed). Keep the
+        // settings nonetheless, using the old speed, in case the audiosink changes.
+        AudioPlaybackRate newRate = rate;
+        newRate.mSpeed = mPlaybackSettings.mSpeed;
+        mPlaybackSettings = newRate;
+        return OK;
     }
 
-    sp<AMessage> msg = new AMessage(kWhatFlush, id());
+    if (mAudioSink != NULL && mAudioSink->ready()) {
+        status_t err = mAudioSink->setPlaybackRate(rate);
+        if (err != OK) {
+            return err;
+        }
+    }
+    mPlaybackSettings = rate;
+    mPlaybackRate = rate.mSpeed;
+    mMediaClock->setPlaybackRate(mPlaybackRate);
+    return OK;
+}
+
+status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, rate);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    if (mAudioSink != NULL && mAudioSink->ready()) {
+        status_t err = mAudioSink->getPlaybackRate(rate);
+        if (err == OK) {
+            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
+                ALOGW("correcting mismatch in internal/external playback rate");
+            }
+            // Get the playback settings actually used by the audiosink, as they may be
+            // slightly off because the audiosink may not apply very small changes.
+            mPlaybackSettings = *rate;
+            if (mPaused) {
+                rate->mSpeed = 0.f;
+            }
+        }
+        return err;
+    }
+    *rate = mPlaybackSettings;
+    return OK;
+}
+
+status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
+    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
+    writeToAMessage(msg, sync, videoFpsHint);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
+    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
+        return BAD_VALUE;
+    }
+    // TODO: support sync sources
+    return INVALID_OPERATION;
+}
+
+status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
+    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+        if (err == OK) {
+            readFromAMessage(response, sync, videoFps);
+        }
+    }
+    return err;
+}
+
+status_t NuPlayer::Renderer::onGetSyncSettings(
+        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
+    *sync = mSyncSettings;
+    *videoFps = -1.f;
+    return OK;
+}
+
+void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
+    {
+        Mutex::Autolock autoLock(mLock);
+        if (audio) {
+            mNotifyCompleteAudio |= notifyComplete;
+            clearAudioFirstAnchorTime_l();
+            ++mAudioQueueGeneration;
+            ++mAudioDrainGeneration;
+        } else {
+            mNotifyCompleteVideo |= notifyComplete;
+            ++mVideoQueueGeneration;
+            ++mVideoDrainGeneration;
+        }
+
+        clearAnchorTime_l();
+        mVideoLateByUs = 0;
+        mSyncQueues = false;
+    }
+
+    sp<AMessage> msg = new AMessage(kWhatFlush, this);
     msg->setInt32("audio", static_cast<int32_t>(audio));
     msg->post();
 }
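
flush() now invalidates in-flight messages by bumping the queue and drain generation counters rather than by setting mFlushingAudio/mFlushingVideo flags: each posted message carries the generation current at post time, and the handler drops it if the counter has since moved on. A minimal standalone sketch of the idiom (Queue, DrainMsg and main() are illustrative names, not from the patch):

    // Generation-counter idiom: bumping the counter marks older in-flight messages as stale.
    #include <cstdint>
    #include <cstdio>

    struct DrainMsg {
        int32_t generation;   // copied from the counter when the message was posted
    };

    struct Queue {
        int32_t drainGeneration = 0;

        DrainMsg postDrain() { return DrainMsg{drainGeneration}; }

        void flush() { ++drainGeneration; }           // invalidates in-flight drains

        void onDrain(const DrainMsg &msg) {
            if (msg.generation != drainGeneration) {
                std::printf("stale drain dropped\n"); // message predates the flush
                return;
            }
            std::printf("draining\n");
        }
    };

    int main() {
        Queue q;
        DrainMsg m = q.postDrain();
        q.flush();      // e.g. a seek happened before the drain ran
        q.onDrain(m);   // prints "stale drain dropped"
    }

The renderer applies this twice: queueGeneration guards kWhatQueueBuffer/kWhatQueueEOS (via dropBufferIfStale), while drainGeneration guards the drain and post-drain messages.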
 
 void NuPlayer::Renderer::signalTimeDiscontinuity() {
-    Mutex::Autolock autoLock(mLock);
-    // CHECK(mAudioQueue.empty());
-    // CHECK(mVideoQueue.empty());
-    setAudioFirstAnchorTime(-1);
-    setAnchorTime(-1, -1);
-    setVideoLateByUs(0);
-    mSyncQueues = false;
-}
-
-void NuPlayer::Renderer::signalAudioSinkChanged() {
-    (new AMessage(kWhatAudioSinkChanged, id()))->post();
 }
 
 void NuPlayer::Renderer::signalDisableOffloadAudio() {
-    (new AMessage(kWhatDisableOffloadAudio, id()))->post();
+    (new AMessage(kWhatDisableOffloadAudio, this))->post();
 }
 
 void NuPlayer::Renderer::signalEnableOffloadAudio() {
-    (new AMessage(kWhatEnableOffloadAudio, id()))->post();
+    (new AMessage(kWhatEnableOffloadAudio, this))->post();
 }
 
 void NuPlayer::Renderer::pause() {
-    (new AMessage(kWhatPause, id()))->post();
+    (new AMessage(kWhatPause, this))->post();
 }
 
 void NuPlayer::Renderer::resume() {
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 }
 
 void NuPlayer::Renderer::setVideoFrameRate(float fps) {
-    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, id());
+    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
     msg->setFloat("frame-rate", fps);
     msg->post();
 }
 
-// Called on any threads, except renderer's thread.
-status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
-    {
-        Mutex::Autolock autoLock(mLock);
-        int64_t currentPositionUs;
-        if (getCurrentPositionIfPaused_l(&currentPositionUs)) {
-            *mediaUs = currentPositionUs;
-            return OK;
-        }
-    }
-    return getCurrentPositionFromAnchor(mediaUs, ALooper::GetNowUs());
-}
-
-// Called on only renderer's thread.
-status_t NuPlayer::Renderer::getCurrentPositionOnLooper(int64_t *mediaUs) {
-    return getCurrentPositionOnLooper(mediaUs, ALooper::GetNowUs());
-}
-
-// Called on only renderer's thread.
-// Since mPaused and mPausePositionMediaTimeUs are changed only on renderer's
-// thread, no need to acquire mLock.
-status_t NuPlayer::Renderer::getCurrentPositionOnLooper(
-        int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo) {
-    int64_t currentPositionUs;
-    if (getCurrentPositionIfPaused_l(&currentPositionUs)) {
-        *mediaUs = currentPositionUs;
-        return OK;
-    }
-    return getCurrentPositionFromAnchor(mediaUs, nowUs, allowPastQueuedVideo);
-}
-
-// Called either with mLock acquired or on renderer's thread.
-bool NuPlayer::Renderer::getCurrentPositionIfPaused_l(int64_t *mediaUs) {
-    if (!mPaused || mPausePositionMediaTimeUs < 0ll) {
-        return false;
-    }
-    *mediaUs = mPausePositionMediaTimeUs;
-    return true;
-}
-
 // Called on any threads.
-status_t NuPlayer::Renderer::getCurrentPositionFromAnchor(
-        int64_t *mediaUs, int64_t nowUs, bool allowPastQueuedVideo) {
-    Mutex::Autolock autoLock(mTimeLock);
-    if (!mHasAudio && !mHasVideo) {
-        return NO_INIT;
-    }
-
-    if (mAnchorTimeMediaUs < 0) {
-        return NO_INIT;
-    }
-
-    int64_t positionUs = (nowUs - mAnchorTimeRealUs) + mAnchorTimeMediaUs;
-
-    if (mPauseStartedTimeRealUs != -1) {
-        positionUs -= (nowUs - mPauseStartedTimeRealUs);
-    }
-
-    // limit position to the last queued media time (for video only stream
-    // position will be discrete as we don't know how long each frame lasts)
-    if (mAnchorMaxMediaUs >= 0 && !allowPastQueuedVideo) {
-        if (positionUs > mAnchorMaxMediaUs) {
-            positionUs = mAnchorMaxMediaUs;
-        }
-    }
-
-    if (positionUs < mAudioFirstAnchorTimeMediaUs) {
-        positionUs = mAudioFirstAnchorTimeMediaUs;
-    }
-
-    *mediaUs = (positionUs <= 0) ? 0 : positionUs;
-    return OK;
+status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
+    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
 }
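
getCurrentPosition() now defers entirely to MediaClock, whose implementation is not part of this diff. A rough sketch of the anchor arithmetic it is assumed to perform behind updateAnchor()/getMediaTime(), with the max media time capping the result (SimpleMediaClock is an illustrative name, not the real class):

    // Assumed behaviour only: media time advances from the anchor in real time,
    // scaled by the playback rate, and is clamped to the last queued media time.
    #include <algorithm>
    #include <cstdint>

    struct SimpleMediaClock {
        int64_t anchorMediaUs = -1;        // media position at the anchor
        int64_t anchorRealUs  = -1;        // monotonic real time at the anchor
        int64_t maxMediaUs    = INT64_MAX; // last queued media time
        double  rate          = 1.0;       // 0.0 while paused

        void updateAnchor(int64_t mediaUs, int64_t realUs, int64_t maxUs) {
            anchorMediaUs = mediaUs;
            anchorRealUs  = realUs;
            maxMediaUs    = maxUs;
        }

        bool getMediaTime(int64_t nowRealUs, int64_t *outMediaUs) const {
            if (anchorMediaUs < 0) {
                return false;              // no anchor yet
            }
            int64_t mediaUs = anchorMediaUs
                    + (int64_t)((nowRealUs - anchorRealUs) * rate);
            *outMediaUs = std::min(mediaUs, maxMediaUs);
            return true;
        }
    };

updateMaxTimeMedia() corresponds to maxMediaUs here; passing INT64_MAX, as fillAudioBuffer() does for offloaded tracks, effectively disables the cap.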
 
-void NuPlayer::Renderer::setHasMedia(bool audio) {
-    Mutex::Autolock autoLock(mTimeLock);
-    if (audio) {
-        mHasAudio = true;
-    } else {
-        mHasVideo = true;
-    }
+void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
+    mAudioFirstAnchorTimeMediaUs = -1;
+    mMediaClock->setStartingTimeMedia(-1);
 }
 
-void NuPlayer::Renderer::setAudioFirstAnchorTime(int64_t mediaUs) {
-    Mutex::Autolock autoLock(mTimeLock);
-    mAudioFirstAnchorTimeMediaUs = mediaUs;
-}
-
-void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded(int64_t mediaUs) {
-    Mutex::Autolock autoLock(mTimeLock);
+void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
     if (mAudioFirstAnchorTimeMediaUs == -1) {
         mAudioFirstAnchorTimeMediaUs = mediaUs;
+        mMediaClock->setStartingTimeMedia(mediaUs);
     }
 }
 
-void NuPlayer::Renderer::setAnchorTime(
-        int64_t mediaUs, int64_t realUs, int64_t numFramesWritten, bool resume) {
-    Mutex::Autolock autoLock(mTimeLock);
-    mAnchorTimeMediaUs = mediaUs;
-    mAnchorTimeRealUs = realUs;
-    mAnchorNumFramesWritten = numFramesWritten;
-    if (resume) {
-        mPauseStartedTimeRealUs = -1;
-    }
+void NuPlayer::Renderer::clearAnchorTime_l() {
+    mMediaClock->clearAnchor();
+    mAnchorTimeMediaUs = -1;
+    mAnchorNumFramesWritten = -1;
 }
 
 void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
-    Mutex::Autolock autoLock(mTimeLock);
+    Mutex::Autolock autoLock(mLock);
     mVideoLateByUs = lateUs;
 }
 
 int64_t NuPlayer::Renderer::getVideoLateByUs() {
-    Mutex::Autolock autoLock(mTimeLock);
+    Mutex::Autolock autoLock(mLock);
     return mVideoLateByUs;
 }
 
-void NuPlayer::Renderer::setPauseStartedTimeRealUs(int64_t realUs) {
-    Mutex::Autolock autoLock(mTimeLock);
-    mPauseStartedTimeRealUs = realUs;
-}
-
 status_t NuPlayer::Renderer::openAudioSink(
         const sp<AMessage> &format,
         bool offloadOnly,
         bool hasVideo,
         uint32_t flags,
         bool *isOffloaded) {
-    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, id());
+    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
     msg->setMessage("format", format);
     msg->setInt32("offload-only", offloadOnly);
     msg->setInt32("has-video", hasVideo);
@@ -328,7 +369,7 @@
 }
 
 void NuPlayer::Renderer::closeAudioSink() {
-    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, id());
+    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
 
     sp<AMessage> response;
     msg->postAndAwaitResponse(&response);
@@ -356,7 +397,7 @@
             response->setInt32("err", err);
             response->setInt32("offload", offloadingAudio());
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
             response->postReply(replyID);
 
@@ -365,7 +406,7 @@
 
         case kWhatCloseAudioSink:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             onCloseAudioSink();
@@ -383,14 +424,14 @@
 
         case kWhatDrainAudioQueue:
         {
+            mDrainAudioQueuePending = false;
+
             int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
-            if (generation != mAudioQueueGeneration) {
+            CHECK(msg->findInt32("drainGeneration", &generation));
+            if (generation != getDrainGeneration(true /* audio */)) {
                 break;
             }
 
-            mDrainAudioQueuePending = false;
-
             if (onDrainAudioQueue()) {
                 uint32_t numFramesPlayed;
                 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
@@ -404,12 +445,13 @@
                 int64_t delayUs =
                     mAudioSink->msecsPerFrame()
                         * numFramesPendingPlayout * 1000ll;
+                if (mPlaybackRate > 1.0f) {
+                    delayUs /= mPlaybackRate;
+                }
 
                 // Let's give it more data after about half that time
                 // has elapsed.
-                // kWhatDrainAudioQueue is used for non-offloading mode,
-                // and mLock is used only for offloading mode. Therefore,
-                // no need to acquire mLock here.
+                Mutex::Autolock autoLock(mLock);
                 postDrainAudioQueue_l(delayUs / 2);
             }
             break;
@@ -418,8 +460,8 @@
         case kWhatDrainVideoQueue:
         {
             int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
-            if (generation != mVideoQueueGeneration) {
+            CHECK(msg->findInt32("drainGeneration", &generation));
+            if (generation != getDrainGeneration(false /* audio */)) {
                 break;
             }
 
@@ -427,22 +469,20 @@
 
             onDrainVideoQueue();
 
-            Mutex::Autolock autoLock(mLock);
-            postDrainVideoQueue_l();
+            postDrainVideoQueue();
             break;
         }
 
         case kWhatPostDrainVideoQueue:
         {
             int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
-            if (generation != mVideoQueueGeneration) {
+            CHECK(msg->findInt32("drainGeneration", &generation));
+            if (generation != getDrainGeneration(false /* audio */)) {
                 break;
             }
 
             mDrainVideoQueuePending = false;
-            Mutex::Autolock autoLock(mLock);
-            postDrainVideoQueue_l();
+            postDrainVideoQueue();
             break;
         }
 
@@ -458,15 +498,69 @@
             break;
         }
 
-        case kWhatFlush:
+        case kWhatConfigPlayback:
         {
-            onFlush(msg);
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate;
+            readFromAMessage(msg, &rate);
+            status_t err = onConfigPlayback(rate);
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
             break;
         }
 
-        case kWhatAudioSinkChanged:
+        case kWhatGetPlaybackSettings:
         {
-            onAudioSinkChanged();
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+            status_t err = onGetPlaybackSettings(&rate);
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, rate);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatConfigSync:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+            AVSyncSettings sync;
+            float videoFpsHint;
+            readFromAMessage(msg, &sync, &videoFpsHint);
+            status_t err = onConfigSync(sync, videoFpsHint);
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatGetSyncSettings:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            ALOGV("kWhatGetSyncSettings");
+            AVSyncSettings sync;
+            float videoFps = -1.f;
+            status_t err = onGetSyncSettings(&sync, &videoFps);
+            sp<AMessage> response = new AMessage;
+            if (err == OK) {
+                writeToAMessage(response, sync, videoFps);
+            }
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
+        case kWhatFlush:
+        {
+            onFlush(msg);
             break;
         }
 
@@ -502,21 +596,21 @@
             break;
         }
 
-        case kWhatAudioOffloadTearDown:
+        case kWhatAudioTearDown:
         {
-            onAudioOffloadTearDown(kDueToError);
+            onAudioTearDown(kDueToError);
             break;
         }
 
         case kWhatAudioOffloadPauseTimeout:
         {
             int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
+            CHECK(msg->findInt32("drainGeneration", &generation));
             if (generation != mAudioOffloadPauseTimeoutGeneration) {
                 break;
             }
             ALOGV("Audio Offload tear down due to pause timeout.");
-            onAudioOffloadTearDown(kDueToTimeout);
+            onAudioTearDown(kDueToTimeout);
             mWakeLock->release();
             break;
         }
@@ -528,8 +622,7 @@
 }
 
 void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
-    if (mDrainAudioQueuePending || mSyncQueues || mPaused
-            || offloadingAudio()) {
+    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
         return;
     }
 
@@ -538,19 +631,19 @@
     }
 
     mDrainAudioQueuePending = true;
-    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, id());
-    msg->setInt32("generation", mAudioQueueGeneration);
+    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
+    msg->setInt32("drainGeneration", mAudioDrainGeneration);
     msg->post(delayUs);
 }
 
-void NuPlayer::Renderer::prepareForMediaRenderingStart() {
-    mAudioRenderingStartGeneration = mAudioQueueGeneration;
-    mVideoRenderingStartGeneration = mVideoQueueGeneration;
+void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
+    mAudioRenderingStartGeneration = mAudioDrainGeneration;
+    mVideoRenderingStartGeneration = mVideoDrainGeneration;
 }
 
-void NuPlayer::Renderer::notifyIfMediaRenderingStarted() {
-    if (mVideoRenderingStartGeneration == mVideoQueueGeneration &&
-        mAudioRenderingStartGeneration == mAudioQueueGeneration) {
+void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
+    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
+        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
         mVideoRenderingStartGeneration = -1;
         mAudioRenderingStartGeneration = -1;
 
@@ -578,13 +671,15 @@
 
         case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
         {
+            ALOGV("AudioSink::CB_EVENT_STREAM_END");
             me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
             break;
         }
 
         case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
         {
-            me->notifyAudioOffloadTearDown();
+            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
+            me->notifyAudioTearDown();
             break;
         }
     }
@@ -595,7 +690,7 @@
 size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
     Mutex::Autolock autoLock(mLock);
 
-    if (!offloadingAudio() || mPaused) {
+    if (!mUseAudioCallback) {
         return 0;
     }
 
@@ -603,13 +698,13 @@
 
     size_t sizeCopied = 0;
     bool firstEntry = true;
+    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
     while (sizeCopied < size && !mAudioQueue.empty()) {
-        QueueEntry *entry = &*mAudioQueue.begin();
+        entry = &*mAudioQueue.begin();
 
         if (entry->mBuffer == NULL) { // EOS
             hasEOS = true;
             mAudioQueue.erase(mAudioQueue.begin());
-            entry = NULL;
             break;
         }
 
@@ -617,8 +712,8 @@
             firstEntry = false;
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-            ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
-            setAudioFirstAnchorTimeIfNeeded(mediaTimeUs);
+            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
+            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
@@ -638,34 +733,97 @@
             entry = NULL;
         }
         sizeCopied += copy;
-        notifyIfMediaRenderingStarted();
+
+        notifyIfMediaRenderingStarted_l();
     }
 
     if (mAudioFirstAnchorTimeMediaUs >= 0) {
         int64_t nowUs = ALooper::GetNowUs();
-        setAnchorTime(mAudioFirstAnchorTimeMediaUs, nowUs - getPlayedOutAudioDurationUs(nowUs));
+        int64_t nowMediaUs =
+            mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
+        // we don't know how much data we are queueing for offloaded tracks.
+        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
     }
 
-    // we don't know how much data we are queueing for offloaded tracks
-    mAnchorMaxMediaUs = -1;
+    // For non-offloaded audio, we need to compute the frames written ourselves because
+    // there is no EVENT_STREAM_END notification. The number of frames written gives
+    // an estimate of the pending played-out duration.
+    if (!offloadingAudio()) {
+        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
+    }
 
     if (hasEOS) {
-        (new AMessage(kWhatStopAudioSink, id()))->post();
+        (new AMessage(kWhatStopAudioSink, this))->post();
+        // As there is currently no EVENT_STREAM_END callback notification for
+        // non-offloaded audio tracks, we need to post the EOS ourselves.
+        if (!offloadingAudio()) {
+            int64_t postEOSDelayUs = 0;
+            if (mAudioSink->needsTrailingPadding()) {
+                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
+            }
+            ALOGV("fillAudioBuffer: notifyEOS "
+                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
+                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
+            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
+        }
     }
-
     return sizeCopied;
 }
 
+void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
+    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
+    bool foundEOS = false;
+    while (it != mAudioQueue.end()) {
+        int32_t eos;
+        QueueEntry *entry = &*it++;
+        if (entry->mBuffer == NULL
+                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
+            itEOS = it;
+            foundEOS = true;
+        }
+    }
+
+    if (foundEOS) {
+        // post all replies before EOS and drop the samples
+        for (it = mAudioQueue.begin(); it != itEOS; it++) {
+            if (it->mBuffer == NULL) {
+                // delay doesn't matter as we don't even have an AudioTrack
+                notifyEOS(true /* audio */, it->mFinalResult);
+            } else {
+                it->mNotifyConsumed->post();
+            }
+        }
+        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
+    }
+}
+
 bool NuPlayer::Renderer::onDrainAudioQueue() {
+    // TODO: This call to getPosition checks if AudioTrack has been created
+    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
+    // CHECKs on getPosition will fail.
+    // We still need to figure out why the AudioTrack is sometimes not created when
+    // this function is called. One possible cause is leftover audio from a previous
+    // session. Another place to check is whether the decoder has received
+    // INFO_FORMAT_CHANGED as its first buffer, since the AudioSink is opened there,
+    // and whether a flush immediately after start interacts badly. Also investigate
+    // the error message "vorbis_dsp_synthesis returned -135", along with RTSP.
     uint32_t numFramesPlayed;
     if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
+        // When getPosition fails, renderer will not reschedule the draining
+        // unless new samples are queued.
+        // If we have pending EOS (or "eos" marker for discontinuities), we need
+        // to post these now as NuPlayerDecoder might be waiting for it.
+        drainAudioQueueUntilLastEOS();
+
+        ALOGW("onDrainAudioQueue(): audio sink is not ready");
         return false;
     }
 
+#if 0
     ssize_t numFramesAvailableToWrite =
         mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
 
-#if 0
     if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
         ALOGI("audio sink underrun");
     } else {
@@ -674,10 +832,8 @@
     }
 #endif
 
-    size_t numBytesAvailableToWrite =
-        numFramesAvailableToWrite * mAudioSink->frameSize();
-
-    while (numBytesAvailableToWrite > 0 && !mAudioQueue.empty()) {
+    uint32_t prevFramesWritten = mNumFramesWritten;
+    while (!mAudioQueue.empty()) {
         QueueEntry *entry = &*mAudioQueue.begin();
 
         mLastAudioBufferDrained = entry->mBufferOrdinal;
@@ -702,22 +858,27 @@
             return false;
         }
 
-        if (entry->mOffset == 0) {
+        // ignore 0-sized buffer which could be EOS marker with no data
+        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
             int64_t mediaTimeUs;
             CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
-            ALOGV("rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
+            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
+                    mediaTimeUs / 1E6);
             onNewAudioMediaTime(mediaTimeUs);
         }
 
         size_t copy = entry->mBuffer->size() - entry->mOffset;
-        if (copy > numBytesAvailableToWrite) {
-            copy = numBytesAvailableToWrite;
-        }
 
-        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, copy);
+        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
+                                            copy, false /* blocking */);
         if (written < 0) {
             // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
-            ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
+            if (written == WOULD_BLOCK) {
+                ALOGV("AudioSink write would block when writing %zu bytes", copy);
+            } else {
+                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
+                notifyAudioTearDown();
+            }
             break;
         }
 
@@ -729,73 +890,98 @@
             entry = NULL;
         }
 
-        numBytesAvailableToWrite -= written;
         size_t copiedFrames = written / mAudioSink->frameSize();
         mNumFramesWritten += copiedFrames;
 
-        notifyIfMediaRenderingStarted();
+        {
+            Mutex::Autolock autoLock(mLock);
+            notifyIfMediaRenderingStarted_l();
+        }
 
         if (written != (ssize_t)copy) {
             // A short count was received from AudioSink::write()
             //
-            // AudioSink write should block until exactly the number of bytes are delivered.
-            // But it may return with a short count (without an error) when:
+            // AudioSink write is called in non-blocking mode.
+            // It may return with a short count when:
             //
             // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
-            // 2) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
+            // 2) The data to be copied exceeds the available buffer in AudioSink.
+            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
+            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
 
             // (Case 1)
             // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
             // needs to fail, as we should not carry over fractional frames between calls.
             CHECK_EQ(copy % mAudioSink->frameSize(), 0);
 
-            // (Case 2)
+            // (Case 2, 3, 4)
             // Return early to the caller.
             // Beware of calling immediately again as this may busy-loop if you are not careful.
-            ALOGW("AudioSink write short frame count %zd < %zu", written, copy);
+            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
             break;
         }
     }
-    mAnchorMaxMediaUs =
-        mAnchorTimeMediaUs +
-                (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
-                        * 1000LL * mAudioSink->msecsPerFrame());
+    int64_t maxTimeMedia;
+    {
+        Mutex::Autolock autoLock(mLock);
+        maxTimeMedia =
+            mAnchorTimeMediaUs +
+                    (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
+                            * 1000LL * mAudioSink->msecsPerFrame());
+    }
+    mMediaClock->updateMaxTimeMedia(maxTimeMedia);
 
-    return !mAudioQueue.empty();
+    // calculate whether we need to reschedule another write.
+    bool reschedule = !mAudioQueue.empty()
+            && (!mPaused
+                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
+    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
+    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
+    return reschedule;
 }
 
+int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
+    int32_t sampleRate = offloadingAudio() ?
+            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
+    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
+    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
+}
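
A quick check of the 12.4-hour figure in the TODO above, assuming a 48 kHz stream (the rate is an assumption, not from the source):

    // INT32_MAX frames / 48000 frames per second ~= 44739 s ~= 12.4 hours,
    // after which the (int32_t) cast wraps negative. Doing the whole computation
    // in int64_t, as the TODO suggests, avoids the wrap:
    //     return (int64_t)numFrames * 1000000LL / sampleRate;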
+
+// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
 int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
-    int64_t writtenAudioDurationUs =
-        mNumFramesWritten * 1000LL * mAudioSink->msecsPerFrame();
+    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
     return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
 }
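
Illustrative numbers for the pending-playout computation, with a 48 kHz stream and made-up values:

    // mNumFramesWritten = 48000        -> writtenAudioDurationUs = 1,000,000 us
    // getPlayedOutAudioDurationUs(now) =    600,000 us
    // pending playout                  = 1,000,000 - 600,000 = 400,000 us
    // This is the amount onNewAudioMediaTime() subtracts from a buffer's timestamp
    // to estimate the media time currently being heard.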
 
 int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
-    int64_t currentPositionUs;
-    if (mPaused || getCurrentPositionOnLooper(
-            &currentPositionUs, nowUs, true /* allowPastQueuedVideo */) != OK) {
-        // If failed to get current position, e.g. due to audio clock is not ready, then just
-        // play out video immediately without delay.
+    int64_t realUs;
+    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
+        // If failed to get current position, e.g. due to audio clock is
+        // not ready, then just play out video immediately without delay.
         return nowUs;
     }
-    return (mediaTimeUs - currentPositionUs) + nowUs;
+    return realUs;
 }
 
 void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
+    Mutex::Autolock autoLock(mLock);
     // TRICKY: vorbis decoder generates multiple frames with the same
     // timestamp, so only update on the first frame with a given timestamp
     if (mediaTimeUs == mAnchorTimeMediaUs) {
         return;
     }
-    setAudioFirstAnchorTimeIfNeeded(mediaTimeUs);
+    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
     int64_t nowUs = ALooper::GetNowUs();
-    setAnchorTime(
-            mediaTimeUs, nowUs + getPendingAudioPlayoutDurationUs(nowUs), mNumFramesWritten);
+    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
+    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
+    mAnchorNumFramesWritten = mNumFramesWritten;
+    mAnchorTimeMediaUs = mediaTimeUs;
 }
 
-void NuPlayer::Renderer::postDrainVideoQueue_l() {
+// Called without mLock acquired.
+void NuPlayer::Renderer::postDrainVideoQueue() {
     if (mDrainVideoQueuePending
-            || mSyncQueues
+            || getSyncQueues()
             || (mPaused && mVideoSampleReceived)) {
         return;
     }
@@ -806,8 +992,8 @@
 
     QueueEntry &entry = *mVideoQueue.begin();
 
-    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, id());
-    msg->setInt32("generation", mVideoQueueGeneration);
+    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
+    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
 
     if (entry.mBuffer == NULL) {
         // EOS doesn't carry a timestamp.
@@ -827,16 +1013,19 @@
         int64_t mediaTimeUs;
         CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
 
-        if (mAnchorTimeMediaUs < 0) {
-            setAnchorTime(mediaTimeUs, nowUs);
-            mPausePositionMediaTimeUs = mediaTimeUs;
-            mAnchorMaxMediaUs = mediaTimeUs;
-            realTimeUs = nowUs;
-        } else {
-            realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
+        {
+            Mutex::Autolock autoLock(mLock);
+            if (mAnchorTimeMediaUs < 0) {
+                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
+                mAnchorTimeMediaUs = mediaTimeUs;
+                realTimeUs = nowUs;
+            } else {
+                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
+            }
         }
         if (!mHasAudio) {
-            mAnchorMaxMediaUs = mediaTimeUs + 100000; // smooth out videos >= 10fps
+            // smooth out videos >= 10fps
+            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
         }
 
         // Heuristics to handle situation when media time changed without a
@@ -913,18 +1102,21 @@
 
         if (tooLate) {
             ALOGV("video late by %lld us (%.2f secs)",
-                 mVideoLateByUs, mVideoLateByUs / 1E6);
+                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
         } else {
+            int64_t mediaUs = 0;
+            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
             ALOGV("rendering video at media time %.2f secs",
                     (mFlags & FLAG_REAL_TIME ? realTimeUs :
-                    (realTimeUs + mAnchorTimeMediaUs - mAnchorTimeRealUs)) / 1E6);
+                    mediaUs) / 1E6);
         }
     } else {
         setVideoLateByUs(0);
         if (!mVideoSampleReceived && !mHasAudio) {
             // This will ensure that the first frame after a flush won't be used as anchor
             // when renderer is in paused state, because resume can happen any time after seek.
-            setAnchorTime(-1, -1);
+            Mutex::Autolock autoLock(mLock);
+            clearAnchorTime_l();
         }
     }
 
@@ -941,7 +1133,8 @@
             mVideoRenderingStarted = true;
             notifyVideoRenderingStart();
         }
-        notifyIfMediaRenderingStarted();
+        Mutex::Autolock autoLock(mLock);
+        notifyIfMediaRenderingStarted_l();
     }
 }
 
@@ -959,15 +1152,23 @@
     notify->post(delayUs);
 }
 
-void NuPlayer::Renderer::notifyAudioOffloadTearDown() {
-    (new AMessage(kWhatAudioOffloadTearDown, id()))->post();
+void NuPlayer::Renderer::notifyAudioTearDown() {
+    (new AMessage(kWhatAudioTearDown, this))->post();
 }
 
 void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
     int32_t audio;
     CHECK(msg->findInt32("audio", &audio));
 
-    setHasMedia(audio);
+    if (dropBufferIfStale(audio, msg)) {
+        return;
+    }
+
+    if (audio) {
+        mHasAudio = true;
+    } else {
+        mHasVideo = true;
+    }
 
     if (mHasVideo) {
         if (mVideoScheduler == NULL) {
@@ -976,10 +1177,6 @@
         }
     }
 
-    if (dropBufferWhileFlushing(audio, msg)) {
-        return;
-    }
-
     sp<ABuffer> buffer;
     CHECK(msg->findBuffer("buffer", &buffer));
 
@@ -993,15 +1190,16 @@
     entry.mFinalResult = OK;
     entry.mBufferOrdinal = ++mTotalBuffersQueued;
 
-    Mutex::Autolock autoLock(mLock);
     if (audio) {
+        Mutex::Autolock autoLock(mLock);
         mAudioQueue.push_back(entry);
         postDrainAudioQueue_l();
     } else {
         mVideoQueue.push_back(entry);
-        postDrainVideoQueue_l();
+        postDrainVideoQueue();
     }
 
+    Mutex::Autolock autoLock(mLock);
     if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
         return;
     }
@@ -1050,7 +1248,9 @@
     }
 
     if (!mVideoQueue.empty()) {
-        postDrainVideoQueue_l();
+        mLock.unlock();
+        postDrainVideoQueue();
+        mLock.lock();
     }
 }
 
@@ -1058,7 +1258,7 @@
     int32_t audio;
     CHECK(msg->findInt32("audio", &audio));
 
-    if (dropBufferWhileFlushing(audio, msg)) {
+    if (dropBufferIfStale(audio, msg)) {
         return;
     }
 
@@ -1069,19 +1269,20 @@
     entry.mOffset = 0;
     entry.mFinalResult = finalResult;
 
-    Mutex::Autolock autoLock(mLock);
     if (audio) {
+        Mutex::Autolock autoLock(mLock);
         if (mAudioQueue.empty() && mSyncQueues) {
             syncQueuesDone_l();
         }
         mAudioQueue.push_back(entry);
         postDrainAudioQueue_l();
     } else {
-        if (mVideoQueue.empty() && mSyncQueues) {
+        if (mVideoQueue.empty() && getSyncQueues()) {
+            Mutex::Autolock autoLock(mLock);
             syncQueuesDone_l();
         }
         mVideoQueue.push_back(entry);
-        postDrainVideoQueue_l();
+        postDrainVideoQueue();
     }
 }
 
@@ -1090,31 +1291,25 @@
     CHECK(msg->findInt32("audio", &audio));
 
     {
-        Mutex::Autolock autoLock(mFlushLock);
+        Mutex::Autolock autoLock(mLock);
         if (audio) {
-            mFlushingAudio = false;
             notifyComplete = mNotifyCompleteAudio;
             mNotifyCompleteAudio = false;
         } else {
-            mFlushingVideo = false;
             notifyComplete = mNotifyCompleteVideo;
             mNotifyCompleteVideo = false;
         }
-    }
 
-    // If we're currently syncing the queues, i.e. dropping audio while
-    // aligning the first audio/video buffer times and only one of the
-    // two queues has data, we may starve that queue by not requesting
-    // more buffers from the decoder. If the other source then encounters
-    // a discontinuity that leads to flushing, we'll never find the
-    // corresponding discontinuity on the other queue.
-    // Therefore we'll stop syncing the queues if at least one of them
-    // is flushed.
-    {
-         Mutex::Autolock autoLock(mLock);
-         syncQueuesDone_l();
-         setPauseStartedTimeRealUs(-1);
-         setAnchorTime(-1, -1);
+        // If we're currently syncing the queues, i.e. dropping audio while
+        // aligning the first audio/video buffer times and only one of the
+        // two queues has data, we may starve that queue by not requesting
+        // more buffers from the decoder. If the other source then encounters
+        // a discontinuity that leads to flushing, we'll never find the
+        // corresponding discontinuity on the other queue.
+        // Therefore we'll stop syncing the queues if at least one of them
+        // is flushed.
+        syncQueuesDone_l();
+        clearAnchorTime_l();
     }
 
     ALOGV("flushing %s", audio ? "audio" : "video");
@@ -1123,12 +1318,11 @@
             Mutex::Autolock autoLock(mLock);
             flushQueue(&mAudioQueue);
 
-            ++mAudioQueueGeneration;
-            prepareForMediaRenderingStart();
+            ++mAudioDrainGeneration;
+            prepareForMediaRenderingStart_l();
 
-            if (offloadingAudio()) {
-                setAudioFirstAnchorTime(-1);
-            }
+            // the frame count will be reset after flush.
+            clearAudioFirstAnchorTime_l();
         }
 
         mDrainAudioQueuePending = false;
@@ -1136,19 +1330,32 @@
         if (offloadingAudio()) {
             mAudioSink->pause();
             mAudioSink->flush();
-            mAudioSink->start();
+            if (!mPaused) {
+                mAudioSink->start();
+            }
+        } else {
+            mAudioSink->pause();
+            mAudioSink->flush();
+            // Call stop() to signal to the AudioSink to completely fill the
+            // internal buffer before resuming playback.
+            mAudioSink->stop();
+            if (!mPaused) {
+                mAudioSink->start();
+            }
+            mNumFramesWritten = 0;
         }
     } else {
         flushQueue(&mVideoQueue);
 
         mDrainVideoQueuePending = false;
-        ++mVideoQueueGeneration;
 
         if (mVideoScheduler != NULL) {
             mVideoScheduler->restart();
         }
 
-        prepareForMediaRenderingStart();
+        Mutex::Autolock autoLock(mLock);
+        ++mVideoDrainGeneration;
+        prepareForMediaRenderingStart_l();
     }
 
     mVideoSampleReceived = false;
@@ -1178,20 +1385,12 @@
     notify->post();
 }
 
-bool NuPlayer::Renderer::dropBufferWhileFlushing(
+bool NuPlayer::Renderer::dropBufferIfStale(
         bool audio, const sp<AMessage> &msg) {
-    bool flushing = false;
+    int32_t queueGeneration;
+    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
 
-    {
-        Mutex::Autolock autoLock(mFlushLock);
-        if (audio) {
-            flushing = mFlushingAudio;
-        } else {
-            flushing = mFlushingVideo;
-        }
-    }
-
-    if (!flushing) {
+    if (queueGeneration == getQueueGeneration(audio)) {
         return false;
     }
 
@@ -1209,7 +1408,10 @@
     }
     CHECK(!mDrainAudioQueuePending);
     mNumFramesWritten = 0;
-    mAnchorNumFramesWritten = -1;
+    {
+        Mutex::Autolock autoLock(mLock);
+        mAnchorNumFramesWritten = -1;
+    }
     uint32_t written;
     if (mAudioSink->getFramesWritten(&written) == OK) {
         mNumFramesWritten = written;
@@ -1219,40 +1421,33 @@
 void NuPlayer::Renderer::onDisableOffloadAudio() {
     Mutex::Autolock autoLock(mLock);
     mFlags &= ~FLAG_OFFLOAD_AUDIO;
-    ++mAudioQueueGeneration;
+    ++mAudioDrainGeneration;
+    if (mAudioRenderingStartGeneration != -1) {
+        prepareForMediaRenderingStart_l();
+    }
 }
 
 void NuPlayer::Renderer::onEnableOffloadAudio() {
     Mutex::Autolock autoLock(mLock);
     mFlags |= FLAG_OFFLOAD_AUDIO;
-    ++mAudioQueueGeneration;
+    ++mAudioDrainGeneration;
+    if (mAudioRenderingStartGeneration != -1) {
+        prepareForMediaRenderingStart_l();
+    }
 }
 
 void NuPlayer::Renderer::onPause() {
     if (mPaused) {
-        ALOGW("Renderer::onPause() called while already paused!");
         return;
     }
-    int64_t currentPositionUs;
-    int64_t pausePositionMediaTimeUs;
-    if (getCurrentPositionFromAnchor(
-            &currentPositionUs, ALooper::GetNowUs()) == OK) {
-        pausePositionMediaTimeUs = currentPositionUs;
-    } else {
-        // Set paused position to -1 (unavailabe) if we don't have anchor time
-        // This could happen if client does a seekTo() immediately followed by
-        // pause(). Renderer will be flushed with anchor time cleared. We don't
-        // want to leave stale value in mPausePositionMediaTimeUs.
-        pausePositionMediaTimeUs = -1;
-    }
+
     {
         Mutex::Autolock autoLock(mLock);
-        mPausePositionMediaTimeUs = pausePositionMediaTimeUs;
-        ++mAudioQueueGeneration;
-        ++mVideoQueueGeneration;
-        prepareForMediaRenderingStart();
+        // we do not increment the audio drain generation, so that audio buffers keep filling during pause.
+        ++mVideoDrainGeneration;
+        prepareForMediaRenderingStart_l();
         mPaused = true;
-        setPauseStartedTimeRealUs(ALooper::GetNowUs());
+        mMediaClock->setPlaybackRate(0.0);
     }
 
     mDrainAudioQueuePending = false;
@@ -1263,7 +1458,7 @@
         startAudioOffloadPauseTimeout();
     }
 
-    ALOGV("now paused audio queue has %d entries, video has %d entries",
+    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
           mAudioQueue.size(), mVideoQueue.size());
 }
 
@@ -1274,24 +1469,30 @@
 
     if (mHasAudio) {
         cancelAudioOffloadPauseTimeout();
-        mAudioSink->start();
+        status_t err = mAudioSink->start();
+        if (err != OK) {
+            notifyAudioTearDown();
+        }
     }
 
-    Mutex::Autolock autoLock(mLock);
-    mPaused = false;
-    if (mPauseStartedTimeRealUs != -1) {
-        int64_t newAnchorRealUs =
-            mAnchorTimeRealUs + ALooper::GetNowUs() - mPauseStartedTimeRealUs;
-        setAnchorTime(
-                mAnchorTimeMediaUs, newAnchorRealUs, mAnchorNumFramesWritten, true /* resume */);
-    }
+    {
+        Mutex::Autolock autoLock(mLock);
+        mPaused = false;
 
-    if (!mAudioQueue.empty()) {
-        postDrainAudioQueue_l();
+        // configure audiosink as we did not do it when pausing
+        if (mAudioSink != NULL && mAudioSink->ready()) {
+            mAudioSink->setPlaybackRate(mPlaybackSettings);
+        }
+
+        mMediaClock->setPlaybackRate(mPlaybackRate);
+
+        if (!mAudioQueue.empty()) {
+            postDrainAudioQueue_l();
+        }
     }
 
     if (!mVideoQueue.empty()) {
-        postDrainVideoQueue_l();
+        postDrainVideoQueue();
     }
 }
 
@@ -1302,6 +1503,21 @@
     mVideoScheduler->init(fps);
 }
 
+int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
+    Mutex::Autolock autoLock(mLock);
+    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
+}
+
+int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
+    Mutex::Autolock autoLock(mLock);
+    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
+}
+
+bool NuPlayer::Renderer::getSyncQueues() {
+    Mutex::Autolock autoLock(mLock);
+    return mSyncQueues;
+}
+
 // TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
 // as it acquires locks and may query the audio driver.
 //
@@ -1309,6 +1525,7 @@
 // accessing getTimestamp() or getPosition() every time a data buffer with
 // a media time is received.
 //
+// Calculate duration of played samples if played at normal rate (i.e., 1.0).
 int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
     uint32_t numFramesPlayed;
     int64_t numFramesPlayedAt;
@@ -1343,12 +1560,11 @@
         CHECK_EQ(res, (status_t)OK);
         numFramesPlayedAt = nowUs;
         numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
-        //ALOGD("getPosition: %d %lld", numFramesPlayed, numFramesPlayedAt);
+        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
     }
 
-    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
     //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
-    int64_t durationUs = (int64_t)((int32_t)numFramesPlayed * 1000LL * mAudioSink->msecsPerFrame())
+    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
             + nowUs - numFramesPlayedAt;
     if (durationUs < 0) {
         // Occurs when numFramesPlayed position is very small and the following:
@@ -1366,23 +1582,22 @@
     return durationUs;
 }
 
-void NuPlayer::Renderer::onAudioOffloadTearDown(AudioOffloadTearDownReason reason) {
-    if (mAudioOffloadTornDown) {
+void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
+    if (mAudioTornDown) {
         return;
     }
-    mAudioOffloadTornDown = true;
+    mAudioTornDown = true;
 
     int64_t currentPositionUs;
-    if (getCurrentPositionOnLooper(&currentPositionUs) != OK) {
-        currentPositionUs = 0;
+    sp<AMessage> notify = mNotify->dup();
+    if (getCurrentPosition(&currentPositionUs) == OK) {
+        notify->setInt64("positionUs", currentPositionUs);
     }
 
     mAudioSink->stop();
     mAudioSink->flush();
 
-    sp<AMessage> notify = mNotify->dup();
-    notify->setInt32("what", kWhatAudioOffloadTearDown);
-    notify->setInt64("positionUs", currentPositionUs);
+    notify->setInt32("what", kWhatAudioTearDown);
     notify->setInt32("reason", reason);
     notify->post();
 }
@@ -1390,8 +1605,8 @@
 void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
     if (offloadingAudio()) {
         mWakeLock->acquire();
-        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, id());
-        msg->setInt32("generation", mAudioOffloadPauseTimeoutGeneration);
+        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
+        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
         msg->post(kOffloadPauseMaxUs);
     }
 }
@@ -1475,18 +1690,23 @@
             offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
             audioSinkChanged = true;
             mAudioSink->close();
+
             err = mAudioSink->open(
                     sampleRate,
                     numChannels,
                     (audio_channel_mask_t)channelMask,
                     audioFormat,
-                    8 /* bufferCount */,
+                    0 /* bufferCount - unused */,
                     &NuPlayer::Renderer::AudioSinkCallback,
                     this,
                     (audio_output_flags_t)offloadFlags,
                     &offloadInfo);
 
             if (err == OK) {
+                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
+            }
+
+            if (err == OK) {
                 // If the playback is offloaded to h/w, we pass
                 // the HAL some metadata information.
                 // We don't want to do this for PCM because it
@@ -1494,7 +1714,9 @@
                 // before reaching the hardware.
                 // TODO
                 mCurrentOffloadInfo = offloadInfo;
-                err = mAudioSink->start();
+                if (!mPaused) { // for preview mode, don't start if paused
+                    err = mAudioSink->start();
+                }
                 ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
             }
             if (err != OK) {
@@ -1503,6 +1725,9 @@
                 onDisableOffloadAudio();
                 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                 ALOGV("openAudioSink: offload failed");
+            } else {
+                mUseAudioCallback = true;  // offload mode transfers data through callback
+                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
             }
         }
     }
@@ -1527,29 +1752,47 @@
         audioSinkChanged = true;
         mAudioSink->close();
         mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
+        // Note: It is possible to set up the callback, but not use it to send audio data.
+        // This requires a fix in AudioSink to explicitly specify the transfer mode.
+        mUseAudioCallback = getUseAudioCallbackSetting();
+        if (mUseAudioCallback) {
+            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
+        }
+
+        // Compute the desired buffer size.
+        // For callback mode, the time before the next wakeup is about half the buffer duration.
+        const uint32_t frameCount =
+                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
+
         status_t err = mAudioSink->open(
                     sampleRate,
                     numChannels,
                     (audio_channel_mask_t)channelMask,
                     AUDIO_FORMAT_PCM_16_BIT,
-                    8 /* bufferCount */,
+                    0 /* bufferCount - unused */,
+                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
+                    mUseAudioCallback ? this : NULL,
+                    (audio_output_flags_t)pcmFlags,
                     NULL,
-                    NULL,
-                    (audio_output_flags_t)pcmFlags);
+                    true /* doNotReconnect */,
+                    frameCount);
+        if (err == OK) {
+            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
+        }
         if (err != OK) {
             ALOGW("openAudioSink: non offloaded open failed status: %d", err);
             mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
             return err;
         }
         mCurrentPcmInfo = info;
-        mAudioSink->start();
+        if (!mPaused) { // for preview mode, don't start if paused
+            mAudioSink->start();
+        }
     }
     if (audioSinkChanged) {
         onAudioSinkChanged();
     }
-    if (offloadingAudio()) {
-        mAudioOffloadTornDown = false;
-    }
+    mAudioTornDown = false;
     return OK;
 }
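
For the non-offloaded open above, the sink buffer is sized from the media.stagefright.audio.sink property documented at the top of the file; with the default of 500 ms and an assumed 44.1 kHz stream the arithmetic works out as:

    // frameCount = sampleRate * getAudioSinkPcmMsSetting() / 1000
    //            = 44100 * 500 / 1000 = 22050 frames (~500 ms of audio)
    // "adb shell setprop media.stagefright.audio.sink 1000" doubles that for the
    // next track; in callback mode wakeups then arrive roughly every half buffer.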
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 003d1d0..3e65649 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -18,12 +18,16 @@
 
 #define NUPLAYER_RENDERER_H_
 
+#include <media/AudioResamplerPublic.h>
+#include <media/AVSyncSettings.h>
+
 #include "NuPlayer.h"
 
 namespace android {
 
 struct ABuffer;
 class  AWakeLock;
+struct MediaClock;
 struct VideoFrameScheduler;
 
 struct NuPlayer::Renderer : public AHandler {
@@ -47,6 +51,11 @@
 
     void queueEOS(bool audio, status_t finalResult);
 
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate /* sanitized */);
+    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+    status_t setSyncSettings(const AVSyncSettings &sync, float videoFpsHint);
+    status_t getSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
     void flush(bool audio, bool notifyComplete);
 
     void signalTimeDiscontinuity();
@@ -61,16 +70,8 @@
 
     void setVideoFrameRate(float fps);
 
-    // Following setters and getters are protected by mTimeLock.
     status_t getCurrentPosition(int64_t *mediaUs);
-    void setHasMedia(bool audio);
-    void setAudioFirstAnchorTime(int64_t mediaUs);
-    void setAudioFirstAnchorTimeIfNeeded(int64_t mediaUs);
-    void setAnchorTime(
-            int64_t mediaUs, int64_t realUs, int64_t numFramesWritten = -1, bool resume = false);
-    void setVideoLateByUs(int64_t lateUs);
     int64_t getVideoLateByUs();
-    void setPauseStartedTimeRealUs(int64_t realUs);
 
     status_t openAudioSink(
             const sp<AMessage> &format,
@@ -81,16 +82,16 @@
     void closeAudioSink();
 
     enum {
-        kWhatEOS                 = 'eos ',
-        kWhatFlushComplete       = 'fluC',
-        kWhatPosition            = 'posi',
-        kWhatVideoRenderingStart = 'vdrd',
-        kWhatMediaRenderingStart = 'mdrd',
-        kWhatAudioOffloadTearDown = 'aOTD',
+        kWhatEOS                      = 'eos ',
+        kWhatFlushComplete            = 'fluC',
+        kWhatPosition                 = 'posi',
+        kWhatVideoRenderingStart      = 'vdrd',
+        kWhatMediaRenderingStart      = 'mdrd',
+        kWhatAudioTearDown            = 'adTD',
         kWhatAudioOffloadPauseTimeout = 'aOPT',
     };
 
-    enum AudioOffloadTearDownReason {
+    enum AudioTearDownReason {
         kDueToError = 0,
         kDueToTimeout,
     };
@@ -107,8 +108,11 @@
         kWhatPostDrainVideoQueue = 'pDVQ',
         kWhatQueueBuffer         = 'queB',
         kWhatQueueEOS            = 'qEOS',
+        kWhatConfigPlayback      = 'cfPB',
+        kWhatConfigSync          = 'cfSy',
+        kWhatGetPlaybackSettings = 'gPbS',
+        kWhatGetSyncSettings     = 'gSyS',
         kWhatFlush               = 'flus',
-        kWhatAudioSinkChanged    = 'auSC',
         kWhatPause               = 'paus',
         kWhatResume              = 'resm',
         kWhatOpenAudioSink       = 'opnA',
@@ -142,26 +146,23 @@
     bool mDrainVideoQueuePending;
     int32_t mAudioQueueGeneration;
     int32_t mVideoQueueGeneration;
+    int32_t mAudioDrainGeneration;
+    int32_t mVideoDrainGeneration;
 
-    Mutex mTimeLock;
-    // |mTimeLock| protects the following 7 member vars that are related to time.
-    // Note: those members are only written on Renderer thread, so reading on Renderer thread
-    // doesn't need to be protected. Otherwise accessing those members must be protected by
-    // |mTimeLock|.
-    // TODO: move those members to a seperated media clock class.
+    sp<MediaClock> mMediaClock;
+    float mPlaybackRate; // audio track rate
+
+    AudioPlaybackRate mPlaybackSettings;
+    AVSyncSettings mSyncSettings;
+    float mVideoFpsHint;
+
     int64_t mAudioFirstAnchorTimeMediaUs;
     int64_t mAnchorTimeMediaUs;
-    int64_t mAnchorTimeRealUs;
     int64_t mAnchorNumFramesWritten;
-    int64_t mAnchorMaxMediaUs;
     int64_t mVideoLateByUs;
     bool mHasAudio;
     bool mHasVideo;
-    int64_t mPauseStartedTimeRealUs;
 
-    Mutex mFlushLock;  // protects the following 2 member vars.
-    bool mFlushingAudio;
-    bool mFlushingVideo;
     bool mNotifyCompleteAudio;
     bool mNotifyCompleteVideo;
 
@@ -169,7 +170,6 @@
 
     // modified on only renderer's thread.
     bool mPaused;
-    int64_t mPausePositionMediaTimeUs;
 
     bool mVideoSampleReceived;
     bool mVideoRenderingStarted;
@@ -179,7 +179,7 @@
     int64_t mLastPositionUpdateUs;
 
     int32_t mAudioOffloadPauseTimeoutGeneration;
-    bool mAudioOffloadTornDown;
+    bool mAudioTornDown;
     audio_offload_info_t mCurrentOffloadInfo;
 
     struct PcmInfo {
@@ -194,6 +194,7 @@
 
     int32_t mTotalBuffersQueued;
     int32_t mLastAudioBufferDrained;
+    bool mUseAudioCallback;
 
     sp<AWakeLock> mWakeLock;
 
@@ -207,18 +208,24 @@
     size_t fillAudioBuffer(void *buffer, size_t size);
 
     bool onDrainAudioQueue();
+    void drainAudioQueueUntilLastEOS();
     int64_t getPendingAudioPlayoutDurationUs(int64_t nowUs);
     int64_t getPlayedOutAudioDurationUs(int64_t nowUs);
     void postDrainAudioQueue_l(int64_t delayUs = 0);
 
+    void clearAnchorTime_l();
+    void clearAudioFirstAnchorTime_l();
+    void setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs);
+    void setVideoLateByUs(int64_t lateUs);
+
     void onNewAudioMediaTime(int64_t mediaTimeUs);
     int64_t getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs);
 
     void onDrainVideoQueue();
-    void postDrainVideoQueue_l();
+    void postDrainVideoQueue();
 
-    void prepareForMediaRenderingStart();
-    void notifyIfMediaRenderingStarted();
+    void prepareForMediaRenderingStart_l();
+    void notifyIfMediaRenderingStarted_l();
 
     void onQueueBuffer(const sp<AMessage> &msg);
     void onQueueEOS(const sp<AMessage> &msg);
@@ -226,10 +233,18 @@
     void onAudioSinkChanged();
     void onDisableOffloadAudio();
     void onEnableOffloadAudio();
+    status_t onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */);
+    status_t onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
+    status_t onConfigSync(const AVSyncSettings &sync, float videoFpsHint);
+    status_t onGetSyncSettings(AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */);
+
     void onPause();
     void onResume();
     void onSetVideoFrameRate(float fps);
-    void onAudioOffloadTearDown(AudioOffloadTearDownReason reason);
+    int32_t getQueueGeneration(bool audio);
+    int32_t getDrainGeneration(bool audio);
+    bool getSyncQueues();
+    void onAudioTearDown(AudioTearDownReason reason);
     status_t onOpenAudioSink(
             const sp<AMessage> &format,
             bool offloadOnly,
@@ -242,10 +257,10 @@
     void notifyPosition();
     void notifyVideoLateBy(int64_t lateByUs);
     void notifyVideoRenderingStart();
-    void notifyAudioOffloadTearDown();
+    void notifyAudioTearDown();
 
     void flushQueue(List<QueueEntry> *queue);
-    bool dropBufferWhileFlushing(bool audio, const sp<AMessage> &msg);
+    bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
     void syncQueuesDone_l();
 
     bool offloadingAudio() const { return (mFlags & FLAG_OFFLOAD_AUDIO) != 0; }
@@ -253,9 +268,11 @@
     void startAudioOffloadPauseTimeout();
     void cancelAudioOffloadPauseTimeout();
 
+    int64_t getDurationUsIfPlayedAtSampleRate(uint32_t numFrames);
+
     DISALLOW_EVIL_CONSTRUCTORS(Renderer);
 };
 
-}  // namespace android
+} // namespace android
 
 #endif  // NUPLAYER_RENDERER_H_
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
index d9f14a2..11a6a9f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerSource.h
@@ -28,7 +28,7 @@
 namespace android {
 
 struct ABuffer;
-struct MediaBuffer;
+class MediaBuffer;
 
 struct NuPlayer::Source : public AHandler {
     enum Flags {
@@ -53,6 +53,7 @@
         kWhatCacheStats,
         kWhatSubtitleData,
         kWhatTimedTextData,
+        kWhatTimedMetaData,
         kWhatQueueDecoderShutdown,
         kWhatDrmNoLicense,
         kWhatInstantiateSecureDecoders,
@@ -117,6 +118,10 @@
         return false;
     }
 
+    virtual bool isStreaming() const {
+        return true;
+    }
+
 protected:
     virtual ~Source() {}
 
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
index 885ebe4..f53afbd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
@@ -29,9 +29,9 @@
 
 NuPlayer::NuPlayerStreamListener::NuPlayerStreamListener(
         const sp<IStreamSource> &source,
-        ALooper::handler_id id)
+        const sp<AHandler> &targetHandler)
     : mSource(source),
-      mTargetID(id),
+      mTargetHandler(targetHandler),
       mEOS(false),
       mSendDataNotification(true) {
     mSource->setListener(this);
@@ -65,8 +65,8 @@
     if (mSendDataNotification) {
         mSendDataNotification = false;
 
-        if (mTargetID != 0) {
-            (new AMessage(kWhatMoreDataQueued, mTargetID))->post();
+        if (mTargetHandler != NULL) {
+            (new AMessage(kWhatMoreDataQueued, mTargetHandler))->post();
         }
     }
 }
@@ -86,8 +86,8 @@
     if (mSendDataNotification) {
         mSendDataNotification = false;
 
-        if (mTargetID != 0) {
-            (new AMessage(kWhatMoreDataQueued, mTargetID))->post();
+        if (mTargetHandler != NULL) {
+            (new AMessage(kWhatMoreDataQueued, mTargetHandler))->post();
         }
     }
 }
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.h b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.h
index 1874d80..2de829b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.h
@@ -29,7 +29,7 @@
 struct NuPlayer::NuPlayerStreamListener : public BnStreamListener {
     NuPlayerStreamListener(
             const sp<IStreamSource> &source,
-            ALooper::handler_id targetID);
+            const sp<AHandler> &targetHandler);
 
     virtual void queueBuffer(size_t index, size_t size);
 
@@ -59,7 +59,7 @@
     Mutex mLock;
 
     sp<IStreamSource> mSource;
-    ALooper::handler_id mTargetID;
+    sp<AHandler> mTargetHandler;
     sp<MemoryDealer> mMemoryDealer;
     Vector<sp<IMemory> > mBuffers;
     List<QueueEntry> mQueue;
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index 0282a9f..58ff113 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -87,7 +87,7 @@
     CHECK(mHandler == NULL);
     CHECK(mSDPLoader == NULL);
 
-    sp<AMessage> notify = new AMessage(kWhatNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatNotify, this);
 
     CHECK_EQ(mState, (int)DISCONNECTED);
     mState = CONNECTING;
@@ -116,7 +116,7 @@
     if (mLooper == NULL) {
         return;
     }
-    sp<AMessage> msg = new AMessage(kWhatDisconnect, id());
+    sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
 
     sp<AMessage> dummy;
     msg->postAndAwaitResponse(&dummy);
@@ -138,7 +138,9 @@
 }
 
 void NuPlayer::RTSPSource::resume() {
-    mHandler->resume();
+    if (mHandler != NULL) {
+        mHandler->resume();
+    }
 }
 
 status_t NuPlayer::RTSPSource::feedMoreTSData() {
@@ -292,16 +294,22 @@
 }
 
 status_t NuPlayer::RTSPSource::seekTo(int64_t seekTimeUs) {
-    sp<AMessage> msg = new AMessage(kWhatPerformSeek, id());
+    sp<AMessage> msg = new AMessage(kWhatPerformSeek, this);
     msg->setInt32("generation", ++mSeekGeneration);
     msg->setInt64("timeUs", seekTimeUs);
-    msg->post(200000ll);
 
-    return OK;
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+    if (err == OK && response != NULL) {
+        CHECK(response->findInt32("err", &err));
+    }
+
+    return err;
 }
 
 void NuPlayer::RTSPSource::performSeek(int64_t seekTimeUs) {
     if (mState != CONNECTED) {
+        finishSeek(INVALID_OPERATION);
         return;
     }
 
@@ -311,7 +319,7 @@
 
 void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
     if (msg->what() == kWhatDisconnect) {
-        uint32_t replyID;
+        sp<AReplyToken> replyID;
         CHECK(msg->senderAwaitsResponse(&replyID));
 
         mDisconnectReplyID = replyID;
@@ -320,9 +328,11 @@
     } else if (msg->what() == kWhatPerformSeek) {
         int32_t generation;
         CHECK(msg->findInt32("generation", &generation));
+        CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
 
         if (generation != mSeekGeneration) {
             // obsolete.
+            finishSeek(OK);
             return;
         }
 
@@ -368,6 +378,37 @@
         case MyHandler::kWhatSeekDone:
         {
             mState = CONNECTED;
+            if (mSeekReplyID != NULL) {
+                // Unblock seekTo here in case we attempted to seek in a live stream
+                finishSeek(OK);
+            }
+            break;
+        }
+
+        case MyHandler::kWhatSeekPaused:
+        {
+            sp<AnotherPacketSource> source = getSource(true /* audio */);
+            if (source != NULL) {
+                source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+                        /* extra */ NULL,
+                        /* discard */ true);
+            }
+            source = getSource(false /* video */);
+            if (source != NULL) {
+                source->queueDiscontinuity(ATSParser::DISCONTINUITY_NONE,
+                        /* extra */ NULL,
+                        /* discard */ true);
+            };
+
+            status_t err = OK;
+            msg->findInt32("err", &err);
+            finishSeek(err);
+
+            if (err == OK) {
+                int64_t timeUs;
+                CHECK(msg->findInt64("time", &timeUs));
+                mHandler->continueSeekAfterPause(timeUs);
+            }
             break;
         }
 
@@ -600,7 +641,7 @@
             ALOGE("Unable to find url in SDP");
             err = UNKNOWN_ERROR;
         } else {
-            sp<AMessage> notify = new AMessage(kWhatNotify, id());
+            sp<AMessage> notify = new AMessage(kWhatNotify, this);
 
             mHandler = new MyHandler(rtspUri.c_str(), notify, mUIDValid, mUID);
             mLooper->registerHandler(mHandler);
@@ -700,5 +741,12 @@
     return true;
 }
 
+void NuPlayer::RTSPSource::finishSeek(status_t err) {
+    CHECK(mSeekReplyID != NULL);
+    sp<AMessage> seekReply = new AMessage;
+    seekReply->setInt32("err", err);
+    seekReply->postReply(mSeekReplyID);
+    mSeekReplyID = NULL;
+}
 
 }  // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index ac3299a..6438a1e 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -25,6 +25,7 @@
 namespace android {
 
 struct ALooper;
+struct AReplyToken;
 struct AnotherPacketSource;
 struct MyHandler;
 struct SDPLoader;
@@ -96,7 +97,7 @@
     bool mIsSDP;
     State mState;
     status_t mFinalResult;
-    uint32_t mDisconnectReplyID;
+    sp<AReplyToken> mDisconnectReplyID;
     Mutex mBufferingLock;
     bool mBuffering;
 
@@ -115,6 +116,8 @@
     int64_t mEOSTimeoutAudio;
     int64_t mEOSTimeoutVideo;
 
+    sp<AReplyToken> mSeekReplyID;
+
     sp<AnotherPacketSource> getSource(bool audio);
 
     void onConnected();
@@ -130,6 +133,7 @@
     void setError(status_t err);
     void startBufferingIfNecessary();
     bool stopBufferingIfNecessary();
+    void finishSeek(status_t err);
 
     DISALLOW_EVIL_CONSTRUCTORS(RTSPSource);
 };
diff --git a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
index b3f224d..0246b59 100644
--- a/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/StreamingSource.cpp
@@ -63,7 +63,7 @@
 }
 
 void NuPlayer::StreamingSource::start() {
-    mStreamListener = new NuPlayerStreamListener(mSource, 0);
+    mStreamListener = new NuPlayerStreamListener(mSource, NULL);
 
     uint32_t sourceFlags = mSource->flags();
 
@@ -163,7 +163,7 @@
         mBuffering = true;
     }
 
-    (new AMessage(kWhatReadBuffer, id()))->post();
+    (new AMessage(kWhatReadBuffer, this))->post();
     return OK;
 }
 
diff --git a/media/libmediaplayerservice/tests/Android.mk b/media/libmediaplayerservice/tests/Android.mk
new file mode 100644
index 0000000..8cbf782
--- /dev/null
+++ b/media/libmediaplayerservice/tests/Android.mk
@@ -0,0 +1,27 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := DrmSessionManager_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	DrmSessionManager_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	liblog \
+	libmediaplayerservice \
+	libutils \
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/include \
+	frameworks/av/media/libmediaplayerservice \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+LOCAL_32_BIT_ONLY := true
+
+include $(BUILD_NATIVE_TEST)
+
diff --git a/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
new file mode 100644
index 0000000..de350a1
--- /dev/null
+++ b/media/libmediaplayerservice/tests/DrmSessionManager_test.cpp
@@ -0,0 +1,249 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmSessionManager_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "Drm.h"
+#include "DrmSessionClientInterface.h"
+#include "DrmSessionManager.h"
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace android {
+
+struct FakeProcessInfo : public ProcessInfoInterface {
+    FakeProcessInfo() {}
+    virtual ~FakeProcessInfo() {}
+
+    virtual bool getPriority(int pid, int* priority) {
+        // For testing, use pid as priority.
+        // The lower the value, the higher the priority.
+        *priority = pid;
+        return true;
+    }
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(FakeProcessInfo);
+};
+
+struct FakeDrm : public DrmSessionClientInterface {
+    FakeDrm() {}
+    virtual ~FakeDrm() {}
+
+    virtual bool reclaimSession(const Vector<uint8_t>& sessionId) {
+        mReclaimedSessions.push_back(sessionId);
+        return true;
+    }
+
+    const Vector<Vector<uint8_t> >& reclaimedSessions() const {
+        return mReclaimedSessions;
+    }
+
+private:
+    Vector<Vector<uint8_t> > mReclaimedSessions;
+
+    DISALLOW_EVIL_CONSTRUCTORS(FakeDrm);
+};
+
+static const int kTestPid1 = 30;
+static const int kTestPid2 = 20;
+static const uint8_t kTestSessionId1[] = {1, 2, 3};
+static const uint8_t kTestSessionId2[] = {4, 5, 6, 7, 8};
+static const uint8_t kTestSessionId3[] = {9, 0};
+
+class DrmSessionManagerTest : public ::testing::Test {
+public:
+    DrmSessionManagerTest()
+        : mDrmSessionManager(new DrmSessionManager(new FakeProcessInfo())),
+          mTestDrm1(new FakeDrm()),
+          mTestDrm2(new FakeDrm()) {
+        GetSessionId(kTestSessionId1, ARRAY_SIZE(kTestSessionId1), &mSessionId1);
+        GetSessionId(kTestSessionId2, ARRAY_SIZE(kTestSessionId2), &mSessionId2);
+        GetSessionId(kTestSessionId3, ARRAY_SIZE(kTestSessionId3), &mSessionId3);
+    }
+
+protected:
+    static void GetSessionId(const uint8_t* ids, size_t num, Vector<uint8_t>* sessionId) {
+        for (size_t i = 0; i < num; ++i) {
+            sessionId->push_back(ids[i]);
+        }
+    }
+
+    static void ExpectEqSessionInfo(const SessionInfo& info, sp<DrmSessionClientInterface> drm,
+            const Vector<uint8_t>& sessionId, int64_t timeStamp) {
+        EXPECT_EQ(drm, info.drm);
+        EXPECT_TRUE(isEqualSessionId(sessionId, info.sessionId));
+        EXPECT_EQ(timeStamp, info.timeStamp);
+    }
+
+    void addSession() {
+        mDrmSessionManager->addSession(kTestPid1, mTestDrm1, mSessionId1);
+        mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mSessionId2);
+        mDrmSessionManager->addSession(kTestPid2, mTestDrm2, mSessionId3);
+        const PidSessionInfosMap& map = sessionMap();
+        EXPECT_EQ(2u, map.size());
+        ssize_t index1 = map.indexOfKey(kTestPid1);
+        ASSERT_GE(index1, 0);
+        const SessionInfos& infos1 = map[index1];
+        EXPECT_EQ(1u, infos1.size());
+        ExpectEqSessionInfo(infos1[0], mTestDrm1, mSessionId1, 0);
+
+        ssize_t index2 = map.indexOfKey(kTestPid2);
+        ASSERT_GE(index2, 0);
+        const SessionInfos& infos2 = map[index2];
+        EXPECT_EQ(2u, infos2.size());
+        ExpectEqSessionInfo(infos2[0], mTestDrm2, mSessionId2, 1);
+        ExpectEqSessionInfo(infos2[1], mTestDrm2, mSessionId3, 2);
+    }
+
+    const PidSessionInfosMap& sessionMap() {
+        return mDrmSessionManager->mSessionMap;
+    }
+
+    void testGetLowestPriority() {
+        int pid;
+        int priority;
+        EXPECT_FALSE(mDrmSessionManager->getLowestPriority_l(&pid, &priority));
+
+        addSession();
+        EXPECT_TRUE(mDrmSessionManager->getLowestPriority_l(&pid, &priority));
+
+        EXPECT_EQ(kTestPid1, pid);
+        FakeProcessInfo processInfo;
+        int priority1;
+        processInfo.getPriority(kTestPid1, &priority1);
+        EXPECT_EQ(priority1, priority);
+    }
+
+    void testGetLeastUsedSession() {
+        sp<DrmSessionClientInterface> drm;
+        Vector<uint8_t> sessionId;
+        EXPECT_FALSE(mDrmSessionManager->getLeastUsedSession_l(kTestPid1, &drm, &sessionId));
+
+        addSession();
+
+        EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid1, &drm, &sessionId));
+        EXPECT_EQ(mTestDrm1, drm);
+        EXPECT_TRUE(isEqualSessionId(mSessionId1, sessionId));
+
+        EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid2, &drm, &sessionId));
+        EXPECT_EQ(mTestDrm2, drm);
+        EXPECT_TRUE(isEqualSessionId(mSessionId2, sessionId));
+
+        // mSessionId2 is no longer the least used session.
+        mDrmSessionManager->useSession(mSessionId2);
+        EXPECT_TRUE(mDrmSessionManager->getLeastUsedSession_l(kTestPid2, &drm, &sessionId));
+        EXPECT_EQ(mTestDrm2, drm);
+        EXPECT_TRUE(isEqualSessionId(mSessionId3, sessionId));
+    }
+
+    sp<DrmSessionManager> mDrmSessionManager;
+    sp<FakeDrm> mTestDrm1;
+    sp<FakeDrm> mTestDrm2;
+    Vector<uint8_t> mSessionId1;
+    Vector<uint8_t> mSessionId2;
+    Vector<uint8_t> mSessionId3;
+};
+
+TEST_F(DrmSessionManagerTest, addSession) {
+    addSession();
+}
+
+TEST_F(DrmSessionManagerTest, useSession) {
+    addSession();
+
+    mDrmSessionManager->useSession(mSessionId1);
+    mDrmSessionManager->useSession(mSessionId3);
+
+    const PidSessionInfosMap& map = sessionMap();
+    const SessionInfos& infos1 = map.valueFor(kTestPid1);
+    const SessionInfos& infos2 = map.valueFor(kTestPid2);
+    ExpectEqSessionInfo(infos1[0], mTestDrm1, mSessionId1, 3);
+    ExpectEqSessionInfo(infos2[1], mTestDrm2, mSessionId3, 4);
+}
+
+TEST_F(DrmSessionManagerTest, removeSession) {
+    addSession();
+
+    mDrmSessionManager->removeSession(mSessionId2);
+
+    const PidSessionInfosMap& map = sessionMap();
+    EXPECT_EQ(2u, map.size());
+    const SessionInfos& infos1 = map.valueFor(kTestPid1);
+    const SessionInfos& infos2 = map.valueFor(kTestPid2);
+    EXPECT_EQ(1u, infos1.size());
+    EXPECT_EQ(1u, infos2.size());
+    // mSessionId2 has been removed.
+    ExpectEqSessionInfo(infos2[0], mTestDrm2, mSessionId3, 2);
+}
+
+TEST_F(DrmSessionManagerTest, removeDrm) {
+    addSession();
+
+    sp<FakeDrm> drm = new FakeDrm;
+    const uint8_t ids[] = {123};
+    Vector<uint8_t> sessionId;
+    GetSessionId(ids, ARRAY_SIZE(ids), &sessionId);
+    mDrmSessionManager->addSession(kTestPid2, drm, sessionId);
+
+    mDrmSessionManager->removeDrm(mTestDrm2);
+
+    const PidSessionInfosMap& map = sessionMap();
+    const SessionInfos& infos2 = map.valueFor(kTestPid2);
+    EXPECT_EQ(1u, infos2.size());
+    // mTestDrm2 has been removed.
+    ExpectEqSessionInfo(infos2[0], drm, sessionId, 3);
+}
+
+TEST_F(DrmSessionManagerTest, reclaimSession) {
+    EXPECT_FALSE(mDrmSessionManager->reclaimSession(kTestPid1));
+    addSession();
+
+    // calling pid priority is too low
+    EXPECT_FALSE(mDrmSessionManager->reclaimSession(50));
+
+    EXPECT_TRUE(mDrmSessionManager->reclaimSession(10));
+    EXPECT_EQ(1u, mTestDrm1->reclaimedSessions().size());
+    EXPECT_TRUE(isEqualSessionId(mSessionId1, mTestDrm1->reclaimedSessions()[0]));
+
+    mDrmSessionManager->removeSession(mSessionId1);
+
+    // add a session from a higher priority process.
+    sp<FakeDrm> drm = new FakeDrm;
+    const uint8_t ids[] = {1, 3, 5};
+    Vector<uint8_t> sessionId;
+    GetSessionId(ids, ARRAY_SIZE(ids), &sessionId);
+    mDrmSessionManager->addSession(15, drm, sessionId);
+
+    EXPECT_TRUE(mDrmSessionManager->reclaimSession(18));
+    EXPECT_EQ(1u, mTestDrm2->reclaimedSessions().size());
+    // mSessionId2 is reclaimed.
+    EXPECT_TRUE(isEqualSessionId(mSessionId2, mTestDrm2->reclaimedSessions()[0]));
+}
+
+TEST_F(DrmSessionManagerTest, getLowestPriority) {
+    testGetLowestPriority();
+}
+
+TEST_F(DrmSessionManagerTest, getLeastUsedSession_l) {
+    testGetLeastUsedSession();
+}
+
+} // namespace android
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 9707c4a..1353f28 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -11,7 +11,6 @@
     MonoPipeReader.cpp              \
     Pipe.cpp                        \
     PipeReader.cpp                  \
-    roundup.c                       \
     SourceAudioBufferProvider.cpp
 
 LOCAL_SRC_FILES += NBLog.cpp
@@ -27,12 +26,13 @@
 LOCAL_MODULE := libnbaio
 
 LOCAL_SHARED_LIBRARIES := \
+    libaudioutils \
     libbinder \
     libcommon_time_client \
     libcutils \
     libutils \
     liblog
 
-LOCAL_STATIC_LIBRARIES += libinstantssq
+LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/MonoPipe.cpp b/media/libnbaio/MonoPipe.cpp
index 0b65861..129e9ef 100644
--- a/media/libnbaio/MonoPipe.cpp
+++ b/media/libnbaio/MonoPipe.cpp
@@ -27,7 +27,7 @@
 #include <utils/Trace.h>
 #include <media/AudioBufferProvider.h>
 #include <media/nbaio/MonoPipe.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
 
 
 namespace android {
diff --git a/media/libnbaio/MonoPipeReader.cpp b/media/libnbaio/MonoPipeReader.cpp
index de82229..e4d3ed8 100644
--- a/media/libnbaio/MonoPipeReader.cpp
+++ b/media/libnbaio/MonoPipeReader.cpp
@@ -39,7 +39,7 @@
         return NEGOTIATE;
     }
     ssize_t ret = android_atomic_acquire_load(&mPipe->mRear) - mPipe->mFront;
-    ALOG_ASSERT((0 <= ret) && (ret <= mMaxFrames));
+    ALOG_ASSERT((0 <= ret) && ((size_t) ret <= mPipe->mMaxFrames));
     return ret;
 }
 
diff --git a/media/libnbaio/Pipe.cpp b/media/libnbaio/Pipe.cpp
index 6e0ec8c..13f211d 100644
--- a/media/libnbaio/Pipe.cpp
+++ b/media/libnbaio/Pipe.cpp
@@ -21,7 +21,7 @@
 #include <cutils/compiler.h>
 #include <utils/Log.h>
 #include <media/nbaio/Pipe.h>
-#include <media/nbaio/roundup.h>
+#include <audio_utils/roundup.h>
 
 namespace android {
 
diff --git a/media/libnbaio/roundup.c b/media/libnbaio/roundup.c
deleted file mode 100644
index 1d552d1..0000000
--- a/media/libnbaio/roundup.c
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/nbaio/roundup.h>
-
-unsigned roundup(unsigned v)
-{
-    // __builtin_clz is undefined for zero input
-    if (v == 0) {
-        v = 1;
-    }
-    int lz = __builtin_clz((int) v);
-    unsigned rounded = ((unsigned) 0x80000000) >> lz;
-    // 0x800000001 and higher are actually rounded _down_ to prevent overflow
-    if (v > rounded && lz > 0) {
-        rounded <<= 1;
-    }
-    return rounded;
-}
diff --git a/media/libstagefright/AACExtractor.cpp b/media/libstagefright/AACExtractor.cpp
index 196f6ee..45e8a30 100644
--- a/media/libstagefright/AACExtractor.cpp
+++ b/media/libstagefright/AACExtractor.cpp
@@ -360,7 +360,7 @@
         pos += len;
 
         ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
-             pos, pos);
+                (long long)pos, (long long)pos);
     }
 
     uint8_t header[2];
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 2e41d80..9d90dbd 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -36,33 +36,19 @@
 
 namespace android {
 
-AACWriter::AACWriter(const char *filename)
-    : mFd(-1),
-      mInitCheck(NO_INIT),
-      mStarted(false),
-      mPaused(false),
-      mResumed(false),
-      mChannelCount(-1),
-      mSampleRate(-1),
-      mAACProfile(OMX_AUDIO_AACObjectLC) {
-
-    ALOGV("AACWriter Constructor");
-
-    mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
-    if (mFd >= 0) {
-        mInitCheck = OK;
-    }
-}
-
 AACWriter::AACWriter(int fd)
     : mFd(dup(fd)),
       mInitCheck(mFd < 0? NO_INIT: OK),
       mStarted(false),
       mPaused(false),
       mResumed(false),
+      mThread(0),
+      mEstimatedSizeBytes(0),
+      mEstimatedDurationUs(0),
       mChannelCount(-1),
       mSampleRate(-1),
-      mAACProfile(OMX_AUDIO_AACObjectLC) {
+      mAACProfile(OMX_AUDIO_AACObjectLC),
+      mFrameDurationUs(0) {
 }
 
 AACWriter::~AACWriter() {
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index d49594f..8d9bd21 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -24,6 +24,8 @@
 #include <inttypes.h>
 #include <utils/Trace.h>
 
+#include <gui/Surface.h>
+
 #include <media/stagefright/ACodec.h>
 
 #include <binder/MemoryDealer.h>
@@ -35,18 +37,20 @@
 #include <media/stagefright/foundation/AUtils.h>
 
 #include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/MediaCodec.h>
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/NativeWindowWrapper.h>
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
-
+#include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/SurfaceUtils.h>
 #include <media/hardware/HardwareAPI.h>
 
 #include <OMX_AudioExt.h>
 #include <OMX_VideoExt.h>
 #include <OMX_Component.h>
 #include <OMX_IndexExt.h>
+#include <OMX_AsString.h>
 
 #include "include/avc_utils.h"
 
@@ -103,6 +107,18 @@
     params->nVersion.s.nStep = 0;
 }
 
+struct MessageList : public RefBase {
+    MessageList() {
+    }
+    virtual ~MessageList() {
+    }
+    std::list<sp<AMessage> > &getList() { return mList; }
+private:
+    std::list<sp<AMessage> > mList;
+
+    DISALLOW_EVIL_CONSTRUCTORS(MessageList);
+};
+
 struct CodecObserver : public BnOMXObserver {
     CodecObserver() {}
 
@@ -111,52 +127,78 @@
     }
 
     // from IOMXObserver
-    virtual void onMessage(const omx_message &omx_msg) {
-        sp<AMessage> msg = mNotify->dup();
-
-        msg->setInt32("type", omx_msg.type);
-        msg->setInt32("node", omx_msg.node);
-
-        switch (omx_msg.type) {
-            case omx_message::EVENT:
-            {
-                msg->setInt32("event", omx_msg.u.event_data.event);
-                msg->setInt32("data1", omx_msg.u.event_data.data1);
-                msg->setInt32("data2", omx_msg.u.event_data.data2);
-                break;
-            }
-
-            case omx_message::EMPTY_BUFFER_DONE:
-            {
-                msg->setInt32("buffer", omx_msg.u.buffer_data.buffer);
-                break;
-            }
-
-            case omx_message::FILL_BUFFER_DONE:
-            {
-                msg->setInt32(
-                        "buffer", omx_msg.u.extended_buffer_data.buffer);
-                msg->setInt32(
-                        "range_offset",
-                        omx_msg.u.extended_buffer_data.range_offset);
-                msg->setInt32(
-                        "range_length",
-                        omx_msg.u.extended_buffer_data.range_length);
-                msg->setInt32(
-                        "flags",
-                        omx_msg.u.extended_buffer_data.flags);
-                msg->setInt64(
-                        "timestamp",
-                        omx_msg.u.extended_buffer_data.timestamp);
-                break;
-            }
-
-            default:
-                TRESPASS();
-                break;
+    virtual void onMessages(const std::list<omx_message> &messages) {
+        if (messages.empty()) {
+            return;
         }
 
-        msg->post();
+        sp<AMessage> notify = mNotify->dup();
+        bool first = true;
+        sp<MessageList> msgList = new MessageList();
+        for (std::list<omx_message>::const_iterator it = messages.cbegin();
+              it != messages.cend(); ++it) {
+            const omx_message &omx_msg = *it;
+            if (first) {
+                notify->setInt32("node", omx_msg.node);
+                first = false;
+            }
+
+            sp<AMessage> msg = new AMessage;
+            msg->setInt32("type", omx_msg.type);
+            switch (omx_msg.type) {
+                case omx_message::EVENT:
+                {
+                    msg->setInt32("event", omx_msg.u.event_data.event);
+                    msg->setInt32("data1", omx_msg.u.event_data.data1);
+                    msg->setInt32("data2", omx_msg.u.event_data.data2);
+                    break;
+                }
+
+                case omx_message::EMPTY_BUFFER_DONE:
+                {
+                    msg->setInt32("buffer", omx_msg.u.buffer_data.buffer);
+                    msg->setInt32("fence_fd", omx_msg.fenceFd);
+                    break;
+                }
+
+                case omx_message::FILL_BUFFER_DONE:
+                {
+                    msg->setInt32(
+                            "buffer", omx_msg.u.extended_buffer_data.buffer);
+                    msg->setInt32(
+                            "range_offset",
+                            omx_msg.u.extended_buffer_data.range_offset);
+                    msg->setInt32(
+                            "range_length",
+                            omx_msg.u.extended_buffer_data.range_length);
+                    msg->setInt32(
+                            "flags",
+                            omx_msg.u.extended_buffer_data.flags);
+                    msg->setInt64(
+                            "timestamp",
+                            omx_msg.u.extended_buffer_data.timestamp);
+                    msg->setInt32(
+                            "fence_fd", omx_msg.fenceFd);
+                    break;
+                }
+
+                case omx_message::FRAME_RENDERED:
+                {
+                    msg->setInt64(
+                            "media_time_us", omx_msg.u.render_data.timestamp);
+                    msg->setInt64(
+                            "system_nano", omx_msg.u.render_data.nanoTime);
+                    break;
+                }
+
+                default:
+                    ALOGE("Unrecognized message type: %d", omx_msg.type);
+                    break;
+            }
+            msgList->getList().push_back(msg);
+        }
+        notify->setObject("messages", msgList);
+        notify->post();
     }
 
 protected:
@@ -194,15 +236,25 @@
     void postFillThisBuffer(BufferInfo *info);
 
 private:
+    // Handles an OMX message. Returns true iff message was handled.
     bool onOMXMessage(const sp<AMessage> &msg);
 
-    bool onOMXEmptyBufferDone(IOMX::buffer_id bufferID);
+    // Handles a list of messages. Returns true iff messages were handled.
+    bool onOMXMessageList(const sp<AMessage> &msg);
+
+    // returns true iff this message is for this component and the component is alive
+    bool checkOMXMessage(const sp<AMessage> &msg);
+
+    bool onOMXEmptyBufferDone(IOMX::buffer_id bufferID, int fenceFd);
 
     bool onOMXFillBufferDone(
             IOMX::buffer_id bufferID,
             size_t rangeOffset, size_t rangeLength,
             OMX_U32 flags,
-            int64_t timeUs);
+            int64_t timeUs,
+            int fenceFd);
+
+    virtual bool onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano);
 
     void getMoreInputDataIfPossible();
 
@@ -259,9 +311,12 @@
 
     bool onConfigureComponent(const sp<AMessage> &msg);
     void onCreateInputSurface(const sp<AMessage> &msg);
+    void onSetInputSurface(const sp<AMessage> &msg);
     void onStart();
     void onShutdown(bool keepComponentAllocated);
 
+    status_t setupInputSurface();
+
     DISALLOW_EVIL_CONSTRUCTORS(LoadedState);
 };
 
@@ -317,6 +372,7 @@
     virtual void stateEntered();
 
     virtual bool onOMXEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2);
+    virtual bool onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano);
 
 private:
     bool mActive;
@@ -335,6 +391,7 @@
     virtual void stateEntered();
 
     virtual bool onOMXEvent(OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2);
+    virtual bool onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano);
 
 private:
     DISALLOW_EVIL_CONSTRUCTORS(OutputPortSettingsChangedState);
@@ -401,12 +458,45 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
+void ACodec::BufferInfo::setWriteFence(int fenceFd, const char *dbg) {
+    if (mFenceFd >= 0) {
+        ALOGW("OVERWRITE OF %s fence %d by write fence %d in %s",
+                mIsReadFence ? "read" : "write", mFenceFd, fenceFd, dbg);
+    }
+    mFenceFd = fenceFd;
+    mIsReadFence = false;
+}
+
+void ACodec::BufferInfo::setReadFence(int fenceFd, const char *dbg) {
+    if (mFenceFd >= 0) {
+        ALOGW("OVERWRITE OF %s fence %d by read fence %d in %s",
+                mIsReadFence ? "read" : "write", mFenceFd, fenceFd, dbg);
+    }
+    mFenceFd = fenceFd;
+    mIsReadFence = true;
+}
+
+void ACodec::BufferInfo::checkWriteFence(const char *dbg) {
+    if (mFenceFd >= 0 && mIsReadFence) {
+        ALOGD("REUSING read fence %d as write fence in %s", mFenceFd, dbg);
+    }
+}
+
+void ACodec::BufferInfo::checkReadFence(const char *dbg) {
+    if (mFenceFd >= 0 && !mIsReadFence) {
+        ALOGD("REUSING write fence %d as read fence in %s", mFenceFd, dbg);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 ACodec::ACodec()
     : mQuirks(0),
       mNode(0),
+      mNativeWindowUsageBits(0),
       mSentFormat(false),
+      mIsVideo(false),
       mIsEncoder(false),
-      mUseMetadataOnEncoderOutput(false),
       mFatalError(false),
       mShutdownInProgress(false),
       mExplicitShutdown(false),
@@ -416,10 +506,13 @@
       mChannelMaskPresent(false),
       mChannelMask(0),
       mDequeueCounter(0),
-      mStoreMetaDataInOutputBuffers(false),
-      mMetaDataBuffersToSubmit(0),
+      mInputMetadataType(kMetadataBufferTypeInvalid),
+      mOutputMetadataType(kMetadataBufferTypeInvalid),
+      mLegacyAdaptiveExperiment(false),
+      mMetadataBuffersToSubmit(0),
       mRepeatFrameDelayUs(-1ll),
       mMaxPtsGapUs(-1ll),
+      mMaxFps(-1),
       mTimePerFrameUs(-1ll),
       mTimePerCaptureUs(-1ll),
       mCreateInputBuffersSuspended(false),
@@ -452,61 +545,81 @@
 
 void ACodec::initiateSetup(const sp<AMessage> &msg) {
     msg->setWhat(kWhatSetup);
-    msg->setTarget(id());
+    msg->setTarget(this);
     msg->post();
 }
 
 void ACodec::signalSetParameters(const sp<AMessage> &params) {
-    sp<AMessage> msg = new AMessage(kWhatSetParameters, id());
+    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
     msg->setMessage("params", params);
     msg->post();
 }
 
 void ACodec::initiateAllocateComponent(const sp<AMessage> &msg) {
     msg->setWhat(kWhatAllocateComponent);
-    msg->setTarget(id());
+    msg->setTarget(this);
     msg->post();
 }
 
 void ACodec::initiateConfigureComponent(const sp<AMessage> &msg) {
     msg->setWhat(kWhatConfigureComponent);
-    msg->setTarget(id());
+    msg->setTarget(this);
     msg->post();
 }
 
+status_t ACodec::setSurface(const sp<Surface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);
+    msg->setObject("surface", surface);
+
+    sp<AMessage> response;
+    status_t err = msg->postAndAwaitResponse(&response);
+
+    if (err == OK) {
+        (void)response->findInt32("err", &err);
+    }
+    return err;
+}
+
 void ACodec::initiateCreateInputSurface() {
-    (new AMessage(kWhatCreateInputSurface, id()))->post();
+    (new AMessage(kWhatCreateInputSurface, this))->post();
+}
+
+void ACodec::initiateSetInputSurface(
+        const sp<PersistentSurface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatSetInputSurface, this);
+    msg->setObject("input-surface", surface);
+    msg->post();
 }
 
 void ACodec::signalEndOfInputStream() {
-    (new AMessage(kWhatSignalEndOfInputStream, id()))->post();
+    (new AMessage(kWhatSignalEndOfInputStream, this))->post();
 }
 
 void ACodec::initiateStart() {
-    (new AMessage(kWhatStart, id()))->post();
+    (new AMessage(kWhatStart, this))->post();
 }
 
 void ACodec::signalFlush() {
     ALOGV("[%s] signalFlush", mComponentName.c_str());
-    (new AMessage(kWhatFlush, id()))->post();
+    (new AMessage(kWhatFlush, this))->post();
 }
 
 void ACodec::signalResume() {
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 }
 
 void ACodec::initiateShutdown(bool keepComponentAllocated) {
-    sp<AMessage> msg = new AMessage(kWhatShutdown, id());
+    sp<AMessage> msg = new AMessage(kWhatShutdown, this);
     msg->setInt32("keepComponentAllocated", keepComponentAllocated);
     msg->post();
     if (!keepComponentAllocated) {
         // ensure shutdown completes in 3 seconds
-        (new AMessage(kWhatReleaseCodecInstance, id()))->post(3000000);
+        (new AMessage(kWhatReleaseCodecInstance, this))->post(3000000);
     }
 }
 
 void ACodec::signalRequestIDRFrame() {
-    (new AMessage(kWhatRequestIDRFrame, id()))->post();
+    (new AMessage(kWhatRequestIDRFrame, this))->post();
 }
 
 // *** NOTE: THE FOLLOWING WORKAROUND WILL BE REMOVED ***
@@ -514,13 +627,140 @@
 // This causes a halt if we already signaled an EOS on the input
 // port.  For now keep submitting an output buffer if there was an
 // EOS on the input port, but not yet on the output port.
-void ACodec::signalSubmitOutputMetaDataBufferIfEOS_workaround() {
+void ACodec::signalSubmitOutputMetadataBufferIfEOS_workaround() {
     if (mPortEOS[kPortIndexInput] && !mPortEOS[kPortIndexOutput] &&
-            mMetaDataBuffersToSubmit > 0) {
-        (new AMessage(kWhatSubmitOutputMetaDataBufferIfEOS, id()))->post();
+            mMetadataBuffersToSubmit > 0) {
+        (new AMessage(kWhatSubmitOutputMetadataBufferIfEOS, this))->post();
     }
 }
 
+status_t ACodec::handleSetSurface(const sp<Surface> &surface) {
+    // allow keeping unset surface
+    if (surface == NULL) {
+        if (mNativeWindow != NULL) {
+            ALOGW("cannot unset a surface");
+            return INVALID_OPERATION;
+        }
+        return OK;
+    }
+
+    // cannot switch from bytebuffers to surface
+    if (mNativeWindow == NULL) {
+        ALOGW("component was not configured with a surface");
+        return INVALID_OPERATION;
+    }
+
+    ANativeWindow *nativeWindow = surface.get();
+    // if we have not yet started the codec, we can simply set the native window
+    if (mBuffers[kPortIndexInput].size() == 0) {
+        mNativeWindow = surface;
+        return OK;
+    }
+
+    // we do not support changing a tunneled surface after start
+    if (mTunneled) {
+        ALOGW("cannot change tunneled surface");
+        return INVALID_OPERATION;
+    }
+
+    int usageBits = 0;
+    status_t err = setupNativeWindowSizeFormatAndUsage(nativeWindow, &usageBits);
+    if (err != OK) {
+        return err;
+    }
+
+    int ignoredFlags = kVideoGrallocUsage;
+    // The new output surface is not allowed to add usage flags, except for ignored ones.
+    if ((usageBits & ~(mNativeWindowUsageBits | ignoredFlags)) != 0) {
+        ALOGW("cannot change usage from %#x to %#x", mNativeWindowUsageBits, usageBits);
+        return BAD_VALUE;
+    }
+
+    // get min undequeued count. We cannot switch to a surface that has a higher
+    // undequeued count than we allocated.
+    int minUndequeuedBuffers = 0;
+    err = nativeWindow->query(
+            nativeWindow, NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS,
+            &minUndequeuedBuffers);
+    if (err != 0) {
+        ALOGE("NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS query failed: %s (%d)",
+                strerror(-err), -err);
+        return err;
+    }
+    if (minUndequeuedBuffers > (int)mNumUndequeuedBuffers) {
+        ALOGE("new surface holds onto more buffers (%d) than planned for (%zu)",
+                minUndequeuedBuffers, mNumUndequeuedBuffers);
+        return BAD_VALUE;
+    }
+
+    // we cannot change the number of output buffers while OMX is running
+    // set up surface to the same count
+    Vector<BufferInfo> &buffers = mBuffers[kPortIndexOutput];
+    ALOGV("setting up surface for %zu buffers", buffers.size());
+
+    err = native_window_set_buffer_count(nativeWindow, buffers.size());
+    if (err != 0) {
+        ALOGE("native_window_set_buffer_count failed: %s (%d)", strerror(-err),
+                -err);
+        return err;
+    }
+
+    // need to enable allocation when attaching
+    surface->getIGraphicBufferProducer()->allowAllocation(true);
+
+    // for meta data mode, we move dequeued buffers to the new surface.
+    // for non-meta mode, we must move all registered buffers
+    for (size_t i = 0; i < buffers.size(); ++i) {
+        const BufferInfo &info = buffers[i];
+        // skip undequeued buffers for meta data mode
+        if (storingMetadataInDecodedBuffers()
+                && !mLegacyAdaptiveExperiment
+                && info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
+            ALOGV("skipping buffer %p", info.mGraphicBuffer->getNativeBuffer());
+            continue;
+        }
+        ALOGV("attaching buffer %p", info.mGraphicBuffer->getNativeBuffer());
+
+        err = surface->attachBuffer(info.mGraphicBuffer->getNativeBuffer());
+        if (err != OK) {
+            ALOGE("failed to attach buffer %p to the new surface: %s (%d)",
+                    info.mGraphicBuffer->getNativeBuffer(),
+                    strerror(-err), -err);
+            return err;
+        }
+    }
+
+    // cancel undequeued buffers to new surface
+    if (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment) {
+        for (size_t i = 0; i < buffers.size(); ++i) {
+            BufferInfo &info = buffers.editItemAt(i);
+            if (info.mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
+                ALOGV("canceling buffer %p", info.mGraphicBuffer->getNativeBuffer());
+                err = nativeWindow->cancelBuffer(
+                        nativeWindow, info.mGraphicBuffer->getNativeBuffer(), info.mFenceFd);
+                info.mFenceFd = -1;
+                if (err != OK) {
+                    ALOGE("failed to cancel buffer %p to the new surface: %s (%d)",
+                            info.mGraphicBuffer->getNativeBuffer(),
+                            strerror(-err), -err);
+                    return err;
+                }
+            }
+        }
+        // disallow further allocation
+        (void)surface->getIGraphicBufferProducer()->allowAllocation(false);
+    }
+
+    // push blank buffers to previous window if requested
+    if (mFlags & kFlagPushBlankBuffersToNativeWindowOnShutdown) {
+        pushBlankBuffersToNativeWindow(mNativeWindow.get());
+    }
+
+    mNativeWindow = nativeWindow;
+    mNativeWindowUsageBits = usageBits;
+    return OK;
+}
+
 status_t ACodec::allocateBuffersOnPort(OMX_U32 portIndex) {
     CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
 
@@ -529,8 +769,8 @@
 
     status_t err;
     if (mNativeWindow != NULL && portIndex == kPortIndexOutput) {
-        if (mStoreMetaDataInOutputBuffers) {
-            err = allocateOutputMetaDataBuffers();
+        if (storingMetadataInDecodedBuffers()) {
+            err = allocateOutputMetadataBuffers();
         } else {
             err = allocateOutputBuffersFromNativeWindow();
         }
@@ -543,22 +783,44 @@
                 mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
 
         if (err == OK) {
-            ALOGV("[%s] Allocating %u buffers of size %u on %s port",
+            MetadataBufferType type =
+                portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
+            int32_t bufSize = def.nBufferSize;
+            if (type == kMetadataBufferTypeGrallocSource) {
+                bufSize = sizeof(VideoGrallocMetadata);
+            } else if (type == kMetadataBufferTypeANWBuffer) {
+                bufSize = sizeof(VideoNativeMetadata);
+            }
+
+            // If using gralloc or native source input metadata buffers, allocate largest
+            // metadata size as we prefer to generate native source metadata, but component
+            // may require gralloc source. For camera source, allocate at least enough
+            // size for native metadata buffers.
+            int32_t allottedSize = bufSize;
+            if (portIndex == kPortIndexInput && type >= kMetadataBufferTypeGrallocSource) {
+                bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
+            } else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
+                bufSize = max(bufSize, (int32_t)sizeof(VideoNativeMetadata));
+            }
+
+            ALOGV("[%s] Allocating %u buffers of size %d/%d (from %u using %s) on %s port",
                     mComponentName.c_str(),
-                    def.nBufferCountActual, def.nBufferSize,
+                    def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
                     portIndex == kPortIndexInput ? "input" : "output");
 
-            size_t totalSize = def.nBufferCountActual * def.nBufferSize;
+            size_t totalSize = def.nBufferCountActual * bufSize;
             mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
 
-            for (OMX_U32 i = 0; i < def.nBufferCountActual; ++i) {
-                sp<IMemory> mem = mDealer[portIndex]->allocate(def.nBufferSize);
+            for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
+                sp<IMemory> mem = mDealer[portIndex]->allocate(bufSize);
                 if (mem == NULL || mem->pointer() == NULL) {
                     return NO_MEMORY;
                 }
 
                 BufferInfo info;
                 info.mStatus = BufferInfo::OWNED_BY_US;
+                info.mFenceFd = -1;
+                info.mRenderInfo = NULL;
 
                 uint32_t requiresAllocateBufferBit =
                     (portIndex == kPortIndexInput)
@@ -566,27 +828,27 @@
                         : OMXCodec::kRequiresAllocateBufferOnOutputPorts;
 
                 if ((portIndex == kPortIndexInput && (mFlags & kFlagIsSecure))
-                        || mUseMetadataOnEncoderOutput) {
+                        || (portIndex == kPortIndexOutput && usingMetadataOnEncoderOutput())) {
                     mem.clear();
 
                     void *ptr;
                     err = mOMX->allocateBuffer(
-                            mNode, portIndex, def.nBufferSize, &info.mBufferID,
+                            mNode, portIndex, bufSize, &info.mBufferID,
                             &ptr);
 
-                    int32_t bufSize = mUseMetadataOnEncoderOutput ?
-                            (4 + sizeof(buffer_handle_t)) : def.nBufferSize;
-
                     info.mData = new ABuffer(ptr, bufSize);
                 } else if (mQuirks & requiresAllocateBufferBit) {
                     err = mOMX->allocateBufferWithBackup(
-                            mNode, portIndex, mem, &info.mBufferID);
+                            mNode, portIndex, mem, &info.mBufferID, allottedSize);
                 } else {
-                    err = mOMX->useBuffer(mNode, portIndex, mem, &info.mBufferID);
+                    err = mOMX->useBuffer(mNode, portIndex, mem, &info.mBufferID, allottedSize);
                 }
 
                 if (mem != NULL) {
-                    info.mData = new ABuffer(mem->pointer(), def.nBufferSize);
+                    info.mData = new ABuffer(mem->pointer(), bufSize);
+                    if (type == kMetadataBufferTypeANWBuffer) {
+                        ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
+                    }
                 }
 
                 mBuffers[portIndex].push(info);
@@ -617,9 +879,8 @@
     return OK;
 }
 
-status_t ACodec::configureOutputBuffersFromNativeWindow(
-        OMX_U32 *bufferCount, OMX_U32 *bufferSize,
-        OMX_U32 *minUndequeuedBuffers) {
+status_t ACodec::setupNativeWindowSizeFormatAndUsage(
+        ANativeWindow *nativeWindow /* nonnull */, int *finalUsage /* nonnull */) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = kPortIndexOutput;
@@ -631,40 +892,6 @@
         return err;
     }
 
-    err = native_window_set_buffers_geometry(
-            mNativeWindow.get(),
-            def.format.video.nFrameWidth,
-            def.format.video.nFrameHeight,
-            def.format.video.eColorFormat);
-
-    if (err != 0) {
-        ALOGE("native_window_set_buffers_geometry failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    if (mRotationDegrees != 0) {
-        uint32_t transform = 0;
-        switch (mRotationDegrees) {
-            case 0: transform = 0; break;
-            case 90: transform = HAL_TRANSFORM_ROT_90; break;
-            case 180: transform = HAL_TRANSFORM_ROT_180; break;
-            case 270: transform = HAL_TRANSFORM_ROT_270; break;
-            default: transform = 0; break;
-        }
-
-        if (transform > 0) {
-            err = native_window_set_buffers_transform(
-                    mNativeWindow.get(), transform);
-            if (err != 0) {
-                ALOGE("native_window_set_buffers_transform failed: %s (%d)",
-                        strerror(-err), -err);
-                return err;
-            }
-        }
-    }
-
-    // Set up the native window.
     OMX_U32 usage = 0;
     err = mOMX->getGraphicBufferUsage(mNode, kPortIndexOutput, &usage);
     if (err != 0) {
@@ -678,43 +905,34 @@
         usage |= GRALLOC_USAGE_PROTECTED;
     }
 
-    // Make sure to check whether either Stagefright or the video decoder
-    // requested protected buffers.
-    if (usage & GRALLOC_USAGE_PROTECTED) {
-        // Verify that the ANativeWindow sends images directly to
-        // SurfaceFlinger.
-        int queuesToNativeWindow = 0;
-        err = mNativeWindow->query(
-                mNativeWindow.get(), NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER,
-                &queuesToNativeWindow);
-        if (err != 0) {
-            ALOGE("error authenticating native window: %d", err);
-            return err;
-        }
-        if (queuesToNativeWindow != 1) {
-            ALOGE("native window could not be authenticated");
-            return PERMISSION_DENIED;
-        }
+    usage |= kVideoGrallocUsage;
+    *finalUsage = usage;
+
+    ALOGV("gralloc usage: %#x(OMX) => %#x(ACodec)", omxUsage, usage);
+    return setNativeWindowSizeFormatAndUsage(
+            nativeWindow,
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            def.format.video.eColorFormat,
+            mRotationDegrees,
+            usage);
+}
+
+status_t ACodec::configureOutputBuffersFromNativeWindow(
+        OMX_U32 *bufferCount, OMX_U32 *bufferSize,
+        OMX_U32 *minUndequeuedBuffers) {
+    OMX_PARAM_PORTDEFINITIONTYPE def;
+    InitOMXParams(&def);
+    def.nPortIndex = kPortIndexOutput;
+
+    status_t err = mOMX->getParameter(
+            mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+
+    if (err == OK) {
+        err = setupNativeWindowSizeFormatAndUsage(mNativeWindow.get(), &mNativeWindowUsageBits);
     }
-
-    int consumerUsage = 0;
-    err = mNativeWindow->query(
-            mNativeWindow.get(), NATIVE_WINDOW_CONSUMER_USAGE_BITS,
-            &consumerUsage);
-    if (err != 0) {
-        ALOGW("failed to get consumer usage bits. ignoring");
-        err = 0;
-    }
-
-    ALOGV("gralloc usage: %#x(OMX) => %#x(ACodec) + %#x(Consumer) = %#x",
-            omxUsage, usage, consumerUsage, usage | consumerUsage);
-    usage |= consumerUsage;
-    err = native_window_set_usage(
-            mNativeWindow.get(),
-            usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
-
-    if (err != 0) {
-        ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
+    if (err != OK) {
+        mNativeWindowUsageBits = 0;
         return err;
     }
 
@@ -798,6 +1016,11 @@
         return err;
     mNumUndequeuedBuffers = minUndequeuedBuffers;
 
+    if (!storingMetadataInDecodedBuffers()) {
+        static_cast<Surface*>(mNativeWindow.get())
+                ->getIGraphicBufferProducer()->allowAllocation(true);
+    }
+
     ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
          "output port",
          mComponentName.c_str(), bufferCount, bufferSize);
@@ -805,7 +1028,8 @@
     // Dequeue buffers and send them to OMX
     for (OMX_U32 i = 0; i < bufferCount; i++) {
         ANativeWindowBuffer *buf;
-        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf);
+        int fenceFd;
+        err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf, &fenceFd);
         if (err != 0) {
             ALOGE("dequeueBuffer failed: %s (%d)", strerror(-err), -err);
             break;
@@ -814,6 +1038,9 @@
         sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
         BufferInfo info;
         info.mStatus = BufferInfo::OWNED_BY_US;
+        info.mFenceFd = fenceFd;
+        info.mIsReadFence = false;
+        info.mRenderInfo = NULL;
         info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
         info.mGraphicBuffer = graphicBuffer;
         mBuffers[kPortIndexOutput].push(info);
@@ -850,16 +1077,23 @@
 
     for (OMX_U32 i = cancelStart; i < cancelEnd; i++) {
         BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
-        status_t error = cancelBufferToNativeWindow(info);
-        if (err == 0) {
-            err = error;
+        if (info->mStatus == BufferInfo::OWNED_BY_US) {
+            status_t error = cancelBufferToNativeWindow(info);
+            if (err == 0) {
+                err = error;
+            }
         }
     }
 
+    if (!storingMetadataInDecodedBuffers()) {
+        static_cast<Surface*>(mNativeWindow.get())
+                ->getIGraphicBufferProducer()->allowAllocation(false);
+    }
+
     return err;
 }
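
Editorial note (not part of the patch): the allowAllocation(true)/allowAllocation(false) bracketing added above restricts buffer allocation to the window in which all output buffers are dequeued up front and immediately cancelled back. A compact model of that pattern, using a hypothetical producer type in place of the real Surface/IGraphicBufferProducer:

```cpp
#include <cstdio>
#include <vector>

// Hypothetical producer modelling IGraphicBufferProducer::allowAllocation().
struct FakeProducer {
    bool allocationAllowed = false;
    int nextId = 0;
    void allowAllocation(bool allow) { allocationAllowed = allow; }
    int dequeue() { return allocationAllowed ? nextId++ : -1; }  // -1 == would fail
    void cancel(int /*buf*/) {}
};

// Dequeue `count` buffers so they get allocated, cancel them all back, and
// leave allocation disabled so later dequeues can only reuse this set.
bool preallocate(FakeProducer &producer, int count) {
    producer.allowAllocation(true);
    std::vector<int> bufs;
    for (int i = 0; i < count; ++i) {
        int b = producer.dequeue();
        if (b < 0) {
            break;                    // mirror the patch: stop on the first failure
        }
        bufs.push_back(b);
    }
    for (int b : bufs) {
        producer.cancel(b);
    }
    producer.allowAllocation(false);
    return (int)bufs.size() == count;
}

int main() {
    FakeProducer p;
    printf("preallocated all buffers: %s\n", preallocate(p, 4) ? "yes" : "no");
    return 0;
}
```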
 
-status_t ACodec::allocateOutputMetaDataBuffers() {
+status_t ACodec::allocateOutputMetadataBuffers() {
     OMX_U32 bufferCount, bufferSize, minUndequeuedBuffers;
     status_t err = configureOutputBuffersFromNativeWindow(
             &bufferCount, &bufferSize, &minUndequeuedBuffers);
@@ -870,26 +1104,32 @@
     ALOGV("[%s] Allocating %u meta buffers on output port",
          mComponentName.c_str(), bufferCount);
 
-    size_t totalSize = bufferCount * 8;
+    size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
+            sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
+    size_t totalSize = bufferCount * bufSize;
     mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
 
     // Dequeue buffers and send them to OMX
     for (OMX_U32 i = 0; i < bufferCount; i++) {
         BufferInfo info;
         info.mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
+        info.mFenceFd = -1;
+        info.mRenderInfo = NULL;
         info.mGraphicBuffer = NULL;
         info.mDequeuedAt = mDequeueCounter;
 
-        sp<IMemory> mem = mDealer[kPortIndexOutput]->allocate(
-                sizeof(struct VideoDecoderOutputMetaData));
+        sp<IMemory> mem = mDealer[kPortIndexOutput]->allocate(bufSize);
         if (mem == NULL || mem->pointer() == NULL) {
             return NO_MEMORY;
         }
+        if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
+            ((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
+        }
         info.mData = new ABuffer(mem->pointer(), mem->size());
 
         // we use useBuffer for metadata regardless of quirks
         err = mOMX->useBuffer(
-                mNode, kPortIndexOutput, mem, &info.mBufferID);
+                mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
 
         mBuffers[kPortIndexOutput].push(info);
 
@@ -897,28 +1137,111 @@
              mComponentName.c_str(), info.mBufferID, mem->pointer());
     }
 
-    mMetaDataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
+    if (mLegacyAdaptiveExperiment) {
+        // preallocate and preregister buffers
+        static_cast<Surface *>(mNativeWindow.get())
+                ->getIGraphicBufferProducer()->allowAllocation(true);
+
+        ALOGV("[%s] Allocating %u buffers from a native window of size %u on "
+             "output port",
+             mComponentName.c_str(), bufferCount, bufferSize);
+
+        // Dequeue buffers then cancel them all
+        for (OMX_U32 i = 0; i < bufferCount; i++) {
+            BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+
+            ANativeWindowBuffer *buf;
+            int fenceFd;
+            err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf, &fenceFd);
+            if (err != 0) {
+                ALOGE("dequeueBuffer failed: %s (%d)", strerror(-err), -err);
+                break;
+            }
+
+            sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
+            mOMX->updateGraphicBufferInMeta(
+                    mNode, kPortIndexOutput, graphicBuffer, info->mBufferID);
+            info->mStatus = BufferInfo::OWNED_BY_US;
+            info->setWriteFence(fenceFd, "allocateOutputMetadataBuffers for legacy");
+            info->mGraphicBuffer = graphicBuffer;
+        }
+
+        for (OMX_U32 i = 0; i < mBuffers[kPortIndexOutput].size(); i++) {
+            BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+            if (info->mStatus == BufferInfo::OWNED_BY_US) {
+                status_t error = cancelBufferToNativeWindow(info);
+                if (err == OK) {
+                    err = error;
+                }
+            }
+        }
+
+        static_cast<Surface*>(mNativeWindow.get())
+                ->getIGraphicBufferProducer()->allowAllocation(false);
+    }
+
+    mMetadataBuffersToSubmit = bufferCount - minUndequeuedBuffers;
     return err;
 }
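
Editorial note (not part of the patch): allocateOutputMetadataBuffers() now sizes each shared-memory slot from the metadata type instead of a hard-coded 8 bytes, because an ANWBuffer-style record also has to carry a fence fd. A standalone sketch with mock struct layouts (field sets simplified; the real definitions live in the media hardware headers):

```cpp
#include <cstddef>
#include <cstdio>

// Simplified mock layouts; the real structs (VideoGrallocMetadata,
// VideoNativeMetadata) also carry an eType header field.
struct MockGrallocMetadata { void *pHandle; };                 // gralloc-source style
struct MockNativeMetadata  { void *pBuffer; int nFenceFd; };   // ANWBuffer style: fence travels with the buffer

enum class MetaType { GrallocSource, ANWBuffer };

size_t metadataSlotSize(MetaType type) {
    return type == MetaType::ANWBuffer ? sizeof(MockNativeMetadata)
                                       : sizeof(MockGrallocMetadata);
}

int main() {
    const size_t bufferCount = 8;
    printf("total dealer size: %zu bytes\n", bufferCount * metadataSlotSize(MetaType::ANWBuffer));
    return 0;
}
```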
 
-status_t ACodec::submitOutputMetaDataBuffer() {
-    CHECK(mStoreMetaDataInOutputBuffers);
-    if (mMetaDataBuffersToSubmit == 0)
+status_t ACodec::submitOutputMetadataBuffer() {
+    CHECK(storingMetadataInDecodedBuffers());
+    if (mMetadataBuffersToSubmit == 0)
         return OK;
 
     BufferInfo *info = dequeueBufferFromNativeWindow();
-    if (info == NULL)
+    if (info == NULL) {
         return ERROR_IO;
+    }
 
     ALOGV("[%s] submitting output meta buffer ID %u for graphic buffer %p",
           mComponentName.c_str(), info->mBufferID, info->mGraphicBuffer.get());
 
-    --mMetaDataBuffersToSubmit;
-    CHECK_EQ(mOMX->fillBuffer(mNode, info->mBufferID),
-             (status_t)OK);
+    --mMetadataBuffersToSubmit;
+    info->checkWriteFence("submitOutputMetadataBuffer");
+    status_t err = mOMX->fillBuffer(mNode, info->mBufferID, info->mFenceFd);
+    info->mFenceFd = -1;
+    if (err == OK) {
+        info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+    }
 
-    info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
-    return OK;
+    return err;
+}
+
+status_t ACodec::waitForFence(int fd, const char *dbg) {
+    status_t res = OK;
+    if (fd >= 0) {
+        sp<Fence> fence = new Fence(fd);
+        res = fence->wait(IOMX::kFenceTimeoutMs);
+        ALOGW_IF(res != OK, "FENCE TIMEOUT for %d in %s", fd, dbg);
+    }
+    return res;
+}
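
Editorial note (not part of the patch): the new waitForFence() helper only does work for a valid fd, wraps it in a Fence (which takes ownership of the descriptor), and waits with the IOMX timeout, logging rather than failing hard on a timeout. A self-contained model of that shape, with a stand-in for the fence wait so it compiles on its own:

```cpp
#include <cstdio>
#include <unistd.h>

// Stand-in for Fence::wait(); always reports "signalled" in this sketch.
static int fakeFenceWait(int /*fd*/, int /*timeoutMs*/) { return 0; }

// Mirrors the shape of the helper: a negative fd means "no fence", a timeout
// is logged but left for the caller to decide about, and the fd is always
// consumed (the real sp<Fence> takes ownership of it).
int waitForFenceSketch(int fd, const char *dbg) {
    int res = 0;
    if (fd >= 0) {
        res = fakeFenceWait(fd, 1000 /* ms, stand-in for the IOMX fence timeout */);
        if (res != 0) {
            fprintf(stderr, "FENCE TIMEOUT for %d in %s\n", fd, dbg);
        }
        close(fd);
    }
    return res;
}

int main() {
    printf("no-fence case returns %d\n", waitForFenceSketch(-1, "example"));
    return 0;
}
```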
+
+// static
+const char *ACodec::_asString(BufferInfo::Status s) {
+    switch (s) {
+        case BufferInfo::OWNED_BY_US:            return "OUR";
+        case BufferInfo::OWNED_BY_COMPONENT:     return "COMPONENT";
+        case BufferInfo::OWNED_BY_UPSTREAM:      return "UPSTREAM";
+        case BufferInfo::OWNED_BY_DOWNSTREAM:    return "DOWNSTREAM";
+        case BufferInfo::OWNED_BY_NATIVE_WINDOW: return "SURFACE";
+        case BufferInfo::UNRECOGNIZED:           return "UNRECOGNIZED";
+        default:                                 return "?";
+    }
+}
+
+void ACodec::dumpBuffers(OMX_U32 portIndex) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+    ALOGI("[%s] %s port has %zu buffers:", mComponentName.c_str(),
+            portIndex == kPortIndexInput ? "input" : "output", mBuffers[portIndex].size());
+    for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+        const BufferInfo &info = mBuffers[portIndex][i];
+        ALOGI("  slot %2zu: #%8u %p/%p %s(%d) dequeued:%u",
+                i, info.mBufferID, info.mGraphicBuffer.get(),
+                info.mGraphicBuffer == NULL ? NULL : info.mGraphicBuffer->getNativeBuffer(),
+                _asString(info.mStatus), info.mStatus, info.mDequeuedAt);
+    }
 }
 
 status_t ACodec::cancelBufferToNativeWindow(BufferInfo *info) {
@@ -927,17 +1250,59 @@
     ALOGV("[%s] Calling cancelBuffer on buffer %u",
          mComponentName.c_str(), info->mBufferID);
 
+    info->checkWriteFence("cancelBufferToNativeWindow");
     int err = mNativeWindow->cancelBuffer(
-        mNativeWindow.get(), info->mGraphicBuffer.get(), -1);
+        mNativeWindow.get(), info->mGraphicBuffer.get(), info->mFenceFd);
+    info->mFenceFd = -1;
 
     ALOGW_IF(err != 0, "[%s] can not return buffer %u to native window",
             mComponentName.c_str(), info->mBufferID);
-
+    // change ownership even if cancelBuffer fails
     info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
 
     return err;
 }
 
+void ACodec::updateRenderInfoForDequeuedBuffer(
+        ANativeWindowBuffer *buf, int fenceFd, BufferInfo *info) {
+
+    info->mRenderInfo =
+        mRenderTracker.updateInfoForDequeuedBuffer(
+                buf, fenceFd, info - &mBuffers[kPortIndexOutput][0]);
+
+    // check for any fences already signaled
+    notifyOfRenderedFrames(false /* dropIncomplete */, info->mRenderInfo);
+}
+
+void ACodec::onFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
+    if (mRenderTracker.onFrameRendered(mediaTimeUs, systemNano) != OK) {
+        mRenderTracker.dumpRenderQueue();
+    }
+}
+
+void ACodec::notifyOfRenderedFrames(bool dropIncomplete, FrameRenderTracker::Info *until) {
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", CodecBase::kWhatOutputFramesRendered);
+    std::list<FrameRenderTracker::Info> done =
+        mRenderTracker.checkFencesAndGetRenderedFrames(until, dropIncomplete);
+
+    // unlink untracked frames
+    for (std::list<FrameRenderTracker::Info>::const_iterator it = done.cbegin();
+            it != done.cend(); ++it) {
+        ssize_t index = it->getIndex();
+        if (index >= 0 && (size_t)index < mBuffers[kPortIndexOutput].size()) {
+            mBuffers[kPortIndexOutput].editItemAt(index).mRenderInfo = NULL;
+        } else if (index >= 0) {
+            // THIS SHOULD NEVER HAPPEN
+            ALOGE("invalid index %zd in %zu", index, mBuffers[kPortIndexOutput].size());
+        }
+    }
+
+    if (MediaCodec::CreateFramesRenderedMessage(done, msg)) {
+        msg->post();
+    }
+}
+
 ACodec::BufferInfo *ACodec::dequeueBufferFromNativeWindow() {
     ANativeWindowBuffer *buf;
     CHECK(mNativeWindow.get() != NULL);
@@ -953,26 +1318,61 @@
         return NULL;
     }
 
-    if (native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &buf) != 0) {
-        ALOGE("dequeueBuffer failed.");
-        return NULL;
-    }
+    int fenceFd = -1;
+    do {
+        status_t err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf, &fenceFd);
+        if (err != 0) {
+            ALOGE("dequeueBuffer failed: %s(%d).", asString(err), err);
+            return NULL;
+        }
 
+        bool stale = false;
+        for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+            BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+
+            if (info->mGraphicBuffer != NULL &&
+                    info->mGraphicBuffer->handle == buf->handle) {
+                // Since consumers can attach buffers to BufferQueues, it is possible
+                // that a known yet stale buffer can return from a surface that we
+                // once used.  We can simply ignore this as we have already dequeued
+                // this buffer properly.  NOTE: this does not eliminate all cases,
+                // e.g. it is possible that we have queued the valid buffer to the
+                // NW, and a stale copy of the same buffer gets dequeued - which will
+                // be treated as the valid buffer by ACodec.
+                if (info->mStatus != BufferInfo::OWNED_BY_NATIVE_WINDOW) {
+                    ALOGI("dequeued stale buffer %p. discarding", buf);
+                    stale = true;
+                    break;
+                }
+
+                ALOGV("dequeued buffer %p", info->mGraphicBuffer->getNativeBuffer());
+                info->mStatus = BufferInfo::OWNED_BY_US;
+                info->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow");
+                updateRenderInfoForDequeuedBuffer(buf, fenceFd, info);
+                return info;
+            }
+        }
+
+        // It is also possible to receive a previously unregistered buffer
+        // in non-meta mode. These should be treated as stale buffers. The
+        // same is possible in meta mode, in which case, it will be treated
+        // as a normal buffer, which is not desirable.
+        // TODO: fix this.
+        if (!stale && (!storingMetadataInDecodedBuffers() || mLegacyAdaptiveExperiment)) {
+            ALOGI("dequeued unrecognized (stale) buffer %p. discarding", buf);
+            stale = true;
+        }
+        if (stale) {
+            // TODO: detach stale buffer, but there is no API yet to do it.
+            buf = NULL;
+        }
+    } while (buf == NULL);
+
+    // get oldest undequeued buffer
     BufferInfo *oldest = NULL;
     for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
         BufferInfo *info =
             &mBuffers[kPortIndexOutput].editItemAt(i);
-
-        if (info->mGraphicBuffer != NULL &&
-            info->mGraphicBuffer->handle == buf->handle) {
-            CHECK_EQ((int)info->mStatus,
-                     (int)BufferInfo::OWNED_BY_NATIVE_WINDOW);
-
-            info->mStatus = BufferInfo::OWNED_BY_US;
-
-            return info;
-        }
-
         if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW &&
             (oldest == NULL ||
              // avoid potential issues from counter rolling over
@@ -982,48 +1382,64 @@
         }
     }
 
-    if (oldest) {
-        CHECK(mStoreMetaDataInOutputBuffers);
+    // it is impossible to dequeue a buffer when there are no buffers with ANW
+    CHECK(oldest != NULL);
+    // it is impossible to dequeue an unknown buffer in non-meta mode, as the
+    // while loop above does not complete
+    CHECK(storingMetadataInDecodedBuffers());
 
-        // discard buffer in LRU info and replace with new buffer
-        oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
-        oldest->mStatus = BufferInfo::OWNED_BY_US;
+    // discard buffer in LRU info and replace with new buffer
+    oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
+    oldest->mStatus = BufferInfo::OWNED_BY_US;
+    oldest->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow for oldest");
+    mRenderTracker.untrackFrame(oldest->mRenderInfo);
+    oldest->mRenderInfo = NULL;
 
-        mOMX->updateGraphicBufferInMeta(
-                mNode, kPortIndexOutput, oldest->mGraphicBuffer,
-                oldest->mBufferID);
+    mOMX->updateGraphicBufferInMeta(
+            mNode, kPortIndexOutput, oldest->mGraphicBuffer,
+            oldest->mBufferID);
 
-        VideoDecoderOutputMetaData *metaData =
-            reinterpret_cast<VideoDecoderOutputMetaData *>(
-                    oldest->mData->base());
-        CHECK_EQ(metaData->eType, kMetadataBufferTypeGrallocSource);
-
+    if (mOutputMetadataType == kMetadataBufferTypeGrallocSource) {
+        VideoGrallocMetadata *grallocMeta =
+            reinterpret_cast<VideoGrallocMetadata *>(oldest->mData->base());
         ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
-                oldest - &mBuffers[kPortIndexOutput][0],
+                (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
                 mDequeueCounter - oldest->mDequeuedAt,
-                metaData->pHandle,
+                (void *)(uintptr_t)grallocMeta->pHandle,
                 oldest->mGraphicBuffer->handle, oldest->mData->base());
-
-        return oldest;
+    } else if (mOutputMetadataType == kMetadataBufferTypeANWBuffer) {
+        VideoNativeMetadata *nativeMeta =
+            reinterpret_cast<VideoNativeMetadata *>(oldest->mData->base());
+        ALOGV("replaced oldest buffer #%u with age %u (%p/%p stored in %p)",
+                (unsigned)(oldest - &mBuffers[kPortIndexOutput][0]),
+                mDequeueCounter - oldest->mDequeuedAt,
+                (void *)(uintptr_t)nativeMeta->pBuffer,
+                oldest->mGraphicBuffer->getNativeBuffer(), oldest->mData->base());
     }
 
-    TRESPASS();
-
-    return NULL;
+    updateRenderInfoForDequeuedBuffer(buf, fenceFd, oldest);
+    return oldest;
 }
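
Editorial note (not part of the patch): in the reworked dequeue loop a freshly dequeued ANativeWindowBuffer is matched back to the tracked BufferInfo by comparing buffer handles, and only an entry currently owned by the native window is accepted; anything else is treated as stale. A reduced standalone model of that lookup, with the tracking types mocked up:

```cpp
#include <cstdio>
#include <vector>

// Minimal stand-ins for the tracked output-buffer state (illustrative only).
enum class Owner { Us, Component, NativeWindow };
struct TrackedBuffer { const void *handle; Owner owner; };

// Return the matching tracked entry if the dequeued handle is known and was
// owned by the window; return nullptr for unknown or stale handles.
TrackedBuffer *matchDequeued(std::vector<TrackedBuffer> &buffers, const void *handle) {
    for (TrackedBuffer &b : buffers) {
        if (b.handle != handle) {
            continue;
        }
        if (b.owner != Owner::NativeWindow) {
            printf("dequeued stale buffer %p, discarding\n", handle);
            return nullptr;
        }
        b.owner = Owner::Us;   // take ownership, as the patch does before returning the info
        return &b;
    }
    return nullptr;            // unrecognized buffer: also treated as stale in byte-buffer mode
}

int main() {
    int a = 0, b = 0;
    std::vector<TrackedBuffer> buffers = {{&a, Owner::NativeWindow}, {&b, Owner::Component}};
    printf("match: %p\n", (void *)matchDequeued(buffers, &a));
    return 0;
}
```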
 
 status_t ACodec::freeBuffersOnPort(OMX_U32 portIndex) {
-    for (size_t i = mBuffers[portIndex].size(); i-- > 0;) {
-        CHECK_EQ((status_t)OK, freeBuffer(portIndex, i));
+    status_t err = OK;
+    for (size_t i = mBuffers[portIndex].size(); i > 0;) {
+        i--;
+        status_t err2 = freeBuffer(portIndex, i);
+        if (err == OK) {
+            err = err2;
+        }
     }
 
+    // clear mDealer even on an error
     mDealer[portIndex].clear();
-
-    return OK;
+    return err;
 }
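
Editorial note (not part of the patch): the freeBuffersOnPort() and freeOutputBuffersNotOwnedByComponent() rewrites replace hard CHECKs with a "remember the first error, keep freeing" pattern, walking the buffer list from the back. A generic sketch of that idiom:

```cpp
#include <cstdio>
#include <vector>

// Free every resource, remembering only the first failure (0 == OK).
int freeAll(const std::vector<int> &results) {
    int err = 0;
    for (size_t i = results.size(); i > 0;) {
        --i;                              // iterate back to front, as the patch does
        int err2 = results[i];            // stand-in for freeBuffer(port, i)
        if (err == 0) {
            err = err2;                   // keep the first error, but do not stop early
        }
    }
    return err;
}

int main() {
    // Prints -5: the last entry, which the backwards walk hits first.
    printf("aggregate result: %d\n", freeAll({0, -32, 0, -5}));
    return 0;
}
```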
 
 status_t ACodec::freeOutputBuffersNotOwnedByComponent() {
-    for (size_t i = mBuffers[kPortIndexOutput].size(); i-- > 0;) {
+    status_t err = OK;
+    for (size_t i = mBuffers[kPortIndexOutput].size(); i > 0;) {
+        i--;
         BufferInfo *info =
             &mBuffers[kPortIndexOutput].editItemAt(i);
 
@@ -1031,36 +1447,65 @@
         // or being drained.
         if (info->mStatus != BufferInfo::OWNED_BY_COMPONENT &&
             info->mStatus != BufferInfo::OWNED_BY_DOWNSTREAM) {
-            CHECK_EQ((status_t)OK, freeBuffer(kPortIndexOutput, i));
+            status_t err2 = freeBuffer(kPortIndexOutput, i);
+            if (err == OK) {
+                err = err2;
+            }
         }
     }
 
-    return OK;
+    return err;
 }
 
 status_t ACodec::freeBuffer(OMX_U32 portIndex, size_t i) {
     BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
+    status_t err = OK;
 
-    CHECK(info->mStatus == BufferInfo::OWNED_BY_US
-            || info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW);
-
-    if (portIndex == kPortIndexOutput && mNativeWindow != NULL
-            && info->mStatus == BufferInfo::OWNED_BY_US) {
-        cancelBufferToNativeWindow(info);
+    // there should not be any fences in the metadata
+    MetadataBufferType type =
+        portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
+    if (type == kMetadataBufferTypeANWBuffer && info->mData != NULL
+            && info->mData->size() >= sizeof(VideoNativeMetadata)) {
+        int fenceFd = ((VideoNativeMetadata *)info->mData->data())->nFenceFd;
+        if (fenceFd >= 0) {
+            ALOGW("unreleased fence (%d) in %s metadata buffer %zu",
+                    fenceFd, portIndex == kPortIndexInput ? "input" : "output", i);
+        }
     }
 
-    CHECK_EQ(mOMX->freeBuffer(
-                mNode, portIndex, info->mBufferID),
-             (status_t)OK);
+    switch (info->mStatus) {
+        case BufferInfo::OWNED_BY_US:
+            if (portIndex == kPortIndexOutput && mNativeWindow != NULL) {
+                (void)cancelBufferToNativeWindow(info);
+            }
+            // fall through
 
+        case BufferInfo::OWNED_BY_NATIVE_WINDOW:
+            err = mOMX->freeBuffer(mNode, portIndex, info->mBufferID);
+            break;
+
+        default:
+            ALOGE("trying to free buffer not owned by us or ANW (%d)", info->mStatus);
+            err = FAILED_TRANSACTION;
+            break;
+    }
+
+    if (info->mFenceFd >= 0) {
+        ::close(info->mFenceFd);
+    }
+
+    if (portIndex == kPortIndexOutput) {
+        mRenderTracker.untrackFrame(info->mRenderInfo, i);
+        info->mRenderInfo = NULL;
+    }
+
+    // remove buffer even if mOMX->freeBuffer fails
     mBuffers[portIndex].removeAt(i);
-
-    return OK;
+    return err;
 }
 
 ACodec::BufferInfo *ACodec::findBufferByID(
-        uint32_t portIndex, IOMX::buffer_id bufferID,
-        ssize_t *index) {
+        uint32_t portIndex, IOMX::buffer_id bufferID, ssize_t *index) {
     for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
         BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
 
@@ -1072,8 +1517,7 @@
         }
     }
 
-    TRESPASS();
-
+    ALOGE("Could not find buffer with ID %u", bufferID);
     return NULL;
 }
 
@@ -1186,6 +1630,9 @@
 
     mIsEncoder = encoder;
 
+    mInputMetadataType = kMetadataBufferTypeInvalid;
+    mOutputMetadataType = kMetadataBufferTypeInvalid;
+
     status_t err = setComponentRole(encoder /* isEncoder */, mime);
 
     if (err != OK) {
@@ -1203,15 +1650,27 @@
     if (encoder
             && msg->findInt32("store-metadata-in-buffers", &storeMeta)
             && storeMeta != 0) {
-        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE);
-
+        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexInput, OMX_TRUE, &mInputMetadataType);
         if (err != OK) {
-              ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
+            ALOGE("[%s] storeMetaDataInBuffers (input) failed w/ err %d",
                     mComponentName.c_str(), err);
 
-              return err;
-          }
-      }
+            return err;
+        }
+        // For this specific case we could be using camera source even if storeMetaDataInBuffers
+        // returns Gralloc source. Pretend that we are; this will force us to use nBufferSize.
+        if (mInputMetadataType == kMetadataBufferTypeGrallocSource) {
+            mInputMetadataType = kMetadataBufferTypeCameraSource;
+        }
+
+        uint32_t usageBits;
+        if (mOMX->getParameter(
+                mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+                &usageBits, sizeof(usageBits)) == OK) {
+            inputFormat->setInt32(
+                    "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
+        }
+    }
 
     int32_t prependSPSPPS = 0;
     if (encoder
@@ -1244,19 +1703,16 @@
     // sps/pps to idr frames, since in metadata mode the bitstream is in an
     // opaque handle, to which we don't have access.
     int32_t video = !strncasecmp(mime, "video/", 6);
+    mIsVideo = video;
     if (encoder && video) {
         OMX_BOOL enable = (OMX_BOOL) (prependSPSPPS
             && msg->findInt32("store-metadata-in-buffers-output", &storeMeta)
             && storeMeta != 0);
 
-        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable);
-
+        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, enable, &mOutputMetadataType);
         if (err != OK) {
             ALOGE("[%s] storeMetaDataInBuffers (output) failed w/ err %d",
                 mComponentName.c_str(), err);
-            mUseMetadataOnEncoderOutput = 0;
-        } else {
-            mUseMetadataOnEncoderOutput = enable;
         }
 
         if (!msg->findInt64(
@@ -1269,6 +1725,10 @@
             mMaxPtsGapUs = -1ll;
         }
 
+        if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
+            mMaxFps = -1;
+        }
+
         if (!msg->findInt64("time-lapse", &mTimePerCaptureUs)) {
             mTimePerCaptureUs = -1ll;
         }
@@ -1284,7 +1744,7 @@
     sp<RefBase> obj;
     bool haveNativeWindow = msg->findObject("native-window", &obj)
             && obj != NULL && video && !encoder;
-    mStoreMetaDataInOutputBuffers = false;
+    mLegacyAdaptiveExperiment = false;
     if (video && !encoder) {
         inputFormat->setInt32("adaptive-playback", false);
 
@@ -1299,9 +1759,8 @@
         }
     }
     if (haveNativeWindow) {
-        sp<NativeWindowWrapper> windowWrapper(
-                static_cast<NativeWindowWrapper *>(obj.get()));
-        sp<ANativeWindow> nativeWindow = windowWrapper->getNativeWindow();
+        sp<ANativeWindow> nativeWindow =
+            static_cast<ANativeWindow *>(static_cast<Surface *>(obj.get()));
 
         // START of temporary support for automatic FRC - THIS WILL BE REMOVED
         int32_t autoFrc;
@@ -1370,7 +1829,7 @@
 
             // Always try to enable dynamic output buffers on native surface
             err = mOMX->storeMetaDataInBuffers(
-                    mNode, kPortIndexOutput, OMX_TRUE);
+                    mNode, kPortIndexOutput, OMX_TRUE, &mOutputMetadataType);
             if (err != OK) {
                 ALOGE("[%s] storeMetaDataInBuffers failed w/ err %d",
                         mComponentName.c_str(), err);
@@ -1422,7 +1881,10 @@
             } else {
                 ALOGV("[%s] storeMetaDataInBuffers succeeded",
                         mComponentName.c_str());
-                mStoreMetaDataInOutputBuffers = true;
+                CHECK(storingMetadataInDecodedBuffers());
+                mLegacyAdaptiveExperiment = ADebug::isExperimentEnabled(
+                        "legacy-adaptive", !msg->contains("no-experiments"));
+
                 inputFormat->setInt32("adaptive-playback", true);
             }
 
@@ -1460,28 +1922,31 @@
         }
 
         if (haveNativeWindow) {
-            sp<NativeWindowWrapper> nativeWindow(
-                    static_cast<NativeWindowWrapper *>(obj.get()));
-            CHECK(nativeWindow != NULL);
-            mNativeWindow = nativeWindow->getNativeWindow();
-
-            native_window_set_scaling_mode(
-                    mNativeWindow.get(), NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+            mNativeWindow = static_cast<Surface *>(obj.get());
         }
 
         // initialize native window now to get actual output format
         // TODO: this is needed for some encoders even though they don't use native window
-        CHECK_EQ((status_t)OK, initNativeWindow());
+        err = initNativeWindow();
+        if (err != OK) {
+            return err;
+        }
 
         // fallback for devices that do not handle flex-YUV for native buffers
         if (haveNativeWindow) {
             int32_t requestedColorFormat = OMX_COLOR_FormatUnused;
             if (msg->findInt32("color-format", &requestedColorFormat) &&
                     requestedColorFormat == OMX_COLOR_FormatYUV420Flexible) {
-                CHECK_EQ(getPortFormat(kPortIndexOutput, outputFormat), (status_t)OK);
+                status_t err = getPortFormat(kPortIndexOutput, outputFormat);
+                if (err != OK) {
+                    return err;
+                }
                 int32_t colorFormat = OMX_COLOR_FormatUnused;
                 OMX_U32 flexibleEquivalent = OMX_COLOR_FormatUnused;
-                CHECK(outputFormat->findInt32("color-format", &colorFormat));
+                if (!outputFormat->findInt32("color-format", &colorFormat)) {
+                    ALOGE("output port did not have a color format (wrong domain?)");
+                    return BAD_VALUE;
+                }
                 ALOGD("[%s] Requested output format %#x and got %#x.",
                         mComponentName.c_str(), requestedColorFormat, colorFormat);
                 if (!isFlexibleColorFormat(
@@ -1491,11 +1956,13 @@
                     // to SW renderer
                     ALOGI("[%s] Falling back to software renderer", mComponentName.c_str());
                     mNativeWindow.clear();
+                    mNativeWindowUsageBits = 0;
                     haveNativeWindow = false;
                     usingSwRenderer = true;
-                    if (mStoreMetaDataInOutputBuffers) {
-                        err = mOMX->storeMetaDataInBuffers(mNode, kPortIndexOutput, OMX_FALSE);
-                        mStoreMetaDataInOutputBuffers = false;
+                    if (storingMetadataInDecodedBuffers()) {
+                        err = mOMX->storeMetaDataInBuffers(
+                                mNode, kPortIndexOutput, OMX_FALSE, &mOutputMetadataType);
+                        mOutputMetadataType = kMetadataBufferTypeInvalid; // just in case
                         // TODO: implement adaptive-playback support for bytebuffer mode.
                         // This is done by SW codecs, but most HW codecs don't support it.
                         inputFormat->setInt32("adaptive-playback", false);
@@ -1603,7 +2070,7 @@
             err = setupG711Codec(encoder, sampleRate, numChannels);
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_FLAC)) {
-        int32_t numChannels, sampleRate, compressionLevel = -1;
+        int32_t numChannels = 0, sampleRate = 0, compressionLevel = -1;
         if (encoder &&
                 (!msg->findInt32("channel-count", &numChannels)
                         || !msg->findInt32("sample-rate", &sampleRate))) {
@@ -1685,16 +2152,78 @@
         err = setMinBufferSize(kPortIndexInput, 8192);  // XXX
     }
 
+    int32_t priority;
+    if (msg->findInt32("priority", &priority)) {
+        err = setPriority(priority);
+    }
+
+    int32_t rateInt = -1;
+    float rateFloat = -1;
+    if (!msg->findFloat("operating-rate", &rateFloat)) {
+        msg->findInt32("operating-rate", &rateInt);
+        rateFloat = (float)rateInt;  // 16MHz (FLINTMAX) is OK for upper bound.
+    }
+    if (rateFloat > 0) {
+        err = setOperatingRate(rateFloat, video);
+    }
+
     mBaseOutputFormat = outputFormat;
 
-    CHECK_EQ(getPortFormat(kPortIndexInput, inputFormat), (status_t)OK);
-    CHECK_EQ(getPortFormat(kPortIndexOutput, outputFormat), (status_t)OK);
-    mInputFormat = inputFormat;
-    mOutputFormat = outputFormat;
-
+    err = getPortFormat(kPortIndexInput, inputFormat);
+    if (err == OK) {
+        err = getPortFormat(kPortIndexOutput, outputFormat);
+        if (err == OK) {
+            mInputFormat = inputFormat;
+            mOutputFormat = outputFormat;
+        }
+    }
     return err;
 }
 
+status_t ACodec::setPriority(int32_t priority) {
+    if (priority < 0) {
+        return BAD_VALUE;
+    }
+    OMX_PARAM_U32TYPE config;
+    InitOMXParams(&config);
+    config.nU32 = (OMX_U32)priority;
+    status_t temp = mOMX->setConfig(
+            mNode, (OMX_INDEXTYPE)OMX_IndexConfigPriority,
+            &config, sizeof(config));
+    if (temp != OK) {
+        ALOGI("codec does not support config priority (err %d)", temp);
+    }
+    return OK;
+}
+
+status_t ACodec::setOperatingRate(float rateFloat, bool isVideo) {
+    if (rateFloat < 0) {
+        return BAD_VALUE;
+    }
+    OMX_U32 rate;
+    if (isVideo) {
+        if (rateFloat > 65535) {
+            return BAD_VALUE;
+        }
+        rate = (OMX_U32)(rateFloat * 65536.0f + 0.5f);
+    } else {
+        if (rateFloat > UINT_MAX) {
+            return BAD_VALUE;
+        }
+        rate = (OMX_U32)(rateFloat);
+    }
+    OMX_PARAM_U32TYPE config;
+    InitOMXParams(&config);
+    config.nU32 = rate;
+    status_t err = mOMX->setConfig(
+            mNode, (OMX_INDEXTYPE)OMX_IndexConfigOperatingRate,
+            &config, sizeof(config));
+    if (err != OK) {
+        ALOGI("codec does not support config operating rate (err %d)", err);
+    }
+    return OK;
+}
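
Editorial note (not part of the patch): the video branch of setOperatingRate() encodes frames per second as a Q16 fixed-point value (rate * 65536, rounded), which is why the float is capped at 65535 before conversion; audio rates are passed through as plain Hz. A worked sketch of the video conversion:

```cpp
#include <cstdint>
#include <cstdio>

// Convert a video operating rate in fps to Q16 fixed point, as the patch does.
// Returns 0 for out-of-range input (the caller treats that as an error).
uint32_t videoRateToQ16(float fps) {
    if (fps < 0 || fps > 65535) {
        return 0;
    }
    return (uint32_t)(fps * 65536.0f + 0.5f);
}

int main() {
    // 30 fps -> 30 * 65536 = 1966080; fractional rates round to the nearest 1/65536 step.
    printf("30 fps = %u (Q16)\n", videoRateToQ16(30.0f));
    return 0;
}
```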
+
 status_t ACodec::setMinBufferSize(OMX_U32 portIndex, size_t size) {
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
@@ -1727,7 +2256,10 @@
         return err;
     }
 
-    CHECK(def.nBufferSize >= size);
+    if (def.nBufferSize < size) {
+        ALOGE("failed to set min buffer size to %zu (is still %u)", size, def.nBufferSize);
+        return FAILED_TRANSACTION;
+    }
 
     return OK;
 }
@@ -2055,7 +2587,9 @@
 }
 
 status_t ACodec::setupG711Codec(bool encoder, int32_t sampleRate, int32_t numChannels) {
-    CHECK(!encoder);  // XXX TODO
+    if (encoder) {
+        return INVALID_OPERATION;
+    }
 
     return setupRawAudioFormat(
             kPortIndexInput, sampleRate, numChannels);
@@ -2566,7 +3100,9 @@
             break;
     }
 
-    ALOGI("setupVideoEncoder succeeded");
+    if (err == OK) {
+        ALOGI("setupVideoEncoder succeeded");
+    }
 
     return err;
 }
@@ -3164,8 +3700,9 @@
 
     status_t err = mOMX->getParameter(
             mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
-
-    CHECK_EQ(err, (status_t)OK);
+    if (err != OK) {
+        return err;
+    }
 
     if (portIndex == kPortIndexInput) {
         // XXX Need a (much) better heuristic to compute input buffer sizes.
@@ -3175,7 +3712,10 @@
         }
     }
 
-    CHECK_EQ((int)def.eDomain, (int)OMX_PortDomainVideo);
+    if (def.eDomain != OMX_PortDomainVideo) {
+        ALOGE("expected video port, got %s(%d)", asString(def.eDomain), def.eDomain);
+        return FAILED_TRANSACTION;
+    }
 
     video_def->nFrameWidth = width;
     video_def->nFrameHeight = height;
@@ -3239,8 +3779,8 @@
     while (countBuffersOwnedByNativeWindow() > mNumUndequeuedBuffers
             && dequeueBufferFromNativeWindow() != NULL) {
         // these buffers will be submitted as regular buffers; account for this
-        if (mStoreMetaDataInOutputBuffers && mMetaDataBuffersToSubmit > 0) {
-            --mMetaDataBuffersToSubmit;
+        if (storingMetadataInDecodedBuffers() && mMetadataBuffersToSubmit > 0) {
+            --mMetadataBuffersToSubmit;
         }
     }
 }
@@ -3452,17 +3992,20 @@
 }
 
 status_t ACodec::getPortFormat(OMX_U32 portIndex, sp<AMessage> &notify) {
-    // TODO: catch errors an return them instead of using CHECK
+    const char *niceIndex = portIndex == kPortIndexInput ? "input" : "output";
     OMX_PARAM_PORTDEFINITIONTYPE def;
     InitOMXParams(&def);
     def.nPortIndex = portIndex;
 
-    CHECK_EQ(mOMX->getParameter(
-                mNode, OMX_IndexParamPortDefinition, &def, sizeof(def)),
-             (status_t)OK);
+    status_t err = mOMX->getParameter(mNode, OMX_IndexParamPortDefinition, &def, sizeof(def));
+    if (err != OK) {
+        return err;
+    }
 
-    CHECK_EQ((int)def.eDir,
-            (int)(portIndex == kPortIndexOutput ? OMX_DirOutput : OMX_DirInput));
+    if (def.eDir != (portIndex == kPortIndexOutput ? OMX_DirOutput : OMX_DirInput)) {
+        ALOGE("unexpected dir: %s(%d) on %s port", asString(def.eDir), def.eDir, niceIndex);
+        return BAD_VALUE;
+    }
 
     switch (def.eDomain) {
         case OMX_PortDomainVideo:
@@ -3496,7 +4039,7 @@
                                             sizeof(describeParams.sMediaImage)));
 
                             MediaImage *img = &describeParams.sMediaImage;
-                            ALOGV("[%s] MediaImage { F(%zux%zu) @%zu+%zu+%zu @%zu+%zu+%zu @%zu+%zu+%zu }",
+                            ALOGV("[%s] MediaImage { F(%ux%u) @%u+%u+%u @%u+%u+%u @%u+%u+%u }",
                                     mComponentName.c_str(), img->mWidth, img->mHeight,
                                     img->mPlane[0].mOffset, img->mPlane[0].mColInc, img->mPlane[0].mRowInc,
                                     img->mPlane[1].mOffset, img->mPlane[1].mColInc, img->mPlane[1].mRowInc,
@@ -3525,12 +4068,16 @@
                         rect.nHeight = videoDef->nFrameHeight;
                     }
 
-                    CHECK_GE(rect.nLeft, 0);
-                    CHECK_GE(rect.nTop, 0);
-                    CHECK_GE(rect.nWidth, 0u);
-                    CHECK_GE(rect.nHeight, 0u);
-                    CHECK_LE(rect.nLeft + rect.nWidth - 1, videoDef->nFrameWidth);
-                    CHECK_LE(rect.nTop + rect.nHeight - 1, videoDef->nFrameHeight);
+                    if (rect.nLeft < 0 ||
+                        rect.nTop < 0 ||
+                        rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
+                        rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
+                        ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
+                                rect.nLeft, rect.nTop,
+                                rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
+                                videoDef->nFrameWidth, videoDef->nFrameHeight);
+                        return BAD_VALUE;
+                    }
 
                     notify->setRect(
                             "crop",
@@ -3587,7 +4134,13 @@
 
                 default:
                 {
-                    CHECK(mIsEncoder ^ (portIndex == kPortIndexInput));
+                    if (mIsEncoder ^ (portIndex == kPortIndexOutput)) {
+                        // should be CodingUnused
+                        ALOGE("Raw port video compression format is %s(%d)",
+                                asString(videoDef->eCompressionFormat),
+                                videoDef->eCompressionFormat);
+                        return BAD_VALUE;
+                    }
                     AString mime;
                     if (GetMimeTypeForVideoCoding(
                         videoDef->eCompressionFormat, &mime) != OK) {
@@ -3618,20 +4171,25 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioPcm,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                            mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
-                    CHECK_GT(params.nChannels, 0);
-                    CHECK(params.nChannels == 1 || params.bInterleaved);
-                    CHECK_EQ(params.nBitPerSample, 16u);
-
-                    CHECK_EQ((int)params.eNumData,
-                             (int)OMX_NumericalDataSigned);
-
-                    CHECK_EQ((int)params.ePCMMode,
-                             (int)OMX_AUDIO_PCMModeLinear);
+                    if (params.nChannels <= 0
+                            || (params.nChannels != 1 && !params.bInterleaved)
+                            || params.nBitPerSample != 16u
+                            || params.eNumData != OMX_NumericalDataSigned
+                            || params.ePCMMode != OMX_AUDIO_PCMModeLinear) {
+                        ALOGE("unsupported PCM port: %u channels%s, %u-bit, %s(%d), %s(%d) mode ",
+                                params.nChannels,
+                                params.bInterleaved ? " interleaved" : "",
+                                params.nBitPerSample,
+                                asString(params.eNumData), params.eNumData,
+                                asString(params.ePCMMode), params.ePCMMode);
+                        return FAILED_TRANSACTION;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3649,10 +4207,11 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioAac,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                            mNode, OMX_IndexParamAudioAac, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3666,21 +4225,18 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioAmr,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                            mNode, OMX_IndexParamAudioAmr, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setInt32("channel-count", 1);
                     if (params.eAMRBandMode >= OMX_AUDIO_AMRBandModeWB0) {
-                        notify->setString(
-                                "mime", MEDIA_MIMETYPE_AUDIO_AMR_WB);
-
+                        notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_WB);
                         notify->setInt32("sample-rate", 16000);
                     } else {
-                        notify->setString(
-                                "mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
-
+                        notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AMR_NB);
                         notify->setInt32("sample-rate", 8000);
                     }
                     break;
@@ -3692,10 +4248,11 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioFlac,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                            mNode, OMX_IndexParamAudioFlac, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_FLAC);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3709,10 +4266,11 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioMp3,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                            mNode, OMX_IndexParamAudioMp3, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_MPEG);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3726,10 +4284,11 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioVorbis,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                            mNode, OMX_IndexParamAudioVorbis, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_VORBIS);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3743,11 +4302,12 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ((status_t)OK, mOMX->getParameter(
-                            mNode,
-                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
-                            &params,
-                            sizeof(params)));
+                    err = mOMX->getParameter(
+                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidAc3,
+                            &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_AC3);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3761,11 +4321,12 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ((status_t)OK, mOMX->getParameter(
-                            mNode,
-                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
-                            &params,
-                            sizeof(params)));
+                    err = mOMX->getParameter(
+                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidEac3,
+                            &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_EAC3);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3779,11 +4340,12 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ((status_t)OK, mOMX->getParameter(
-                            mNode,
-                            (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
-                            &params,
-                            sizeof(params)));
+                    err = mOMX->getParameter(
+                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioAndroidOpus,
+                            &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_OPUS);
                     notify->setInt32("channel-count", params.nChannels);
@@ -3797,11 +4359,11 @@
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ((status_t)OK, mOMX->getParameter(
-                            mNode,
-                            (OMX_INDEXTYPE)OMX_IndexParamAudioPcm,
-                            &params,
-                            sizeof(params)));
+                    err = mOMX->getParameter(
+                            mNode, (OMX_INDEXTYPE)OMX_IndexParamAudioPcm, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     const char *mime = NULL;
                     if (params.ePCMMode == OMX_AUDIO_PCMModeMULaw) {
@@ -3819,30 +4381,33 @@
 
                 case OMX_AUDIO_CodingGSMFR:
                 {
-                    OMX_AUDIO_PARAM_MP3TYPE params;
+                    OMX_AUDIO_PARAM_PCMMODETYPE params;
                     InitOMXParams(&params);
                     params.nPortIndex = portIndex;
 
-                    CHECK_EQ(mOMX->getParameter(
-                                mNode, OMX_IndexParamAudioPcm,
-                                &params, sizeof(params)),
-                             (status_t)OK);
+                    err = mOMX->getParameter(
+                                mNode, OMX_IndexParamAudioPcm, &params, sizeof(params));
+                    if (err != OK) {
+                        return err;
+                    }
 
                     notify->setString("mime", MEDIA_MIMETYPE_AUDIO_MSGSM);
                     notify->setInt32("channel-count", params.nChannels);
-                    notify->setInt32("sample-rate", params.nSampleRate);
+                    notify->setInt32("sample-rate", params.nSamplingRate);
                     break;
                 }
 
                 default:
-                    ALOGE("UNKNOWN AUDIO CODING: %d\n", audioDef->eEncoding);
-                    TRESPASS();
+                    ALOGE("Unsupported audio coding: %s(%d)\n",
+                            asString(audioDef->eEncoding), audioDef->eEncoding);
+                    return BAD_TYPE;
             }
             break;
         }
 
         default:
-            TRESPASS();
+            ALOGE("Unsupported domain: %s(%d)", asString(def.eDomain), def.eDomain);
+            return BAD_TYPE;
     }
 
     return OK;
@@ -3852,7 +4417,10 @@
     sp<AMessage> notify = mBaseOutputFormat->dup();
     notify->setInt32("what", kWhatOutputFormatChanged);
 
-    CHECK_EQ(getPortFormat(kPortIndexOutput, notify), (status_t)OK);
+    if (getPortFormat(kPortIndexOutput, notify) != OK) {
+        ALOGE("[%s] Failed to get port format to send format change", mComponentName.c_str());
+        return;
+    }
 
     AString mime;
     CHECK(notify->findString("mime", &mime));
@@ -3872,9 +4440,7 @@
         if (mSkipCutBuffer != NULL) {
             size_t prevbufsize = mSkipCutBuffer->size();
             if (prevbufsize != 0) {
-                ALOGW("Replacing SkipCutBuffer holding %d "
-                      "bytes",
-                      prevbufsize);
+                ALOGW("Replacing SkipCutBuffer holding %zu bytes", prevbufsize);
             }
         }
         mSkipCutBuffer = new SkipCutBuffer(
@@ -3908,150 +4474,6 @@
     notify->post();
 }
 
-status_t ACodec::pushBlankBuffersToNativeWindow() {
-    status_t err = NO_ERROR;
-    ANativeWindowBuffer* anb = NULL;
-    int numBufs = 0;
-    int minUndequeuedBufs = 0;
-
-    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
-    // no frames get dropped by SurfaceFlinger assuming that these are video
-    // frames.
-    err = native_window_api_disconnect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_MEDIA);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_api_connect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_CPU);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_set_buffers_geometry(mNativeWindow.get(), 1, 1,
-            HAL_PIXEL_FORMAT_RGBX_8888);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffers_geometry failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_scaling_mode(mNativeWindow.get(),
-                NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank_frames: set_scaling_mode failed: %s (%d)",
-              strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_usage(mNativeWindow.get(),
-            GRALLOC_USAGE_SW_WRITE_OFTEN);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_usage failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = mNativeWindow->query(mNativeWindow.get(),
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
-                "failed: %s (%d)", strerror(-err), -err);
-        goto error;
-    }
-
-    numBufs = minUndequeuedBufs + 1;
-    err = native_window_set_buffer_count(mNativeWindow.get(), numBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    // We  push numBufs + 1 buffers to ensure that we've drawn into the same
-    // buffer twice.  This should guarantee that the buffer has been displayed
-    // on the screen and then been replaced, so an previous video frames are
-    // guaranteed NOT to be currently displayed.
-    for (int i = 0; i < numBufs + 1; i++) {
-        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &anb);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
-
-        // Fill the buffer with the a 1x1 checkerboard pattern ;)
-        uint32_t* img = NULL;
-        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: lock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        *img = 0;
-
-        err = buf->unlock();
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: unlock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        err = mNativeWindow->queueBuffer(mNativeWindow.get(),
-                buf->getNativeBuffer(), -1);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        anb = NULL;
-    }
-
-error:
-
-    if (err != NO_ERROR) {
-        // Clean up after an error.
-        if (anb != NULL) {
-            mNativeWindow->cancelBuffer(mNativeWindow.get(), anb, -1);
-        }
-
-        native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-
-        return err;
-    } else {
-        // Clean up after success.
-        err = native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        err = native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        return NO_ERROR;
-    }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 
 ACodec::PortDescription::PortDescription() {
@@ -4119,12 +4541,40 @@
             break;
         }
 
-        case ACodec::kWhatOMXMessage:
+        case ACodec::kWhatOMXMessageList:
         {
+            return checkOMXMessage(msg) ? onOMXMessageList(msg) : true;
+        }
+
+        case ACodec::kWhatOMXMessageItem:
+        {
+            // no need to check as we already did it for kWhatOMXMessageList
             return onOMXMessage(msg);
         }
 
+        case ACodec::kWhatOMXMessage:
+        {
+            return checkOMXMessage(msg) ? onOMXMessage(msg) : true;
+        }
+
+        case ACodec::kWhatSetSurface:
+        {
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            sp<RefBase> obj;
+            CHECK(msg->findObject("surface", &obj));
+
+            status_t err = mCodec->handleSetSurface(static_cast<Surface *>(obj.get()));
+
+            sp<AMessage> response = new AMessage;
+            response->setInt32("err", err);
+            response->postReply(replyID);
+            break;
+        }
+
         case ACodec::kWhatCreateInputSurface:
+        case ACodec::kWhatSetInputSurface:
         case ACodec::kWhatSignalEndOfInputStream:
         {
             // This may result in an app illegal state exception.
@@ -4161,21 +4611,51 @@
     return true;
 }
 
-bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
-    int32_t type;
-    CHECK(msg->findInt32("type", &type));
-
+bool ACodec::BaseState::checkOMXMessage(const sp<AMessage> &msg) {
     // there is a possibility that this is an outstanding message for a
     // codec that we have already destroyed
-    if (mCodec->mNode == NULL) {
+    if (mCodec->mNode == 0) {
         ALOGI("ignoring message as already freed component: %s",
                 msg->debugString().c_str());
-        return true;
+        return false;
     }
 
     IOMX::node_id nodeID;
     CHECK(msg->findInt32("node", (int32_t*)&nodeID));
-    CHECK_EQ(nodeID, mCodec->mNode);
+    if (nodeID != mCodec->mNode) {
+        ALOGE("Unexpected message for nodeID: %u, should have been %u", nodeID, mCodec->mNode);
+        return false;
+    }
+    return true;
+}
+
+bool ACodec::BaseState::onOMXMessageList(const sp<AMessage> &msg) {
+    sp<RefBase> obj;
+    CHECK(msg->findObject("messages", &obj));
+    sp<MessageList> msgList = static_cast<MessageList *>(obj.get());
+
+    bool receivedRenderedEvents = false;
+    for (std::list<sp<AMessage>>::const_iterator it = msgList->getList().cbegin();
+          it != msgList->getList().cend(); ++it) {
+        (*it)->setWhat(ACodec::kWhatOMXMessageItem);
+        mCodec->handleMessage(*it);
+        int32_t type;
+        CHECK((*it)->findInt32("type", &type));
+        if (type == omx_message::FRAME_RENDERED) {
+            receivedRenderedEvents = true;
+        }
+    }
+
+    if (receivedRenderedEvents) {
+        // NOTE: all buffers are rendered in this case
+        mCodec->notifyOfRenderedFrames();
+    }
+    return true;
+}
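
Editorial note (not part of the patch): the new kWhatOMXMessageList path drains a whole batch of callback messages in one pass and, if any of them was a FRAME_RENDERED event, fires a single rendered-frames notification afterwards. A reduced model of that batching logic, using a plain enum in place of the omx_message types:

```cpp
#include <cstdio>
#include <list>

enum class OmxMsg { Event, EmptyBufferDone, FillBufferDone, FrameRendered };

// Process a batch of messages; notify about rendered frames at most once per batch.
void handleMessageList(const std::list<OmxMsg> &batch) {
    bool receivedRenderedEvents = false;
    for (OmxMsg m : batch) {
        // ... dispatch each message to its handler here ...
        if (m == OmxMsg::FrameRendered) {
            receivedRenderedEvents = true;
        }
    }
    if (receivedRenderedEvents) {
        printf("notifyOfRenderedFrames()\n");   // one notification covering the whole batch
    }
}

int main() {
    handleMessageList({OmxMsg::FillBufferDone, OmxMsg::FrameRendered, OmxMsg::FrameRendered});
    return 0;
}
```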
+
+bool ACodec::BaseState::onOMXMessage(const sp<AMessage> &msg) {
+    int32_t type;
+    CHECK(msg->findInt32("type", &type));
 
     switch (type) {
         case omx_message::EVENT:
@@ -4205,9 +4685,12 @@
         case omx_message::EMPTY_BUFFER_DONE:
         {
             IOMX::buffer_id bufferID;
-            CHECK(msg->findInt32("buffer", (int32_t*)&bufferID));
+            int32_t fenceFd;
 
-            return onOMXEmptyBufferDone(bufferID);
+            CHECK(msg->findInt32("buffer", (int32_t*)&bufferID));
+            CHECK(msg->findInt32("fence_fd", &fenceFd));
+
+            return onOMXEmptyBufferDone(bufferID, fenceFd);
         }
 
         case omx_message::FILL_BUFFER_DONE:
@@ -4215,37 +4698,56 @@
             IOMX::buffer_id bufferID;
             CHECK(msg->findInt32("buffer", (int32_t*)&bufferID));
 
-            int32_t rangeOffset, rangeLength, flags;
+            int32_t rangeOffset, rangeLength, flags, fenceFd;
             int64_t timeUs;
 
             CHECK(msg->findInt32("range_offset", &rangeOffset));
             CHECK(msg->findInt32("range_length", &rangeLength));
             CHECK(msg->findInt32("flags", &flags));
             CHECK(msg->findInt64("timestamp", &timeUs));
+            CHECK(msg->findInt32("fence_fd", &fenceFd));
 
             return onOMXFillBufferDone(
                     bufferID,
                     (size_t)rangeOffset, (size_t)rangeLength,
                     (OMX_U32)flags,
-                    timeUs);
+                    timeUs,
+                    fenceFd);
+        }
+
+        case omx_message::FRAME_RENDERED:
+        {
+            int64_t mediaTimeUs, systemNano;
+
+            CHECK(msg->findInt64("media_time_us", &mediaTimeUs));
+            CHECK(msg->findInt64("system_nano", &systemNano));
+
+            return onOMXFrameRendered(
+                    mediaTimeUs, systemNano);
         }
 
         default:
-            TRESPASS();
-            break;
+            ALOGE("Unexpected message type: %d", type);
+            return false;
     }
 }
 
+bool ACodec::BaseState::onOMXFrameRendered(
+        int64_t mediaTimeUs __unused, nsecs_t systemNano __unused) {
+    // ignore outside of Executing and PortSettingsChanged states
+    return true;
+}
+
 bool ACodec::BaseState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
     if (event != OMX_EventError) {
-        ALOGV("[%s] EVENT(%d, 0x%08lx, 0x%08lx)",
+        ALOGV("[%s] EVENT(%d, 0x%08x, 0x%08x)",
              mCodec->mComponentName.c_str(), event, data1, data2);
 
         return false;
     }
 
-    ALOGE("[%s] ERROR(0x%08lx)", mCodec->mComponentName.c_str(), data1);
+    ALOGE("[%s] ERROR(0x%08x)", mCodec->mComponentName.c_str(), data1);
 
     // verify OMX component sends back an error we expect.
     OMX_ERRORTYPE omxError = (OMX_ERRORTYPE)data1;
@@ -4258,16 +4760,29 @@
     return true;
 }
 
-bool ACodec::BaseState::onOMXEmptyBufferDone(IOMX::buffer_id bufferID) {
-    ALOGV("[%s] onOMXEmptyBufferDone %p",
+bool ACodec::BaseState::onOMXEmptyBufferDone(IOMX::buffer_id bufferID, int fenceFd) {
+    ALOGV("[%s] onOMXEmptyBufferDone %u",
          mCodec->mComponentName.c_str(), bufferID);
 
-    BufferInfo *info =
-        mCodec->findBufferByID(kPortIndexInput, bufferID);
-
-    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_COMPONENT);
+    BufferInfo *info = mCodec->findBufferByID(kPortIndexInput, bufferID);
+    BufferInfo::Status status = BufferInfo::getSafeStatus(info);
+    if (status != BufferInfo::OWNED_BY_COMPONENT) {
+        ALOGE("Wrong ownership in EBD: %s(%d) buffer #%u", _asString(status), status, bufferID);
+        mCodec->dumpBuffers(kPortIndexInput);
+        if (fenceFd >= 0) {
+            ::close(fenceFd);
+        }
+        return false;
+    }
     info->mStatus = BufferInfo::OWNED_BY_US;
 
+    // input buffers cannot take fences, so wait for any fence now
+    (void)mCodec->waitForFence(fenceFd, "onOMXEmptyBufferDone");
+    fenceFd = -1;
+
+    // still save fence for completeness
+    info->setWriteFence(fenceFd, "onOMXEmptyBufferDone");
+
     // We're in "store-metadata-in-buffers" mode, the underlying
     // OMX component had access to data that's implicitly refcounted
     // by this "MediaBuffer" object. Now that the OMX component has
@@ -4285,12 +4800,10 @@
             postFillThisBuffer(info);
             break;
 
+        case FREE_BUFFERS:
         default:
-        {
-            CHECK_EQ((int)mode, (int)FREE_BUFFERS);
-            TRESPASS();  // Not currently used
-            break;
-        }
+            ALOGE("SHOULD NOT REACH HERE: cannot free empty input buffers");
+            return false;
     }
 
     return true;
@@ -4310,7 +4823,7 @@
     info->mData->meta()->clear();
     notify->setBuffer("buffer", info->mData);
 
-    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec->id());
+    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, mCodec);
     reply->setInt32("buffer-id", info->mBufferID);
 
     notify->setMessage("reply", reply);
@@ -4351,7 +4864,13 @@
     }
 
     BufferInfo *info = mCodec->findBufferByID(kPortIndexInput, bufferID);
-    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_UPSTREAM);
+    BufferInfo::Status status = BufferInfo::getSafeStatus(info);
+    if (status != BufferInfo::OWNED_BY_UPSTREAM) {
+        ALOGE("Wrong ownership in IBF: %s(%d) buffer #%u", _asString(status), status, bufferID);
+        mCodec->dumpBuffers(kPortIndexInput);
+        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+        return;
+    }
 
     info->mStatus = BufferInfo::OWNED_BY_US;
 
@@ -4370,6 +4889,12 @@
         case RESUBMIT_BUFFERS:
         {
             if (buffer != NULL && !mCodec->mPortEOS[kPortIndexInput]) {
+                // Do not send empty input buffer w/o EOS to the component.
+                if (buffer->size() == 0 && !eos) {
+                    postFillThisBuffer(info);
+                    break;
+                }
+
                 int64_t timeUs;
                 CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
 
@@ -4385,28 +4910,34 @@
                 }
 
                 if (buffer != info->mData) {
-                    ALOGV("[%s] Needs to copy input data for buffer %p. (%p != %p)",
+                    ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
                          mCodec->mComponentName.c_str(),
                          bufferID,
                          buffer.get(), info->mData.get());
 
-                    CHECK_LE(buffer->size(), info->mData->capacity());
+                    if (buffer->size() > info->mData->capacity()) {
+                        ALOGE("data size (%zu) is greater than buffer capacity (%zu)",
+                                buffer->size(),           // this is the data received
+                                info->mData->capacity()); // this is our buffer size
+                        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                        return;
+                    }
                     memcpy(info->mData->data(), buffer->data(), buffer->size());
                 }
 
                 if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
-                    ALOGV("[%s] calling emptyBuffer %p w/ codec specific data",
+                    ALOGV("[%s] calling emptyBuffer %u w/ codec specific data",
                          mCodec->mComponentName.c_str(), bufferID);
                 } else if (flags & OMX_BUFFERFLAG_EOS) {
-                    ALOGV("[%s] calling emptyBuffer %p w/ EOS",
+                    ALOGV("[%s] calling emptyBuffer %u w/ EOS",
                          mCodec->mComponentName.c_str(), bufferID);
                 } else {
 #if TRACK_BUFFER_TIMING
-                    ALOGI("[%s] calling emptyBuffer %p w/ time %lld us",
-                         mCodec->mComponentName.c_str(), bufferID, timeUs);
+                    ALOGI("[%s] calling emptyBuffer %u w/ time %lld us",
+                         mCodec->mComponentName.c_str(), bufferID, (long long)timeUs);
 #else
-                    ALOGV("[%s] calling emptyBuffer %p w/ time %lld us",
-                         mCodec->mComponentName.c_str(), bufferID, timeUs);
+                    ALOGV("[%s] calling emptyBuffer %u w/ time %lld us",
+                         mCodec->mComponentName.c_str(), bufferID, (long long)timeUs);
 #endif
                 }
 
@@ -4417,61 +4948,69 @@
                 mCodec->mBufferStats.add(timeUs, stats);
 #endif
 
-                if (mCodec->mStoreMetaDataInOutputBuffers) {
+                if (mCodec->storingMetadataInDecodedBuffers()) {
                     // try to submit an output buffer for each input buffer
                     PortMode outputMode = getPortMode(kPortIndexOutput);
 
-                    ALOGV("MetaDataBuffersToSubmit=%u portMode=%s",
-                            mCodec->mMetaDataBuffersToSubmit,
+                    ALOGV("MetadataBuffersToSubmit=%u portMode=%s",
+                            mCodec->mMetadataBuffersToSubmit,
                             (outputMode == FREE_BUFFERS ? "FREE" :
                              outputMode == KEEP_BUFFERS ? "KEEP" : "RESUBMIT"));
                     if (outputMode == RESUBMIT_BUFFERS) {
-                        mCodec->submitOutputMetaDataBuffer();
+                        mCodec->submitOutputMetadataBuffer();
                     }
                 }
-
-                CHECK_EQ(mCodec->mOMX->emptyBuffer(
-                            mCodec->mNode,
-                            bufferID,
-                            0,
-                            buffer->size(),
-                            flags,
-                            timeUs),
-                         (status_t)OK);
-
+                info->checkReadFence("onInputBufferFilled");
+                status_t err2 = mCodec->mOMX->emptyBuffer(
+                    mCodec->mNode,
+                    bufferID,
+                    0,
+                    buffer->size(),
+                    flags,
+                    timeUs,
+                    info->mFenceFd);
+                info->mFenceFd = -1;
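+                // the fence was passed along with emptyBuffer; clear our reference
+                // so it is not reused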
+                if (err2 != OK) {
+                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
+                    return;
+                }
                 info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
 
-                if (!eos) {
+                if (!eos && err == OK) {
                     getMoreInputDataIfPossible();
                 } else {
-                    ALOGV("[%s] Signalled EOS on the input port",
-                         mCodec->mComponentName.c_str());
+                    ALOGV("[%s] Signalled EOS (%d) on the input port",
+                         mCodec->mComponentName.c_str(), err);
 
                     mCodec->mPortEOS[kPortIndexInput] = true;
                     mCodec->mInputEOSResult = err;
                 }
             } else if (!mCodec->mPortEOS[kPortIndexInput]) {
-                if (err != ERROR_END_OF_STREAM) {
-                    ALOGV("[%s] Signalling EOS on the input port "
-                         "due to error %d",
+                if (err != OK && err != ERROR_END_OF_STREAM) {
+                    ALOGV("[%s] Signalling EOS on the input port due to error %d",
                          mCodec->mComponentName.c_str(), err);
                 } else {
                     ALOGV("[%s] Signalling EOS on the input port",
                          mCodec->mComponentName.c_str());
                 }
 
-                ALOGV("[%s] calling emptyBuffer %p signalling EOS",
+                ALOGV("[%s] calling emptyBuffer %u signalling EOS",
                      mCodec->mComponentName.c_str(), bufferID);
 
-                CHECK_EQ(mCodec->mOMX->emptyBuffer(
-                            mCodec->mNode,
-                            bufferID,
-                            0,
-                            0,
-                            OMX_BUFFERFLAG_EOS,
-                            0),
-                         (status_t)OK);
-
+                info->checkReadFence("onInputBufferFilled");
+                status_t err2 = mCodec->mOMX->emptyBuffer(
+                        mCodec->mNode,
+                        bufferID,
+                        0,
+                        0,
+                        OMX_BUFFERFLAG_EOS,
+                        0,
+                        info->mFenceFd);
+                info->mFenceFd = -1;
+                if (err2 != OK) {
+                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err2));
+                    return;
+                }
                 info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
 
                 mCodec->mPortEOS[kPortIndexInput] = true;
@@ -4480,8 +5019,11 @@
             break;
         }
 
+        case FREE_BUFFERS:
+            break;
+
         default:
-            CHECK_EQ((int)mode, (int)FREE_BUFFERS);
+            ALOGE("invalid port mode: %d", mode);
             break;
     }
 }
@@ -4519,11 +5061,13 @@
         IOMX::buffer_id bufferID,
         size_t rangeOffset, size_t rangeLength,
         OMX_U32 flags,
-        int64_t timeUs) {
+        int64_t timeUs,
+        int fenceFd) {
     ALOGV("[%s] onOMXFillBufferDone %u time %" PRId64 " us, flags = 0x%08x",
          mCodec->mComponentName.c_str(), bufferID, timeUs, flags);
 
     ssize_t index;
+    status_t err = OK;
 
 #if TRACK_BUFFER_TIMING
     index = mCodec->mBufferStats.indexOfKey(timeUs);
@@ -4542,12 +5086,36 @@
 
     BufferInfo *info =
         mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
-
-    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_COMPONENT);
+    BufferInfo::Status status = BufferInfo::getSafeStatus(info);
+    if (status != BufferInfo::OWNED_BY_COMPONENT) {
+        ALOGE("Wrong ownership in FBD: %s(%d) buffer #%u", _asString(status), status, bufferID);
+        mCodec->dumpBuffers(kPortIndexOutput);
+        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+        if (fenceFd >= 0) {
+            ::close(fenceFd);
+        }
+        return true;
+    }
 
     info->mDequeuedAt = ++mCodec->mDequeueCounter;
     info->mStatus = BufferInfo::OWNED_BY_US;
 
+    if (info->mRenderInfo != NULL) {
+        // The fence for an emptied buffer must have signaled, but there still could be queued
+        // or out-of-order dequeued buffers in the render queue prior to this buffer. Drop these,
+        // as we will soon requeue this buffer to the surface. While in theory we could still keep
+        // track of buffers that are requeued to the surface, it is better to add support to the
+        // buffer-queue to notify us of released buffers and their fences (in the future).
+        mCodec->notifyOfRenderedFrames(true /* dropIncomplete */);
+    }
+
+    // byte buffers cannot take fences, so wait for any fence now
+    if (mCodec->mNativeWindow == NULL) {
+        (void)mCodec->waitForFence(fenceFd, "onOMXFillBufferDone");
+        fenceFd = -1;
+    }
+    info->setReadFence(fenceFd, "onOMXFillBufferDone");
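+    // The stored fence stays with the buffer and is handed off again when the
+    // buffer is queued to the surface or resubmitted to the component.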
+
     PortMode mode = getPortMode(kPortIndexOutput);
 
     switch (mode) {
@@ -4561,24 +5129,39 @@
                 ALOGV("[%s] calling fillBuffer %u",
                      mCodec->mComponentName.c_str(), info->mBufferID);
 
-                CHECK_EQ(mCodec->mOMX->fillBuffer(
-                            mCodec->mNode, info->mBufferID),
-                         (status_t)OK);
+                err = mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID, info->mFenceFd);
+                info->mFenceFd = -1;
+                if (err != OK) {
+                    mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+                    return true;
+                }
 
                 info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
                 break;
             }
 
             sp<AMessage> reply =
-                new AMessage(kWhatOutputBufferDrained, mCodec->id());
+                new AMessage(kWhatOutputBufferDrained, mCodec);
 
             if (!mCodec->mSentFormat && rangeLength > 0) {
                 mCodec->sendFormatChange(reply);
             }
-
-            if (mCodec->mUseMetadataOnEncoderOutput) {
-                native_handle_t* handle =
-                        *(native_handle_t**)(info->mData->data() + 4);
+            if (mCodec->usingMetadataOnEncoderOutput()) {
+                native_handle_t *handle = NULL;
+                VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)info->mData->data();
+                VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)info->mData->data();
+                if (info->mData->size() >= sizeof(grallocMeta)
+                        && grallocMeta.eType == kMetadataBufferTypeGrallocSource) {
+                    handle = (native_handle_t *)(uintptr_t)grallocMeta.pHandle;
+                } else if (info->mData->size() >= sizeof(nativeMeta)
+                        && nativeMeta.eType == kMetadataBufferTypeANWBuffer) {
+#ifdef OMX_ANDROID_COMPILE_AS_32BIT_ON_64BIT_PLATFORMS
+                    // ANativeWindowBuffer is only valid on 32-bit/mediaserver process
+                    handle = NULL;
+#else
+                    handle = (native_handle_t *)nativeMeta.pBuffer->handle;
+#endif
+                }
                 info->mData->meta()->setPointer("handle", handle);
                 info->mData->meta()->setInt32("rangeOffset", rangeOffset);
                 info->mData->meta()->setInt32("rangeLength", rangeLength);
@@ -4625,14 +5208,17 @@
             break;
         }
 
-        default:
-        {
-            CHECK_EQ((int)mode, (int)FREE_BUFFERS);
-
-            CHECK_EQ((status_t)OK,
-                     mCodec->freeBuffer(kPortIndexOutput, index));
+        case FREE_BUFFERS:
+            err = mCodec->freeBuffer(kPortIndexOutput, index);
+            if (err != OK) {
+                mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+                return true;
+            }
             break;
-        }
+
+        default:
+            ALOGE("Invalid port mode: %d", mode);
+            return false;
     }
 
     return true;
@@ -4642,15 +5228,19 @@
     IOMX::buffer_id bufferID;
     CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
     ssize_t index;
-    BufferInfo *info =
-        mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
-    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_DOWNSTREAM);
+    BufferInfo *info = mCodec->findBufferByID(kPortIndexOutput, bufferID, &index);
+    BufferInfo::Status status = BufferInfo::getSafeStatus(info);
+    if (status != BufferInfo::OWNED_BY_DOWNSTREAM) {
+        ALOGE("Wrong ownership in OBD: %s(%d) buffer #%u", _asString(status), status, bufferID);
+        mCodec->dumpBuffers(kPortIndexOutput);
+        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+        return;
+    }
 
     android_native_rect_t crop;
-    if (msg->findRect("crop",
-            &crop.left, &crop.top, &crop.right, &crop.bottom)) {
-        CHECK_EQ(0, native_window_set_crop(
-                mCodec->mNativeWindow.get(), &crop));
+    if (msg->findRect("crop", &crop.left, &crop.top, &crop.right, &crop.bottom)) {
+        status_t err = native_window_set_crop(mCodec->mNativeWindow.get(), &crop);
+        ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
     }
 
     int32_t render;
@@ -4660,37 +5250,45 @@
         ATRACE_NAME("render");
         // The client wants this buffer to be rendered.
 
+        // save buffers sent to the surface so we can get render time when they return
+        int64_t mediaTimeUs = -1;
+        info->mData->meta()->findInt64("timeUs", &mediaTimeUs);
+        if (mediaTimeUs >= 0) {
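+            // dup() the fence so the render tracker owns its own copy; the
+            // original fd is still passed to queueBuffer() below.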
+            mCodec->mRenderTracker.onFrameQueued(
+                    mediaTimeUs, info->mGraphicBuffer, new Fence(::dup(info->mFenceFd)));
+        }
+
         int64_t timestampNs = 0;
         if (!msg->findInt64("timestampNs", &timestampNs)) {
-            // TODO: it seems like we should use the timestamp
-            // in the (media)buffer as it potentially came from
-            // an input surface, but we did not propagate it prior to
-            // API 20.  Perhaps check for target SDK version.
-#if 0
+            // use media timestamp if client did not request a specific render timestamp
             if (info->mData->meta()->findInt64("timeUs", &timestampNs)) {
-                ALOGV("using buffer PTS of %" PRId64, timestampNs);
+                ALOGV("using buffer PTS of %lld", (long long)timestampNs);
                 timestampNs *= 1000;
             }
-#endif
         }
 
         status_t err;
         err = native_window_set_buffers_timestamp(mCodec->mNativeWindow.get(), timestampNs);
-        if (err != OK) {
-            ALOGW("failed to set buffer timestamp: %d", err);
-        }
+        ALOGW_IF(err != NO_ERROR, "failed to set buffer timestamp: %d", err);
 
-        if ((err = mCodec->mNativeWindow->queueBuffer(
-                    mCodec->mNativeWindow.get(),
-                    info->mGraphicBuffer.get(), -1)) == OK) {
+        info->checkReadFence("onOutputBufferDrained before queueBuffer");
+        err = mCodec->mNativeWindow->queueBuffer(
+                    mCodec->mNativeWindow.get(), info->mGraphicBuffer.get(), info->mFenceFd);
+        info->mFenceFd = -1;
+        if (err == OK) {
             info->mStatus = BufferInfo::OWNED_BY_NATIVE_WINDOW;
         } else {
+            ALOGE("queueBuffer failed in onOutputBufferDrained: %d", err);
             mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
             info->mStatus = BufferInfo::OWNED_BY_US;
+            // keeping read fence as write fence to avoid clobbering
+            info->mIsReadFence = false;
         }
     } else {
         if (mCodec->mNativeWindow != NULL &&
             (info->mData == NULL || info->mData->size() != 0)) {
+            // move read fence into write fence to avoid clobbering
+            info->mIsReadFence = false;
             ATRACE_NAME("frame-drop");
         }
         info->mStatus = BufferInfo::OWNED_BY_US;
@@ -4725,24 +5323,32 @@
                 if (info != NULL) {
                     ALOGV("[%s] calling fillBuffer %u",
                          mCodec->mComponentName.c_str(), info->mBufferID);
-
-                    CHECK_EQ(mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID),
-                             (status_t)OK);
-
-                    info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+                    info->checkWriteFence("onOutputBufferDrained::RESUBMIT_BUFFERS");
+                    status_t err = mCodec->mOMX->fillBuffer(
+                            mCodec->mNode, info->mBufferID, info->mFenceFd);
+                    info->mFenceFd = -1;
+                    if (err == OK) {
+                        info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
+                    } else {
+                        mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+                    }
                 }
             }
             break;
         }
 
-        default:
+        case FREE_BUFFERS:
         {
-            CHECK_EQ((int)mode, (int)FREE_BUFFERS);
-
-            CHECK_EQ((status_t)OK,
-                     mCodec->freeBuffer(kPortIndexOutput, index));
+            status_t err = mCodec->freeBuffer(kPortIndexOutput, index);
+            if (err != OK) {
+                mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+            }
             break;
         }
+
+        default:
+            ALOGE("Invalid port mode: %d", mode);
+            return;
     }
 }
 
@@ -4761,11 +5367,13 @@
     }
 
     mCodec->mNativeWindow.clear();
-    mCodec->mNode = NULL;
+    mCodec->mNativeWindowUsageBits = 0;
+    mCodec->mNode = 0;
     mCodec->mOMX.clear();
     mCodec->mQuirks = 0;
     mCodec->mFlags = 0;
-    mCodec->mUseMetadataOnEncoderOutput = 0;
+    mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
+    mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
     mCodec->mComponentName.clear();
 }
 
@@ -4839,14 +5447,17 @@
 bool ACodec::UninitializedState::onAllocateComponent(const sp<AMessage> &msg) {
     ALOGV("onAllocateComponent");
 
-    CHECK(mCodec->mNode == NULL);
+    CHECK(mCodec->mNode == 0);
 
     OMXClient client;
-    CHECK_EQ(client.connect(), (status_t)OK);
+    if (client.connect() != OK) {
+        mCodec->signalError(OMX_ErrorUndefined, NO_INIT);
+        return false;
+    }
 
     sp<IOMX> omx = client.interface();
 
-    sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec->id());
+    sp<AMessage> notify = new AMessage(kWhatOMXDied, mCodec);
 
     mDeathNotifier = new DeathNotifier(notify);
     if (IInterface::asBinder(omx)->linkToDeath(mDeathNotifier) != OK) {
@@ -4887,8 +5498,9 @@
     }
 
     sp<CodecObserver> observer = new CodecObserver;
-    IOMX::node_id node = NULL;
+    IOMX::node_id node = 0;
 
+    status_t err = NAME_NOT_FOUND;
     for (size_t matchIndex = 0; matchIndex < matchingCodecs.size();
             ++matchIndex) {
         componentName = matchingCodecs.itemAt(matchIndex).mName.string();
@@ -4897,7 +5509,7 @@
         pid_t tid = gettid();
         int prevPriority = androidGetThreadPriority(tid);
         androidSetThreadPriority(tid, ANDROID_PRIORITY_FOREGROUND);
-        status_t err = omx->allocateNode(componentName.c_str(), observer, &node);
+        err = omx->allocateNode(componentName.c_str(), observer, &node);
         androidSetThreadPriority(tid, prevPriority);
 
         if (err == OK) {
@@ -4906,25 +5518,26 @@
             ALOGW("Allocating component '%s' failed, try next one.", componentName.c_str());
         }
 
-        node = NULL;
+        node = 0;
     }
 
-    if (node == NULL) {
+    if (node == 0) {
         if (!mime.empty()) {
-            ALOGE("Unable to instantiate a %scoder for type '%s'.",
-                    encoder ? "en" : "de", mime.c_str());
+            ALOGE("Unable to instantiate a %scoder for type '%s' with err %#x.",
+                    encoder ? "en" : "de", mime.c_str(), err);
         } else {
-            ALOGE("Unable to instantiate codec '%s'.", componentName.c_str());
+            ALOGE("Unable to instantiate codec '%s' with err %#x.", componentName.c_str(), err);
         }
 
-        mCodec->signalError(OMX_ErrorComponentNotFound);
+        mCodec->signalError((OMX_ERRORTYPE)err, makeNoSideEffectStatus(err));
         return false;
     }
 
-    notify = new AMessage(kWhatOMXMessage, mCodec->id());
+    notify = new AMessage(kWhatOMXMessageList, mCodec);
     observer->setNotificationMessage(notify);
 
     mCodec->mComponentName = componentName;
+    mCodec->mRenderTracker.setComponentName(componentName);
     mCodec->mFlags = 0;
 
     if (componentName.endsWith(".secure")) {
@@ -4964,7 +5577,7 @@
     mCodec->mInputEOSResult = OK;
 
     mCodec->mDequeueCounter = 0;
-    mCodec->mMetaDataBuffersToSubmit = 0;
+    mCodec->mMetadataBuffersToSubmit = 0;
     mCodec->mRepeatFrameDelayUs = -1ll;
     mCodec->mInputFormat.clear();
     mCodec->mOutputFormat.clear();
@@ -4985,7 +5598,7 @@
 
 void ACodec::LoadedState::onShutdown(bool keepComponentAllocated) {
     if (!keepComponentAllocated) {
-        CHECK_EQ(mCodec->mOMX->freeNode(mCodec->mNode), (status_t)OK);
+        (void)mCodec->mOMX->freeNode(mCodec->mNode);
 
         mCodec->changeState(mCodec->mUninitializedState);
     }
@@ -5016,6 +5629,13 @@
             break;
         }
 
+        case ACodec::kWhatSetInputSurface:
+        {
+            onSetInputSurface(msg);
+            handled = true;
+            break;
+        }
+
         case ACodec::kWhatStart:
         {
             onStart();
@@ -5057,13 +5677,15 @@
         const sp<AMessage> &msg) {
     ALOGV("onConfigureComponent");
 
-    CHECK(mCodec->mNode != NULL);
+    CHECK(mCodec->mNode != 0);
 
+    status_t err = OK;
     AString mime;
-    CHECK(msg->findString("mime", &mime));
-
-    status_t err = mCodec->configureCodec(mime.c_str(), msg);
-
+    if (!msg->findString("mime", &mime)) {
+        err = BAD_VALUE;
+    } else {
+        err = mCodec->configureCodec(mime.c_str(), msg);
+    }
     if (err != OK) {
         ALOGE("[%s] configureCodec returning error %d",
               mCodec->mComponentName.c_str(), err);
@@ -5083,20 +5705,10 @@
     return true;
 }
 
-void ACodec::LoadedState::onCreateInputSurface(
-        const sp<AMessage> & /* msg */) {
-    ALOGV("onCreateInputSurface");
+status_t ACodec::LoadedState::setupInputSurface() {
+    status_t err = OK;
 
-    sp<AMessage> notify = mCodec->mNotify->dup();
-    notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
-
-    sp<IGraphicBufferProducer> bufferProducer;
-    status_t err;
-
-    err = mCodec->mOMX->createInputSurface(mCodec->mNode, kPortIndexInput,
-            &bufferProducer);
-
-    if (err == OK && mCodec->mRepeatFrameDelayUs > 0ll) {
+    if (mCodec->mRepeatFrameDelayUs > 0ll) {
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
                 kPortIndexInput,
@@ -5109,10 +5721,11 @@
                   "frames (err %d)",
                   mCodec->mComponentName.c_str(),
                   err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mMaxPtsGapUs > 0ll) {
+    if (mCodec->mMaxPtsGapUs > 0ll) {
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
                 kPortIndexInput,
@@ -5124,10 +5737,27 @@
             ALOGE("[%s] Unable to configure max timestamp gap (err %d)",
                     mCodec->mComponentName.c_str(),
                     err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mTimePerCaptureUs > 0ll
+    if (mCodec->mMaxFps > 0) {
+        err = mCodec->mOMX->setInternalOption(
+                mCodec->mNode,
+                kPortIndexInput,
+                IOMX::INTERNAL_OPTION_MAX_FPS,
+                &mCodec->mMaxFps,
+                sizeof(mCodec->mMaxFps));
+
+        if (err != OK) {
+            ALOGE("[%s] Unable to configure max fps (err %d)",
+                    mCodec->mComponentName.c_str(),
+                    err);
+            return err;
+        }
+    }
+
+    if (mCodec->mTimePerCaptureUs > 0ll
             && mCodec->mTimePerFrameUs > 0ll) {
         int64_t timeLapse[2];
         timeLapse[0] = mCodec->mTimePerFrameUs;
@@ -5143,10 +5773,11 @@
             ALOGE("[%s] Unable to configure time lapse (err %d)",
                     mCodec->mComponentName.c_str(),
                     err);
+            return err;
         }
     }
 
-    if (err == OK && mCodec->mCreateInputBuffersSuspended) {
+    if (mCodec->mCreateInputBuffersSuspended) {
         bool suspend = true;
         err = mCodec->mOMX->setInternalOption(
                 mCodec->mNode,
@@ -5159,9 +5790,36 @@
             ALOGE("[%s] Unable to configure option to suspend (err %d)",
                   mCodec->mComponentName.c_str(),
                   err);
+            return err;
         }
     }
 
+    uint32_t usageBits;
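+    // Query the consumer usage bits so the input format can report whether the
+    // surface consumer reads buffers with the CPU (GRALLOC_USAGE_SW_READ_OFTEN).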
+    if (mCodec->mOMX->getParameter(
+            mCodec->mNode, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
+            &usageBits, sizeof(usageBits)) == OK) {
+        mCodec->mInputFormat->setInt32(
+                "using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
+    }
+
+    return OK;
+}
+
+void ACodec::LoadedState::onCreateInputSurface(
+        const sp<AMessage> & /* msg */) {
+    ALOGV("onCreateInputSurface");
+
+    sp<AMessage> notify = mCodec->mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    status_t err = mCodec->mOMX->createInputSurface(
+            mCodec->mNode, kPortIndexInput, &bufferProducer, &mCodec->mInputMetadataType);
+
+    if (err == OK) {
+        err = setupInputSurface();
+    }
+
     if (err == OK) {
         notify->setObject("input-surface",
                 new BufferProducerWrapper(bufferProducer));
@@ -5176,14 +5834,45 @@
     notify->post();
 }
 
+void ACodec::LoadedState::onSetInputSurface(
+        const sp<AMessage> &msg) {
+    ALOGV("onSetInputSurface");
+
+    sp<AMessage> notify = mCodec->mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatInputSurfaceAccepted);
+
+    sp<RefBase> obj;
+    CHECK(msg->findObject("input-surface", &obj));
+    sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
+
+    status_t err = mCodec->mOMX->setInputSurface(
+            mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
+            &mCodec->mInputMetadataType);
+
+    if (err == OK) {
+        err = setupInputSurface();
+    }
+
+    if (err != OK) {
+        // Can't use mCodec->signalError() here -- MediaCodec won't forward
+        // the error through because it's in the "configured" state.  We
+        // send a kWhatInputSurfaceAccepted with an error value instead.
+        ALOGE("[%s] onSetInputSurface returning error %d",
+                mCodec->mComponentName.c_str(), err);
+        notify->setInt32("err", err);
+    }
+    notify->post();
+}
+
 void ACodec::LoadedState::onStart() {
     ALOGV("onStart");
 
-    CHECK_EQ(mCodec->mOMX->sendCommand(
-                mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle),
-             (status_t)OK);
-
-    mCodec->changeState(mCodec->mLoadedToIdleState);
+    status_t err = mCodec->mOMX->sendCommand(mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+    if (err != OK) {
+        mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+    } else {
+        mCodec->changeState(mCodec->mLoadedToIdleState);
+    }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -5257,14 +5946,25 @@
     switch (event) {
         case OMX_EventCmdComplete:
         {
-            CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet);
-            CHECK_EQ(data2, (OMX_U32)OMX_StateIdle);
+            status_t err = OK;
+            if (data1 != (OMX_U32)OMX_CommandStateSet
+                    || data2 != (OMX_U32)OMX_StateIdle) {
+                ALOGE("Unexpected command completion in LoadedToIdleState: %s(%u) %s(%u)",
+                        asString((OMX_COMMANDTYPE)data1), data1,
+                        asString((OMX_STATETYPE)data2), data2);
+                err = FAILED_TRANSACTION;
+            }
 
-            CHECK_EQ(mCodec->mOMX->sendCommand(
-                        mCodec->mNode, OMX_CommandStateSet, OMX_StateExecuting),
-                     (status_t)OK);
+            if (err == OK) {
+                err = mCodec->mOMX->sendCommand(
+                    mCodec->mNode, OMX_CommandStateSet, OMX_StateExecuting);
+            }
 
-            mCodec->changeState(mCodec->mIdleToExecutingState);
+            if (err != OK) {
+                mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+            } else {
+                mCodec->changeState(mCodec->mIdleToExecutingState);
+            }
 
             return true;
         }
@@ -5325,8 +6025,14 @@
     switch (event) {
         case OMX_EventCmdComplete:
         {
-            CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet);
-            CHECK_EQ(data2, (OMX_U32)OMX_StateExecuting);
+            if (data1 != (OMX_U32)OMX_CommandStateSet
+                    || data2 != (OMX_U32)OMX_StateExecuting) {
+                ALOGE("Unexpected command completion in IdleToExecutingState: %s(%u) %s(%u)",
+                        asString((OMX_COMMANDTYPE)data1), data1,
+                        asString((OMX_STATETYPE)data2), data2);
+                mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                return true;
+            }
 
             mCodec->mExecutingState->resume();
             mCodec->changeState(mCodec->mExecutingState);
@@ -5358,59 +6064,77 @@
         BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
 
         if (info->mStatus == BufferInfo::OWNED_BY_COMPONENT) {
-            if (mCodec->submitOutputMetaDataBuffer() != OK)
+            if (mCodec->submitOutputMetadataBuffer() != OK)
                 break;
         }
     }
 
     // *** NOTE: THE FOLLOWING WORKAROUND WILL BE REMOVED ***
-    mCodec->signalSubmitOutputMetaDataBufferIfEOS_workaround();
+    mCodec->signalSubmitOutputMetadataBufferIfEOS_workaround();
 }
 
 void ACodec::ExecutingState::submitRegularOutputBuffers() {
+    bool failed = false;
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexOutput].size(); ++i) {
         BufferInfo *info = &mCodec->mBuffers[kPortIndexOutput].editItemAt(i);
 
         if (mCodec->mNativeWindow != NULL) {
-            CHECK(info->mStatus == BufferInfo::OWNED_BY_US
-                    || info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW);
+            if (info->mStatus != BufferInfo::OWNED_BY_US
+                    && info->mStatus != BufferInfo::OWNED_BY_NATIVE_WINDOW) {
+                ALOGE("buffers should be owned by us or the surface");
+                failed = true;
+                break;
+            }
 
             if (info->mStatus == BufferInfo::OWNED_BY_NATIVE_WINDOW) {
                 continue;
             }
         } else {
-            CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
+            if (info->mStatus != BufferInfo::OWNED_BY_US) {
+                ALOGE("buffers should be owned by us");
+                failed = true;
+                break;
+            }
         }
 
-        ALOGV("[%s] calling fillBuffer %p",
-             mCodec->mComponentName.c_str(), info->mBufferID);
+        ALOGV("[%s] calling fillBuffer %u", mCodec->mComponentName.c_str(), info->mBufferID);
 
-        CHECK_EQ(mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID),
-                 (status_t)OK);
+        info->checkWriteFence("submitRegularOutputBuffers");
+        status_t err = mCodec->mOMX->fillBuffer(mCodec->mNode, info->mBufferID, info->mFenceFd);
+        info->mFenceFd = -1;
+        if (err != OK) {
+            failed = true;
+            break;
+        }
 
         info->mStatus = BufferInfo::OWNED_BY_COMPONENT;
     }
+
+    if (failed) {
+        mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+    }
 }
 
 void ACodec::ExecutingState::submitOutputBuffers() {
     submitRegularOutputBuffers();
-    if (mCodec->mStoreMetaDataInOutputBuffers) {
+    if (mCodec->storingMetadataInDecodedBuffers()) {
         submitOutputMetaBuffers();
     }
 }
 
 void ACodec::ExecutingState::resume() {
     if (mActive) {
-        ALOGV("[%s] We're already active, no need to resume.",
-             mCodec->mComponentName.c_str());
-
+        ALOGV("[%s] We're already active, no need to resume.", mCodec->mComponentName.c_str());
         return;
     }
 
     submitOutputBuffers();
 
     // Post all available input buffers
-    CHECK_GT(mCodec->mBuffers[kPortIndexInput].size(), 0u);
+    if (mCodec->mBuffers[kPortIndexInput].size() == 0u) {
+        ALOGW("[%s] we don't have any input buffers to resume", mCodec->mComponentName.c_str());
+    }
+
     for (size_t i = 0; i < mCodec->mBuffers[kPortIndexInput].size(); i++) {
         BufferInfo *info = &mCodec->mBuffers[kPortIndexInput].editItemAt(i);
         if (info->mStatus == BufferInfo::OWNED_BY_US) {
@@ -5424,6 +6148,7 @@
 void ACodec::ExecutingState::stateEntered() {
     ALOGV("[%s] Now Executing", mCodec->mComponentName.c_str());
 
+    mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
     mCodec->processDeferredMessages();
 }
 
@@ -5443,11 +6168,16 @@
 
             mActive = false;
 
-            CHECK_EQ(mCodec->mOMX->sendCommand(
-                        mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle),
-                     (status_t)OK);
-
-            mCodec->changeState(mCodec->mExecutingToIdleState);
+            status_t err = mCodec->mOMX->sendCommand(
+                    mCodec->mNode, OMX_CommandStateSet, OMX_StateIdle);
+            if (err != OK) {
+                if (keepComponentAllocated) {
+                    mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                }
+                // TODO: do some recovery here.
+            } else {
+                mCodec->changeState(mCodec->mExecutingToIdleState);
+            }
 
             handled = true;
             break;
@@ -5456,7 +6186,7 @@
         case kWhatFlush:
         {
             ALOGV("[%s] ExecutingState flushing now "
-                 "(codec owns %d/%d input, %d/%d output).",
+                 "(codec owns %zu/%zu input, %zu/%zu output).",
                     mCodec->mComponentName.c_str(),
                     mCodec->countBuffersOwnedByComponent(kPortIndexInput),
                     mCodec->mBuffers[kPortIndexInput].size(),
@@ -5465,11 +6195,13 @@
 
             mActive = false;
 
-            CHECK_EQ(mCodec->mOMX->sendCommand(
-                        mCodec->mNode, OMX_CommandFlush, OMX_ALL),
-                     (status_t)OK);
+            status_t err = mCodec->mOMX->sendCommand(mCodec->mNode, OMX_CommandFlush, OMX_ALL);
+            if (err != OK) {
+                mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+            } else {
+                mCodec->changeState(mCodec->mFlushingState);
+            }
 
-            mCodec->changeState(mCodec->mFlushingState);
             handled = true;
             break;
         }
@@ -5518,13 +6250,13 @@
         }
 
         // *** NOTE: THE FOLLOWING WORKAROUND WILL BE REMOVED ***
-        case kWhatSubmitOutputMetaDataBufferIfEOS:
+        case kWhatSubmitOutputMetadataBufferIfEOS:
         {
             if (mCodec->mPortEOS[kPortIndexInput] &&
                     !mCodec->mPortEOS[kPortIndexOutput]) {
-                status_t err = mCodec->submitOutputMetaDataBuffer();
+                status_t err = mCodec->submitOutputMetadataBuffer();
                 if (err == OK) {
-                    mCodec->signalSubmitOutputMetaDataBufferIfEOS_workaround();
+                    mCodec->signalSubmitOutputMetadataBufferIfEOS_workaround();
                 }
             }
             return true;
@@ -5604,6 +6336,15 @@
         }
     }
 
+    float rate;
+    if (params->findFloat("operating-rate", &rate) && rate > 0) {
+        status_t err = setOperatingRate(rate, mIsVideo);
+        if (err != OK) {
+            ALOGE("Failed to set parameter 'operating-rate' (err %d)", err);
+            return err;
+        }
+    }
+
     return OK;
 }
 
@@ -5618,6 +6359,11 @@
     notify->post();
 }
 
+bool ACodec::ExecutingState::onOMXFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
+    mCodec->onFrameRendered(mediaTimeUs, systemNano);
+    return true;
+}
+
 bool ACodec::ExecutingState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
     switch (event) {
@@ -5626,7 +6372,7 @@
             CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
 
             if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
-                mCodec->mMetaDataBuffersToSubmit = 0;
+                mCodec->mMetadataBuffersToSubmit = 0;
                 CHECK_EQ(mCodec->mOMX->sendCommand(
                             mCodec->mNode,
                             OMX_CommandPortDisable, kPortIndexOutput),
@@ -5637,8 +6383,13 @@
                 mCodec->changeState(mCodec->mOutputPortSettingsChangedState);
             } else if (data2 == OMX_IndexConfigCommonOutputCrop) {
                 mCodec->mSentFormat = false;
+
+                if (mCodec->mTunneled) {
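+                    // in tunneled mode no output buffer will be drained, so send
+                    // the format change right away via a placeholder reply message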
+                    sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
+                    mCodec->sendFormatChange(dummy);
+                }
             } else {
-                ALOGV("[%s] OMX_EventPortSettingsChanged 0x%08lx",
+                ALOGV("[%s] OMX_EventPortSettingsChanged 0x%08x",
                      mCodec->mComponentName.c_str(), data2);
             }
 
@@ -5705,31 +6456,46 @@
          mCodec->mComponentName.c_str());
 }
 
+bool ACodec::OutputPortSettingsChangedState::onOMXFrameRendered(
+        int64_t mediaTimeUs, nsecs_t systemNano) {
+    mCodec->onFrameRendered(mediaTimeUs, systemNano);
+    return true;
+}
+
 bool ACodec::OutputPortSettingsChangedState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
     switch (event) {
         case OMX_EventCmdComplete:
         {
             if (data1 == (OMX_U32)OMX_CommandPortDisable) {
-                CHECK_EQ(data2, (OMX_U32)kPortIndexOutput);
+                if (data2 != (OMX_U32)kPortIndexOutput) {
+                    ALOGW("ignoring EventCmdComplete CommandPortDisable for port %u", data2);
+                    return false;
+                }
 
-                ALOGV("[%s] Output port now disabled.",
-                        mCodec->mComponentName.c_str());
+                ALOGV("[%s] Output port now disabled.", mCodec->mComponentName.c_str());
 
-                CHECK(mCodec->mBuffers[kPortIndexOutput].isEmpty());
-                mCodec->mDealer[kPortIndexOutput].clear();
+                status_t err = OK;
+                if (!mCodec->mBuffers[kPortIndexOutput].isEmpty()) {
+                    ALOGE("disabled port should be empty, but has %zu buffers",
+                            mCodec->mBuffers[kPortIndexOutput].size());
+                    err = FAILED_TRANSACTION;
+                } else {
+                    mCodec->mDealer[kPortIndexOutput].clear();
+                }
 
-                CHECK_EQ(mCodec->mOMX->sendCommand(
-                            mCodec->mNode, OMX_CommandPortEnable, kPortIndexOutput),
-                         (status_t)OK);
+                if (err == OK) {
+                    err = mCodec->mOMX->sendCommand(
+                            mCodec->mNode, OMX_CommandPortEnable, kPortIndexOutput);
+                }
 
-                status_t err;
-                if ((err = mCodec->allocateBuffersOnPort(
-                                kPortIndexOutput)) != OK) {
-                    ALOGE("Failed to allocate output port buffers after "
-                         "port reconfiguration (error 0x%08x)",
-                         err);
+                if (err == OK) {
+                    err = mCodec->allocateBuffersOnPort(kPortIndexOutput);
+                    ALOGE_IF(err != OK, "Failed to allocate output port buffers after port "
+                            "reconfiguration: (%d)", err);
+                }
 
+                if (err != OK) {
                     mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
 
                     // This is technically not correct, but appears to be
@@ -5744,12 +6510,19 @@
 
                 return true;
             } else if (data1 == (OMX_U32)OMX_CommandPortEnable) {
-                CHECK_EQ(data2, (OMX_U32)kPortIndexOutput);
+                if (data2 != (OMX_U32)kPortIndexOutput) {
+                    ALOGW("ignoring EventCmdComplete OMX_CommandPortEnable for port %u", data2);
+                    return false;
+                }
 
                 mCodec->mSentFormat = false;
 
-                ALOGV("[%s] Output port now reenabled.",
-                        mCodec->mComponentName.c_str());
+                if (mCodec->mTunneled) {
+                    sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
+                    mCodec->sendFormatChange(dummy);
+                }
+
+                ALOGV("[%s] Output port now reenabled.", mCodec->mComponentName.c_str());
 
                 if (mCodec->mExecutingState->active()) {
                     mCodec->mExecutingState->submitOutputBuffers();
@@ -5783,7 +6556,7 @@
         {
             // Don't send me a flush request if you previously wanted me
             // to shutdown.
-            TRESPASS();
+            ALOGW("Ignoring flush request in ExecutingToIdleState");
             break;
         }
 
@@ -5815,8 +6588,14 @@
     switch (event) {
         case OMX_EventCmdComplete:
         {
-            CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet);
-            CHECK_EQ(data2, (OMX_U32)OMX_StateIdle);
+            if (data1 != (OMX_U32)OMX_CommandStateSet
+                    || data2 != (OMX_U32)OMX_StateIdle) {
+                ALOGE("Unexpected command completion in ExecutingToIdleState: %s(%u) %s(%u)",
+                        asString((OMX_COMMANDTYPE)data1), data1,
+                        asString((OMX_STATETYPE)data2), data2);
+                mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                return true;
+            }
 
             mComponentNowIdle = true;
 
@@ -5839,12 +6618,15 @@
 
 void ACodec::ExecutingToIdleState::changeStateIfWeOwnAllBuffers() {
     if (mComponentNowIdle && mCodec->allYourBuffersAreBelongToUs()) {
-        CHECK_EQ(mCodec->mOMX->sendCommand(
-                    mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded),
-                 (status_t)OK);
-
-        CHECK_EQ(mCodec->freeBuffersOnPort(kPortIndexInput), (status_t)OK);
-        CHECK_EQ(mCodec->freeBuffersOnPort(kPortIndexOutput), (status_t)OK);
+        status_t err = mCodec->mOMX->sendCommand(
+                mCodec->mNode, OMX_CommandStateSet, OMX_StateLoaded);
+        if (err == OK) {
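+            // free buffers on both ports even if one fails, but keep the first error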
+            err = mCodec->freeBuffersOnPort(kPortIndexInput);
+            status_t err2 = mCodec->freeBuffersOnPort(kPortIndexOutput);
+            if (err == OK) {
+                err = err2;
+            }
+        }
 
         if ((mCodec->mFlags & kFlagPushBlankBuffersToNativeWindowOnShutdown)
                 && mCodec->mNativeWindow != NULL) {
@@ -5852,7 +6634,12 @@
             // them has made it to the display.  This allows the OMX
             // component teardown to zero out any protected buffers
             // without the risk of scanning out one of those buffers.
-            mCodec->pushBlankBuffersToNativeWindow();
+            pushBlankBuffersToNativeWindow(mCodec->mNativeWindow.get());
+        }
+
+        if (err != OK) {
+            mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+            return;
         }
 
         mCodec->changeState(mCodec->mIdleToLoadedState);
@@ -5895,7 +6682,7 @@
         {
             // Don't send me a flush request if you previously wanted me
             // to shutdown.
-            TRESPASS();
+            ALOGE("Got flush request in IdleToLoadedState");
             break;
         }
 
@@ -5916,8 +6703,14 @@
     switch (event) {
         case OMX_EventCmdComplete:
         {
-            CHECK_EQ(data1, (OMX_U32)OMX_CommandStateSet);
-            CHECK_EQ(data2, (OMX_U32)OMX_StateLoaded);
+            if (data1 != (OMX_U32)OMX_CommandStateSet
+                    || data2 != (OMX_U32)OMX_StateLoaded) {
+                ALOGE("Unexpected command completion in IdleToLoadedState: %s(%u) %s(%u)",
+                        asString((OMX_COMMANDTYPE)data1), data1,
+                        asString((OMX_STATETYPE)data2), data2);
+                mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                return true;
+            }
 
             mCodec->changeState(mCodec->mLoadedState);
 
@@ -5968,28 +6761,41 @@
 
 bool ACodec::FlushingState::onOMXEvent(
         OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
-    ALOGV("[%s] FlushingState onOMXEvent(%d,%ld)",
-            mCodec->mComponentName.c_str(), event, data1);
+    ALOGV("[%s] FlushingState onOMXEvent(%u,%d)",
+            mCodec->mComponentName.c_str(), event, (OMX_S32)data1);
 
     switch (event) {
         case OMX_EventCmdComplete:
         {
-            CHECK_EQ(data1, (OMX_U32)OMX_CommandFlush);
+            if (data1 != (OMX_U32)OMX_CommandFlush) {
+                ALOGE("unexpected EventCmdComplete %s(%d) data2:%d in FlushingState",
+                        asString((OMX_COMMANDTYPE)data1), data1, data2);
+                mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+                return true;
+            }
 
             if (data2 == kPortIndexInput || data2 == kPortIndexOutput) {
-                CHECK(!mFlushComplete[data2]);
+                if (mFlushComplete[data2]) {
+                    ALOGW("Flush already completed for %s port",
+                            data2 == kPortIndexInput ? "input" : "output");
+                    return true;
+                }
                 mFlushComplete[data2] = true;
 
-                if (mFlushComplete[kPortIndexInput]
-                        && mFlushComplete[kPortIndexOutput]) {
+                if (mFlushComplete[kPortIndexInput] && mFlushComplete[kPortIndexOutput]) {
                     changeStateIfWeOwnAllBuffers();
                 }
-            } else {
-                CHECK_EQ(data2, OMX_ALL);
-                CHECK(mFlushComplete[kPortIndexInput]);
-                CHECK(mFlushComplete[kPortIndexOutput]);
+            } else if (data2 == OMX_ALL) {
+                if (!mFlushComplete[kPortIndexInput] || !mFlushComplete[kPortIndexOutput]) {
+                    ALOGW("received flush complete event for OMX_ALL before ports have been "
+                            "flushed (%d/%d)",
+                            mFlushComplete[kPortIndexInput], mFlushComplete[kPortIndexOutput]);
+                    return false;
+                }
 
                 changeStateIfWeOwnAllBuffers();
+            } else {
+                ALOGW("data2 not OMX_ALL but %u in EventCmdComplete CommandFlush", data2);
             }
 
             return true;
@@ -5997,7 +6803,7 @@
 
         case OMX_EventPortSettingsChanged:
         {
-            sp<AMessage> msg = new AMessage(kWhatOMXMessage, mCodec->id());
+            sp<AMessage> msg = new AMessage(kWhatOMXMessage, mCodec);
             msg->setInt32("type", omx_message::EVENT);
             msg->setInt32("node", mCodec->mNode);
             msg->setInt32("event", event);
@@ -6039,6 +6845,8 @@
         // the native window for rendering. Let's get those back as well.
         mCodec->waitUntilAllPossibleNativeWindowBuffersAreReturnedToUs();
 
+        mCodec->mRenderTracker.clear(systemTime(CLOCK_MONOTONIC));
+
         sp<AMessage> notify = mCodec->mNotify->dup();
         notify->setInt32("what", CodecBase::kWhatFlushCompleted);
         notify->post();
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 9aa7d95..f53d7f0 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -31,19 +31,6 @@
 
 namespace android {
 
-AMRWriter::AMRWriter(const char *filename)
-    : mFd(-1),
-      mInitCheck(NO_INIT),
-      mStarted(false),
-      mPaused(false),
-      mResumed(false) {
-
-    mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
-    if (mFd >= 0) {
-        mInitCheck = OK;
-    }
-}
-
 AMRWriter::AMRWriter(int fd)
     : mFd(dup(fd)),
       mInitCheck(mFd < 0? NO_INIT: OK),
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 2629afc..2529aa7 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -12,6 +12,7 @@
         AudioPlayer.cpp                   \
         AudioSource.cpp                   \
         AwesomePlayer.cpp                 \
+        CallbackDataSource.cpp            \
         CameraSource.cpp                  \
         CameraSourceTimeLapse.cpp         \
         ClockEstimator.cpp                \
@@ -22,6 +23,7 @@
         ESDS.cpp                          \
         FileSource.cpp                    \
         FLACExtractor.cpp                 \
+        FrameRenderTracker.cpp            \
         HTTPBase.cpp                      \
         JPEGSource.cpp                    \
         MP3Extractor.cpp                  \
@@ -31,11 +33,14 @@
         MediaAdapter.cpp                  \
         MediaBuffer.cpp                   \
         MediaBufferGroup.cpp              \
+        MediaClock.cpp                    \
         MediaCodec.cpp                    \
         MediaCodecList.cpp                \
+        MediaCodecListOverrides.cpp       \
         MediaCodecSource.cpp              \
         MediaDefs.cpp                     \
         MediaExtractor.cpp                \
+        MediaSync.cpp                     \
         MidiExtractor.cpp                 \
         http/MediaHTTP.cpp                \
         MediaMuxer.cpp                    \
@@ -46,17 +51,20 @@
         OMXClient.cpp                     \
         OMXCodec.cpp                      \
         OggExtractor.cpp                  \
+        ProcessInfo.cpp                   \
         SampleIterator.cpp                \
         SampleTable.cpp                   \
         SkipCutBuffer.cpp                 \
         StagefrightMediaScanner.cpp       \
         StagefrightMetadataRetriever.cpp  \
         SurfaceMediaSource.cpp            \
+        SurfaceUtils.cpp                  \
         ThrottledSource.cpp               \
         TimeSource.cpp                    \
         TimedEventQueue.cpp               \
         Utils.cpp                         \
         VBRISeeker.cpp                    \
+        VideoFrameScheduler.cpp           \
         WAVExtractor.cpp                  \
         WVMExtractor.cpp                  \
         XINGSeeker.cpp                    \
@@ -84,6 +92,7 @@
         libicuuc \
         liblog \
         libmedia \
+        libmediautils \
         libnetd_client \
         libopus \
         libsonivox \
@@ -101,6 +110,7 @@
         libstagefright_color_conversion \
         libstagefright_aacenc \
         libstagefright_matroska \
+        libstagefright_mediafilter \
         libstagefright_webm \
         libstagefright_timedtext \
         libvpx \
@@ -108,15 +118,23 @@
         libstagefright_mpeg2ts \
         libstagefright_id3 \
         libFLAC \
-        libmedia_helper
+        libmedia_helper \
 
 LOCAL_SHARED_LIBRARIES += \
         libstagefright_enc_common \
         libstagefright_avc_common \
         libstagefright_foundation \
-        libdl
+        libdl \
+        libRScpp \
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wno-error=deprecated-declarations -Wall
+
+# enable experiments only in userdebug and eng builds
+ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
+LOCAL_CFLAGS += -DENABLE_STAGEFRIGHT_EXPERIMENTS
+endif
+
+LOCAL_CLANG := true
 
 LOCAL_MODULE:= libstagefright
 
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index e24824b..dd9d393 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -408,11 +408,22 @@
     }
 }
 
-status_t AudioPlayer::setPlaybackRatePermille(int32_t ratePermille) {
+status_t AudioPlayer::setPlaybackRate(const AudioPlaybackRate &rate) {
     if (mAudioSink.get() != NULL) {
-        return mAudioSink->setPlaybackRatePermille(ratePermille);
+        return mAudioSink->setPlaybackRate(rate);
     } else if (mAudioTrack != 0){
-        return mAudioTrack->setSampleRate(ratePermille * mSampleRate / 1000);
+        return mAudioTrack->setPlaybackRate(rate);
+    } else {
+        return NO_INIT;
+    }
+}
+
+status_t AudioPlayer::getPlaybackRate(AudioPlaybackRate *rate /* nonnull */) {
+    if (mAudioSink.get() != NULL) {
+        return mAudioSink->getPlaybackRate(rate);
+    } else if (mAudioTrack != 0) {
+        *rate = mAudioTrack->getPlaybackRate();
+        return OK;
     } else {
         return NO_INIT;
     }
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 804f131..3505844 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -50,14 +50,19 @@
 }
 
 AudioSource::AudioSource(
-        audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
+        audio_source_t inputSource, const String16 &opPackageName,
+        uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate)
     : mStarted(false),
       mSampleRate(sampleRate),
+      mOutSampleRate(outSampleRate > 0 ? outSampleRate : sampleRate),
       mPrevSampleTimeUs(0),
+      mFirstSampleTimeUs(-1ll),
       mNumFramesReceived(0),
       mNumClientOwnedBuffers(0) {
-    ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
+    ALOGV("sampleRate: %u, outSampleRate: %u, channelCount: %u",
+            sampleRate, outSampleRate, channelCount);
     CHECK(channelCount == 1 || channelCount == 2);
+    CHECK(sampleRate > 0);
 
     size_t minFrameCount;
     status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
@@ -78,11 +83,15 @@
         mRecord = new AudioRecord(
                     inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                     audio_channel_in_mask_from_count(channelCount),
+                    opPackageName,
                     (size_t) (bufCount * frameCount),
                     AudioRecordCallbackFunction,
                     this,
                     frameCount /*notificationFrames*/);
         mInitCheck = mRecord->initCheck();
+        if (mInitCheck != OK) {
+            mRecord.clear();
+        }
     } else {
         mInitCheck = status;
     }
@@ -256,6 +265,15 @@
             (int16_t *) buffer->data(), buffer->range_length() >> 1);
     }
 
+    if (mSampleRate != mOutSampleRate) {
+        if (mFirstSampleTimeUs < 0) {
+            mFirstSampleTimeUs = timeUs;
+        }
+        timeUs = mFirstSampleTimeUs + (timeUs - mFirstSampleTimeUs)
+                * (int64_t)mSampleRate / (int64_t)mOutSampleRate;
+        buffer->meta_data()->setInt64(kKeyTime, timeUs);
+    }
+
     *out = buffer;
     return OK;
 }
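
The timestamp adjustment added above rescales capture timestamps when the requested output sample rate differs from the capture rate, anchoring at the first sample's time. A standalone sketch of that mapping (the names below are illustrative stand-ins, not the actual AudioSource members):

#include <cstdint>
#include <cstdio>

// Remap a capture timestamp (microseconds) when audio captured at
// captureRate Hz is to be presented as if it were outRate Hz. The first
// sample keeps its original time; later samples are scaled around that
// anchor, mirroring the adjustment in the read path above.
int64_t remapTimestampUs(int64_t timeUs, int64_t firstSampleTimeUs,
                         int64_t captureRate, int64_t outRate) {
    if (outRate <= 0 || captureRate == outRate) {
        return timeUs;  // nothing to do
    }
    return firstSampleTimeUs +
           (timeUs - firstSampleTimeUs) * captureRate / outRate;
}

int main() {
    // Example: captured at 48000 Hz, presented as 32000 Hz. A buffer stamped
    // 1,000,000 us after the first sample lands 1,500,000 us later in media
    // time (the delta stretches by 48000/32000 = 1.5).
    const int64_t first = 10000000;
    printf("%lld\n",
           (long long)remapTimestampUs(first + 1000000, first, 48000, 32000));
    return 0;
}
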
diff --git a/media/libstagefright/AwesomePlayer.cpp b/media/libstagefright/AwesomePlayer.cpp
index 87eef1e..4e6c2a6 100644
--- a/media/libstagefright/AwesomePlayer.cpp
+++ b/media/libstagefright/AwesomePlayer.cpp
@@ -113,11 +113,11 @@
         CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
 
         render((const uint8_t *)buffer->data() + buffer->range_offset(),
-               buffer->range_length(), timeUs * 1000);
+               buffer->range_length(), timeUs, timeUs * 1000);
     }
 
-    void render(const void *data, size_t size, int64_t timestampNs) {
-        mTarget->render(data, size, timestampNs, NULL, mFormat);
+    void render(const void *data, size_t size, int64_t mediaTimeUs, nsecs_t renderTimeNs) {
+        (void)mTarget->render(data, size, mediaTimeUs, renderTimeNs, NULL, mFormat);
     }
 
 protected:
@@ -241,6 +241,8 @@
 
     mClockEstimator = new WindowedLinearFitEstimator();
 
+    mPlaybackSettings = AUDIO_PLAYBACK_RATE_DEFAULT;
+
     reset();
 }
 
@@ -360,7 +362,7 @@
     return setDataSource_l(dataSource);
 }
 
-status_t AwesomePlayer::setDataSource(const sp<IStreamSource> &source) {
+status_t AwesomePlayer::setDataSource(const sp<IStreamSource> &source __unused) {
     return INVALID_OPERATION;
 }
 
@@ -422,7 +424,7 @@
 
     mBitrate = totalBitRate;
 
-    ALOGV("mBitrate = %lld bits/sec", mBitrate);
+    ALOGV("mBitrate = %lld bits/sec", (long long)mBitrate);
 
     {
         Mutex::Autolock autoLock(mStatsLock);
@@ -1009,6 +1011,10 @@
                 return err;
             }
         }
+
+        if (mAudioPlayer != NULL) {
+            mAudioPlayer->setPlaybackRate(mPlaybackSettings);
+        }
     }
 
     if (mTimeSource == NULL && mAudioPlayer == NULL) {
@@ -1131,6 +1137,10 @@
     }
 
     if (err == OK) {
+        err = mAudioPlayer->setPlaybackRate(mPlaybackSettings);
+    }
+
+    if (err == OK) {
         modifyFlags(AUDIO_RUNNING, SET);
 
         mWatchForAudioEOS = true;
@@ -1722,7 +1732,7 @@
     // we are now resuming.  Signal new position to media time provider.
     // Cannot signal another SEEK_COMPLETE, as existing clients may not expect
     // multiple SEEK_COMPLETE responses to a single seek() request.
-    if (mSeekNotificationSent && abs(mSeekTimeUs - videoTimeUs) > 10000) {
+    if (mSeekNotificationSent && llabs((long long)(mSeekTimeUs - videoTimeUs)) > 10000) {
         // notify if we are resuming more than 10ms away from desired seek time
         notifyListener_l(MEDIA_SKIPPED);
     }
@@ -2358,7 +2368,7 @@
                         }
 
                         CHECK_GE(metaDataSize, 0ll);
-                        ALOGV("metaDataSize = %lld bytes", metaDataSize);
+                        ALOGV("metaDataSize = %lld bytes", (long long)metaDataSize);
                     }
 
                     usleep(200000);
@@ -2553,14 +2563,6 @@
         {
             return setCacheStatCollectFreq(request);
         }
-        case KEY_PARAMETER_PLAYBACK_RATE_PERMILLE:
-        {
-            if (mAudioPlayer != NULL) {
-                return mAudioPlayer->setPlaybackRatePermille(request.readInt32());
-            } else {
-                return NO_INIT;
-            }
-        }
         default:
         {
             return ERROR_UNSUPPORTED;
@@ -2597,6 +2599,58 @@
     }
 }
 
+status_t AwesomePlayer::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    Mutex::Autolock autoLock(mLock);
+    // cursory sanity check for non-audio and paused cases
+    if ((rate.mSpeed != 0.f && rate.mSpeed < AUDIO_TIMESTRETCH_SPEED_MIN)
+        || rate.mSpeed > AUDIO_TIMESTRETCH_SPEED_MAX
+        || rate.mPitch < AUDIO_TIMESTRETCH_SPEED_MIN
+        || rate.mPitch > AUDIO_TIMESTRETCH_SPEED_MAX) {
+        return BAD_VALUE;
+    }
+
+    status_t err = OK;
+    if (rate.mSpeed == 0.f) {
+        if (mFlags & PLAYING) {
+            modifyFlags(CACHE_UNDERRUN, CLEAR); // same as pause
+            err = pause_l();
+        }
+        if (err == OK) {
+            // save settings (using old speed) in case player is resumed
+            AudioPlaybackRate newRate = rate;
+            newRate.mSpeed = mPlaybackSettings.mSpeed;
+            mPlaybackSettings = newRate;
+        }
+        return err;
+    }
+    if (mAudioPlayer != NULL) {
+        err = mAudioPlayer->setPlaybackRate(rate);
+    }
+    if (err == OK) {
+        mPlaybackSettings = rate;
+        if (!(mFlags & PLAYING)) {
+            play_l();
+        }
+    }
+    return err;
+}
+
+status_t AwesomePlayer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
+    if (mAudioPlayer != NULL) {
+        status_t err = mAudioPlayer->getPlaybackRate(rate);
+        if (err == OK) {
+            mPlaybackSettings = *rate;
+            Mutex::Autolock autoLock(mLock);
+            if (!(mFlags & PLAYING)) {
+                rate->mSpeed = 0.f;
+            }
+        }
+        return err;
+    }
+    *rate = mPlaybackSettings;
+    return OK;
+}
+
 status_t AwesomePlayer::getTrackInfo(Parcel *reply) const {
     Mutex::Autolock autoLock(mLock);
     size_t trackCount = mExtractor->countTracks();
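
The new setPlaybackSettings/getPlaybackSettings pair above treats a speed of 0 as "pause, but remember the last real speed" and rejects out-of-range speed/pitch values up front. A minimal standalone sketch of that convention, using placeholder limits and a toy player rather than the real AwesomePlayer/AudioPlaybackRate types:

#include <cstdio>

// Placeholder limits standing in for AUDIO_TIMESTRETCH_SPEED_MIN/MAX.
static const float kSpeedMin = 0.01f;
static const float kSpeedMax = 20.0f;

struct PlaybackRate { float speed; float pitch; };

struct TinyPlayer {
    bool playing = false;
    PlaybackRate settings{1.0f, 1.0f};

    bool setPlaybackSettings(const PlaybackRate &rate) {
        // Reject out-of-range values, but allow speed == 0 as "pause".
        if ((rate.speed != 0.f && rate.speed < kSpeedMin) || rate.speed > kSpeedMax
                || rate.pitch < kSpeedMin || rate.pitch > kSpeedMax) {
            return false;
        }
        if (rate.speed == 0.f) {
            playing = false;
            // Keep the previous speed so a later resume restores it; only
            // the pitch is taken from the new request.
            PlaybackRate saved = rate;
            saved.speed = settings.speed;
            settings = saved;
            return true;
        }
        settings = rate;
        playing = true;   // a non-zero speed (re)starts playback
        return true;
    }

    PlaybackRate getPlaybackSettings() const {
        PlaybackRate r = settings;
        if (!playing) {
            r.speed = 0.f;   // report "paused" as speed 0, as above
        }
        return r;
    }
};

int main() {
    TinyPlayer p;
    p.setPlaybackSettings({2.0f, 1.0f});   // play at 2x
    p.setPlaybackSettings({0.0f, 1.0f});   // pause, remember the 2x speed
    PlaybackRate r = p.getPlaybackSettings();
    printf("speed=%.1f pitch=%.1f playing=%d\n", r.speed, r.pitch, (int)p.playing);
    return 0;
}
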
diff --git a/media/libstagefright/CallbackDataSource.cpp b/media/libstagefright/CallbackDataSource.cpp
new file mode 100644
index 0000000..e17fdf8
--- /dev/null
+++ b/media/libstagefright/CallbackDataSource.cpp
@@ -0,0 +1,160 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "CallbackDataSource"
+#include <utils/Log.h>
+
+#include "include/CallbackDataSource.h"
+
+#include <binder/IMemory.h>
+#include <media/IDataSource.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+#include <algorithm>
+
+namespace android {
+
+CallbackDataSource::CallbackDataSource(
+    const sp<IDataSource>& binderDataSource)
+    : mIDataSource(binderDataSource) {
+    // Set up the buffer to read into.
+    mMemory = mIDataSource->getIMemory();
+}
+
+CallbackDataSource::~CallbackDataSource() {
+    ALOGV("~CallbackDataSource");
+    mIDataSource->close();
+}
+
+status_t CallbackDataSource::initCheck() const {
+    if (mMemory == NULL) {
+        return UNKNOWN_ERROR;
+    }
+    return OK;
+}
+
+ssize_t CallbackDataSource::readAt(off64_t offset, void* data, size_t size) {
+    if (mMemory == NULL) {
+        return -1;
+    }
+
+    // IDataSource can only read up to mMemory->size() bytes at a time, but this
+    // method should be able to read any number of bytes, so read in a loop.
+    size_t totalNumRead = 0;
+    size_t numLeft = size;
+    const size_t bufferSize = mMemory->size();
+
+    while (numLeft > 0) {
+        size_t numToRead = std::min(numLeft, bufferSize);
+        ssize_t numRead =
+            mIDataSource->readAt(offset + totalNumRead, numToRead);
+        // A negative return value represents an error. Pass it on.
+        if (numRead < 0) {
+            return numRead;
+        }
+        // A zero return value signals EOS. Return the bytes read so far.
+        if (numRead == 0) {
+            return totalNumRead;
+        }
+        if ((size_t)numRead > numToRead) {
+            return ERROR_OUT_OF_RANGE;
+        }
+        CHECK(numRead >= 0 && (size_t)numRead <= bufferSize);
+        memcpy(((uint8_t*)data) + totalNumRead, mMemory->pointer(), numRead);
+        numLeft -= numRead;
+        totalNumRead += numRead;
+    }
+
+    return totalNumRead;
+}
+
+status_t CallbackDataSource::getSize(off64_t *size) {
+    status_t err = mIDataSource->getSize(size);
+    if (err != OK) {
+        return err;
+    }
+    if (*size < 0) {
+        // IDataSource will set size to -1 to indicate unknown size, but
+        // DataSource returns ERROR_UNSUPPORTED for that.
+        return ERROR_UNSUPPORTED;
+    }
+    return OK;
+}
+
+TinyCacheSource::TinyCacheSource(const sp<DataSource>& source)
+    : mSource(source), mCachedOffset(0), mCachedSize(0) {
+}
+
+status_t TinyCacheSource::initCheck() const {
+    return mSource->initCheck();
+}
+
+ssize_t TinyCacheSource::readAt(off64_t offset, void* data, size_t size) {
+    if (size >= kCacheSize) {
+        return mSource->readAt(offset, data, size);
+    }
+
+    // Check if the cache satisfies the read.
+    if (mCachedOffset <= offset
+            && offset < (off64_t) (mCachedOffset + mCachedSize)) {
+        if (offset + size <= mCachedOffset + mCachedSize) {
+            memcpy(data, &mCache[offset - mCachedOffset], size);
+            return size;
+        } else {
+            // If the cache hits only partially, flush the cache and read the
+            // remainder.
+
+            // This value is guaranteed to be greater than 0 because of the
+            // enclosing if statement.
+            const ssize_t remaining = mCachedOffset + mCachedSize - offset;
+            memcpy(data, &mCache[offset - mCachedOffset], remaining);
+            const ssize_t readMore = readAt(offset + remaining,
+                    (uint8_t*)data + remaining, size - remaining);
+            if (readMore < 0) {
+                return readMore;
+            }
+            return remaining + readMore;
+        }
+    }
+
+    // Fill the cache and copy to the caller.
+    const ssize_t numRead = mSource->readAt(offset, mCache, kCacheSize);
+    if (numRead <= 0) {
+        return numRead;
+    }
+    if ((size_t)numRead > kCacheSize) {
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    mCachedSize = numRead;
+    mCachedOffset = offset;
+    CHECK(mCachedSize <= kCacheSize && mCachedOffset >= 0);
+    const size_t numToReturn = std::min(size, (size_t)numRead);
+    memcpy(data, mCache, numToReturn);
+
+    return numToReturn;
+}
+
+status_t TinyCacheSource::getSize(off64_t *size) {
+    return mSource->getSize(size);
+}
+
+uint32_t TinyCacheSource::flags() {
+    return mSource->flags();
+}
+
+} // namespace android
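
CallbackDataSource::readAt above can only pull up to mMemory->size() bytes per call (each transfer is bounded by the shared IMemory buffer), so it loops until the request is satisfied or the source reports EOS or an error. A standalone sketch of the same loop against a plain callback, with made-up types rather than the Android IDataSource interface:

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <functional>
#include <vector>

// readChunk(offset, dst, maxLen) returns bytes read, 0 on EOS, <0 on error,
// and never more than maxLen -- the same contract the loop above relies on.
using ReadChunkFn = std::function<int64_t(int64_t, uint8_t *, size_t)>;

int64_t readFully(const ReadChunkFn &readChunk, int64_t offset,
                  uint8_t *data, size_t size, size_t chunkLimit) {
    size_t total = 0;
    while (total < size) {
        size_t want = std::min(size - total, chunkLimit);
        int64_t n = readChunk(offset + (int64_t)total, data + total, want);
        if (n < 0) return n;                 // propagate errors
        if (n == 0) break;                   // EOS: return what we have
        if ((size_t)n > want) return -1;     // misbehaving source
        total += (size_t)n;
    }
    return (int64_t)total;
}

int main() {
    std::vector<uint8_t> src(100000, 0x42);
    auto readChunk = [&](int64_t off, uint8_t *dst, size_t len) -> int64_t {
        if (off >= (int64_t)src.size()) return 0;
        size_t n = std::min(len, src.size() - (size_t)off);
        memcpy(dst, src.data() + off, n);
        return (int64_t)n;
    };
    std::vector<uint8_t> out(70000);
    // Simulate a 64 KiB transfer buffer: a 70000-byte read takes two trips.
    return readFully(readChunk, 10, out.data(), out.size(), 64 * 1024) == 70000 ? 0 : 1;
}
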
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index ad12bdd..66280da 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -608,6 +608,16 @@
         }
     }
 
+    err = mCamera->sendCommand(
+        CAMERA_CMD_SET_VIDEO_FORMAT, mEncoderFormat, mEncoderDataSpace);
+
+    // This could happen for CameraHAL1 clients; thus the failure is
+    // not a fatal error
+    if (err != OK) {
+        ALOGW("Failed to set video encoder format/dataspace to %d, %d due to %d",
+                mEncoderFormat, mEncoderDataSpace, err);
+    }
+
     err = OK;
     if (mCameraFlags & FLAGS_HOT_CAMERA) {
         mCamera->unlock();
@@ -645,6 +655,9 @@
 
     mStartTimeUs = 0;
     mNumInputBuffers = 0;
+    mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    mEncoderDataSpace = HAL_DATASPACE_BT709;
+
     if (meta) {
         int64_t startTimeUs;
         if (meta->findInt64(kKeyTime, &startTimeUs)) {
@@ -656,6 +669,14 @@
             CHECK_GT(nBuffers, 0);
             mNumInputBuffers = nBuffers;
         }
+
+        // apply encoder color format if specified
+        if (meta->findInt32(kKeyPixelFormat, &mEncoderFormat)) {
+            ALOGV("Using encoder format: %#x", mEncoderFormat);
+        }
+        if (meta->findInt32(kKeyColorSpace, &mEncoderDataSpace)) {
+            ALOGV("Using encoder data space: %#x", mEncoderDataSpace);
+        }
     }
 
     status_t err;
@@ -851,22 +872,15 @@
 }
 
 void CameraSource::dataCallbackTimestamp(int64_t timestampUs,
-        int32_t msgType, const sp<IMemory> &data) {
-    ALOGV("dataCallbackTimestamp: timestamp %" PRId64 " us", timestampUs);
+        int32_t msgType __unused, const sp<IMemory> &data) {
+    ALOGV("dataCallbackTimestamp: timestamp %lld us", (long long)timestampUs);
     Mutex::Autolock autoLock(mLock);
     if (!mStarted || (mNumFramesReceived == 0 && timestampUs < mStartTimeUs)) {
-        ALOGV("Drop frame at %" PRId64 "/%" PRId64 " us", timestampUs, mStartTimeUs);
+        ALOGV("Drop frame at %lld/%lld us", (long long)timestampUs, (long long)mStartTimeUs);
         releaseOneRecordingFrame(data);
         return;
     }
 
-    if (mNumFramesReceived > 0) {
-        CHECK(timestampUs > mLastFrameTimestampUs);
-        if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
-            ++mNumGlitches;
-        }
-    }
-
     // May need to skip frame or modify timestamp. Currently implemented
     // by the subclass CameraSourceTimeLapse.
     if (skipCurrentFrame(timestampUs)) {
@@ -874,6 +888,18 @@
         return;
     }
 
+    if (mNumFramesReceived > 0) {
+        if (timestampUs <= mLastFrameTimestampUs) {
+            ALOGW("Dropping frame with backward timestamp %lld (last %lld)",
+                    (long long)timestampUs, (long long)mLastFrameTimestampUs);
+            releaseOneRecordingFrame(data);
+            return;
+        }
+        if (timestampUs - mLastFrameTimestampUs > mGlitchDurationThresholdUs) {
+            ++mNumGlitches;
+        }
+    }
+
     mLastFrameTimestampUs = timestampUs;
     if (mNumFramesReceived == 0) {
         mFirstFrameTimeUs = timestampUs;
@@ -913,7 +939,7 @@
     mSource->dataCallbackTimestamp(timestamp / 1000, msgType, dataPtr);
 }
 
-void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who) {
+void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
     ALOGI("Camera recording proxy died");
 }
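
The reordered check above drops any frame whose timestamp fails to increase strictly (instead of CHECK-aborting) and counts a glitch whenever the gap between consecutive frames exceeds a threshold. A compact standalone model of that policy (names are illustrative, not the real CameraSource fields):

#include <cstdint>
#include <cstdio>

struct FrameGate {
    int64_t lastTimestampUs = -1;
    int64_t glitchThresholdUs;
    int64_t numGlitches = 0;
    int64_t numDropped = 0;

    explicit FrameGate(int64_t thresholdUs) : glitchThresholdUs(thresholdUs) {}

    // Returns true if the frame should be kept.
    bool onFrame(int64_t timestampUs) {
        if (lastTimestampUs >= 0) {
            if (timestampUs <= lastTimestampUs) {
                ++numDropped;    // backward or duplicate timestamp: drop it
                return false;
            }
            if (timestampUs - lastTimestampUs > glitchThresholdUs) {
                ++numGlitches;   // unusually long gap between frames
            }
        }
        lastTimestampUs = timestampUs;
        return true;
    }
};

int main() {
    FrameGate gate(200000);   // 200 ms glitch threshold
    const int64_t times[] = {0, 33333, 30000, 66666, 400000};
    for (int64_t t : times) {
        printf("%lld -> %s\n", (long long)t, gate.onFrame(t) ? "keep" : "drop");
    }
    printf("glitches=%lld dropped=%lld\n",
            (long long)gate.numGlitches, (long long)gate.numDropped);
    return 0;
}
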
 
diff --git a/media/libstagefright/DataSource.cpp b/media/libstagefright/DataSource.cpp
index f7dcf35..75ef288 100644
--- a/media/libstagefright/DataSource.cpp
+++ b/media/libstagefright/DataSource.cpp
@@ -19,6 +19,7 @@
 #include "include/AMRExtractor.h"
 
 #include "include/AACExtractor.h"
+#include "include/CallbackDataSource.h"
 #include "include/DRMExtractor.h"
 #include "include/FLACExtractor.h"
 #include "include/HTTPBase.h"
@@ -281,6 +282,10 @@
     }
 }
 
+sp<DataSource> DataSource::CreateFromIDataSource(const sp<IDataSource> &source) {
+    return new TinyCacheSource(new CallbackDataSource(source));
+}
+
 String8 DataSource::getMIMEType() const {
     return String8("application/octet-stream");
 }
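
CreateFromIDataSource above stacks TinyCacheSource on top of CallbackDataSource, presumably so that repeated small reads (such as extractor header probing) can be served from a local cache while large reads bypass it. A simplified standalone sketch of such a read-through cache; the 4 KiB size and the type names here are stand-ins, and partial cache hits are treated as misses to keep the sketch short (the real TinyCacheSource handles them):

#include <algorithm>
#include <cstdint>
#include <cstring>
#include <vector>

// A minimal backing "source": returns bytes read, 0 at EOS, <0 on error.
struct ByteSource {
    std::vector<uint8_t> bytes;
    int64_t readAt(int64_t offset, uint8_t *dst, size_t size) const {
        if (offset < 0 || (size_t)offset >= bytes.size()) return 0;
        size_t n = std::min(size, bytes.size() - (size_t)offset);
        memcpy(dst, bytes.data() + offset, n);
        return (int64_t)n;
    }
};

// Read-through cache for small reads: requests of at least the cache size go
// straight to the backing source; smaller ones are served from (and refill)
// a single cached window.
class SmallReadCache {
public:
    explicit SmallReadCache(const ByteSource *source) : mSource(source) {}

    int64_t readAt(int64_t offset, uint8_t *dst, size_t size) {
        if (size >= kCacheSize) {
            return mSource->readAt(offset, dst, size);
        }
        if (offset >= mCachedOffset
                && offset + (int64_t)size <= mCachedOffset + (int64_t)mCachedSize) {
            memcpy(dst, mCache + (offset - mCachedOffset), size);   // cache hit
            return (int64_t)size;
        }
        int64_t n = mSource->readAt(offset, mCache, kCacheSize);    // refill
        if (n <= 0) return n;
        mCachedOffset = offset;
        mCachedSize = (size_t)n;
        size_t give = std::min(size, mCachedSize);
        memcpy(dst, mCache, give);
        return (int64_t)give;
    }

private:
    static constexpr size_t kCacheSize = 4096;   // arbitrary for this sketch
    const ByteSource *mSource;
    uint8_t mCache[kCacheSize];
    int64_t mCachedOffset = -1;
    size_t mCachedSize = 0;
};

int main() {
    ByteSource src{std::vector<uint8_t>(1 << 20, 0xAB)};
    SmallReadCache cached(&src);
    uint8_t buf[16];
    cached.readAt(100, buf, sizeof(buf));   // miss: fills the cache window
    cached.readAt(108, buf, sizeof(buf));   // hit: no call to the backing source
    return 0;
}
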
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index fa7251c..89a91f7 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -651,10 +651,10 @@
     if (doSeek) {
         // We implement the seek callback, so this works without explicit flush
         if (!FLAC__stream_decoder_seek_absolute(mDecoder, sample)) {
-            ALOGE("FLACParser::readBuffer seek to sample %llu failed", sample);
+            ALOGE("FLACParser::readBuffer seek to sample %lld failed", (long long)sample);
             return NULL;
         }
-        ALOGV("FLACParser::readBuffer seek to sample %llu succeeded", sample);
+        ALOGV("FLACParser::readBuffer seek to sample %lld succeeded", (long long)sample);
     } else {
         if (!FLAC__stream_decoder_process_single(mDecoder)) {
             ALOGE("FLACParser::readBuffer process_single failed");
@@ -674,7 +674,10 @@
     if (mWriteHeader.sample_rate != getSampleRate() ||
         mWriteHeader.channels != getChannels() ||
         mWriteHeader.bits_per_sample != getBitsPerSample()) {
-        ALOGE("FLACParser::readBuffer write changed parameters mid-stream");
+        ALOGE("FLACParser::readBuffer write changed parameters mid-stream: %d/%d/%d -> %d/%d/%d",
+                getSampleRate(), getChannels(), getBitsPerSample(),
+                mWriteHeader.sample_rate, mWriteHeader.channels, mWriteHeader.bits_per_sample);
+        return NULL;
     }
     // acquire a media buffer
     CHECK(mGroup != NULL);
diff --git a/media/libstagefright/FileSource.cpp b/media/libstagefright/FileSource.cpp
index a7ca3da..565f156 100644
--- a/media/libstagefright/FileSource.cpp
+++ b/media/libstagefright/FileSource.cpp
@@ -14,6 +14,10 @@
  * limitations under the License.
  */
 
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FileSource"
+#include <utils/Log.h>
+
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/FileSource.h>
 #include <sys/types.h>
@@ -107,7 +111,7 @@
    } else {
         off64_t result = lseek64(mFd, offset + mOffset, SEEK_SET);
         if (result == -1) {
-            ALOGE("seek to %lld failed", offset + mOffset);
+            ALOGE("seek to %lld failed", (long long)(offset + mOffset));
             return UNKNOWN_ERROR;
         }
 
diff --git a/media/libstagefright/FrameRenderTracker.cpp b/media/libstagefright/FrameRenderTracker.cpp
new file mode 100644
index 0000000..917870f
--- /dev/null
+++ b/media/libstagefright/FrameRenderTracker.cpp
@@ -0,0 +1,191 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameRenderTracker"
+
+#include <inttypes.h>
+#include <gui/Surface.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/FrameRenderTracker.h>
+
+namespace android {
+
+FrameRenderTracker::FrameRenderTracker()
+    : mLastRenderTimeNs(-1),
+      mComponentName("unknown component") {
+}
+
+FrameRenderTracker::~FrameRenderTracker() {
+}
+
+void FrameRenderTracker::setComponentName(const AString &componentName) {
+    mComponentName = componentName;
+}
+
+void FrameRenderTracker::clear(nsecs_t lastRenderTimeNs) {
+    mRenderQueue.clear();
+    mLastRenderTimeNs = lastRenderTimeNs;
+}
+
+void FrameRenderTracker::onFrameQueued(
+        int64_t mediaTimeUs, const sp<GraphicBuffer> &graphicBuffer, const sp<Fence> &fence) {
+    mRenderQueue.emplace_back(mediaTimeUs, graphicBuffer, fence);
+}
+
+FrameRenderTracker::Info *FrameRenderTracker::updateInfoForDequeuedBuffer(
+        ANativeWindowBuffer *buf, int fenceFd, int index) {
+    if (index < 0) {
+        return NULL;
+    }
+
+    // see if this is a buffer that was to be rendered
+    std::list<Info>::iterator renderInfo = mRenderQueue.end();
+    for (std::list<Info>::iterator it = mRenderQueue.begin();
+            it != mRenderQueue.end(); ++it) {
+        if (it->mGraphicBuffer->handle == buf->handle) {
+            renderInfo = it;
+            break;
+        }
+    }
+    if (renderInfo == mRenderQueue.end()) {
+        // could have been canceled after fence has signaled
+        return NULL;
+    }
+
+    if (renderInfo->mIndex >= 0) {
+        // buffer has been dequeued before, so there is nothing to do
+        return NULL;
+    }
+
+    // Was this frame dropped? (We could also infer a drop if the fence is invalid or a dup of
+    // the queued fence; however, there is no way to determine that here.)
+    if (fenceFd < 0) {
+        // frame is new or was dropped
+        mRenderQueue.erase(renderInfo);
+        return NULL;
+    }
+
+    // store dequeue fence and buffer index
+    renderInfo->mFence = new Fence(::dup(fenceFd));
+    renderInfo->mIndex = index;
+    return &*renderInfo;
+}
+
+status_t FrameRenderTracker::onFrameRendered(int64_t mediaTimeUs, nsecs_t systemNano) {
+    // ensure monotonic timestamps
+    if (mLastRenderTimeNs >= systemNano) {
+        ALOGW("[%s] Ignoring out of order/stale system nano %lld for media time %lld from codec.",
+                mComponentName.c_str(), (long long)systemNano, (long long)mediaTimeUs);
+        return BAD_VALUE;
+    }
+
+    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
+    if (systemNano > now) {
+        ALOGW("[%s] Ignoring system nano %lld in the future for media time %lld from codec.",
+                mComponentName.c_str(), (long long)systemNano, (long long)mediaTimeUs);
+        return OK;
+    }
+
+    mRenderQueue.emplace_back(mediaTimeUs, systemNano);
+    mLastRenderTimeNs = systemNano;
+    return OK;
+}
+
+std::list<FrameRenderTracker::Info> FrameRenderTracker::checkFencesAndGetRenderedFrames(
+        const FrameRenderTracker::Info *until, bool dropIncomplete) {
+    std::list<Info> done;
+
+    // complete any frames queued prior to this and drop any incomplete ones if requested
+    for (std::list<Info>::iterator it = mRenderQueue.begin();
+            it != mRenderQueue.end(); ) {
+        bool drop = false; // whether to drop each frame
+        if (it->mIndex < 0) {
+            // frame not yet dequeued (or already rendered on a tunneled surface)
+            drop = dropIncomplete;
+        } else if (it->mFence != NULL) {
+            // check if fence signaled
+            nsecs_t signalTime = it->mFence->getSignalTime();
+            if (signalTime < 0) { // invalid fence
+                drop = true;
+            } else if (signalTime == INT64_MAX) { // unsignaled fence
+                drop = dropIncomplete;
+            } else { // signaled
+                // save render time
+                it->mFence.clear();
+                it->mRenderTimeNs = signalTime;
+            }
+        }
+        bool foundFrame = (Info *)&*it == until;
+
+        // Return frames with signaled fences at the start of the queue, as they are
+        // in submit order, and we don't have to wait for any in-between frames.
+        // Also return any dropped frames.
+        if (drop || (it->mFence == NULL && it == mRenderQueue.begin())) {
+            // (unrendered) dropped frames have their mRenderTimeNs still set to -1
+            done.splice(done.end(), mRenderQueue, it++);
+        } else {
+            ++it;
+        }
+        if (foundFrame) {
+            break;
+        }
+    }
+
+    return done;
+}
+
+void FrameRenderTracker::untrackFrame(const FrameRenderTracker::Info *info, ssize_t index) {
+    if (info == NULL && index == SSIZE_MAX) {
+        // nothing to do
+        return;
+    }
+
+    for (std::list<Info>::iterator it = mRenderQueue.begin();
+            it != mRenderQueue.end(); ) {
+        if (&*it == info) {
+            mRenderQueue.erase(it++);
+        } else {
+            if (it->mIndex > index) {
+                --(it->mIndex);
+            }
+            ++it;
+        }
+    }
+}
+
+void FrameRenderTracker::dumpRenderQueue() const {
+    ALOGI("[%s] Render Queue: (last render time: %lldns)",
+            mComponentName.c_str(), (long long)mLastRenderTimeNs);
+    for (std::list<Info>::const_iterator it = mRenderQueue.cbegin();
+            it != mRenderQueue.cend(); ++it) {
+        if (it->mFence == NULL) {
+            ALOGI("  RENDERED: handle: %p, media time: %lldus, index: %zd, render time: %lldns",
+                    it->mGraphicBuffer == NULL ? NULL : it->mGraphicBuffer->handle,
+                    (long long)it->mMediaTimeUs, it->mIndex, (long long)it->mRenderTimeNs);
+        } else if (it->mIndex < 0) {
+            ALOGI("    QUEUED: handle: %p, media time: %lldus, fence: %s",
+                    it->mGraphicBuffer->handle, (long long)it->mMediaTimeUs,
+                    it->mFence->isValid() ? "YES" : "NO");
+        } else {
+            ALOGI("  DEQUEUED: handle: %p, media time: %lldus, index: %zd",
+                    it->mGraphicBuffer->handle, (long long)it->mMediaTimeUs, it->mIndex);
+        }
+    }
+}
+
+}  // namespace android
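
checkFencesAndGetRenderedFrames above walks the render queue in submit order and classifies each frame by its fence: a negative signal time means an invalid fence (treat as dropped), INT64_MAX means not signaled yet, and anything else is the actual render time. A standalone sketch of that classification over a plain list, with a made-up FakeFence in place of the real Fence class:

#include <cstdint>
#include <cstdio>
#include <limits>
#include <list>

// Stand-in for a fence query: <0 means invalid, INT64_MAX means not signaled
// yet, otherwise the CLOCK_MONOTONIC time at which the frame hit the display.
struct FakeFence { int64_t signalTimeNs; };

struct TrackedFrame {
    int64_t mediaTimeUs;
    FakeFence fence;
    int64_t renderTimeNs = -1;   // stays -1 for dropped frames
};

// Returns the frames that are resolved (rendered or dropped), removing them
// from the queue; rendered frames are only taken from the head so completions
// come back in submit order.
std::list<TrackedFrame> drainResolved(std::list<TrackedFrame> &queue, bool dropIncomplete) {
    std::list<TrackedFrame> done;
    for (auto it = queue.begin(); it != queue.end(); ) {
        bool drop = false;
        const int64_t t = it->fence.signalTimeNs;
        if (t < 0) {
            drop = true;                                    // invalid fence
        } else if (t == std::numeric_limits<int64_t>::max()) {
            drop = dropIncomplete;                          // still pending
        } else {
            it->renderTimeNs = t;                           // signaled
        }
        if (drop || (it->renderTimeNs >= 0 && it == queue.begin())) {
            done.splice(done.end(), queue, it++);
        } else {
            ++it;
        }
    }
    return done;
}

int main() {
    std::list<TrackedFrame> q = {
        {1000, {500}},                                      // signaled
        {2000, {std::numeric_limits<int64_t>::max()}},      // pending
        {3000, {-1}},                                       // invalid fence
    };
    for (const TrackedFrame &f : drainResolved(q, false)) {
        printf("media %lld us, render %lld ns\n",
                (long long)f.mediaTimeUs, (long long)f.renderTimeNs);
    }
    return 0;
}
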
diff --git a/media/libstagefright/HTTPBase.cpp b/media/libstagefright/HTTPBase.cpp
index 0c2ff15..068a77f 100644
--- a/media/libstagefright/HTTPBase.cpp
+++ b/media/libstagefright/HTTPBase.cpp
@@ -34,10 +34,10 @@
     : mNumBandwidthHistoryItems(0),
       mTotalTransferTimeUs(0),
       mTotalTransferBytes(0),
+      mMaxBandwidthHistoryItems(100),
       mPrevBandwidthMeasureTimeUs(0),
       mPrevEstimatedBandWidthKbps(0),
-      mBandWidthCollectFreqMs(5000),
-      mMaxBandwidthHistoryItems(100) {
+      mBandWidthCollectFreqMs(5000) {
 }
 
 void HTTPBase::addBandwidthMeasurement(
@@ -75,7 +75,11 @@
 bool HTTPBase::estimateBandwidth(int32_t *bandwidth_bps) {
     Mutex::Autolock autoLock(mLock);
 
-    if (mNumBandwidthHistoryItems < 2) {
+    // Do not do bandwidth estimation if we don't have enough samples, or
+    // if the total bytes downloaded are too small (<64K).
+    // Bandwidth estimates from such samples can often shoot up and cause
+    // unwanted bandwidth adaptation behaviors.
+    if (mNumBandwidthHistoryItems < 2 || mTotalTransferBytes < 65536) {
         return false;
     }
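
The added gate above refuses to estimate bandwidth until at least two samples and 64 KiB of traffic have been seen, because estimates built from a couple of tiny transfers can spike and trigger unwanted bandwidth-adaptation behavior. A standalone sketch of an estimator with those gates (cumulative totals only; the real HTTPBase also keeps a bounded history):

#include <cstdint>
#include <cstdio>

class BandwidthEstimator {
public:
    void addSample(int64_t numBytes, int64_t transferTimeUs) {
        mTotalBytes += numBytes;
        mTotalTimeUs += transferTimeUs;
        ++mNumSamples;
    }

    // Returns false when there isn't enough data for a meaningful estimate:
    // fewer than two samples, or fewer than 64 KiB transferred in total.
    bool estimateBandwidth(int32_t *bandwidthBps) const {
        if (mNumSamples < 2 || mTotalBytes < 65536 || mTotalTimeUs <= 0) {
            return false;
        }
        *bandwidthBps = (int32_t)(mTotalBytes * 8 * 1000000 / mTotalTimeUs);
        return true;
    }

private:
    int64_t mTotalBytes = 0;
    int64_t mTotalTimeUs = 0;
    int32_t mNumSamples = 0;
};

int main() {
    BandwidthEstimator est;
    int32_t bps = 0;
    est.addSample(4096, 10000);           // one small transfer: no estimate yet
    printf("estimate available: %d\n", est.estimateBandwidth(&bps));
    est.addSample(256 * 1024, 500000);    // now past both gates
    if (est.estimateBandwidth(&bps)) {
        printf("estimated bandwidth: %d bps\n", bps);
    }
    return 0;
}
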
 
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 4a63152..2e54e8c 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -82,7 +82,7 @@
             *inout_pos += len;
 
             ALOGV("skipped ID3 tag, new starting offset is %lld (0x%016llx)",
-                 *inout_pos, *inout_pos);
+                    (long long)*inout_pos, (long long)*inout_pos);
         }
 
         if (post_id3_pos != NULL) {
@@ -103,9 +103,9 @@
     uint8_t *tmp = buf;
 
     do {
-        if (pos >= *inout_pos + kMaxBytesChecked) {
+        if (pos >= (off64_t)(*inout_pos + kMaxBytesChecked)) {
             // Don't scan forever.
-            ALOGV("giving up at offset %lld", pos);
+            ALOGV("giving up at offset %lld", (long long)pos);
             break;
         }
 
@@ -155,7 +155,7 @@
             continue;
         }
 
-        ALOGV("found possible 1st frame at %lld (header = 0x%08x)", pos, header);
+        ALOGV("found possible 1st frame at %lld (header = 0x%08x)", (long long)pos, header);
 
         // We found what looks like a valid frame,
         // now find its successors.
@@ -186,7 +186,7 @@
                 break;
             }
 
-            ALOGV("found subsequent frame #%d at %lld", j + 2, test_pos);
+            ALOGV("found subsequent frame #%d at %lld", j + 2, (long long)test_pos);
 
             test_pos += test_frame_size;
         }
@@ -282,6 +282,41 @@
 
     mFirstFramePos = pos;
     mFixedHeader = header;
+    mMeta = new MetaData;
+    sp<XINGSeeker> seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
+
+    if (seeker == NULL) {
+        mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
+    } else {
+        mSeeker = seeker;
+        int encd = seeker->getEncoderDelay();
+        int encp = seeker->getEncoderPadding();
+        if (encd != 0 || encp != 0) {
+            mMeta->setInt32(kKeyEncoderDelay, encd);
+            mMeta->setInt32(kKeyEncoderPadding, encp);
+        }
+    }
+
+    if (mSeeker != NULL) {
+        // While it is safe to send the XING/VBRI frame to the decoder, this will
+        // result in an extra 1152 samples being output. In addition, the bitrate
+        // of the Xing header might not match the rest of the file, which could
+        // lead to problems when seeking. The real first frame to decode is after
+        // the XING/VBRI frame, so skip there.
+        size_t frame_size;
+        int sample_rate;
+        int num_channels;
+        int bitrate;
+        GetMPEGAudioFrameSize(
+                header, &frame_size, &sample_rate, &num_channels, &bitrate);
+        pos += frame_size;
+        if (!Resync(mDataSource, 0, &pos, &post_id3_pos, &header)) {
+            // mInitCheck will remain NO_INIT
+            return;
+        }
+        mFirstFramePos = pos;
+        mFixedHeader = header;
+    }
 
     size_t frame_size;
     int sample_rate;
@@ -292,8 +327,6 @@
 
     unsigned layer = 4 - ((header >> 17) & 3);
 
-    mMeta = new MetaData;
-
     switch (layer) {
         case 1:
             mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I);
@@ -312,27 +345,6 @@
     mMeta->setInt32(kKeyBitRate, bitrate * 1000);
     mMeta->setInt32(kKeyChannelCount, num_channels);
 
-    sp<XINGSeeker> seeker = XINGSeeker::CreateFromSource(mDataSource, mFirstFramePos);
-
-    if (seeker == NULL) {
-        mSeeker = VBRISeeker::CreateFromSource(mDataSource, post_id3_pos);
-    } else {
-        mSeeker = seeker;
-        int encd = seeker->getEncoderDelay();
-        int encp = seeker->getEncoderPadding();
-        if (encd != 0 || encp != 0) {
-            mMeta->setInt32(kKeyEncoderDelay, encd);
-            mMeta->setInt32(kKeyEncoderPadding, encp);
-        }
-    }
-
-    if (mSeeker != NULL) {
-        // While it is safe to send the XING/VBRI frame to the decoder, this will
-        // result in an extra 1152 samples being output. The real first frame to
-        // decode is after the XING/VBRI frame, so skip there.
-        mFirstFramePos += frame_size;
-    }
-
     int64_t durationUs;
 
     if (mSeeker == NULL || !mSeeker->getDuration(&durationUs)) {
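
The reordered initialization above no longer just bumps mFirstFramePos by one frame after finding a XING/VBRI seeker; it computes the info frame's size and then re-runs Resync past it, since (as the comment notes) sending the info frame to the decoder adds an extra 1152 samples and its header bitrate may not match the rest of the file. A rough standalone sketch of that control flow, with callbacks standing in for the real GetMPEGAudioFrameSize and Resync helpers:

#include <cstdint>
#include <cstdio>
#include <functional>

using FrameSizeFn = std::function<bool(uint32_t header, size_t *frameSize)>;
using ResyncFn = std::function<bool(int64_t *pos, uint32_t *header)>;

// After a XING/VBRI info frame has been detected at *firstFramePos, advance
// past it and re-lock onto the next real audio frame.
bool skipInfoFrame(const FrameSizeFn &frameSizeOf, const ResyncFn &resync,
                   int64_t *firstFramePos, uint32_t *fixedHeader) {
    size_t frameSize;
    if (!frameSizeOf(*fixedHeader, &frameSize)) {
        return false;
    }
    int64_t pos = *firstFramePos + (int64_t)frameSize;
    uint32_t header;
    if (!resync(&pos, &header)) {
        return false;               // no further frame: initialization fails
    }
    *firstFramePos = pos;           // decoding starts after the info frame
    *fixedHeader = header;
    return true;
}

int main() {
    // Toy stand-ins: every frame is 417 bytes and resync locks on immediately.
    int64_t firstFramePos = 45;     // e.g. just past an ID3 tag
    uint32_t header = 0xFFFB9000u;  // arbitrary placeholder header value
    bool ok = skipInfoFrame(
            [](uint32_t, size_t *sz) { *sz = 417; return true; },
            [](int64_t *pos, uint32_t *hdr) { (void)pos; *hdr = 0xFFFB9000u; return true; },
            &firstFramePos, &header);
    printf("ok=%d firstFramePos=%lld\n", (int)ok, (long long)firstFramePos);
    return 0;
}
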
diff --git a/media/libstagefright/MPEG2TSWriter.cpp b/media/libstagefright/MPEG2TSWriter.cpp
index 9856f92..ef07aa0 100644
--- a/media/libstagefright/MPEG2TSWriter.cpp
+++ b/media/libstagefright/MPEG2TSWriter.cpp
@@ -135,7 +135,7 @@
 
     mNotify = notify;
 
-    (new AMessage(kWhatStart, id()))->post();
+    (new AMessage(kWhatStart, this))->post();
 }
 
 void MPEG2TSWriter::SourceInfo::stop() {
@@ -361,7 +361,7 @@
 }
 
 void MPEG2TSWriter::SourceInfo::readMore() {
-    (new AMessage(kWhatRead, id()))->post();
+    (new AMessage(kWhatRead, this))->post();
 }
 
 void MPEG2TSWriter::SourceInfo::onMessageReceived(const sp<AMessage> &msg) {
@@ -480,19 +480,6 @@
     init();
 }
 
-MPEG2TSWriter::MPEG2TSWriter(const char *filename)
-    : mFile(fopen(filename, "wb")),
-      mWriteCookie(NULL),
-      mWriteFunc(NULL),
-      mStarted(false),
-      mNumSourcesDone(0),
-      mNumTSPacketsWritten(0),
-      mNumTSPacketsBeforeMeta(0),
-      mPATContinuityCounter(0),
-      mPMTContinuityCounter(0) {
-    init();
-}
-
 MPEG2TSWriter::MPEG2TSWriter(
         void *cookie,
         ssize_t (*write)(void *cookie, const void *data, size_t size))
@@ -565,7 +552,7 @@
 
     for (size_t i = 0; i < mSources.size(); ++i) {
         sp<AMessage> notify =
-            new AMessage(kWhatSourceNotify, mReflector->id());
+            new AMessage(kWhatSourceNotify, mReflector);
 
         notify->setInt32("source-index", i);
 
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
old mode 100644
new mode 100755
index 08e989b..a76334f
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -358,6 +358,8 @@
 
 MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source)
     : mMoofOffset(0),
+      mMoofFound(false),
+      mMdatFound(false),
       mDataSource(source),
       mInitCheck(NO_INIT),
       mHasVideo(false),
@@ -494,7 +496,9 @@
 
     off64_t offset = 0;
     status_t err;
-    while (true) {
+    bool sawMoovOrSidx = false;
+
+    while (!(sawMoovOrSidx && (mMdatFound || mMoofFound))) {
         off64_t orig_offset = offset;
         err = parseChunk(&offset, 0);
 
@@ -503,26 +507,12 @@
         } else if (offset <= orig_offset) {
             // only continue parsing if the offset was advanced,
             // otherwise we might end up in an infinite loop
-            ALOGE("did not advance: 0x%lld->0x%lld", orig_offset, offset);
+            ALOGE("did not advance: %lld->%lld", (long long)orig_offset, (long long)offset);
             err = ERROR_MALFORMED;
             break;
-        } else if (err == OK) {
-            continue;
+        } else if (err == UNKNOWN_ERROR) {
+            sawMoovOrSidx = true;
         }
-
-        uint32_t hdr[2];
-        if (mDataSource->readAt(offset, hdr, 8) < 8) {
-            break;
-        }
-        uint32_t chunk_type = ntohl(hdr[1]);
-        if (chunk_type == FOURCC('m', 'o', 'o', 'f')) {
-            // store the offset of the first segment
-            mMoofOffset = offset;
-        } else if (chunk_type != FOURCC('m', 'd', 'a', 't')) {
-            // keep parsing until we get to the data
-            continue;
-        }
-        break;
     }
 
     if (mInitCheck == OK) {
@@ -539,11 +529,11 @@
     CHECK_NE(err, (status_t)NO_INIT);
 
     // copy pssh data into file metadata
-    int psshsize = 0;
+    uint64_t psshsize = 0;
     for (size_t i = 0; i < mPssh.size(); i++) {
         psshsize += 20 + mPssh[i].datalen;
     }
-    if (psshsize) {
+    if (psshsize > 0 && psshsize <= UINT32_MAX) {
         char *buf = (char*)malloc(psshsize);
         char *ptr = buf;
         for (size_t i = 0; i < mPssh.size(); i++) {
@@ -753,6 +743,17 @@
         && path[3] == FOURCC('i', 'l', 's', 't');
 }
 
+static bool underQTMetaPath(const Vector<uint32_t> &path, int32_t depth) {
+    return path.size() >= 2
+            && path[0] == FOURCC('m', 'o', 'o', 'v')
+            && path[1] == FOURCC('m', 'e', 't', 'a')
+            && (depth == 2
+            || (depth == 3
+                    && (path[2] == FOURCC('h', 'd', 'l', 'r')
+                    ||  path[2] == FOURCC('i', 'l', 's', 't')
+                    ||  path[2] == FOURCC('k', 'e', 'y', 's'))));
+}
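
underQTMetaPath above recognizes boxes under the QuickTime-style moov/meta hierarchy (and, one level deeper, hdlr/ilst/keys), which is what later lets the extractor treat 'keys' and 'ilst' children as key-id/value pairs rather than standard MP4 metadata. A standalone sketch of the same path test over plain FOURCC values, with the macro re-declared locally for the example (the byte packing here is only self-consistent, not necessarily identical to the real definition):

#include <cstdint>
#include <cstdio>
#include <vector>

#define FOURCC(a, b, c, d) \
    ((uint32_t)(a) << 24 | (uint32_t)(b) << 16 | (uint32_t)(c) << 8 | (uint32_t)(d))

// True for moov/meta at depth 2, or the recognized children
// moov/meta/{hdlr,ilst,keys} at depth 3. (This sketch assumes path holds at
// least three entries whenever depth == 3 is passed.)
static bool underQTMetaPath(const std::vector<uint32_t> &path, int32_t depth) {
    return path.size() >= 2
            && path[0] == FOURCC('m', 'o', 'o', 'v')
            && path[1] == FOURCC('m', 'e', 't', 'a')
            && (depth == 2
                || (depth == 3
                        && (path[2] == FOURCC('h', 'd', 'l', 'r')
                        ||  path[2] == FOURCC('i', 'l', 's', 't')
                        ||  path[2] == FOURCC('k', 'e', 'y', 's'))));
}

int main() {
    std::vector<uint32_t> qtKeys = {
        FOURCC('m', 'o', 'o', 'v'), FOURCC('m', 'e', 't', 'a'), FOURCC('k', 'e', 'y', 's')
    };
    std::vector<uint32_t> mp4Ilst = {
        FOURCC('m', 'o', 'o', 'v'), FOURCC('u', 'd', 't', 'a'),
        FOURCC('m', 'e', 't', 'a'), FOURCC('i', 'l', 's', 't')
    };
    printf("moov/meta/keys -> %d\n", (int)underQTMetaPath(qtKeys, 3));       // 1
    printf("moov/udta/meta/ilst -> %d\n", (int)underQTMetaPath(mp4Ilst, 4)); // 0
    return 0;
}
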
+
 // Given a time in seconds since Jan 1 1904, produce a human-readable string.
 static void convertTimeToDate(int64_t time_1904, String8 *s) {
     time_t time_1970 = time_1904 - (((66 * 365 + 17) * 24) * 3600);
@@ -764,7 +765,7 @@
 }
 
 status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
-    ALOGV("entering parseChunk %lld/%d", *offset, depth);
+    ALOGV("entering parseChunk %lld/%d", (long long)*offset, depth);
     uint32_t hdr[2];
     if (mDataSource->readAt(*offset, hdr, 8) < 8) {
         return ERROR_IO;
@@ -808,7 +809,7 @@
 
     char chunk[5];
     MakeFourCCString(chunk_type, chunk);
-    ALOGV("chunk: %s @ %lld, %d", chunk, *offset, depth);
+    ALOGV("chunk: %s @ %lld, %d", chunk, (long long)*offset, depth);
 
     if (kUseHexDump) {
         static const char kWhitespace[] = "                                        ";
@@ -868,6 +869,12 @@
         case FOURCC('s', 'c', 'h', 'i'):
         case FOURCC('e', 'd', 't', 's'):
         {
+            if (chunk_type == FOURCC('m', 'o', 'o', 'f') && !mMoofFound) {
+                // store the offset of the first segment
+                mMoofFound = true;
+                mMoofOffset = *offset;
+            }
+
             if (chunk_type == FOURCC('s', 't', 'b', 'l')) {
                 ALOGV("sampleTable chunk is %" PRIu64 " bytes long.", chunk_size);
 
@@ -882,6 +889,9 @@
                     }
                 }
 
+                if (mLastTrack == NULL)
+                    return ERROR_MALFORMED;
+
                 mLastTrack->sampleTable = new SampleTable(mDataSource);
             }
 
@@ -1036,6 +1046,10 @@
             }
             original_fourcc = ntohl(original_fourcc);
             ALOGV("read original format: %d", original_fourcc);
+
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(original_fourcc));
             uint32_t num_channels = 0;
             uint32_t sample_rate = 0;
@@ -1091,6 +1105,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setInt32(kKeyCryptoMode, defaultAlgorithmId);
             mLastTrack->meta->setInt32(kKeyCryptoDefaultIVSize, defaultIVSize);
             mLastTrack->meta->setData(kKeyCryptoKey, 'tenc', defaultKeyId, 16);
@@ -1125,7 +1142,7 @@
             }
             pssh.datalen = ntohl(psshdatalen);
             ALOGV("pssh data size: %d", pssh.datalen);
-            if (pssh.datalen + 20 > chunk_size) {
+            if (chunk_size < 20 || pssh.datalen > chunk_size - 20) {
                 // pssh data length exceeds size of containing box
                 return ERROR_MALFORMED;
             }
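
Two of the hunks above harden size arithmetic around the pssh boxes: the total is accumulated in a uint64_t and capped at UINT32_MAX before the malloc, and the per-box check is rewritten from "pssh.datalen + 20 > chunk_size" to "chunk_size < 20 || pssh.datalen > chunk_size - 20" so an attacker-controlled length cannot wrap the addition. A small standalone illustration of both patterns:

#include <cstdint>
#include <cstdio>
#include <vector>

// Overflow-prone form: datalen + headerSize can wrap when datalen is near
// UINT32_MAX. Safe form: bound-check first, then subtract on the trusted side.
bool payloadFits(uint32_t datalen, uint64_t chunkSize, uint32_t headerSize) {
    return chunkSize >= headerSize && datalen <= chunkSize - headerSize;
}

// Accumulate untrusted sizes in a wider type and cap before allocating.
bool totalPsshSize(const std::vector<uint32_t> &datalens, uint64_t *total) {
    uint64_t sum = 0;
    for (uint32_t len : datalens) {
        sum += 20 + (uint64_t)len;           // 20-byte fixed header per entry
    }
    *total = sum;
    return sum > 0 && sum <= UINT32_MAX;     // only then is one malloc sane
}

int main() {
    // 0xFFFFFFFF + 20 wraps to 19 in 32-bit arithmetic, so the naive check
    // would accept it; the rewritten check rejects it.
    printf("fits: %d\n", (int)payloadFits(0xFFFFFFFFu, 64, 20));

    uint64_t total = 0;
    std::vector<uint32_t> lens = {16, 0xFFFFFFF0u};
    bool ok = totalPsshSize(lens, &total);
    printf("allocatable: %d (total=%llu)\n", (int)ok, (unsigned long long)total);
    return 0;
}
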
@@ -1206,7 +1223,7 @@
                     duration = ntohl(duration32);
                 }
             }
-            if (duration != 0) {
+            if (duration != 0 && mLastTrack->timescale != 0) {
                 mLastTrack->meta->setInt64(
                         kKeyDuration, (duration * 1000000) / mLastTrack->timescale);
             }
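
This hunk, and several like it below, guard the duration conversion with a timescale != 0 check, since the timescale comes straight from the file and a zero value would otherwise divide by zero. A tiny sketch of the guarded conversion (overflow handling kept minimal):

#include <cstdint>
#include <cstdio>

// Convert a duration in media-timescale ticks to microseconds. Returns false
// (leaving *outUs untouched) for a zero timescale, which a malformed file can
// easily carry.
bool ticksToUs(uint64_t durationTicks, uint32_t timescale, int64_t *outUs) {
    if (timescale == 0) {
        return false;
    }
    *outUs = (int64_t)(durationTicks * 1000000ull / timescale);
    return true;
}

int main() {
    int64_t us = 0;
    bool ok = ticksToUs(90000ull * 30, 90000, &us);   // a 30-second track
    printf("ok=%d us=%lld\n", (int)ok, (long long)us);
    ok = ticksToUs(1234, 0, &us);                     // rejected, not a crash
    printf("ok=%d\n", (int)ok);
    return 0;
}
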
@@ -1270,6 +1287,10 @@
                 // display the timed text.
                 // For encrypted files, there may also be more than one entry.
                 const char *mime;
+
+                if (mLastTrack == NULL)
+                    return ERROR_MALFORMED;
+
                 CHECK(mLastTrack->meta->findCString(kKeyMIMEType, &mime));
                 if (strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP) &&
                         strcasecmp(mime, "application/octet-stream")) {
@@ -1316,6 +1337,9 @@
             uint16_t sample_size = U16_AT(&buffer[18]);
             uint32_t sample_rate = U32_AT(&buffer[24]) >> 16;
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             if (chunk_type != FOURCC('e', 'n', 'c', 'a')) {
                 // if the chunk type is enca, we'll get the type from the sinf/frma box later
                 mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
@@ -1377,6 +1401,9 @@
             // printf("*** coding='%s' width=%d height=%d\n",
             //        chunk, width, height);
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             if (chunk_type != FOURCC('e', 'n', 'c', 'v')) {
                 // if the chunk type is encv, we'll get the type from the sinf/frma box later
                 mLastTrack->meta->setCString(kKeyMIMEType, FourCC2MIME(chunk_type));
@@ -1402,6 +1429,9 @@
         case FOURCC('s', 't', 'c', 'o'):
         case FOURCC('c', 'o', '6', '4'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setChunkOffsetParams(
                         chunk_type, data_offset, chunk_data_size);
@@ -1417,6 +1447,9 @@
 
         case FOURCC('s', 't', 's', 'c'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setSampleToChunkParams(
                         data_offset, chunk_data_size);
@@ -1433,6 +1466,9 @@
         case FOURCC('s', 't', 's', 'z'):
         case FOURCC('s', 't', 'z', '2'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             status_t err =
                 mLastTrack->sampleTable->setSampleSizeParams(
                         chunk_type, data_offset, chunk_data_size);
@@ -1514,6 +1550,9 @@
 
         case FOURCC('s', 't', 't', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1529,6 +1568,9 @@
 
         case FOURCC('c', 't', 't', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1544,6 +1586,9 @@
 
         case FOURCC('s', 't', 's', 's'):
         {
+            if ((mLastTrack == NULL) || (mLastTrack->sampleTable == NULL))
+                return ERROR_MALFORMED;
+
             *offset += chunk_size;
 
             status_t err =
@@ -1557,13 +1602,13 @@
             break;
         }
 
-        // ©xyz
+        // \xA9xyz
         case FOURCC(0xA9, 'x', 'y', 'z'):
         {
             *offset += chunk_size;
 
-            // Best case the total data length inside "©xyz" box
-            // would be 8, for instance "©xyz" + "\x00\x04\x15\xc7" + "0+0/",
+            // Best case the total data length inside "\xA9xyz" box
+            // would be 8, for instance "\xA9xyz" + "\x00\x04\x15\xc7" + "0+0/",
             // where "\x00\x04" is the text string length with value = 4,
             // "\0x15\xc7" is the language code = en, and "0+0" is a
             // location (string) value with longitude = 0 and latitude = 0.
@@ -1616,6 +1661,9 @@
                 return ERROR_MALFORMED;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyESDS, kTypeESDS, &buffer[4], chunk_data_size - 4);
 
@@ -1633,7 +1681,18 @@
                     return err;
                 }
             }
+            if (mPath.size() >= 2
+                    && mPath[mPath.size() - 2] == FOURCC('m', 'p', '4', 'v')) {
+                // Check if the video is MPEG2
+                ESDS esds(&buffer[4], chunk_data_size - 4);
 
+                uint8_t objectTypeIndication;
+                if (esds.getObjectTypeIndication(&objectTypeIndication) == OK) {
+                    if (objectTypeIndication >= 0x60 && objectTypeIndication <= 0x65) {
+                        mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_MPEG2);
+                    }
+                }
+            }
             break;
         }
 
@@ -1648,6 +1707,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyAVCC, kTypeAVCC, buffer->data(), chunk_data_size);
 
@@ -1662,6 +1724,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(
                     kKeyHVCC, kTypeHVCC, buffer->data(), chunk_data_size);
 
@@ -1686,7 +1751,7 @@
             char buffer[23];
             if (chunk_data_size != 7 &&
                 chunk_data_size != 23) {
-                ALOGE("Incorrect D263 box size %lld", chunk_data_size);
+                ALOGE("Incorrect D263 box size %lld", (long long)chunk_data_size);
                 return ERROR_MALFORMED;
             }
 
@@ -1695,6 +1760,9 @@
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             mLastTrack->meta->setData(kKeyD263, kTypeD263, buffer, chunk_data_size);
 
             break;
@@ -1702,31 +1770,35 @@
 
         case FOURCC('m', 'e', 't', 'a'):
         {
-            uint8_t buffer[4];
-            if (chunk_data_size < (off64_t)sizeof(buffer)) {
-                *offset += chunk_size;
-                return ERROR_MALFORMED;
-            }
-
-            if (mDataSource->readAt(
-                        data_offset, buffer, 4) < 4) {
-                *offset += chunk_size;
-                return ERROR_IO;
-            }
-
-            if (U32_AT(buffer) != 0) {
-                // Should be version 0, flags 0.
-
-                // If it's not, let's assume this is one of those
-                // apparently malformed chunks that don't have flags
-                // and completely different semantics than what's
-                // in the MPEG4 specs and skip it.
-                *offset += chunk_size;
-                return OK;
-            }
-
             off64_t stop_offset = *offset + chunk_size;
-            *offset = data_offset + sizeof(buffer);
+            *offset = data_offset;
+            bool isParsingMetaKeys = underQTMetaPath(mPath, 2);
+            if (!isParsingMetaKeys) {
+                uint8_t buffer[4];
+                if (chunk_data_size < (off64_t)sizeof(buffer)) {
+                    *offset += chunk_size;
+                    return ERROR_MALFORMED;
+                }
+
+                if (mDataSource->readAt(
+                            data_offset, buffer, 4) < 4) {
+                    *offset += chunk_size;
+                    return ERROR_IO;
+                }
+
+                if (U32_AT(buffer) != 0) {
+                    // Should be version 0, flags 0.
+
+                    // If it's not, let's assume this is one of those
+                    // apparently malformed chunks that don't have flags
+                    // and completely different semantics than what's
+                    // in the MPEG4 specs and skip it.
+                    *offset += chunk_size;
+                    return OK;
+                }
+                *offset +=  sizeof(buffer);
+            }
+
             while (*offset < stop_offset) {
                 status_t err = parseChunk(offset, depth + 1);
                 if (err != OK) {
@@ -1792,7 +1864,7 @@
                 }
                 duration = d32;
             }
-            if (duration != 0) {
+            if (duration != 0 && mHeaderTimescale != 0) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
@@ -1841,7 +1913,7 @@
                 return ERROR_MALFORMED;
             }
 
-            if (duration != 0) {
+            if (duration != 0 && mHeaderTimescale != 0) {
                 mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
             }
 
@@ -1851,6 +1923,9 @@
         case FOURCC('m', 'd', 'a', 't'):
         {
             ALOGV("mdat chunk, drm: %d", mIsDrm);
+
+            mMdatFound = true;
+
             if (!mIsDrm) {
                 *offset += chunk_size;
                 break;
@@ -1867,12 +1942,19 @@
         {
             *offset += chunk_size;
 
+            if (underQTMetaPath(mPath, 3)) {
+                break;
+            }
+
             uint32_t buffer;
             if (mDataSource->readAt(
                         data_offset + 8, &buffer, 4) < 4) {
                 return ERROR_IO;
             }
 
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             uint32_t type = ntohl(buffer);
             // For the 3GPP file format, the handler-type within the 'hdlr' box
             // shall be 'text'. We also want to support 'sbtl' handler type
@@ -1884,6 +1966,16 @@
             break;
         }
 
+        case FOURCC('k', 'e', 'y', 's'):
+        {
+            *offset += chunk_size;
+
+            if (underQTMetaPath(mPath, 3)) {
+                parseQTMetaKey(data_offset, chunk_data_size);
+            }
+            break;
+        }
+
         case FOURCC('t', 'r', 'e', 'x'):
         {
             *offset += chunk_size;
@@ -1905,6 +1997,9 @@
 
         case FOURCC('t', 'x', '3', 'g'):
         {
+            if (mLastTrack == NULL)
+                return ERROR_MALFORMED;
+
             uint32_t type;
             const void *data;
             size_t size = 0;
@@ -1950,8 +2045,8 @@
             *offset += chunk_size;
 
             if (mFileMetaData != NULL) {
-                ALOGV("chunk_data_size = %lld and data_offset = %lld",
-                        chunk_data_size, data_offset);
+                ALOGV("chunk_data_size = %" PRId64 " and data_offset = %" PRId64,
+                      chunk_data_size, data_offset);
 
                 if (chunk_data_size < 0 || static_cast<uint64_t>(chunk_data_size) >= SIZE_MAX - 1) {
                     return ERROR_MALFORMED;
@@ -2023,6 +2118,12 @@
 
         default:
         {
+            // check if we're parsing 'ilst' for meta keys
+            // if so, treat type as a number (key-id).
+            if (underQTMetaPath(mPath, 3)) {
+                parseQTMetaVal(chunk_type, data_offset, chunk_data_size);
+            }
+
             *offset += chunk_size;
             break;
         }
@@ -2058,6 +2159,8 @@
         return ERROR_MALFORMED;
     }
     ALOGV("sidx refid/timescale: %d/%d", referenceId, timeScale);
+    if (timeScale == 0)
+        return ERROR_MALFORMED;
 
     uint64_t earliestPresentationTime;
     uint64_t firstOffset;
@@ -2141,6 +2244,9 @@
 
     uint64_t sidxDuration = total_duration * 1000000 / timeScale;
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     int64_t metaDuration;
     if (!mLastTrack->meta->findInt64(kKeyDuration, &metaDuration) || metaDuration == 0) {
         mLastTrack->meta->setInt64(kKeyDuration, sidxDuration);
@@ -2148,7 +2254,108 @@
     return OK;
 }
 
+status_t MPEG4Extractor::parseQTMetaKey(off64_t offset, size_t size) {
+    if (size < 8) {
+        return ERROR_MALFORMED;
+    }
 
+    uint32_t count;
+    if (!mDataSource->getUInt32(offset + 4, &count)) {
+        return ERROR_MALFORMED;
+    }
+
+    if (mMetaKeyMap.size() > 0) {
+        ALOGW("'keys' atom seen again, discarding existing entries");
+        mMetaKeyMap.clear();
+    }
+
+    off64_t keyOffset = offset + 8;
+    off64_t stopOffset = offset + size;
+    for (size_t i = 1; i <= count; i++) {
+        if (keyOffset + 8 > stopOffset) {
+            return ERROR_MALFORMED;
+        }
+
+        uint32_t keySize;
+        if (!mDataSource->getUInt32(keyOffset, &keySize)
+                || keySize < 8
+                || keyOffset + keySize > stopOffset) {
+            return ERROR_MALFORMED;
+        }
+
+        uint32_t type;
+        if (!mDataSource->getUInt32(keyOffset + 4, &type)
+                || type != FOURCC('m', 'd', 't', 'a')) {
+            return ERROR_MALFORMED;
+        }
+
+        keySize -= 8;
+        keyOffset += 8;
+
+        sp<ABuffer> keyData = new ABuffer(keySize);
+        if (keyData->data() == NULL) {
+            return ERROR_MALFORMED;
+        }
+        if (mDataSource->readAt(
+                keyOffset, keyData->data(), keySize) < (ssize_t) keySize) {
+            return ERROR_MALFORMED;
+        }
+
+        AString key((const char *)keyData->data(), keySize);
+        mMetaKeyMap.add(i, key);
+
+        keyOffset += keySize;
+    }
+    return OK;
+}
+
+status_t MPEG4Extractor::parseQTMetaVal(
+        int32_t keyId, off64_t offset, size_t size) {
+    ssize_t index = mMetaKeyMap.indexOfKey(keyId);
+    if (index < 0) {
+        // corresponding key is not present, ignore
+        return ERROR_MALFORMED;
+    }
+
+    if (size <= 16) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t dataSize;
+    if (!mDataSource->getUInt32(offset, &dataSize)
+            || dataSize > size || dataSize <= 16) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t atomFourCC;
+    if (!mDataSource->getUInt32(offset + 4, &atomFourCC)
+            || atomFourCC != FOURCC('d', 'a', 't', 'a')) {
+        return ERROR_MALFORMED;
+    }
+    uint32_t dataType;
+    if (!mDataSource->getUInt32(offset + 8, &dataType)
+            || ((dataType & 0xff000000) != 0)) {
+        // not well-known type
+        return ERROR_MALFORMED;
+    }
+
+    dataSize -= 16;
+    offset += 16;
+
+    if (dataType == 23 && dataSize >= 4) {
+        // BE Float32
+        uint32_t val;
+        if (!mDataSource->getUInt32(offset, &val)) {
+            return ERROR_MALFORMED;
+        }
+        if (!strcasecmp(mMetaKeyMap[index].c_str(), "com.android.capture.fps")) {
+            mFileMetaData->setFloat(kKeyCaptureFramerate, *(float *)&val);
+        }
+    } else {
+        // add more keys if needed
+        ALOGV("ignoring key: type %d, size %d", dataType, dataSize);
+    }
+
+    return OK;
+}
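
parseQTMetaVal above currently handles one well-known value type: type 23, a big-endian 32-bit float, which carries the "com.android.capture.fps" key. A standalone sketch of decoding such a value from raw bytes (byte order written out by hand instead of going through the DataSource helpers):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Decode a big-endian IEEE-754 float from four raw bytes (QuickTime 'data'
// atom type 23). Assembling the uint32 manually makes the code independent of
// host byte order; memcpy reinterprets the bits without aliasing trouble.
float decodeBEFloat32(const uint8_t b[4]) {
    uint32_t bits = (uint32_t)b[0] << 24 | (uint32_t)b[1] << 16 |
                    (uint32_t)b[2] << 8  | (uint32_t)b[3];
    float out;
    memcpy(&out, &bits, sizeof(out));
    return out;
}

int main() {
    // 0x41F00000 is 30.0f; these are its bytes as they appear on the wire.
    const uint8_t bytes[4] = {0x41, 0xF0, 0x00, 0x00};
    printf("capture fps: %.1f\n", decodeBEFloat32(bytes));
    return 0;
}
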
 
 status_t MPEG4Extractor::parseTrackHeader(
         off64_t data_offset, off64_t data_size) {
@@ -2191,6 +2398,9 @@
         return ERROR_UNSUPPORTED;
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     mLastTrack->meta->setInt32(kKeyTrackID, id);
 
     size_t matrixOffset = dynSize + 16;
@@ -2262,7 +2472,7 @@
     uint32_t metadataKey = 0;
     char chunk[5];
     MakeFourCCString(mPath[4], chunk);
-    ALOGV("meta: %s @ %lld", chunk, offset);
+    ALOGV("meta: %s @ %lld", chunk, (long long)offset);
     switch ((int32_t)mPath[4]) {
         case FOURCC(0xa9, 'a', 'l', 'b'):
         {
@@ -2373,6 +2583,9 @@
                     int32_t delay, padding;
                     if (sscanf(mLastCommentData,
                                " %*x %x %x %*x", &delay, &padding) == 2) {
+                        if (mLastTrack == NULL)
+                            return ERROR_MALFORMED;
+
                         mLastTrack->meta->setInt32(kKeyEncoderDelay, delay);
                         mLastTrack->meta->setInt32(kKeyEncoderPadding, padding);
                     }
@@ -2661,6 +2874,7 @@
             return ERROR_MALFORMED;
         }
     } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4)
+            || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)
             || !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
         if (!track->meta->findData(kKeyESDS, &type, &data, &size)
                 || type != kTypeESDS) {
@@ -2745,6 +2959,9 @@
 
     if (objectTypeIndication == 0xe1) {
         // This isn't MPEG4 audio at all, it's QCELP 14k...
+        if (mLastTrack == NULL)
+            return ERROR_MALFORMED;
+
         mLastTrack->meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_QCELP);
         return OK;
     }
@@ -2765,7 +2982,7 @@
     }
 
     if (kUseHexDump) {
-        printf("ESD of size %d\n", csd_size);
+        printf("ESD of size %zu\n", csd_size);
         hexdump(csd, csd_size);
     }
 
@@ -2793,6 +3010,9 @@
         objectType = 32 + br.getBits(6);
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     //keep AOT type
     mLastTrack->meta->setInt32(kKeyAACAOT, objectType);
 
@@ -2801,12 +3021,11 @@
     int32_t sampleRate = 0;
     int32_t numChannels = 0;
     if (freqIndex == 15) {
-        if (csd_size < 5) {
-            return ERROR_MALFORMED;
-        }
+        if (br.numBitsLeft() < 28) return ERROR_MALFORMED;
         sampleRate = br.getBits(24);
         numChannels = br.getBits(4);
     } else {
+        if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
         numChannels = br.getBits(4);
 
         if (freqIndex == 13 || freqIndex == 14) {
@@ -2817,12 +3036,14 @@
     }
 
     if (objectType == AOT_SBR || objectType == AOT_PS) {//SBR specific config per 14496-3 table 1.13
+        if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
         uint32_t extFreqIndex = br.getBits(4);
         int32_t extSampleRate __unused;
         if (extFreqIndex == 15) {
             if (csd_size < 8) {
                 return ERROR_MALFORMED;
             }
+            if (br.numBitsLeft() < 24) return ERROR_MALFORMED;
             extSampleRate = br.getBits(24);
         } else {
             if (extFreqIndex == 13 || extFreqIndex == 14) {
@@ -2859,20 +3080,24 @@
 
     {
         if (objectType == AOT_SBR || objectType == AOT_PS) {
+            if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
             objectType = br.getBits(5);
 
             if (objectType == AOT_ESCAPE) {
+                if (br.numBitsLeft() < 6) return ERROR_MALFORMED;
                 objectType = 32 + br.getBits(6);
             }
         }
         if (objectType == AOT_AAC_LC || objectType == AOT_ER_AAC_LC ||
                 objectType == AOT_ER_AAC_LD || objectType == AOT_ER_AAC_SCAL ||
                 objectType == AOT_ER_BSAC) {
+            if (br.numBitsLeft() < 2) return ERROR_MALFORMED;
             const int32_t frameLengthFlag __unused = br.getBits(1);
 
             const int32_t dependsOnCoreCoder = br.getBits(1);
 
             if (dependsOnCoreCoder ) {
+                if (br.numBitsLeft() < 14) return ERROR_MALFORMED;
                 const int32_t coreCoderDelay __unused = br.getBits(14);
             }
 
@@ -2892,7 +3117,7 @@
                     extensionFlag = 1;
                     break;
                 default:
-                    TRESPASS();
+                    return ERROR_MALFORMED;
                     break;
                 }
                 ALOGW("csd missing extension flag; assuming %d for object type %u.",
@@ -2902,6 +3127,9 @@
             if (numChannels == 0) {
                 int32_t channelsEffectiveNum = 0;
                 int32_t channelsNum = 0;
+                if (br.numBitsLeft() < 32) {
+                    return ERROR_MALFORMED;
+                }
                 const int32_t ElementInstanceTag __unused = br.getBits(4);
                 const int32_t Profile __unused = br.getBits(2);
                 const int32_t SamplingFrequencyIndex __unused = br.getBits(4);
@@ -2913,35 +3141,44 @@
                 const int32_t NumValidCcElements __unused = br.getBits(4);
 
                 const int32_t MonoMixdownPresent = br.getBits(1);
+
                 if (MonoMixdownPresent != 0) {
+                    if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
                     const int32_t MonoMixdownElementNumber __unused = br.getBits(4);
                 }
 
+                if (br.numBitsLeft() < 1) return ERROR_MALFORMED;
                 const int32_t StereoMixdownPresent = br.getBits(1);
                 if (StereoMixdownPresent != 0) {
+                    if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
                     const int32_t StereoMixdownElementNumber __unused = br.getBits(4);
                 }
 
+                if (br.numBitsLeft() < 1) return ERROR_MALFORMED;
                 const int32_t MatrixMixdownIndexPresent = br.getBits(1);
                 if (MatrixMixdownIndexPresent != 0) {
+                    if (br.numBitsLeft() < 3) return ERROR_MALFORMED;
                     const int32_t MatrixMixdownIndex __unused = br.getBits(2);
                     const int32_t PseudoSurroundEnable __unused = br.getBits(1);
                 }
 
                 int i;
                 for (i=0; i < NumFrontChannelElements; i++) {
+                    if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
                     const int32_t FrontElementIsCpe = br.getBits(1);
                     const int32_t FrontElementTagSelect __unused = br.getBits(4);
                     channelsNum += FrontElementIsCpe ? 2 : 1;
                 }
 
                 for (i=0; i < NumSideChannelElements; i++) {
+                    if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
                     const int32_t SideElementIsCpe = br.getBits(1);
                     const int32_t SideElementTagSelect __unused = br.getBits(4);
                     channelsNum += SideElementIsCpe ? 2 : 1;
                 }
 
                 for (i=0; i < NumBackChannelElements; i++) {
+                    if (br.numBitsLeft() < 5) return ERROR_MALFORMED;
                     const int32_t BackElementIsCpe = br.getBits(1);
                     const int32_t BackElementTagSelect __unused = br.getBits(4);
                     channelsNum += BackElementIsCpe ? 2 : 1;
@@ -2949,6 +3186,7 @@
                 channelsEffectiveNum = channelsNum;
 
                 for (i=0; i < NumLfeChannelElements; i++) {
+                    if (br.numBitsLeft() < 4) return ERROR_MALFORMED;
                     const int32_t LfeElementTagSelect __unused = br.getBits(4);
                     channelsNum += 1;
                 }
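
Editorial sketch: the hunks above insert a numBitsLeft() guard in front of every getBits() in the AAC AudioSpecificConfig walk so that a truncated csd buffer yields ERROR_MALFORMED instead of reading past the end. The same pattern can be expressed once with a reader whose accessor refuses to run dry; this is an illustrative standalone class, not stagefright's ABitReader.

    #include <cstddef>
    #include <cstdint>

    // Minimal big-endian bit reader with an explicit "enough bits left?" check.
    class CheckedBitReader {
    public:
        CheckedBitReader(const uint8_t *data, size_t size)
            : mData(data), mSize(size), mBitPos(0) {}

        size_t numBitsLeft() const { return mSize * 8 - mBitPos; }

        // Returns false instead of reading past the end; the caller can map
        // that to ERROR_MALFORMED the way the extractor does above.
        bool getBits(size_t n, uint32_t *out) {
            if (n > 32 || numBitsLeft() < n) return false;
            uint32_t v = 0;
            for (size_t i = 0; i < n; ++i, ++mBitPos) {
                v = (v << 1) | ((mData[mBitPos / 8] >> (7 - (mBitPos % 8))) & 1);
            }
            *out = v;
            return true;
        }

    private:
        const uint8_t *mData;
        size_t mSize;
        size_t mBitPos;
    };
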
@@ -2963,6 +3201,9 @@
         return ERROR_UNSUPPORTED;
     }
 
+    if (mLastTrack == NULL)
+        return ERROR_MALFORMED;
+
     int32_t prevSampleRate;
     CHECK(mLastTrack->meta->findInt32(kKeySampleRate, &prevSampleRate));
 
@@ -3174,7 +3415,7 @@
 
     char chunk[5];
     MakeFourCCString(chunk_type, chunk);
-    ALOGV("MPEG4Source chunk %s @ %llx", chunk, *offset);
+    ALOGV("MPEG4Source chunk %s @ %#llx", chunk, (long long)*offset);
 
     off64_t chunk_data_size = *offset + chunk_size - data_offset;
 
@@ -3654,7 +3895,7 @@
         sampleCtsOffset = 0;
     }
 
-    if (size < (off64_t)sampleCount * bytesPerSample) {
+    if (size < (off64_t)(sampleCount * bytesPerSample)) {
         return -EINVAL;
     }
 
@@ -4446,7 +4687,7 @@
 
         char chunkstring[5];
         MakeFourCCString(chunkType, chunkstring);
-        ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld", chunkstring, chunkSize, offset);
+        ALOGV("saw chunk type %s, size %" PRIu64 " @ %lld", chunkstring, chunkSize, (long long)offset);
         switch (chunkType) {
             case FOURCC('f', 't', 'y', 'p'):
             {
@@ -4509,7 +4750,7 @@
         *meta = new AMessage;
         (*meta)->setInt64("meta-data-size", moovAtomEndOffset);
 
-        ALOGV("found metadata size: %lld", moovAtomEndOffset);
+        ALOGV("found metadata size: %lld", (long long)moovAtomEndOffset);
     }
 
     return true;
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 844a019..47f114a 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -29,6 +29,7 @@
 #include <utils/Log.h>
 
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MPEG4Writer.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MetaData.h>
@@ -62,6 +63,16 @@
 static const uint8_t kNalUnitTypePicParamSet = 0x08;
 static const int64_t kInitialDelayTimeUs     = 700000LL;
 
+static const char kMetaKey_Version[]    = "com.android.version";
+#ifdef SHOW_MODEL_BUILD
+static const char kMetaKey_Model[]      = "com.android.model";
+static const char kMetaKey_Build[]      = "com.android.build";
+#endif
+static const char kMetaKey_CaptureFps[] = "com.android.capture.fps";
+
+/* uncomment to include model and build in meta */
+//#define SHOW_MODEL_BUILD 1
+
 class MPEG4Writer::Track {
 public:
     Track(MPEG4Writer *owner, const sp<MediaSource> &source, size_t trackId);
@@ -83,6 +94,7 @@
     void addChunkOffset(off64_t offset);
     int32_t getTrackId() const { return mTrackId; }
     status_t dump(int fd, const Vector<String16>& args) const;
+    static const char *getFourCCForMime(const char *mime);
 
 private:
     enum {
@@ -101,6 +113,8 @@
             mCurrTableEntriesElement(NULL) {
             CHECK_GT(mElementCapacity, 0);
             CHECK_GT(mEntryCapacity, 0);
+            // Ensure no integer overflow on allocation in add().
+            CHECK_LT(mEntryCapacity, UINT32_MAX / mElementCapacity);
         }
 
         // Free the allocated memory.
@@ -345,31 +359,6 @@
     Track &operator=(const Track &);
 };
 
-MPEG4Writer::MPEG4Writer(const char *filename)
-    : mFd(-1),
-      mInitCheck(NO_INIT),
-      mIsRealTimeRecording(true),
-      mUse4ByteNalLength(true),
-      mUse32BitOffset(true),
-      mIsFileSizeLimitExplicitlyRequested(false),
-      mPaused(false),
-      mStarted(false),
-      mWriterThreadStarted(false),
-      mOffset(0),
-      mMdatOffset(0),
-      mEstimatedMoovBoxSize(0),
-      mInterleaveDurationUs(1000000),
-      mLatitudex10000(0),
-      mLongitudex10000(0),
-      mAreGeoTagsAvailable(false),
-      mStartTimeOffsetMs(-1) {
-
-    mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
-    if (mFd >= 0) {
-        mInitCheck = OK;
-    }
-}
-
 MPEG4Writer::MPEG4Writer(int fd)
     : mFd(dup(fd)),
       mInitCheck(mFd < 0? NO_INIT: OK),
@@ -382,12 +371,29 @@
       mWriterThreadStarted(false),
       mOffset(0),
       mMdatOffset(0),
+      mMoovBoxBuffer(NULL),
+      mMoovBoxBufferOffset(0),
+      mWriteMoovBoxToMemory(false),
+      mFreeBoxOffset(0),
+      mStreamableFile(false),
       mEstimatedMoovBoxSize(0),
+      mMoovExtraSize(0),
       mInterleaveDurationUs(1000000),
+      mTimeScale(-1),
+      mStartTimestampUs(-1ll),
       mLatitudex10000(0),
       mLongitudex10000(0),
       mAreGeoTagsAvailable(false),
-      mStartTimeOffsetMs(-1) {
+      mStartTimeOffsetMs(-1),
+      mMetaKeys(new AMessage()) {
+    addDeviceMeta();
+
+    // Verify mFd is seekable
+    off64_t off = lseek64(mFd, 0, SEEK_SET);
+    if (off < 0) {
+        ALOGE("cannot seek mFd: %s (%d)", strerror(errno), errno);
+        release();
+    }
 }
 
 MPEG4Writer::~MPEG4Writer() {
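
Editorial sketch: with the filename-based constructor removed above, callers open the output file themselves and hand MPEG4Writer a file descriptor, which the constructor dup()s and verifies is seekable. A minimal caller-side sketch follows; the path is illustrative and the open flags are the ones the removed constructor used.

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    #include <media/stagefright/MPEG4Writer.h>

    // Caller keeps ownership of this descriptor; the writer holds its own dup().
    int fd = open("/sdcard/clip.mp4",
                  O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
    if (fd < 0) {
        // handle the open() failure
    }
    sp<MPEG4Writer> writer = new MPEG4Writer(fd);
    close(fd);  // safe: the constructor already dup()'ed it
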
@@ -437,6 +443,33 @@
     return OK;
 }
 
+// static
+const char *MPEG4Writer::Track::getFourCCForMime(const char *mime) {
+    if (mime == NULL) {
+        return NULL;
+    }
+    if (!strncasecmp(mime, "audio/", 6)) {
+        if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mime)) {
+            return "samr";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mime)) {
+            return "sawb";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime)) {
+            return "mp4a";
+        }
+    } else if (!strncasecmp(mime, "video/", 6)) {
+        if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
+            return "mp4v";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
+            return "s263";
+        } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
+            return "avc1";
+        }
+    } else {
+        ALOGE("Track (%s) other than video or audio is not supported", mime);
+    }
+    return NULL;
+}
+
 status_t MPEG4Writer::addSource(const sp<MediaSource> &source) {
     Mutex::Autolock l(mLock);
     if (mStarted) {
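
Editorial sketch: the new Track::getFourCCForMime() centralizes the mime-to-sample-entry mapping that was previously duplicated in addSource() and in the audio and video sample-entry writers (see the later hunks in this file). A table-driven standalone equivalent is shown below; the literal strings correspond to the MEDIA_MIMETYPE_* constants, and the function name is illustrative.

    #include <cstdio>
    #include <strings.h>

    static const char *fourccForMime(const char *mime) {
        static const struct { const char *mime; const char *fourcc; } kMap[] = {
            { "audio/3gpp",      "samr" },  // AMR-NB
            { "audio/amr-wb",    "sawb" },  // AMR-WB
            { "audio/mp4a-latm", "mp4a" },  // AAC
            { "video/mp4v-es",   "mp4v" },  // MPEG-4
            { "video/3gpp",      "s263" },  // H.263
            { "video/avc",       "avc1" },  // H.264/AVC
        };
        if (mime == nullptr) return nullptr;
        for (const auto &e : kMap) {
            if (!strcasecmp(e.mime, mime)) return e.fourcc;
        }
        return nullptr;
    }

    int main() {
        printf("%s\n", fourccForMime("video/avc"));  // prints "avc1"
    }
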
@@ -452,14 +485,11 @@
 
     CHECK(source.get() != NULL);
 
-    // A track of type other than video or audio is not supported.
     const char *mime;
     source->getFormat()->findCString(kKeyMIMEType, &mime);
     bool isAudio = !strncasecmp(mime, "audio/", 6);
-    bool isVideo = !strncasecmp(mime, "video/", 6);
-    if (!isAudio && !isVideo) {
-        ALOGE("Track (%s) other than video or audio is not supported",
-            mime);
+    if (Track::getFourCCForMime(mime) == NULL) {
+        ALOGE("Unsupported mime '%s'", mime);
         return ERROR_UNSUPPORTED;
     }
 
@@ -507,6 +537,34 @@
     return OK;
 }
 
+void MPEG4Writer::addDeviceMeta() {
+    // add device info and estimate space in 'moov'
+    char val[PROPERTY_VALUE_MAX];
+    size_t n;
+    // meta size is estimated by adding up the following:
+    // - meta header structures, which occur only once (total 66 bytes)
+    // - size for each key, which consists of a fixed header (32 bytes),
+    //   plus key length and data length.
+    mMoovExtraSize += 66;
+    if (property_get("ro.build.version.release", val, NULL)
+            && (n = strlen(val)) > 0) {
+        mMetaKeys->setString(kMetaKey_Version, val, n + 1);
+        mMoovExtraSize += sizeof(kMetaKey_Version) + n + 32;
+    }
+#ifdef SHOW_MODEL_BUILD
+    if (property_get("ro.product.model", val, NULL)
+            && (n = strlen(val)) > 0) {
+        mMetaKeys->setString(kMetaKey_Model, val, n + 1);
+        mMoovExtraSize += sizeof(kMetaKey_Model) + n + 32;
+    }
+    if (property_get("ro.build.display.id", val, NULL)
+            && (n = strlen(val)) > 0) {
+        mMetaKeys->setString(kMetaKey_Build, val, n + 1);
+        mMoovExtraSize += sizeof(kMetaKey_Build) + n + 32;
+    }
+#endif
+}
+
 int64_t MPEG4Writer::estimateMoovBoxSize(int32_t bitRate) {
     // This implementation is highly experimental/heuristic.
     //
@@ -560,6 +618,9 @@
         size = MAX_MOOV_BOX_SIZE;
     }
 
+    // Account for the extra stuff (Geo, meta keys, etc.)
+    size += mMoovExtraSize;
+
     ALOGI("limits: %" PRId64 "/%" PRId64 " bytes/us, bit rate: %d bps and the"
          " estimated moov size %" PRId64 " bytes",
          mMaxFileSizeLimitBytes, mMaxFileDurationLimitUs, bitRate, size);
@@ -973,6 +1034,7 @@
     if (mAreGeoTagsAvailable) {
         writeUdtaBox();
     }
+    writeMetaBox();
     int32_t id = 1;
     for (List<Track *>::iterator it = mTracks.begin();
         it != mTracks.end(); ++it, ++id) {
@@ -1142,6 +1204,14 @@
     return bytes;
 }
 
+void MPEG4Writer::beginBox(uint32_t id) {
+    mBoxes.push_back(mWriteMoovBoxToMemory?
+            mMoovBoxBufferOffset: mOffset);
+
+    writeInt32(0);
+    writeInt32(id);
+}
+
 void MPEG4Writer::beginBox(const char *fourcc) {
     CHECK_EQ(strlen(fourcc), 4);
 
@@ -1266,6 +1336,18 @@
     mLatitudex10000 = latitudex10000;
     mLongitudex10000 = longitudex10000;
     mAreGeoTagsAvailable = true;
+    mMoovExtraSize += 30;
+    return OK;
+}
+
+status_t MPEG4Writer::setCaptureRate(float captureFps) {
+    if (captureFps <= 0.0f) {
+        return BAD_VALUE;
+    }
+
+    mMetaKeys->setFloat(kMetaKey_CaptureFps, captureFps);
+    mMoovExtraSize += sizeof(kMetaKey_CaptureFps) + 4 + 32;
+
     return OK;
 }
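
Editorial note: setCaptureRate() lets recorders of high-frame-rate ("slow motion") clips tag the file with the real sensor rate; the value is stored under com.android.capture.fps and read back by MPEG4Extractor::parseQTMetaVal() into kKeyCaptureFramerate. It is typically called before start(), so the moov size estimate below accounts for the extra entry. A minimal caller-side fragment, with an illustrative 240 fps value:

    // writer is an sp<MPEG4Writer>; 240 fps is illustrative.
    status_t err = writer->setCaptureRate(240.0f);
    if (err != OK) {
        ALOGW("setCaptureRate failed: %d", err);  // BAD_VALUE for fps <= 0
    }
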
 
@@ -2689,17 +2771,13 @@
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime)) {
-        mOwner->beginBox("mp4v");
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_H263, mime)) {
-        mOwner->beginBox("s263");
-    } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
-        mOwner->beginBox("avc1");
-    } else {
+    const char *fourcc = getFourCCForMime(mime);
+    if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
         CHECK(!"should not be here, unknown mime type.");
     }
 
+    mOwner->beginBox(fourcc);        // video format
     mOwner->writeInt32(0);           // reserved
     mOwner->writeInt16(0);           // reserved
     mOwner->writeInt16(1);           // data ref index
@@ -2743,14 +2821,8 @@
     const char *mime;
     bool success = mMeta->findCString(kKeyMIMEType, &mime);
     CHECK(success);
-    const char *fourcc = NULL;
-    if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_NB, mime)) {
-        fourcc = "samr";
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AMR_WB, mime)) {
-        fourcc = "sawb";
-    } else if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_AAC, mime)) {
-        fourcc = "mp4a";
-    } else {
+    const char *fourcc = getFourCCForMime(mime);
+    if (fourcc == NULL) {
         ALOGE("Unknown mime type '%s'.", mime);
         CHECK(!"should not be here, unknown mime type.");
     }
@@ -3099,6 +3171,103 @@
     endBox();
 }
 
+void MPEG4Writer::writeHdlr() {
+    beginBox("hdlr");
+    writeInt32(0); // Version, Flags
+    writeInt32(0); // Predefined
+    writeFourcc("mdta");
+    writeInt32(0); // Reserved[0]
+    writeInt32(0); // Reserved[1]
+    writeInt32(0); // Reserved[2]
+    writeInt8(0);  // Name (empty)
+    endBox();
+}
+
+void MPEG4Writer::writeKeys() {
+    size_t count = mMetaKeys->countEntries();
+
+    beginBox("keys");
+    writeInt32(0);     // Version, Flags
+    writeInt32(count); // Entry_count
+    for (size_t i = 0; i < count; i++) {
+        AMessage::Type type;
+        const char *key = mMetaKeys->getEntryNameAt(i, &type);
+        size_t n = strlen(key);
+        writeInt32(n + 8);
+        writeFourcc("mdta");
+        write(key, n); // write without the \0
+    }
+    endBox();
+}
+
+void MPEG4Writer::writeIlst() {
+    size_t count = mMetaKeys->countEntries();
+
+    beginBox("ilst");
+    for (size_t i = 0; i < count; i++) {
+        beginBox(i + 1); // key id (1-based)
+        beginBox("data");
+        AMessage::Type type;
+        const char *key = mMetaKeys->getEntryNameAt(i, &type);
+        switch (type) {
+            case AMessage::kTypeString:
+            {
+                AString val;
+                CHECK(mMetaKeys->findString(key, &val));
+                writeInt32(1); // type = UTF8
+                writeInt32(0); // default country/language
+                write(val.c_str(), strlen(val.c_str())); // write without \0
+                break;
+            }
+
+            case AMessage::kTypeFloat:
+            {
+                float val;
+                CHECK(mMetaKeys->findFloat(key, &val));
+                writeInt32(23); // type = float32
+                writeInt32(0);  // default country/language
+                writeInt32(*reinterpret_cast<int32_t *>(&val));
+                break;
+            }
+
+            case AMessage::kTypeInt32:
+            {
+                int32_t val;
+                CHECK(mMetaKeys->findInt32(key, &val));
+                writeInt32(67); // type = signed int32
+                writeInt32(0);  // default country/language
+                writeInt32(val);
+                break;
+            }
+
+            default:
+            {
+                ALOGW("Unsupported key type, writing 0 instead");
+                writeInt32(77); // type = unsigned int32
+                writeInt32(0);  // default country/language
+                writeInt32(0);
+                break;
+            }
+        }
+        endBox(); // data
+        endBox(); // key id
+    }
+    endBox(); // ilst
+}
+
+void MPEG4Writer::writeMetaBox() {
+    size_t count = mMetaKeys->countEntries();
+    if (count == 0) {
+        return;
+    }
+
+    beginBox("meta");
+    writeHdlr();
+    writeKeys();
+    writeIlst();
+    endBox();
+}
+
 /*
  * Geodata is stored according to ISO-6709 standard.
  */
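
Editorial sketch: writeMetaBox() above nests the boxes as meta { hdlr, keys, ilst { [1-based key index] { data } } }, with every integer written big-endian. The standalone function below builds the byte layout of one float 'data' entry the way writeIlst() emits it; it skips the writer's beginBox/endBox plumbing and fills the size up front because it is known.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Bytes of one float entry: [size]['data'][type=23][locale=0][BE float bits]
    static std::vector<uint8_t> makeFloatDataBox(float value) {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        std::vector<uint8_t> box;
        auto putU32 = [&box](uint32_t v) {
            box.push_back(uint8_t(v >> 24)); box.push_back(uint8_t(v >> 16));
            box.push_back(uint8_t(v >> 8));  box.push_back(uint8_t(v));
        };
        putU32(20);                            // 8-byte header + 3 * 4 payload bytes
        box.push_back('d'); box.push_back('a');
        box.push_back('t'); box.push_back('a');
        putU32(23);                            // well-known type: 32-bit float
        putU32(0);                             // default country/language
        putU32(bits);                          // the value itself
        return box;
    }
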
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
new file mode 100644
index 0000000..2641e4e
--- /dev/null
+++ b/media/libstagefright/MediaClock.cpp
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaClock"
+#include <utils/Log.h>
+
+#include <media/stagefright/MediaClock.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+
+namespace android {
+
+MediaClock::MediaClock()
+    : mAnchorTimeMediaUs(-1),
+      mAnchorTimeRealUs(-1),
+      mMaxTimeMediaUs(INT64_MAX),
+      mStartingTimeMediaUs(-1),
+      mPlaybackRate(1.0) {
+}
+
+MediaClock::~MediaClock() {
+}
+
+void MediaClock::setStartingTimeMedia(int64_t startingTimeMediaUs) {
+    Mutex::Autolock autoLock(mLock);
+    mStartingTimeMediaUs = startingTimeMediaUs;
+}
+
+void MediaClock::clearAnchor() {
+    Mutex::Autolock autoLock(mLock);
+    mAnchorTimeMediaUs = -1;
+    mAnchorTimeRealUs = -1;
+}
+
+void MediaClock::updateAnchor(
+        int64_t anchorTimeMediaUs,
+        int64_t anchorTimeRealUs,
+        int64_t maxTimeMediaUs) {
+    if (anchorTimeMediaUs < 0 || anchorTimeRealUs < 0) {
+        ALOGW("reject anchor time since it is negative.");
+        return;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t nowMediaUs =
+        anchorTimeMediaUs + (nowUs - anchorTimeRealUs) * (double)mPlaybackRate;
+    if (nowMediaUs < 0) {
+        ALOGW("reject anchor time since it leads to negative media time.");
+        return;
+    }
+    mAnchorTimeRealUs = nowUs;
+    mAnchorTimeMediaUs = nowMediaUs;
+    mMaxTimeMediaUs = maxTimeMediaUs;
+}
+
+void MediaClock::updateMaxTimeMedia(int64_t maxTimeMediaUs) {
+    Mutex::Autolock autoLock(mLock);
+    mMaxTimeMediaUs = maxTimeMediaUs;
+}
+
+void MediaClock::setPlaybackRate(float rate) {
+    CHECK_GE(rate, 0.0);
+    Mutex::Autolock autoLock(mLock);
+    if (mAnchorTimeRealUs == -1) {
+        mPlaybackRate = rate;
+        return;
+    }
+
+    int64_t nowUs = ALooper::GetNowUs();
+    mAnchorTimeMediaUs += (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
+    if (mAnchorTimeMediaUs < 0) {
+        ALOGW("setPlaybackRate: anchor time should not be negative, set to 0.");
+        mAnchorTimeMediaUs = 0;
+    }
+    mAnchorTimeRealUs = nowUs;
+    mPlaybackRate = rate;
+}
+
+float MediaClock::getPlaybackRate() const {
+    Mutex::Autolock autoLock(mLock);
+    return mPlaybackRate;
+}
+
+status_t MediaClock::getMediaTime(
+        int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) const {
+    if (outMediaUs == NULL) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    return getMediaTime_l(realUs, outMediaUs, allowPastMaxTime);
+}
+
+status_t MediaClock::getMediaTime_l(
+        int64_t realUs, int64_t *outMediaUs, bool allowPastMaxTime) const {
+    if (mAnchorTimeRealUs == -1) {
+        return NO_INIT;
+    }
+
+    int64_t mediaUs = mAnchorTimeMediaUs
+            + (realUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
+    if (mediaUs > mMaxTimeMediaUs && !allowPastMaxTime) {
+        mediaUs = mMaxTimeMediaUs;
+    }
+    if (mediaUs < mStartingTimeMediaUs) {
+        mediaUs = mStartingTimeMediaUs;
+    }
+    if (mediaUs < 0) {
+        mediaUs = 0;
+    }
+    *outMediaUs = mediaUs;
+    return OK;
+}
+
+status_t MediaClock::getRealTimeFor(
+        int64_t targetMediaUs, int64_t *outRealUs) const {
+    if (outRealUs == NULL) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    if (mPlaybackRate == 0.0) {
+        return NO_INIT;
+    }
+
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t nowMediaUs;
+    status_t status =
+            getMediaTime_l(nowUs, &nowMediaUs, true /* allowPastMaxTime */);
+    if (status != OK) {
+        return status;
+    }
+    *outRealUs = (targetMediaUs - nowMediaUs) / (double)mPlaybackRate + nowUs;
+    return OK;
+}
+
+}  // namespace android
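
Editorial sketch: MediaClock maps real time to media time as mediaUs = anchorMediaUs + (nowUs - anchorRealUs) * rate, clamped between startingTimeMediaUs and maxTimeMediaUs. A hedged usage sketch for a renderer-style client in the Android tree; the timestamps are illustrative.

    sp<MediaClock> clock = new MediaClock();
    clock->setStartingTimeMedia(0);

    // The buffer with media time 0 reaches the sink "now"; allow the clock to
    // advance 100 ms past that anchor before it gets clamped.
    int64_t nowUs = ALooper::GetNowUs();
    clock->updateAnchor(0 /* mediaUs */, nowUs, 100000 /* maxTimeMediaUs */);
    clock->setPlaybackRate(1.0f);

    // What media time should be presented right now?
    int64_t mediaUs;
    if (clock->getMediaTime(ALooper::GetNowUs(), &mediaUs,
                            false /* allowPastMaxTime */) == OK) {
        // pick the frame whose timestamp is closest to mediaUs
    }

    // When, in real time, should the frame stamped at 40 ms be rendered?
    int64_t renderAtUs;
    clock->getRealTimeFor(40000, &renderAtUs);
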
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 6ca123a..cd59709 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -21,10 +21,15 @@
 #include "include/avc_utils.h"
 #include "include/SoftwareRenderer.h"
 
-#include <binder/IBatteryStats.h>
+#include <binder/IMemory.h>
+#include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
+#include <binder/MemoryDealer.h>
+#include <gui/BufferQueue.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
+#include <media/IOMX.h>
+#include <media/IResourceManagerService.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -36,85 +41,131 @@
 #include <media/stagefright/MediaCodecList.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaFilter.h>
 #include <media/stagefright/MetaData.h>
-#include <media/stagefright/NativeWindowWrapper.h>
+#include <media/stagefright/OMXClient.h>
+#include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/PersistentSurface.h>
+#include <media/stagefright/SurfaceUtils.h>
+#include <mediautils/BatteryNotifier.h>
 #include <private/android_filesystem_config.h>
 #include <utils/Log.h>
 #include <utils/Singleton.h>
 
 namespace android {
 
-struct MediaCodec::BatteryNotifier : public Singleton<BatteryNotifier> {
-    BatteryNotifier();
+static int64_t getId(sp<IResourceManagerClient> client) {
+    return (int64_t) client.get();
+}
 
-    void noteStartVideo();
-    void noteStopVideo();
-    void noteStartAudio();
-    void noteStopAudio();
+static bool isResourceError(status_t err) {
+    return (err == NO_MEMORY);
+}
+
+static const int kMaxRetry = 2;
+
+struct ResourceManagerClient : public BnResourceManagerClient {
+    ResourceManagerClient(MediaCodec* codec) : mMediaCodec(codec) {}
+
+    virtual bool reclaimResource() {
+        sp<MediaCodec> codec = mMediaCodec.promote();
+        if (codec == NULL) {
+            // codec is already gone.
+            return true;
+        }
+        status_t err = codec->reclaim();
+        if (err != OK) {
+            ALOGW("ResourceManagerClient failed to release codec with err %d", err);
+        }
+        return (err == OK);
+    }
+
+    virtual String8 getName() {
+        String8 ret;
+        sp<MediaCodec> codec = mMediaCodec.promote();
+        if (codec == NULL) {
+            // codec is already gone.
+            return ret;
+        }
+
+        AString name;
+        if (codec->getName(&name) == OK) {
+            ret.setTo(name.c_str());
+        }
+        return ret;
+    }
+
+protected:
+    virtual ~ResourceManagerClient() {}
 
 private:
-    int32_t mVideoRefCount;
-    int32_t mAudioRefCount;
-    sp<IBatteryStats> mBatteryStatService;
+    wp<MediaCodec> mMediaCodec;
+
+    DISALLOW_EVIL_CONSTRUCTORS(ResourceManagerClient);
 };
 
-ANDROID_SINGLETON_STATIC_INSTANCE(MediaCodec::BatteryNotifier)
-
-MediaCodec::BatteryNotifier::BatteryNotifier() :
-    mVideoRefCount(0),
-    mAudioRefCount(0) {
-    // get battery service
-    const sp<IServiceManager> sm(defaultServiceManager());
-    if (sm != NULL) {
-        const String16 name("batterystats");
-        mBatteryStatService = interface_cast<IBatteryStats>(sm->getService(name));
-        if (mBatteryStatService == NULL) {
-            ALOGE("batterystats service unavailable!");
-        }
+MediaCodec::ResourceManagerServiceProxy::ResourceManagerServiceProxy(pid_t pid)
+        : mPid(pid) {
+    if (mPid == MediaCodec::kNoPid) {
+        mPid = IPCThreadState::self()->getCallingPid();
     }
 }
 
-void MediaCodec::BatteryNotifier::noteStartVideo() {
-    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStartVideo(AID_MEDIA);
+MediaCodec::ResourceManagerServiceProxy::~ResourceManagerServiceProxy() {
+    if (mService != NULL) {
+        IInterface::asBinder(mService)->unlinkToDeath(this);
     }
-    mVideoRefCount++;
 }
 
-void MediaCodec::BatteryNotifier::noteStopVideo() {
-    if (mVideoRefCount == 0) {
-        ALOGW("BatteryNotifier::noteStop(): video refcount is broken!");
+void MediaCodec::ResourceManagerServiceProxy::init() {
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.resource_manager"));
+    mService = interface_cast<IResourceManagerService>(binder);
+    if (mService == NULL) {
+        ALOGE("Failed to get ResourceManagerService");
         return;
     }
-
-    mVideoRefCount--;
-    if (mVideoRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStopVideo(AID_MEDIA);
-    }
+    IInterface::asBinder(mService)->linkToDeath(this);
 }
 
-void MediaCodec::BatteryNotifier::noteStartAudio() {
-    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStartAudio(AID_MEDIA);
-    }
-    mAudioRefCount++;
+void MediaCodec::ResourceManagerServiceProxy::binderDied(const wp<IBinder>& /*who*/) {
+    ALOGW("ResourceManagerService died.");
+    Mutex::Autolock _l(mLock);
+    mService.clear();
 }
 
-void MediaCodec::BatteryNotifier::noteStopAudio() {
-    if (mAudioRefCount == 0) {
-        ALOGW("BatteryNotifier::noteStop(): audio refcount is broken!");
+void MediaCodec::ResourceManagerServiceProxy::addResource(
+        int64_t clientId,
+        const sp<IResourceManagerClient> client,
+        const Vector<MediaResource> &resources) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
         return;
     }
-
-    mAudioRefCount--;
-    if (mAudioRefCount == 0 && mBatteryStatService != NULL) {
-        mBatteryStatService->noteStopAudio(AID_MEDIA);
-    }
+    mService->addResource(mPid, clientId, client, resources);
 }
+
+void MediaCodec::ResourceManagerServiceProxy::removeResource(int64_t clientId) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
+        return;
+    }
+    mService->removeResource(mPid, clientId);
+}
+
+bool MediaCodec::ResourceManagerServiceProxy::reclaimResource(
+        const Vector<MediaResource> &resources) {
+    Mutex::Autolock _l(mLock);
+    if (mService == NULL) {
+        return false;
+    }
+    return mService->reclaimResource(mPid, resources);
+}
+
 // static
 sp<MediaCodec> MediaCodec::CreateByType(
-        const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err) {
-    sp<MediaCodec> codec = new MediaCodec(looper);
+        const sp<ALooper> &looper, const char *mime, bool encoder, status_t *err, pid_t pid) {
+    sp<MediaCodec> codec = new MediaCodec(looper, pid);
 
     const status_t ret = codec->init(mime, true /* nameIsType */, encoder);
     if (err != NULL) {
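
Editorial sketch: the ResourceManagerClient/ResourceManagerServiceProxy pair above is how MediaCodec joins media resource arbitration: each codec registers a client keyed by an id, and the service calls reclaimResource() on lower-priority clients when a new request cannot be satisfied. Reduced to its essentials, without binder, priorities, or per-resource accounting (illustrative types, not IResourceManagerService):

    #include <cstdint>
    #include <functional>
    #include <map>

    class TinyResourceManager {
    public:
        void addResource(int64_t clientId, std::function<bool()> reclaim) {
            mClients[clientId] = std::move(reclaim);
        }
        void removeResource(int64_t clientId) { mClients.erase(clientId); }

        // Ask registered clients to give something back; stop at the first one
        // that reports success, the way the codec's retry loops expect.
        bool reclaimResource() {
            for (auto &entry : mClients) {
                if (entry.second()) return true;
            }
            return false;
        }

    private:
        std::map<int64_t, std::function<bool()>> mClients;
    };
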
@@ -125,8 +176,8 @@
 
 // static
 sp<MediaCodec> MediaCodec::CreateByComponentName(
-        const sp<ALooper> &looper, const char *name, status_t *err) {
-    sp<MediaCodec> codec = new MediaCodec(looper);
+        const sp<ALooper> &looper, const char *name, status_t *err, pid_t pid) {
+    sp<MediaCodec> codec = new MediaCodec(looper, pid);
 
     const status_t ret = codec->init(name, false /* nameIsType */, false /* encoder */);
     if (err != NULL) {
@@ -135,25 +186,82 @@
     return ret == OK ? codec : NULL; // NULL deallocates codec.
 }
 
-MediaCodec::MediaCodec(const sp<ALooper> &looper)
+// static
+sp<PersistentSurface> MediaCodec::CreatePersistentInputSurface() {
+    OMXClient client;
+    CHECK_EQ(client.connect(), (status_t)OK);
+    sp<IOMX> omx = client.interface();
+
+    const sp<IMediaCodecList> mediaCodecList = MediaCodecList::getInstance();
+    if (mediaCodecList == NULL) {
+        ALOGE("Failed to obtain MediaCodecList!");
+        return NULL; // if called from Java should raise IOException
+    }
+
+    AString tmp;
+    sp<AMessage> globalSettings = mediaCodecList->getGlobalSettings();
+    if (globalSettings == NULL || !globalSettings->findString(
+            kMaxEncoderInputBuffers, &tmp)) {
+        ALOGE("Failed to get encoder input buffer count!");
+        return NULL;
+    }
+
+    int32_t bufferCount = strtol(tmp.c_str(), NULL, 10);
+    if (bufferCount <= 0
+            || bufferCount > BufferQueue::MAX_MAX_ACQUIRED_BUFFERS) {
+        ALOGE("Encoder input buffer count is invalid!");
+        return NULL;
+    }
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    sp<IGraphicBufferConsumer> bufferConsumer;
+
+    status_t err = omx->createPersistentInputSurface(
+            &bufferProducer, &bufferConsumer);
+
+    if (err != OK) {
+        ALOGE("Failed to create persistent input surface.");
+        return NULL;
+    }
+
+    err = bufferConsumer->setMaxAcquiredBufferCount(bufferCount);
+
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+                bufferCount, err);
+        return NULL;
+    }
+
+    return new PersistentSurface(bufferProducer, bufferConsumer);
+}
+
+MediaCodec::MediaCodec(const sp<ALooper> &looper, pid_t pid)
     : mState(UNINITIALIZED),
+      mReleasedByResourceManager(false),
       mLooper(looper),
       mCodec(NULL),
       mReplyID(0),
       mFlags(0),
       mStickyError(OK),
       mSoftRenderer(NULL),
+      mResourceManagerClient(new ResourceManagerClient(this)),
+      mResourceManagerService(new ResourceManagerServiceProxy(pid)),
       mBatteryStatNotified(false),
       mIsVideo(false),
+      mVideoWidth(0),
+      mVideoHeight(0),
+      mRotationDegrees(0),
       mDequeueInputTimeoutGeneration(0),
       mDequeueInputReplyID(0),
       mDequeueOutputTimeoutGeneration(0),
       mDequeueOutputReplyID(0),
-      mHaveInputSurface(false) {
+      mHaveInputSurface(false),
+      mHavePendingInputBuffers(false) {
 }
 
 MediaCodec::~MediaCodec() {
     CHECK_EQ(mState, UNINITIALIZED);
+    mResourceManagerService->removeResource(getId(mResourceManagerClient));
 }
 
 // static
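
Editorial sketch: CreatePersistentInputSurface() builds a buffer producer/consumer pair once so the same input surface can outlive individual codec instances; a codec later attaches to it with setInputSurface(), added further down in this file. A caller-side sketch with error handling abbreviated:

    // Reuse one input surface across encoder instances (Android tree).
    sp<PersistentSurface> surface = MediaCodec::CreatePersistentInputSurface();
    if (surface == NULL) {
        // creation failed: no codec list, bad buffer count, or OMX error
    }
    // ... hand the surface's producer side to the camera or GL source ...
    status_t err = encoder->setInputSurface(surface);  // encoder: sp<MediaCodec>
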
@@ -172,14 +280,21 @@
     return err;
 }
 
-// static
-void MediaCodec::PostReplyWithError(int32_t replyID, int32_t err) {
+void MediaCodec::PostReplyWithError(const sp<AReplyToken> &replyID, int32_t err) {
+    int32_t finalErr = err;
+    if (mReleasedByResourceManager) {
+        // override the err code if MediaCodec has been released by ResourceManager.
+        finalErr = DEAD_OBJECT;
+    }
+
     sp<AMessage> response = new AMessage;
-    response->setInt32("err", err);
+    response->setInt32("err", finalErr);
     response->postReply(replyID);
 }
 
 status_t MediaCodec::init(const AString &name, bool nameIsType, bool encoder) {
+    mResourceManagerService->init();
+
     // save init parameters for reset
     mInitName = name;
     mInitNameIsType = nameIsType;
@@ -189,16 +304,30 @@
     // quickly, violating the OpenMAX specs, until that is remedied
     // we need to invest in an extra looper to free the main event
     // queue.
-    mCodec = new ACodec;
-    bool needDedicatedLooper = false;
+
+    if (nameIsType || !strncasecmp(name.c_str(), "omx.", 4)) {
+        mCodec = new ACodec;
+    } else if (!nameIsType
+            && !strncasecmp(name.c_str(), "android.filter.", 15)) {
+        mCodec = new MediaFilter;
+    } else {
+        return NAME_NOT_FOUND;
+    }
+
+    bool secureCodec = false;
     if (nameIsType && !strncasecmp(name.c_str(), "video/", 6)) {
-        needDedicatedLooper = true;
+        mIsVideo = true;
     } else {
         AString tmp = name;
         if (tmp.endsWith(".secure")) {
+            secureCodec = true;
             tmp.erase(tmp.size() - 7, 7);
         }
         const sp<IMediaCodecList> mcl = MediaCodecList::getInstance();
+        if (mcl == NULL) {
+            mCodec = NULL;  // remove the codec.
+            return NO_INIT; // if called from Java should raise IOException
+        }
         ssize_t codecIdx = mcl->findCodecByName(tmp.c_str());
         if (codecIdx >= 0) {
             const sp<MediaCodecInfo> info = mcl->getCodecInfo(codecIdx);
@@ -206,14 +335,15 @@
             info->getSupportedMimes(&mimes);
             for (size_t i = 0; i < mimes.size(); i++) {
                 if (mimes[i].startsWith("video/")) {
-                    needDedicatedLooper = true;
+                    mIsVideo = true;
                     break;
                 }
             }
         }
     }
 
-    if (needDedicatedLooper) {
+    if (mIsVideo) {
+        // video codec needs dedicated looper
         if (mCodecLooper == NULL) {
             mCodecLooper = new ALooper;
             mCodecLooper->setName("CodecLooper");
@@ -227,9 +357,9 @@
 
     mLooper->registerHandler(this);
 
-    mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, id()));
+    mCodec->setNotificationMessage(new AMessage(kWhatCodecNotify, this));
 
-    sp<AMessage> msg = new AMessage(kWhatInit, id());
+    sp<AMessage> msg = new AMessage(kWhatInit, this);
     msg->setString("name", name);
     msg->setInt32("nameIsType", nameIsType);
 
@@ -237,58 +367,124 @@
         msg->setInt32("encoder", encoder);
     }
 
-    sp<AMessage> response;
-    return PostAndAwaitResponse(msg, &response);
+    status_t err;
+    Vector<MediaResource> resources;
+    const char *type = secureCodec ? kResourceSecureCodec : kResourceNonSecureCodec;
+    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+    for (int i = 0; i <= kMaxRetry; ++i) {
+        if (i > 0) {
+            // Don't try to reclaim resource for the first time.
+            if (!mResourceManagerService->reclaimResource(resources)) {
+                break;
+            }
+        }
+
+        sp<AMessage> response;
+        err = PostAndAwaitResponse(msg, &response);
+        if (!isResourceError(err)) {
+            break;
+        }
+    }
+    return err;
 }
 
 status_t MediaCodec::setCallback(const sp<AMessage> &callback) {
-    sp<AMessage> msg = new AMessage(kWhatSetCallback, id());
+    sp<AMessage> msg = new AMessage(kWhatSetCallback, this);
     msg->setMessage("callback", callback);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
+status_t MediaCodec::setOnFrameRenderedNotification(const sp<AMessage> &notify) {
+    sp<AMessage> msg = new AMessage(kWhatSetNotification, this);
+    msg->setMessage("on-frame-rendered", notify);
+    return msg->post();
+}
+
 status_t MediaCodec::configure(
         const sp<AMessage> &format,
-        const sp<Surface> &nativeWindow,
+        const sp<Surface> &surface,
         const sp<ICrypto> &crypto,
         uint32_t flags) {
-    sp<AMessage> msg = new AMessage(kWhatConfigure, id());
+    sp<AMessage> msg = new AMessage(kWhatConfigure, this);
+
+    if (mIsVideo) {
+        format->findInt32("width", &mVideoWidth);
+        format->findInt32("height", &mVideoHeight);
+        if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
+            mRotationDegrees = 0;
+        }
+    }
 
     msg->setMessage("format", format);
     msg->setInt32("flags", flags);
-
-    if (nativeWindow != NULL) {
-        msg->setObject(
-                "native-window",
-                new NativeWindowWrapper(nativeWindow));
-    }
+    msg->setObject("surface", surface);
 
     if (crypto != NULL) {
         msg->setPointer("crypto", crypto.get());
     }
 
-    sp<AMessage> response;
-    status_t err = PostAndAwaitResponse(msg, &response);
+    // save msg for reset
+    mConfigureMsg = msg;
 
-    if (err != OK && err != INVALID_OPERATION) {
-        // MediaCodec now set state to UNINITIALIZED upon any fatal error.
-        // To maintain backward-compatibility, do a reset() to put codec
-        // back into INITIALIZED state.
-        // But don't reset if the err is INVALID_OPERATION, which means
-        // the configure failure is due to wrong state.
+    status_t err;
+    Vector<MediaResource> resources;
+    const char *type = (mFlags & kFlagIsSecure) ?
+            kResourceSecureCodec : kResourceNonSecureCodec;
+    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+    // Don't know the buffer size at this point, but it's fine to use 1 because
+    // the reclaimResource call doesn't consider the requester's buffer size for now.
+    resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+    for (int i = 0; i <= kMaxRetry; ++i) {
+        if (i > 0) {
+            // Don't try to reclaim resource for the first time.
+            if (!mResourceManagerService->reclaimResource(resources)) {
+                break;
+            }
+        }
 
-        ALOGE("configure failed with err 0x%08x, resetting...", err);
-        reset();
+        sp<AMessage> response;
+        err = PostAndAwaitResponse(msg, &response);
+        if (err != OK && err != INVALID_OPERATION) {
+            // MediaCodec now set state to UNINITIALIZED upon any fatal error.
+            // To maintain backward-compatibility, do a reset() to put codec
+            // back into INITIALIZED state.
+            // But don't reset if the err is INVALID_OPERATION, which means
+            // the configure failure is due to wrong state.
+
+            ALOGE("configure failed with err 0x%08x, resetting...", err);
+            reset();
+        }
+        if (!isResourceError(err)) {
+            break;
+        }
     }
-
     return err;
 }
 
+status_t MediaCodec::setInputSurface(
+        const sp<PersistentSurface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatSetInputSurface, this);
+    msg->setObject("input-surface", surface.get());
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
+status_t MediaCodec::setSurface(const sp<Surface> &surface) {
+    sp<AMessage> msg = new AMessage(kWhatSetSurface, this);
+    msg->setObject("surface", surface);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 status_t MediaCodec::createInputSurface(
         sp<IGraphicBufferProducer>* bufferProducer) {
-    sp<AMessage> msg = new AMessage(kWhatCreateInputSurface, id());
+    sp<AMessage> msg = new AMessage(kWhatCreateInputSurface, this);
 
     sp<AMessage> response;
     status_t err = PostAndAwaitResponse(msg, &response);
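
Editorial sketch: init() and configure() above (and start(), further down) now share one shape: attempt the operation, and on a resource failure (NO_MEMORY) ask the resource manager to reclaim and try again, up to kMaxRetry extra attempts, never reclaiming on the first pass. Stripped of the codec specifics, the control flow looks like the illustrative helper below (not a MediaCodec API).

    #include <functional>

    template <typename Status>
    Status attemptWithReclaim(int maxRetry,
                              const std::function<Status()> &attempt,
                              const std::function<bool()> &reclaim,
                              const std::function<bool(Status)> &isResourceError) {
        Status err{};
        for (int i = 0; i <= maxRetry; ++i) {
            if (i > 0 && !reclaim()) {
                break;              // nothing could be reclaimed; give up
            }
            err = attempt();
            if (!isResourceError(err)) {
                break;              // success, or a non-resource failure
            }
        }
        return err;
    }
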
@@ -306,22 +502,85 @@
     return err;
 }
 
+uint64_t MediaCodec::getGraphicBufferSize() {
+    if (!mIsVideo) {
+        return 0;
+    }
+
+    uint64_t size = 0;
+    size_t portNum = sizeof(mPortBuffers) / sizeof((mPortBuffers)[0]);
+    for (size_t i = 0; i < portNum; ++i) {
+        // TODO: this is just an estimation, we should get the real buffer size from ACodec.
+        size += mPortBuffers[i].size() * mVideoWidth * mVideoHeight * 3 / 2;
+    }
+    return size;
+}
+
+void MediaCodec::addResource(const String8 &type, const String8 &subtype, uint64_t value) {
+    Vector<MediaResource> resources;
+    resources.push_back(MediaResource(type, subtype, value));
+    mResourceManagerService->addResource(
+            getId(mResourceManagerClient), mResourceManagerClient, resources);
+}
+
 status_t MediaCodec::start() {
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    sp<AMessage> msg = new AMessage(kWhatStart, this);
+
+    status_t err;
+    Vector<MediaResource> resources;
+    const char *type = (mFlags & kFlagIsSecure) ?
+            kResourceSecureCodec : kResourceNonSecureCodec;
+    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+    resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+    // Don't know the buffer size at this point, but it's fine to use 1 because
+    // the reclaimResource call doesn't consider the requester's buffer size for now.
+    resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+    for (int i = 0; i <= kMaxRetry; ++i) {
+        if (i > 0) {
+            // Don't try to reclaim resource for the first time.
+            if (!mResourceManagerService->reclaimResource(resources)) {
+                break;
+            }
+            // Recover codec from previous error before retry start.
+            err = reset();
+            if (err != OK) {
+                ALOGE("retrying start: failed to reset codec");
+                break;
+            }
+            sp<AMessage> response;
+            err = PostAndAwaitResponse(mConfigureMsg, &response);
+            if (err != OK) {
+                ALOGE("retrying start: failed to configure codec");
+                break;
+            }
+        }
+
+        sp<AMessage> response;
+        err = PostAndAwaitResponse(msg, &response);
+        if (!isResourceError(err)) {
+            break;
+        }
+    }
+    return err;
+}
+
+status_t MediaCodec::stop() {
+    sp<AMessage> msg = new AMessage(kWhatStop, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
-status_t MediaCodec::stop() {
-    sp<AMessage> msg = new AMessage(kWhatStop, id());
+status_t MediaCodec::reclaim() {
+    sp<AMessage> msg = new AMessage(kWhatRelease, this);
+    msg->setInt32("reclaimed", 1);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t MediaCodec::release() {
-    sp<AMessage> msg = new AMessage(kWhatRelease, id());
+    sp<AMessage> msg = new AMessage(kWhatRelease, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
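
Editorial sketch: getGraphicBufferSize() above approximates each video buffer as an uncompressed YUV 4:2:0 frame (width * height * 3/2 bytes) and sums over the buffers on both ports; the comment in the code notes this is only an estimate. Worked out for a 1080p codec with, say, 4 input and 8 output buffers (numbers illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t width = 1920, height = 1080;
        uint64_t frameBytes = width * height * 3 / 2;   // YUV 4:2:0, 3110400 bytes
        uint64_t total = (4 + 8) * frameBytes;          // buffers on both ports
        printf("~%llu bytes of graphic memory\n",
               (unsigned long long)total);              // ~37 MB
    }
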
@@ -373,7 +632,7 @@
         errorDetailMsg->clear();
     }
 
-    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, this);
     msg->setSize("index", index);
     msg->setSize("offset", offset);
     msg->setSize("size", size);
@@ -400,7 +659,7 @@
         errorDetailMsg->clear();
     }
 
-    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, this);
     msg->setSize("index", index);
     msg->setSize("offset", offset);
     msg->setPointer("subSamples", (void *)subSamples);
@@ -419,7 +678,7 @@
 }
 
 status_t MediaCodec::dequeueInputBuffer(size_t *index, int64_t timeoutUs) {
-    sp<AMessage> msg = new AMessage(kWhatDequeueInputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatDequeueInputBuffer, this);
     msg->setInt64("timeoutUs", timeoutUs);
 
     sp<AMessage> response;
@@ -440,7 +699,7 @@
         int64_t *presentationTimeUs,
         uint32_t *flags,
         int64_t timeoutUs) {
-    sp<AMessage> msg = new AMessage(kWhatDequeueOutputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatDequeueOutputBuffer, this);
     msg->setInt64("timeoutUs", timeoutUs);
 
     sp<AMessage> response;
@@ -459,7 +718,7 @@
 }
 
 status_t MediaCodec::renderOutputBufferAndRelease(size_t index) {
-    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, this);
     msg->setSize("index", index);
     msg->setInt32("render", true);
 
@@ -468,7 +727,7 @@
 }
 
 status_t MediaCodec::renderOutputBufferAndRelease(size_t index, int64_t timestampNs) {
-    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, this);
     msg->setSize("index", index);
     msg->setInt32("render", true);
     msg->setInt64("timestampNs", timestampNs);
@@ -478,7 +737,7 @@
 }
 
 status_t MediaCodec::releaseOutputBuffer(size_t index) {
-    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatReleaseOutputBuffer, this);
     msg->setSize("index", index);
 
     sp<AMessage> response;
@@ -486,14 +745,14 @@
 }
 
 status_t MediaCodec::signalEndOfInputStream() {
-    sp<AMessage> msg = new AMessage(kWhatSignalEndOfInputStream, id());
+    sp<AMessage> msg = new AMessage(kWhatSignalEndOfInputStream, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t MediaCodec::getOutputFormat(sp<AMessage> *format) const {
-    sp<AMessage> msg = new AMessage(kWhatGetOutputFormat, id());
+    sp<AMessage> msg = new AMessage(kWhatGetOutputFormat, this);
 
     sp<AMessage> response;
     status_t err;
@@ -507,7 +766,7 @@
 }
 
 status_t MediaCodec::getInputFormat(sp<AMessage> *format) const {
-    sp<AMessage> msg = new AMessage(kWhatGetInputFormat, id());
+    sp<AMessage> msg = new AMessage(kWhatGetInputFormat, this);
 
     sp<AMessage> response;
     status_t err;
@@ -521,7 +780,7 @@
 }
 
 status_t MediaCodec::getName(AString *name) const {
-    sp<AMessage> msg = new AMessage(kWhatGetName, id());
+    sp<AMessage> msg = new AMessage(kWhatGetName, this);
 
     sp<AMessage> response;
     status_t err;
@@ -534,8 +793,18 @@
     return OK;
 }
 
+status_t MediaCodec::getWidevineLegacyBuffers(Vector<sp<ABuffer> > *buffers) const {
+    sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
+    msg->setInt32("portIndex", kPortIndexInput);
+    msg->setPointer("buffers", buffers);
+    msg->setInt32("widevine", true);
+
+    sp<AMessage> response;
+    return PostAndAwaitResponse(msg, &response);
+}
+
 status_t MediaCodec::getInputBuffers(Vector<sp<ABuffer> > *buffers) const {
-    sp<AMessage> msg = new AMessage(kWhatGetBuffers, id());
+    sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
     msg->setInt32("portIndex", kPortIndexInput);
     msg->setPointer("buffers", buffers);
 
@@ -544,7 +813,7 @@
 }
 
 status_t MediaCodec::getOutputBuffers(Vector<sp<ABuffer> > *buffers) const {
-    sp<AMessage> msg = new AMessage(kWhatGetBuffers, id());
+    sp<AMessage> msg = new AMessage(kWhatGetBuffers, this);
     msg->setInt32("portIndex", kPortIndexOutput);
     msg->setPointer("buffers", buffers);
 
@@ -576,6 +845,10 @@
         sp<ABuffer> *buffer, sp<AMessage> *format) {
     // use mutex instead of a context switch
 
+    if (mReleasedByResourceManager) {
+        return DEAD_OBJECT;
+    }
+
     buffer->clear();
     format->clear();
     if (!isExecuting()) {
@@ -602,20 +875,20 @@
 }
 
 status_t MediaCodec::flush() {
-    sp<AMessage> msg = new AMessage(kWhatFlush, id());
+    sp<AMessage> msg = new AMessage(kWhatFlush, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t MediaCodec::requestIDRFrame() {
-    (new AMessage(kWhatRequestIDRFrame, id()))->post();
+    (new AMessage(kWhatRequestIDRFrame, this))->post();
 
     return OK;
 }
 
 void MediaCodec::requestActivityNotification(const sp<AMessage> &notify) {
-    sp<AMessage> msg = new AMessage(kWhatRequestActivityNotification, id());
+    sp<AMessage> msg = new AMessage(kWhatRequestActivityNotification, this);
     msg->setMessage("notify", notify);
     msg->post();
 }
@@ -640,7 +913,7 @@
     }
 }
 
-bool MediaCodec::handleDequeueInputBuffer(uint32_t replyID, bool newRequest) {
+bool MediaCodec::handleDequeueInputBuffer(const sp<AReplyToken> &replyID, bool newRequest) {
     if (!isExecuting() || (mFlags & kFlagIsAsync)
             || (newRequest && (mFlags & kFlagDequeueInputPending))) {
         PostReplyWithError(replyID, INVALID_OPERATION);
@@ -664,21 +937,20 @@
     return true;
 }
 
-bool MediaCodec::handleDequeueOutputBuffer(uint32_t replyID, bool newRequest) {
-    sp<AMessage> response = new AMessage;
-
+bool MediaCodec::handleDequeueOutputBuffer(const sp<AReplyToken> &replyID, bool newRequest) {
     if (!isExecuting() || (mFlags & kFlagIsAsync)
             || (newRequest && (mFlags & kFlagDequeueOutputPending))) {
-        response->setInt32("err", INVALID_OPERATION);
+        PostReplyWithError(replyID, INVALID_OPERATION);
     } else if (mFlags & kFlagStickyError) {
-        response->setInt32("err", getStickyError());
+        PostReplyWithError(replyID, getStickyError());
     } else if (mFlags & kFlagOutputBuffersChanged) {
-        response->setInt32("err", INFO_OUTPUT_BUFFERS_CHANGED);
+        PostReplyWithError(replyID, INFO_OUTPUT_BUFFERS_CHANGED);
         mFlags &= ~kFlagOutputBuffersChanged;
     } else if (mFlags & kFlagOutputFormatChanged) {
-        response->setInt32("err", INFO_FORMAT_CHANGED);
+        PostReplyWithError(replyID, INFO_FORMAT_CHANGED);
         mFlags &= ~kFlagOutputFormatChanged;
     } else {
+        sp<AMessage> response = new AMessage;
         ssize_t index = dequeuePortBuffer(kPortIndexOutput);
 
         if (index < 0) {
@@ -713,10 +985,9 @@
         }
 
         response->setInt32("flags", flags);
+        response->postReply(replyID);
     }
 
-    response->postReply(replyID);
-
     return true;
 }
 
@@ -874,18 +1145,30 @@
                         mFlags &= ~kFlagUsesSoftwareRenderer;
                     }
 
+                    String8 resourceType;
                     if (mComponentName.endsWith(".secure")) {
                         mFlags |= kFlagIsSecure;
+                        resourceType = String8(kResourceSecureCodec);
                     } else {
                         mFlags &= ~kFlagIsSecure;
+                        resourceType = String8(kResourceNonSecureCodec);
                     }
 
+                    const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
+                    addResource(resourceType, String8(subtype), 1);
+
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
 
                 case CodecBase::kWhatComponentConfigured:
                 {
+                    if (mState == UNINITIALIZED || mState == INITIALIZED) {
+                        // In case a kWhatError message came in and replied with error,
+                        // we log a warning and ignore.
+                        ALOGW("configure interrupted by error, current state %d", mState);
+                        break;
+                    }
                     CHECK_EQ(mState, CONFIGURING);
 
                     // reset input surface flag
@@ -908,7 +1191,7 @@
                 {
                     // response to initiateCreateInputSurface()
                     status_t err = NO_ERROR;
-                    sp<AMessage> response = new AMessage();
+                    sp<AMessage> response = new AMessage;
                     if (!msg->findInt32("err", &err)) {
                         sp<RefBase> obj;
                         msg->findObject("input-surface", &obj);
@@ -922,10 +1205,24 @@
                     break;
                 }
 
+                case CodecBase::kWhatInputSurfaceAccepted:
+                {
+                    // response to initiateSetInputSurface()
+                    status_t err = NO_ERROR;
+                    sp<AMessage> response = new AMessage();
+                    if (!msg->findInt32("err", &err)) {
+                        mHaveInputSurface = true;
+                    } else {
+                        response->setInt32("err", err);
+                    }
+                    response->postReply(mReplyID);
+                    break;
+                }
+
                 case CodecBase::kWhatSignaledInputEOS:
                 {
                     // response to signalEndOfInputStream()
-                    sp<AMessage> response = new AMessage();
+                    sp<AMessage> response = new AMessage;
                     status_t err;
                     if (msg->findInt32("err", &err)) {
                         response->setInt32("err", err);
@@ -959,6 +1256,17 @@
 
                     size_t numBuffers = portDesc->countBuffers();
 
+                    size_t totalSize = 0;
+                    for (size_t i = 0; i < numBuffers; ++i) {
+                        if (portIndex == kPortIndexInput && mCrypto != NULL) {
+                            totalSize += portDesc->bufferAt(i)->capacity();
+                        }
+                    }
+
+                    if (totalSize) {
+                        mDealer = new MemoryDealer(totalSize, "MediaCodec");
+                    }
+
                     for (size_t i = 0; i < numBuffers; ++i) {
                         BufferInfo info;
                         info.mBufferID = portDesc->bufferIDAt(i);
@@ -966,8 +1274,10 @@
                         info.mData = portDesc->bufferAt(i);
 
                         if (portIndex == kPortIndexInput && mCrypto != NULL) {
+                            sp<IMemory> mem = mDealer->allocate(info.mData->capacity());
                             info.mEncryptedData =
-                                new ABuffer(info.mData->capacity());
+                                new ABuffer(mem->pointer(), info.mData->capacity());
+                            info.mSharedEncryptedBuffer = mem;
                         }
 
                         buffers->push_back(info);
@@ -978,6 +1288,13 @@
                             // We're always allocating output buffers after
                             // allocating input buffers, so this is a good
                             // indication that now all buffers are allocated.
+                            if (mIsVideo) {
+                                String8 subtype;
+                                addResource(
+                                        String8(kResourceGraphicMemory),
+                                        subtype,
+                                        getGraphicBufferSize());
+                            }
                             setState(STARTED);
                             (new AMessage)->postReply(mReplyID);
                         } else {
@@ -993,13 +1310,13 @@
                     ALOGV("codec output format changed");
 
                     if (mSoftRenderer == NULL &&
-                            mNativeWindow != NULL &&
+                            mSurface != NULL &&
                             (mFlags & kFlagUsesSoftwareRenderer)) {
                         AString mime;
                         CHECK(msg->findString("mime", &mime));
 
                         if (mime.startsWithIgnoreCase("video/")) {
-                            mSoftRenderer = new SoftwareRenderer(mNativeWindow);
+                            mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
                         }
                     }
 
@@ -1031,6 +1348,18 @@
                     break;
                 }
 
+                case CodecBase::kWhatOutputFramesRendered:
+                {
+                    // ignore these in all states except running, and check that we have a
+                    // notification set
+                    if (mState == STARTED && mOnFrameRenderedNotification != NULL) {
+                        sp<AMessage> notify = mOnFrameRenderedNotification->dup();
+                        notify->setMessage("data", msg);
+                        notify->post();
+                    }
+                    break;
+                }
+
                 case CodecBase::kWhatFillThisBuffer:
                 {
                     /* size_t index = */updateBuffers(kPortIndexInput, msg);
@@ -1068,7 +1397,11 @@
 
                     if (mFlags & kFlagIsAsync) {
                         if (!mHaveInputSurface) {
-                            onInputBufferAvailable();
+                            if (mState == FLUSHED) {
+                                mHavePendingInputBuffers = true;
+                            } else {
+                                onInputBufferAvailable();
+                            }
                         }
                     } else if (mFlags & kFlagDequeueInputPending) {
                         CHECK(handleDequeueInputBuffer(mDequeueInputReplyID));
@@ -1157,6 +1490,8 @@
                     }
                     mFlags &= ~kFlagIsComponentAllocated;
 
+                    mResourceManagerService->removeResource(getId(mResourceManagerClient));
+
                     (new AMessage)->postReply(mReplyID);
                     break;
                 }
@@ -1188,7 +1523,7 @@
 
         case kWhatInit:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mState != UNINITIALIZED) {
@@ -1222,9 +1557,18 @@
             break;
         }
 
+        case kWhatSetNotification:
+        {
+            sp<AMessage> notify;
+            if (msg->findMessage("on-frame-rendered", &notify)) {
+                mOnFrameRenderedNotification = notify;
+            }
+            break;
+        }
+
         case kWhatSetCallback:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mState == UNINITIALIZED
@@ -1256,7 +1600,7 @@
 
         case kWhatConfigure:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mState != INITIALIZED) {
@@ -1265,26 +1609,25 @@
             }
 
             sp<RefBase> obj;
-            if (!msg->findObject("native-window", &obj)) {
-                obj.clear();
-            }
+            CHECK(msg->findObject("surface", &obj));
 
             sp<AMessage> format;
             CHECK(msg->findMessage("format", &format));
 
+            int32_t push;
+            if (msg->findInt32("push-blank-buffers-on-shutdown", &push) && push != 0) {
+                mFlags |= kFlagPushBlankBuffersOnShutdown;
+            }
+
             if (obj != NULL) {
                 format->setObject("native-window", obj);
-
-                status_t err = setNativeWindow(
-                    static_cast<NativeWindowWrapper *>(obj.get())
-                        ->getSurfaceTextureClient());
-
+                status_t err = handleSetSurface(static_cast<Surface *>(obj.get()));
                 if (err != OK) {
                     PostReplyWithError(replyID, err);
                     break;
                 }
             } else {
-                setNativeWindow(NULL);
+                handleSetSurface(NULL);
             }
 
             mReplyID = replyID;
@@ -1311,9 +1654,69 @@
             break;
         }
 
-        case kWhatCreateInputSurface:
+        case kWhatSetSurface:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
+            CHECK(msg->senderAwaitsResponse(&replyID));
+
+            status_t err = OK;
+            sp<Surface> surface;
+
+            switch (mState) {
+                case CONFIGURED:
+                case STARTED:
+                case FLUSHED:
+                {
+                    sp<RefBase> obj;
+                    (void)msg->findObject("surface", &obj);
+                    sp<Surface> surface = static_cast<Surface *>(obj.get());
+                    if (mSurface == NULL) {
+                        // do not support setting surface if it was not set
+                        err = INVALID_OPERATION;
+                    } else if (obj == NULL) {
+                        // do not support unsetting surface
+                        err = BAD_VALUE;
+                    } else {
+                        err = connectToSurface(surface);
+                        if (err == BAD_VALUE) {
+                            // assuming we are reconnecting to the same surface
+                            // TODO: check if it is the same surface
+                            err = OK;
+                        } else {
+                            if (err == OK) {
+                                if (mFlags & kFlagUsesSoftwareRenderer) {
+                                    if (mSoftRenderer != NULL
+                                            && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
+                                        pushBlankBuffersToNativeWindow(mSurface.get());
+                                    }
+                                    mSoftRenderer = new SoftwareRenderer(surface);
+                                    // TODO: check if this was successful
+                                } else {
+                                    err = mCodec->setSurface(surface);
+                                }
+                            }
+                            if (err == OK) {
+                                (void)disconnectFromSurface();
+                                mSurface = surface;
+                            }
+                        }
+                    }
+                    break;
+                }
+
+                default:
+                    err = INVALID_OPERATION;
+                    break;
+            }
+
+            PostReplyWithError(replyID, err);
+            break;
+        }
+
+        case kWhatCreateInputSurface:
+        case kWhatSetInputSurface:
+        {
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             // Must be configured, but can't have been started yet.
@@ -1323,17 +1726,28 @@
             }
 
             mReplyID = replyID;
-            mCodec->initiateCreateInputSurface();
+            if (msg->what() == kWhatCreateInputSurface) {
+                mCodec->initiateCreateInputSurface();
+            } else {
+                sp<RefBase> obj;
+                CHECK(msg->findObject("input-surface", &obj));
+
+                mCodec->initiateSetInputSurface(
+                        static_cast<PersistentSurface *>(obj.get()));
+            }
             break;
         }
-
         case kWhatStart:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mState == FLUSHED) {
                 setState(STARTED);
+                if (mHavePendingInputBuffers) {
+                    onInputBufferAvailable();
+                    mHavePendingInputBuffers = false;
+                }
                 mCodec->signalResume();
                 PostReplyWithError(replyID, OK);
                 break;
@@ -1355,9 +1769,23 @@
             State targetState =
                 (msg->what() == kWhatStop) ? INITIALIZED : UNINITIALIZED;
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
+            // already stopped/released
+            if (mState == UNINITIALIZED && mReleasedByResourceManager) {
+                sp<AMessage> response = new AMessage;
+                response->setInt32("err", OK);
+                response->postReply(replyID);
+                break;
+            }
+
+            int32_t reclaimed = 0;
+            msg->findInt32("reclaimed", &reclaimed);
+            if (reclaimed) {
+                mReleasedByResourceManager = true;
+            }
+
             if (!((mFlags & kFlagIsComponentAllocated) && targetState == UNINITIALIZED) // See 1
                     && mState != INITIALIZED
                     && mState != CONFIGURED && !isExecuting()) {
@@ -1371,6 +1799,8 @@
                 // and it should be in this case, no harm to allow a release()
                 // if we're already uninitialized.
                 sp<AMessage> response = new AMessage;
+                // TODO: we shouldn't throw an exception for stop/release. Change this to wait until
+                // the previous stop/release completes and then reply with OK.
                 status_t err = mState == targetState ? OK : INVALID_OPERATION;
                 response->setInt32("err", err);
                 if (err == OK && targetState == UNINITIALIZED) {
@@ -1398,12 +1828,16 @@
                     msg->what() == kWhatStop /* keepComponentAllocated */);
 
             returnBuffersToCodec();
+
+            if (mSoftRenderer != NULL && (mFlags & kFlagPushBlankBuffersOnShutdown)) {
+                pushBlankBuffersToNativeWindow(mSurface.get());
+            }
             break;
         }
 
         case kWhatDequeueInputBuffer:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mFlags & kFlagIsAsync) {
@@ -1435,7 +1869,7 @@
 
             if (timeoutUs > 0ll) {
                 sp<AMessage> timeoutMsg =
-                    new AMessage(kWhatDequeueInputTimedOut, id());
+                    new AMessage(kWhatDequeueInputTimedOut, this);
                 timeoutMsg->setInt32(
                         "generation", ++mDequeueInputTimeoutGeneration);
                 timeoutMsg->post(timeoutUs);
@@ -1464,7 +1898,7 @@
 
         case kWhatQueueInputBuffer:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (!isExecuting()) {
@@ -1483,7 +1917,7 @@
 
         case kWhatDequeueOutputBuffer:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mFlags & kFlagIsAsync) {
@@ -1509,7 +1943,7 @@
 
             if (timeoutUs > 0ll) {
                 sp<AMessage> timeoutMsg =
-                    new AMessage(kWhatDequeueOutputTimedOut, id());
+                    new AMessage(kWhatDequeueOutputTimedOut, this);
                 timeoutMsg->setInt32(
                         "generation", ++mDequeueOutputTimeoutGeneration);
                 timeoutMsg->post(timeoutUs);
@@ -1538,7 +1972,7 @@
 
         case kWhatReleaseOutputBuffer:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (!isExecuting()) {
@@ -1557,7 +1991,7 @@
 
         case kWhatSignalEndOfInputStream:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (!isExecuting()) {
@@ -1575,10 +2009,14 @@
 
         case kWhatGetBuffers:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
+            // Unfortunately the legacy widevine source requires knowing all of the
+            // codec input buffers, so we have to provide them even in async mode.
+            int32_t widevine = 0;
+            msg->findInt32("widevine", &widevine);
 
-            if (!isExecuting() || (mFlags & kFlagIsAsync)) {
+            if (!isExecuting() || ((mFlags & kFlagIsAsync) && !widevine)) {
                 PostReplyWithError(replyID, INVALID_OPERATION);
                 break;
             } else if (mFlags & kFlagStickyError) {
@@ -1609,7 +2047,7 @@
 
         case kWhatFlush:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (!isExecuting()) {
@@ -1635,7 +2073,7 @@
             sp<AMessage> format =
                 (msg->what() == kWhatGetOutputFormat ? mOutputFormat : mInputFormat);
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if ((mState != CONFIGURED && mState != STARTING &&
@@ -1672,7 +2110,7 @@
 
         case kWhatGetName:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             if (mComponentName.empty()) {
@@ -1688,7 +2126,7 @@
 
         case kWhatSetParameters:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             sp<AMessage> params;
@@ -1742,7 +2180,7 @@
 
     AString errorDetailMsg;
 
-    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, id());
+    sp<AMessage> msg = new AMessage(kWhatQueueInputBuffer, this);
     msg->setSize("index", bufferIndex);
     msg->setSize("offset", 0);
     msg->setSize("size", csd->size());
@@ -1759,7 +2197,7 @@
         mSoftRenderer = NULL;
 
         mCrypto.clear();
-        setNativeWindow(NULL);
+        handleSetSurface(NULL);
 
         mInputFormat.clear();
         mOutputFormat.clear();
@@ -1943,7 +2381,8 @@
                 key,
                 iv,
                 mode,
-                info->mEncryptedData->base() + offset,
+                info->mSharedEncryptedBuffer,
+                offset,
                 subSamples,
                 numSubSamples,
                 info->mData->base(),
@@ -1969,6 +2408,23 @@
     return OK;
 }
 
+// static
+size_t MediaCodec::CreateFramesRenderedMessage(
+        std::list<FrameRenderTracker::Info> done, sp<AMessage> &msg) {
+    size_t index = 0;
+
+    for (std::list<FrameRenderTracker::Info>::const_iterator it = done.cbegin();
+            it != done.cend(); ++it) {
+        if (it->getRenderTimeNs() < 0) {
+            continue; // dropped frame from tracking
+        }
+        msg->setInt64(AStringPrintf("%zu-media-time-us", index).c_str(), it->getMediaTimeUs());
+        msg->setInt64(AStringPrintf("%zu-system-nano", index).c_str(), it->getRenderTimeNs());
+        ++index;
+    }
+    return index;
+}
+
 status_t MediaCodec::onReleaseOutputBuffer(const sp<AMessage> &msg) {
     size_t index;
     CHECK(msg->findSize("index", &index));
@@ -2001,26 +2457,31 @@
     if (render && info->mData != NULL && info->mData->size() != 0) {
         info->mNotify->setInt32("render", true);
 
-        int64_t timestampNs = 0;
-        if (msg->findInt64("timestampNs", &timestampNs)) {
-            info->mNotify->setInt64("timestampNs", timestampNs);
-        } else {
-            // TODO: it seems like we should use the timestamp
-            // in the (media)buffer as it potentially came from
-            // an input surface, but we did not propagate it prior to
-            // API 20.  Perhaps check for target SDK version.
-#if 0
-            if (info->mData->meta()->findInt64("timeUs", &timestampNs)) {
-                ALOGV("using buffer PTS of %" PRId64, timestampNs);
-                timestampNs *= 1000;
-            }
-#endif
+        int64_t mediaTimeUs = -1;
+        info->mData->meta()->findInt64("timeUs", &mediaTimeUs);
+
+        int64_t renderTimeNs = 0;
+        if (!msg->findInt64("timestampNs", &renderTimeNs)) {
+            // use media timestamp if client did not request a specific render timestamp
+            ALOGV("using buffer PTS of %lld", (long long)mediaTimeUs);
+            renderTimeNs = mediaTimeUs * 1000;
         }
+        info->mNotify->setInt64("timestampNs", renderTimeNs);
 
         if (mSoftRenderer != NULL) {
-            mSoftRenderer->render(
+            std::list<FrameRenderTracker::Info> doneFrames = mSoftRenderer->render(
                     info->mData->data(), info->mData->size(),
-                    timestampNs, NULL, info->mFormat);
+                    mediaTimeUs, renderTimeNs, NULL, info->mFormat);
+
+            // if we are running, notify rendered frames
+            if (!doneFrames.empty() && mState == STARTED && mOnFrameRenderedNotification != NULL) {
+                sp<AMessage> notify = mOnFrameRenderedNotification->dup();
+                sp<AMessage> data = new AMessage;
+                if (CreateFramesRenderedMessage(doneFrames, data)) {
+                    notify->setMessage("data", data);
+                    notify->post();
+                }
+            }
         }
     }
 
@@ -2064,37 +2525,65 @@
     return index;
 }
 
-status_t MediaCodec::setNativeWindow(
-        const sp<Surface> &surfaceTextureClient) {
-    status_t err;
-
-    if (mNativeWindow != NULL) {
-        err = native_window_api_disconnect(
-                mNativeWindow.get(), NATIVE_WINDOW_API_MEDIA);
-
-        if (err != OK) {
-            ALOGW("native_window_api_disconnect returned an error: %s (%d)",
-                    strerror(-err), err);
-        }
-
-        mNativeWindow.clear();
-    }
-
-    if (surfaceTextureClient != NULL) {
-        err = native_window_api_connect(
-                surfaceTextureClient.get(), NATIVE_WINDOW_API_MEDIA);
-
-        if (err != OK) {
-            ALOGE("native_window_api_connect returned an error: %s (%d)",
-                    strerror(-err), err);
-
+status_t MediaCodec::connectToSurface(const sp<Surface> &surface) {
+    status_t err = OK;
+    if (surface != NULL) {
+        err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+        if (err == BAD_VALUE) {
+            ALOGI("native window already connected. Assuming no change of surface");
             return err;
+        } else if (err == OK) {
+            // Require a fresh set of buffers after each connect by using a unique generation
+            // number. Rely on the fact that the maximum process id supported by Linux is 2^22.
+            // PID is never 0, so we never collide with the default generation of 0.
+            // TODO: come up with a unique scheme if other producers also set the generation number.
+            static uint32_t mSurfaceGeneration = 0;
+            uint32_t generation = (getpid() << 10) | (++mSurfaceGeneration & ((1 << 10) - 1));
+            surface->setGenerationNumber(generation);
+            ALOGI("[%s] setting surface generation to %u", mComponentName.c_str(), generation);
+
+            // HACK: clear any free buffers. Remove this once connect does it automatically.
+            // This is needed as the consumer may be holding onto stale frames that it can reattach
+            // to this surface after disconnect/connect, and those free frames would inherit the new
+            // generation number. Disconnecting after setting a unique generation prevents this.
+            native_window_api_disconnect(surface.get(), NATIVE_WINDOW_API_MEDIA);
+            err = native_window_api_connect(surface.get(), NATIVE_WINDOW_API_MEDIA);
         }
 
-        mNativeWindow = surfaceTextureClient;
+        if (err != OK) {
+            ALOGE("native_window_api_connect returned an error: %s (%d)", strerror(-err), err);
+        }
     }
+    return err;
+}
 
-    return OK;
+status_t MediaCodec::disconnectFromSurface() {
+    status_t err = OK;
+    if (mSurface != NULL) {
+        // Resetting generation is not technically needed, but there is no need to keep it either
+        mSurface->setGenerationNumber(0);
+        err = native_window_api_disconnect(mSurface.get(), NATIVE_WINDOW_API_MEDIA);
+        if (err != OK) {
+            ALOGW("native_window_api_disconnect returned an error: %s (%d)", strerror(-err), err);
+        }
+        // assume disconnected even on error
+        mSurface.clear();
+    }
+    return err;
+}
+
+status_t MediaCodec::handleSetSurface(const sp<Surface> &surface) {
+    status_t err = OK;
+    if (mSurface != NULL) {
+        (void)disconnectFromSurface();
+    }
+    if (surface != NULL) {
+        err = connectToSurface(surface);
+        if (err == OK) {
+            mSurface = surface;
+        }
+    }
+    return err;
 }
 
 void MediaCodec::onInputBufferAvailable() {
@@ -2197,7 +2686,7 @@
 }
 
 status_t MediaCodec::setParameters(const sp<AMessage> &params) {
-    sp<AMessage> msg = new AMessage(kWhatSetParameters, id());
+    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
     msg->setMessage("params", params);
 
     sp<AMessage> response;
@@ -2253,12 +2742,6 @@
 
 void MediaCodec::updateBatteryStat() {
     if (mState == CONFIGURED && !mBatteryStatNotified) {
-        AString mime;
-        CHECK(mOutputFormat != NULL &&
-                mOutputFormat->findString("mime", &mime));
-
-        mIsVideo = mime.startsWithIgnoreCase("video/");
-
         BatteryNotifier& notifier(BatteryNotifier::getInstance());
 
         if (mIsVideo) {
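A minimal standalone sketch of the generation-number packing used in connectToSurface() above: the process id goes in the high bits and a wrapping per-process counter in the low 10 bits. The function name nextSurfaceGeneration and the demo main() are illustrative only, not MediaCodec API.

    #include <cstdint>
    #include <cstdio>
    #include <unistd.h>

    // Illustrative helper: Linux pids fit in 22 bits, so (pid << 10) leaves the
    // low 10 bits free for a counter that wraps at 1024. pid is never 0, so the
    // result never equals the default generation of 0.
    static uint32_t nextSurfaceGeneration() {
        static uint32_t sCounter = 0;
        return (static_cast<uint32_t>(getpid()) << 10) |
                (++sCounter & ((1u << 10) - 1));
    }

    int main() {
        printf("surface generation = %u\n", nextSurfaceGeneration());
        return 0;
    }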
diff --git a/media/libstagefright/MediaCodecList.cpp b/media/libstagefright/MediaCodecList.cpp
index cf6e937..5edc04c 100644
--- a/media/libstagefright/MediaCodecList.cpp
+++ b/media/libstagefright/MediaCodecList.cpp
@@ -18,11 +18,15 @@
 #define LOG_TAG "MediaCodecList"
 #include <utils/Log.h>
 
+#include "MediaCodecListOverrides.h"
+
 #include <binder/IServiceManager.h>
 
 #include <media/IMediaCodecList.h>
 #include <media/IMediaPlayerService.h>
+#include <media/IResourceManagerService.h>
 #include <media/MediaCodecInfo.h>
+#include <media/MediaResourcePolicy.h>
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -31,27 +35,100 @@
 #include <media/stagefright/OMXClient.h>
 #include <media/stagefright/OMXCodec.h>
 
+#include <sys/stat.h>
 #include <utils/threads.h>
 
+#include <cutils/properties.h>
 #include <libexpat/expat.h>
 
 namespace android {
 
+const char *kMaxEncoderInputBuffers = "max-video-encoder-input-buffers";
+
 static Mutex sInitMutex;
 
-static MediaCodecList *gCodecList = NULL;
+static bool parseBoolean(const char *s) {
+    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
+        return true;
+    }
+    char *end;
+    unsigned long res = strtoul(s, &end, 10);
+    return *s != '\0' && *end == '\0' && res > 0;
+}
+
+static bool isProfilingNeeded() {
+    int8_t value = property_get_bool("debug.stagefright.profilecodec", 0);
+    if (value == 0) {
+        return false;
+    }
+
+    bool profilingNeeded = true;
+    FILE *resultsFile = fopen(kProfilingResults, "r");
+    if (resultsFile) {
+        AString currentVersion = getProfilingVersionString();
+        size_t currentVersionSize = currentVersion.size();
+        char *versionString = new char[currentVersionSize + 1];
+        if (fgets(versionString, currentVersionSize + 1, resultsFile) != NULL
+                && strcmp(versionString, currentVersion.c_str()) == 0) {
+            // profiling result up to date
+            profilingNeeded = false;
+        }
+        fclose(resultsFile);
+        delete[] versionString;
+    }
+    return profilingNeeded;
+}
 
 // static
 sp<IMediaCodecList> MediaCodecList::sCodecList;
 
 // static
+void *MediaCodecList::profilerThreadWrapper(void * /*arg*/) {
+    ALOGV("Enter profilerThreadWrapper.");
+    remove(kProfilingResults);  // remove the previous result so that it won't be loaded into
+                                // the new MediaCodecList
+    MediaCodecList *codecList = new MediaCodecList();
+    if (codecList->initCheck() != OK) {
+        ALOGW("Failed to create a new MediaCodecList, skipping codec profiling.");
+        delete codecList;
+        return NULL;
+    }
+
+    Vector<sp<MediaCodecInfo>> infos;
+    for (size_t i = 0; i < codecList->countCodecs(); ++i) {
+        infos.push_back(codecList->getCodecInfo(i));
+    }
+    ALOGV("Codec profiling started.");
+    profileCodecs(infos);
+    ALOGV("Codec profiling completed.");
+    codecList->parseTopLevelXMLFile(kProfilingResults, true /* ignore_errors */);
+
+    {
+        Mutex::Autolock autoLock(sInitMutex);
+        sCodecList = codecList;
+    }
+    return NULL;
+}
+
+// static
 sp<IMediaCodecList> MediaCodecList::getLocalInstance() {
     Mutex::Autolock autoLock(sInitMutex);
 
-    if (gCodecList == NULL) {
-        gCodecList = new MediaCodecList;
-        if (gCodecList->initCheck() == OK) {
-            sCodecList = gCodecList;
+    if (sCodecList == NULL) {
+        MediaCodecList *codecList = new MediaCodecList;
+        if (codecList->initCheck() == OK) {
+            sCodecList = codecList;
+
+            if (isProfilingNeeded()) {
+                ALOGV("Codec profiling needed, will be run in a separate thread.");
+                pthread_t profiler;
+                if (pthread_create(&profiler, NULL, profilerThreadWrapper, NULL) != 0) {
+                    ALOGW("Failed to create thread for codec profiling.");
+                }
+            }
+        } else {
+            // Failure to initialize may be temporary; retry on the next call.
+            delete codecList;
         }
     }
 
@@ -94,11 +171,15 @@
 }
 
 MediaCodecList::MediaCodecList()
-    : mInitCheck(NO_INIT) {
+    : mInitCheck(NO_INIT),
+      mUpdate(false),
+      mGlobalSettings(new AMessage()) {
     parseTopLevelXMLFile("/etc/media_codecs.xml");
+    parseTopLevelXMLFile("/etc/media_codecs_performance.xml", true/* ignore_errors */);
+    parseTopLevelXMLFile(kProfilingResults, true/* ignore_errors */);
 }
 
-void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml) {
+void MediaCodecList::parseTopLevelXMLFile(const char *codecs_xml, bool ignore_errors) {
     // get href_base
     char *href_base_end = strrchr(codecs_xml, '/');
     if (href_base_end != NULL) {
@@ -112,20 +193,49 @@
     OMXClient client;
     mInitCheck = client.connect();
     if (mInitCheck != OK) {
-        return;
+        return;  // this may fail if IMediaPlayerService is not available.
     }
     mOMX = client.interface();
     parseXMLFile(codecs_xml);
     mOMX.clear();
 
     if (mInitCheck != OK) {
+        if (ignore_errors) {
+            mInitCheck = OK;
+            return;
+        }
         mCodecInfos.clear();
         return;
     }
 
-    for (size_t i = mCodecInfos.size(); i-- > 0;) {
-        const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
+    Vector<MediaResourcePolicy> policies;
+    AString value;
+    if (mGlobalSettings->findString(kPolicySupportsMultipleSecureCodecs, &value)) {
+        policies.push_back(
+                MediaResourcePolicy(
+                        String8(kPolicySupportsMultipleSecureCodecs),
+                        String8(value.c_str())));
+    }
+    if (mGlobalSettings->findString(kPolicySupportsSecureWithNonSecureCodec, &value)) {
+        policies.push_back(
+                MediaResourcePolicy(
+                        String8(kPolicySupportsSecureWithNonSecureCodec),
+                        String8(value.c_str())));
+    }
+    if (policies.size() > 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder = sm->getService(String16("media.resource_manager"));
+        sp<IResourceManagerService> service = interface_cast<IResourceManagerService>(binder);
+        if (service == NULL) {
+            ALOGE("MediaCodecList: failed to get ResourceManagerService");
+        } else {
+            service->config(policies);
+        }
+    }
 
+    for (size_t i = mCodecInfos.size(); i > 0;) {
+        i--;
+        const MediaCodecInfo &info = *mCodecInfos.itemAt(i).get();
         if (info.mCaps.size() == 0) {
             // No types supported by this component???
             ALOGW("Component %s does not support any type of media?",
@@ -169,6 +279,16 @@
                     }
                     ALOGV("    levels=[%s]", nice.c_str());
                 }
+                {
+                    AString quirks;
+                    for (size_t ix = 0; ix < info.mQuirks.size(); ix++) {
+                        if (ix > 0) {
+                            quirks.append(", ");
+                        }
+                        quirks.append(info.mQuirks[ix]);
+                    }
+                    ALOGV("    quirks=[%s]", quirks.c_str());
+                }
             }
 #endif
         }
@@ -328,6 +448,16 @@
                 mCurrentSection = SECTION_DECODERS;
             } else if (!strcmp(name, "Encoders")) {
                 mCurrentSection = SECTION_ENCODERS;
+            } else if (!strcmp(name, "Settings")) {
+                mCurrentSection = SECTION_SETTINGS;
+            }
+            break;
+        }
+
+        case SECTION_SETTINGS:
+        {
+            if (!strcmp(name, "Setting")) {
+                mInitCheck = addSettingFromAttributes(attrs);
             }
             break;
         }
@@ -397,6 +527,14 @@
     }
 
     switch (mCurrentSection) {
+        case SECTION_SETTINGS:
+        {
+            if (!strcmp(name, "Settings")) {
+                mCurrentSection = SECTION_TOPLEVEL;
+            }
+            break;
+        }
+
         case SECTION_DECODERS:
         {
             if (!strcmp(name, "Decoders")) {
@@ -462,10 +600,81 @@
     --mDepth;
 }
 
+status_t MediaCodecList::addSettingFromAttributes(const char **attrs) {
+    const char *name = NULL;
+    const char *value = NULL;
+    const char *update = NULL;
+
+    size_t i = 0;
+    while (attrs[i] != NULL) {
+        if (!strcmp(attrs[i], "name")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            name = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "value")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            value = attrs[i + 1];
+            ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
+        } else {
+            return -EINVAL;
+        }
+
+        ++i;
+    }
+
+    if (name == NULL || value == NULL) {
+        return -EINVAL;
+    }
+
+    mUpdate = (update != NULL) && parseBoolean(update);
+    if (mUpdate != mGlobalSettings->contains(name)) {
+        return -EINVAL;
+    }
+
+    mGlobalSettings->setString(name, value);
+    return OK;
+}
+
+void MediaCodecList::setCurrentCodecInfo(bool encoder, const char *name, const char *type) {
+    for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+        if (AString(name) == mCodecInfos[i]->getCodecName()) {
+            if (mCodecInfos[i]->getCapabilitiesFor(type) == NULL) {
+                ALOGW("Overrides with an unexpected mime %s", type);
+                // Create a new MediaCodecInfo (but don't add it to mCodecInfos) to hold the
+                // overrides we don't want.
+                mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+            } else {
+                mCurrentInfo = mCodecInfos.editItemAt(i);
+                mCurrentInfo->updateMime(type);  // to set the current cap
+            }
+            return;
+        }
+    }
+    mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+    // The next step involves trying to load the codec, which may
+    // fail.  Only list the codec if this succeeds.
+    // However, keep mCurrentInfo object around until parsing
+    // of full codec info is completed.
+    if (initializeCapabilities(type) == OK) {
+        mCodecInfos.push_back(mCurrentInfo);
+    }
+}
+
 status_t MediaCodecList::addMediaCodecFromAttributes(
         bool encoder, const char **attrs) {
     const char *name = NULL;
     const char *type = NULL;
+    const char *update = NULL;
 
     size_t i = 0;
     while (attrs[i] != NULL) {
@@ -481,6 +690,12 @@
             }
             type = attrs[i + 1];
+                    sp<AMessage> response = new AMessage;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
         } else {
             return -EINVAL;
         }
@@ -492,14 +707,39 @@
         return -EINVAL;
     }
 
-    mCurrentInfo = new MediaCodecInfo(name, encoder, type);
-    // The next step involves trying to load the codec, which may
-    // fail.  Only list the codec if this succeeds.
-    // However, keep mCurrentInfo object around until parsing
-    // of full codec info is completed.
-    if (initializeCapabilities(type) == OK) {
-        mCodecInfos.push_back(mCurrentInfo);
+    mUpdate = (update != NULL) && parseBoolean(update);
+    ssize_t index = -1;
+    for (size_t i = 0; i < mCodecInfos.size(); ++i) {
+        if (AString(name) == mCodecInfos[i]->getCodecName()) {
+            index = i;
+        }
     }
+    if (mUpdate != (index >= 0)) {
+        return -EINVAL;
+    }
+
+    if (index >= 0) {
+        // existing codec
+        mCurrentInfo = mCodecInfos.editItemAt(index);
+        if (type != NULL) {
+            // existing type
+            if (mCodecInfos[index]->getCapabilitiesFor(type) == NULL) {
+                return -EINVAL;
+            }
+            mCurrentInfo->updateMime(type);
+        }
+    } else {
+        // new codec
+        mCurrentInfo = new MediaCodecInfo(name, encoder, type);
+        // The next step involves trying to load the codec, which may
+        // fail.  Only list the codec if this succeeds.
+        // However, keep mCurrentInfo object around until parsing
+        // of full codec info is completed.
+        if (initializeCapabilities(type) == OK) {
+            mCodecInfos.push_back(mCurrentInfo);
+        }
+    }
+
     return OK;
 }
 
@@ -553,6 +793,7 @@
 
 status_t MediaCodecList::addTypeFromAttributes(const char **attrs) {
     const char *name = NULL;
+    const char *update = NULL;
 
     size_t i = 0;
     while (attrs[i] != NULL) {
@@ -562,6 +803,12 @@
             }
             name = attrs[i + 1];
             ++i;
+        } else if (!strcmp(attrs[i], "update")) {
+            if (attrs[i + 1] == NULL) {
+                return -EINVAL;
+            }
+            update = attrs[i + 1];
+            ++i;
         } else {
             return -EINVAL;
         }
@@ -573,14 +820,25 @@
         return -EINVAL;
     }
 
-    status_t ret = mCurrentInfo->addMime(name);
+    bool isExistingType = (mCurrentInfo->getCapabilitiesFor(name) != NULL);
+    if (mUpdate != isExistingType) {
+        return -EINVAL;
+    }
+
+    status_t ret;
+    if (mUpdate) {
+        ret = mCurrentInfo->updateMime(name);
+    } else {
+        ret = mCurrentInfo->addMime(name);
+    }
+
     if (ret != OK) {
         return ret;
     }
 
     // The next step involves trying to load the codec, which may
     // fail.  Handle this gracefully (by not reporting such mime).
-    if (initializeCapabilities(name) != OK) {
+    if (!mUpdate && initializeCapabilities(name) != OK) {
         mCurrentInfo->removeMime(name);
     }
     return OK;
@@ -675,14 +933,16 @@
         return -EINVAL;
     }
 
-    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio: range
+    // size, blocks, bitrate, frame-rate, blocks-per-second, aspect-ratio,
+    // measured-frame-rate, measured-blocks-per-second: range
     // quality: range + default + [scale]
     // complexity: range + default
     bool found;
 
     if (name == "aspect-ratio" || name == "bitrate" || name == "block-count"
             || name == "blocks-per-second" || name == "complexity"
-            || name == "frame-rate" || name == "quality" || name == "size") {
+            || name == "frame-rate" || name == "quality" || name == "size"
+            || name == "measured-blocks-per-second" || name.startsWith("measured-frame-rate-")) {
         AString min, max;
         if (msg->findString("min", &min) && msg->findString("max", &max)) {
             min.append("-");
@@ -746,7 +1006,7 @@
             return limitFoundMissingAttr(name, "default");
         } else if (msg->contains("in")) {
             return limitFoundMissingAttr(name, "in");
-        } else if ((name == "channel-count") ^
+        } else if ((name == "channel-count" || name == "concurrent-instances") ^
                 (found = msg->findString("max", &max))) {
             return limitFoundMissingAttr(name, "max", found);
         } else if (msg->contains("min")) {
@@ -780,15 +1040,6 @@
     return OK;
 }
 
-static bool parseBoolean(const char *s) {
-    if (!strcasecmp(s, "true") || !strcasecmp(s, "yes") || !strcasecmp(s, "y")) {
-        return true;
-    }
-    char *end;
-    unsigned long res = strtoul(s, &end, 10);
-    return *s != '\0' && *end == '\0' && res > 0;
-}
-
 status_t MediaCodecList::addFeature(const char **attrs) {
     size_t i = 0;
     const char *name = NULL;
@@ -860,4 +1111,8 @@
     return mCodecInfos.size();
 }
 
+const sp<AMessage> MediaCodecList::getGlobalSettings() const {
+    return mGlobalSettings;
+}
+
 }  // namespace android
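The new parsing paths above (addSettingFromAttributes, addMediaCodecFromAttributes, addTypeFromAttributes) all enforce the same invariant: an entry carrying update="true" must refer to something already parsed, while a non-update entry must be new. A small standalone sketch of that check, assuming nothing beyond the standard library; acceptEntry is a hypothetical stand-in, not a MediaCodecList method.

    #include <cstdio>
    #include <set>
    #include <string>

    // Stand-in for the check repeated in the three add*FromAttributes methods:
    // reject updates to unknown entries and re-definitions of known ones.
    static bool acceptEntry(const std::set<std::string> &known,
                            const std::string &name, bool update) {
        bool exists = known.count(name) > 0;
        return update == exists;   // mUpdate != exists  =>  -EINVAL in the patch
    }

    int main() {
        std::set<std::string> known = {"OMX.google.h264.decoder"};
        printf("%d\n", acceptEntry(known, "OMX.google.h264.decoder", true));   // 1: valid update
        printf("%d\n", acceptEntry(known, "OMX.google.h264.decoder", false));  // 0: duplicate definition
        printf("%d\n", acceptEntry(known, "OMX.vendor.new.decoder", true));    // 0: update of unknown codec
        return 0;
    }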
diff --git a/media/libstagefright/MediaCodecListOverrides.cpp b/media/libstagefright/MediaCodecListOverrides.cpp
new file mode 100644
index 0000000..4ec36b5
--- /dev/null
+++ b/media/libstagefright/MediaCodecListOverrides.cpp
@@ -0,0 +1,421 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecListOverrides"
+#include <utils/Log.h>
+
+#include "MediaCodecListOverrides.h"
+
+#include <cutils/properties.h>
+#include <gui/Surface.h>
+#include <media/ICrypto.h>
+#include <media/IMediaCodecList.h>
+#include <media/MediaCodecInfo.h>
+#include <media/MediaResourcePolicy.h>
+#include <media/openmax/OMX_IVCommon.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecList.h>
+
+namespace android {
+
+const char *kProfilingResults = "/data/misc/media/media_codecs_profiling_results.xml";
+
+AString getProfilingVersionString() {
+    char val[PROPERTY_VALUE_MAX];
+    if (property_get("ro.build.display.id", val, NULL) && (strlen(val) > 0)) {
+        return AStringPrintf("<!-- Profiled-with: %s -->", val);
+    }
+
+    return "<!-- Profiled-with: UNKNOWN_BUILD_ID -->";
+}
+
+// A limit to avoid allocating an unreasonable number of codec instances in the measurement.
+// This should be kept in sync with the MAX_SUPPORTED_INSTANCES defined in MediaCodecInfo.java.
+static const int kMaxInstances = 32;
+
+// TODO: move MediaCodecInfo to C++. Until then, some temp methods to parse out info.
+static bool getMeasureSize(sp<MediaCodecInfo::Capabilities> caps, int32_t *width, int32_t *height) {
+    AString sizeRange;
+    if (!caps->getDetails()->findString("size-range", &sizeRange)) {
+        return false;
+    }
+    AString minSize;
+    AString maxSize;
+    if (!splitString(sizeRange, "-", &minSize, &maxSize)) {
+        return false;
+    }
+    AString sWidth;
+    AString sHeight;
+    if (!splitString(minSize, "x", &sWidth, &sHeight)) {
+        if (!splitString(minSize, "*", &sWidth, &sHeight)) {
+            return false;
+        }
+    }
+
+    *width = strtol(sWidth.c_str(), NULL, 10);
+    *height = strtol(sHeight.c_str(), NULL, 10);
+    return (*width > 0) && (*height > 0);
+}
+
+static void getMeasureBitrate(sp<MediaCodecInfo::Capabilities> caps, int32_t *bitrate) {
+    // Until we have a native MediaCodecInfo, we cannot get bitrates based on profile/levels.
+    // We use 200000 as the default value for our measurement.
+    *bitrate = 200000;
+    AString bitrateRange;
+    if (!caps->getDetails()->findString("bitrate-range", &bitrateRange)) {
+        return;
+    }
+    AString minBitrate;
+    AString maxBitrate;
+    if (!splitString(bitrateRange, "-", &minBitrate, &maxBitrate)) {
+        return;
+    }
+
+    *bitrate = strtol(minBitrate.c_str(), NULL, 10);
+}
+
+static sp<AMessage> getMeasureFormat(
+        bool isEncoder, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
+    sp<AMessage> format = new AMessage();
+    format->setString("mime", mime);
+
+    if (isEncoder) {
+        int32_t bitrate = 0;
+        getMeasureBitrate(caps, &bitrate);
+        format->setInt32("bitrate", bitrate);
+        format->setInt32("encoder", 1);
+    }
+
+    if (mime.startsWith("video/")) {
+        int32_t width = 0;
+        int32_t height = 0;
+        if (!getMeasureSize(caps, &width, &height)) {
+            return NULL;
+        }
+        format->setInt32("width", width);
+        format->setInt32("height", height);
+
+        Vector<uint32_t> colorFormats;
+        caps->getSupportedColorFormats(&colorFormats);
+        if (colorFormats.size() == 0) {
+            return NULL;
+        }
+        format->setInt32("color-format", colorFormats[0]);
+
+        format->setFloat("frame-rate", 10.0);
+        format->setInt32("i-frame-interval", 10);
+    } else {
+        // TODO: profile hw audio
+        return NULL;
+    }
+
+    return format;
+}
+
+static size_t doProfileEncoderInputBuffers(
+        AString name, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
+    ALOGV("doProfileEncoderInputBuffers: name %s, mime %s", name.c_str(), mime.c_str());
+
+    sp<AMessage> format = getMeasureFormat(true /* isEncoder */, mime, caps);
+    if (format == NULL) {
+        return 0;
+    }
+
+    format->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
+    ALOGV("doProfileEncoderInputBuffers: format %s", format->debugString().c_str());
+
+    status_t err = OK;
+    sp<ALooper> looper = new ALooper;
+    looper->setName("MediaCodec_looper");
+    looper->start(
+            false /* runOnCallingThread */, false /* canCallJava */, ANDROID_PRIORITY_AUDIO);
+
+    sp<MediaCodec> codec = MediaCodec::CreateByComponentName(looper, name.c_str(), &err);
+    if (err != OK) {
+        ALOGE("Failed to create codec: %s", name.c_str());
+        return 0;
+    }
+
+    err = codec->configure(format, NULL, NULL, MediaCodec::CONFIGURE_FLAG_ENCODE);
+    if (err != OK) {
+        ALOGE("Failed to configure codec: %s with mime: %s", name.c_str(), mime.c_str());
+        codec->release();
+        return 0;
+    }
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    err = codec->createInputSurface(&bufferProducer);
+    if (err != OK) {
+        ALOGE("Failed to create surface: %s with mime: %s", name.c_str(), mime.c_str());
+        codec->release();
+        return 0;
+    }
+
+    int minUndequeued = 0;
+    err = bufferProducer->query(
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeued);
+    if (err != OK) {
+        ALOGE("Failed to query NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS");
+        minUndequeued = 0;
+    }
+
+    err = codec->release();
+    if (err != OK) {
+        ALOGW("Failed to release codec: %s with mime: %s", name.c_str(), mime.c_str());
+    }
+
+    return minUndequeued;
+}
+
+static size_t doProfileCodecs(
+        bool isEncoder, AString name, AString mime, sp<MediaCodecInfo::Capabilities> caps) {
+    sp<AMessage> format = getMeasureFormat(isEncoder, mime, caps);
+    if (format == NULL) {
+        return 0;
+    }
+    ALOGV("doProfileCodecs %s %s %s %s",
+            name.c_str(), mime.c_str(), isEncoder ? "encoder" : "decoder",
+            format->debugString().c_str());
+
+    status_t err = OK;
+    Vector<sp<MediaCodec>> codecs;
+    while (err == OK && codecs.size() < kMaxInstances) {
+        sp<ALooper> looper = new ALooper;
+        looper->setName("MediaCodec_looper");
+        ALOGV("doProfileCodecs for codec #%zu", codecs.size());
+        ALOGV("doProfileCodecs start looper");
+        looper->start(
+                false /* runOnCallingThread */, false /* canCallJava */, ANDROID_PRIORITY_AUDIO);
+        ALOGV("doProfileCodecs CreateByComponentName");
+        sp<MediaCodec> codec = MediaCodec::CreateByComponentName(looper, name.c_str(), &err);
+        if (err != OK) {
+            ALOGV("Failed to create codec: %s", name.c_str());
+            break;
+        }
+        const sp<Surface> nativeWindow;
+        const sp<ICrypto> crypto;
+        uint32_t flags = isEncoder ? MediaCodec::CONFIGURE_FLAG_ENCODE : 0;
+        ALOGV("doProfileCodecs configure");
+        err = codec->configure(format, nativeWindow, crypto, flags);
+        if (err != OK) {
+            ALOGV("Failed to configure codec: %s with mime: %s", name.c_str(), mime.c_str());
+            codec->release();
+            break;
+        }
+        ALOGV("doProfileCodecs start");
+        err = codec->start();
+        if (err != OK) {
+            ALOGV("Failed to start codec: %s with mime: %s", name.c_str(), mime.c_str());
+            codec->release();
+            break;
+        }
+        codecs.push_back(codec);
+    }
+
+    for (size_t i = 0; i < codecs.size(); ++i) {
+        ALOGV("doProfileCodecs release %s", name.c_str());
+        err = codecs[i]->release();
+        if (err != OK) {
+            ALOGE("Failed to release codec: %s with mime: %s", name.c_str(), mime.c_str());
+        }
+    }
+
+    return codecs.size();
+}
+
+bool splitString(const AString &s, const AString &delimiter, AString *s1, AString *s2) {
+    ssize_t pos = s.find(delimiter.c_str());
+    if (pos < 0) {
+        return false;
+    }
+    *s1 = AString(s, 0, pos);
+    *s2 = AString(s, pos + 1, s.size() - pos - 1);
+    return true;
+}
+
+bool splitString(
+        const AString &s, const AString &delimiter, AString *s1, AString *s2, AString *s3) {
+    AString temp;
+    if (!splitString(s, delimiter, s1, &temp)) {
+        return false;
+    }
+    if (!splitString(temp, delimiter, s2, s3)) {
+        return false;
+    }
+    return true;
+}
+
+void profileCodecs(const Vector<sp<MediaCodecInfo>> &infos) {
+    CodecSettings global_results;
+    KeyedVector<AString, CodecSettings> encoder_results;
+    KeyedVector<AString, CodecSettings> decoder_results;
+    profileCodecs(infos, &global_results, &encoder_results, &decoder_results);
+    exportResultsToXML(kProfilingResults, global_results, encoder_results, decoder_results);
+}
+
+void profileCodecs(
+        const Vector<sp<MediaCodecInfo>> &infos,
+        CodecSettings *global_results,
+        KeyedVector<AString, CodecSettings> *encoder_results,
+        KeyedVector<AString, CodecSettings> *decoder_results,
+        bool forceToMeasure) {
+    KeyedVector<AString, sp<MediaCodecInfo::Capabilities>> codecsNeedMeasure;
+    AString supportMultipleSecureCodecs = "true";
+    size_t maxEncoderInputBuffers = 0;
+    for (size_t i = 0; i < infos.size(); ++i) {
+        const sp<MediaCodecInfo> info = infos[i];
+        AString name = info->getCodecName();
+        if (name.startsWith("OMX.google.") ||
+                // TODO: re-enable the codecs below once they are fixed
+                name == "OMX.Intel.VideoDecoder.VP9.hybrid") {
+            continue;
+        }
+
+        Vector<AString> mimes;
+        info->getSupportedMimes(&mimes);
+        for (size_t i = 0; i < mimes.size(); ++i) {
+            const sp<MediaCodecInfo::Capabilities> &caps =
+                    info->getCapabilitiesFor(mimes[i].c_str());
+            if (!forceToMeasure &&
+                (caps->getDetails()->contains("max-supported-instances") ||
+                 caps->getDetails()->contains("max-concurrent-instances"))) {
+                continue;
+            }
+
+            size_t max = doProfileCodecs(info->isEncoder(), name, mimes[i], caps);
+            if (max > 0) {
+                CodecSettings settings;
+                char maxStr[32];
+                sprintf(maxStr, "%zu", max);
+                settings.add("max-supported-instances", maxStr);
+
+                AString key = name;
+                key.append(" ");
+                key.append(mimes[i]);
+
+                if (info->isEncoder()) {
+                    encoder_results->add(key, settings);
+                } else {
+                    decoder_results->add(key, settings);
+                }
+
+                if (name.endsWith(".secure")) {
+                    if (max <= 1) {
+                        supportMultipleSecureCodecs = "false";
+                    }
+                }
+                if (info->isEncoder() && mimes[i].startsWith("video/")) {
+                    size_t encoderInputBuffers =
+                        doProfileEncoderInputBuffers(name, mimes[i], caps);
+                    if (encoderInputBuffers > maxEncoderInputBuffers) {
+                        maxEncoderInputBuffers = encoderInputBuffers;
+                    }
+                }
+            }
+        }
+    }
+    if (maxEncoderInputBuffers > 0) {
+        char tmp[32];
+        sprintf(tmp, "%zu", maxEncoderInputBuffers);
+        global_results->add(kMaxEncoderInputBuffers, tmp);
+    }
+    global_results->add(kPolicySupportsMultipleSecureCodecs, supportMultipleSecureCodecs);
+}
+
+static AString globalResultsToXml(const CodecSettings& results) {
+    AString ret;
+    for (size_t i = 0; i < results.size(); ++i) {
+        AString setting = AStringPrintf(
+                "        <Setting name=\"%s\" value=\"%s\" />\n",
+                results.keyAt(i).c_str(),
+                results.valueAt(i).c_str());
+        ret.append(setting);
+    }
+    return ret;
+}
+
+static AString codecResultsToXml(const KeyedVector<AString, CodecSettings>& results) {
+    AString ret;
+    for (size_t i = 0; i < results.size(); ++i) {
+        AString name;
+        AString mime;
+        if (!splitString(results.keyAt(i), " ", &name, &mime)) {
+            continue;
+        }
+        AString codec =
+                AStringPrintf("        <MediaCodec name=\"%s\" type=\"%s\" update=\"true\" >\n",
+                              name.c_str(),
+                              mime.c_str());
+        ret.append(codec);
+        CodecSettings settings = results.valueAt(i);
+        for (size_t i = 0; i < settings.size(); ++i) {
+            // WARNING: we assume all the settings are "Limit". Currently we have only one type
+            // of setting in this case, which is "max-supported-instances".
+            AString setting = AStringPrintf(
+                    "            <Limit name=\"%s\" value=\"%s\" />\n",
+                    settings.keyAt(i).c_str(),
+                    settings.valueAt(i).c_str());
+            ret.append(setting);
+        }
+        ret.append("        </MediaCodec>\n");
+    }
+    return ret;
+}
+
+void exportResultsToXML(
+        const char *fileName,
+        const CodecSettings& global_results,
+        const KeyedVector<AString, CodecSettings>& encoder_results,
+        const KeyedVector<AString, CodecSettings>& decoder_results) {
+    if (global_results.size() == 0 && encoder_results.size() == 0 && decoder_results.size() == 0) {
+        return;
+    }
+
+    AString overrides;
+    overrides.append(getProfilingVersionString());
+    overrides.append("\n");
+    overrides.append("<MediaCodecs>\n");
+    if (global_results.size() > 0) {
+        overrides.append("    <Settings>\n");
+        overrides.append(globalResultsToXml(global_results));
+        overrides.append("    </Settings>\n");
+    }
+    if (encoder_results.size() > 0) {
+        overrides.append("    <Encoders>\n");
+        overrides.append(codecResultsToXml(encoder_results));
+        overrides.append("    </Encoders>\n");
+    }
+    if (decoder_results.size() > 0) {
+        overrides.append("    <Decoders>\n");
+        overrides.append(codecResultsToXml(decoder_results));
+        overrides.append("    </Decoders>\n");
+    }
+    overrides.append("</MediaCodecs>\n");
+
+    FILE *f = fopen(fileName, "wb");
+    if (f == NULL) {
+        ALOGE("Failed to open %s for writing.", fileName);
+        return;
+    }
+    if (fwrite(overrides.c_str(), 1, overrides.size(), f) != overrides.size()) {
+        ALOGE("Failed to write to %s.", fileName);
+    }
+    fclose(f);
+}
+
+}  // namespace android
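profileCodecs() above keys its results as "<codec name> <mime>" and codecResultsToXml() recovers the two halves with splitString(). A standalone sketch of that round trip using std::string in place of AString; splitFirst and the sample key are illustrative only.

    #include <cstdio>
    #include <string>

    // std::string stand-in for the AString-based splitString() in this file:
    // split s at the first occurrence of delimiter into s1 and s2.
    static bool splitFirst(const std::string &s, const std::string &delimiter,
                           std::string *s1, std::string *s2) {
        size_t pos = s.find(delimiter);
        if (pos == std::string::npos) {
            return false;
        }
        *s1 = s.substr(0, pos);
        *s2 = s.substr(pos + delimiter.size());
        return true;
    }

    int main() {
        std::string key = "OMX.qcom.video.decoder.avc video/avc";
        std::string name, mime;
        if (splitFirst(key, " ", &name, &mime)) {
            printf("<MediaCodec name=\"%s\" type=\"%s\" update=\"true\" >\n",
                   name.c_str(), mime.c_str());
        }
        return 0;
    }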
diff --git a/media/libstagefright/MediaCodecListOverrides.h b/media/libstagefright/MediaCodecListOverrides.h
new file mode 100644
index 0000000..d4bb225
--- /dev/null
+++ b/media/libstagefright/MediaCodecListOverrides.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_CODEC_LIST_OVERRIDES_H_
+
+#define MEDIA_CODEC_LIST_OVERRIDES_H_
+
+#include <media/MediaCodecInfo.h>
+#include <media/stagefright/foundation/AString.h>
+
+#include <utils/StrongPointer.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+extern const char *kProfilingVersionString;
+extern const char *kProfilingResults;
+
+struct MediaCodecInfo;
+
+AString getProfilingVersionString();
+
+bool splitString(const AString &s, const AString &delimiter, AString *s1, AString *s2);
+
+// Profile codecs and save the result to the XML file named by kProfilingResults.
+void profileCodecs(const Vector<sp<MediaCodecInfo>> &infos);
+
+// Profile codecs and save the results to global_results, encoder_results and decoder_results.
+void profileCodecs(
+        const Vector<sp<MediaCodecInfo>> &infos,
+        CodecSettings *global_results,
+        KeyedVector<AString, CodecSettings> *encoder_results,
+        KeyedVector<AString, CodecSettings> *decoder_results,
+        bool forceToMeasure = false);
+
+void exportResultsToXML(
+        const char *fileName,
+        const CodecSettings& global_results,
+        const KeyedVector<AString, CodecSettings>& encoder_results,
+        const KeyedVector<AString, CodecSettings>& decoder_results);
+
+}  // namespace android
+
+#endif  // MEDIA_CODEC_LIST_OVERRIDES_H_
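
A minimal usage sketch of the profiling API declared above (illustrative only, not part of this
change). It assumes CodecSettings is the KeyedVector<AString, AString> typedef from
MediaCodecList.h in this tree and that the IMediaCodecList accessors behave as shown:

    #include <media/stagefright/MediaCodecList.h>
    #include <media/stagefright/MediaCodecListOverrides.h>
    #include <utils/KeyedVector.h>
    #include <utils/Vector.h>

    using namespace android;

    // Profile every installed codec and write the measured limits out as the
    // <MediaCodecs> overrides XML built by globalResultsToXml()/codecResultsToXml().
    static void profileAndExport(const char *outputFile) {
        sp<IMediaCodecList> list = MediaCodecList::getInstance();
        if (list == NULL) {
            return;
        }

        Vector<sp<MediaCodecInfo>> infos;
        for (size_t i = 0; i < list->countCodecs(); ++i) {
            infos.push_back(list->getCodecInfo(i));
        }

        CodecSettings global;
        KeyedVector<AString, CodecSettings> encoders, decoders;
        profileCodecs(infos, &global, &encoders, &decoders, false /* forceToMeasure */);

        exportResultsToXML(outputFile, global, encoders, decoders);
    }
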
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index c26e909..7f9f824 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -20,6 +20,7 @@
 
 #include <inttypes.h>
 
+#include <gui/IGraphicBufferConsumer.h>
 #include <gui/IGraphicBufferProducer.h>
 #include <gui/Surface.h>
 #include <media/ICrypto.h>
@@ -29,14 +30,18 @@
 #include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaCodec.h>
-#include <media/stagefright/MetaData.h>
+#include <media/stagefright/MediaCodecSource.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
-#include <media/stagefright/MediaCodecSource.h>
+#include <media/stagefright/MetaData.h>
+#include <media/stagefright/PersistentSurface.h>
 #include <media/stagefright/Utils.h>
 
 namespace android {
 
+const int kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
+const int kDefaultSwVideoEncoderDataSpace = HAL_DATASPACE_BT709;
+
 struct MediaCodecSource::Puller : public AHandler {
     Puller(const sp<MediaSource> &source);
 
@@ -121,7 +126,7 @@
     mLooper->registerHandler(this);
     mNotify = notify;
 
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    sp<AMessage> msg = new AMessage(kWhatStart, this);
     msg->setObject("meta", meta);
     return postSynchronouslyAndReturnError(msg);
 }
@@ -137,19 +142,19 @@
     mSource->stop();
     ALOGV("source (%s) stopped", mIsAudio ? "audio" : "video");
 
-    (new AMessage(kWhatStop, id()))->post();
+    (new AMessage(kWhatStop, this))->post();
 }
 
 void MediaCodecSource::Puller::pause() {
-    (new AMessage(kWhatPause, id()))->post();
+    (new AMessage(kWhatPause, this))->post();
 }
 
 void MediaCodecSource::Puller::resume() {
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 }
 
 void MediaCodecSource::Puller::schedulePull() {
-    sp<AMessage> msg = new AMessage(kWhatPull, id());
+    sp<AMessage> msg = new AMessage(kWhatPull, this);
     msg->setInt32("generation", mPullGeneration);
     msg->post();
 }
@@ -182,7 +187,7 @@
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
             response->postReply(replyID);
             break;
@@ -258,9 +263,10 @@
         const sp<ALooper> &looper,
         const sp<AMessage> &format,
         const sp<MediaSource> &source,
+        const sp<IGraphicBufferConsumer> &consumer,
         uint32_t flags) {
     sp<MediaCodecSource> mediaSource =
-            new MediaCodecSource(looper, format, source, flags);
+            new MediaCodecSource(looper, format, source, consumer, flags);
 
     if (mediaSource->init() == OK) {
         return mediaSource;
@@ -269,13 +275,13 @@
 }
 
 status_t MediaCodecSource::start(MetaData* params) {
-    sp<AMessage> msg = new AMessage(kWhatStart, mReflector->id());
+    sp<AMessage> msg = new AMessage(kWhatStart, mReflector);
     msg->setObject("meta", params);
     return postSynchronouslyAndReturnError(msg);
 }
 
 status_t MediaCodecSource::stop() {
-    sp<AMessage> msg = new AMessage(kWhatStop, mReflector->id());
+    sp<AMessage> msg = new AMessage(kWhatStop, mReflector);
     status_t err = postSynchronouslyAndReturnError(msg);
 
     // mPuller->stop() needs to be done outside MediaCodecSource's looper,
@@ -294,7 +300,7 @@
 }
 
 status_t MediaCodecSource::pause() {
-    (new AMessage(kWhatPause, mReflector->id()))->post();
+    (new AMessage(kWhatPause, mReflector))->post();
     return OK;
 }
 
@@ -328,6 +334,7 @@
         const sp<ALooper> &looper,
         const sp<AMessage> &outputFormat,
         const sp<MediaSource> &source,
+        const sp<IGraphicBufferConsumer> &consumer,
         uint32_t flags)
     : mLooper(looper),
       mOutputFormat(outputFormat),
@@ -337,6 +344,10 @@
       mStarted(false),
       mStopping(false),
       mDoMoreWorkPending(false),
+      mSetEncoderFormat(false),
+      mEncoderFormat(0),
+      mEncoderDataSpace(0),
+      mGraphicBufferConsumer(consumer),
       mFirstSampleTimeUs(-1ll),
       mEncoderReachedEOS(false),
       mErrorCode(OK) {
@@ -399,6 +410,9 @@
 
     ALOGV("output format is '%s'", mOutputFormat->debugString(0).c_str());
 
+    mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, mReflector);
+    mEncoder->setCallback(mEncoderActivityNotify);
+
     status_t err = mEncoder->configure(
                 mOutputFormat,
                 NULL /* nativeWindow */,
@@ -415,16 +429,32 @@
     if (mFlags & FLAG_USE_SURFACE_INPUT) {
         CHECK(mIsVideo);
 
-        err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+        if (mGraphicBufferConsumer != NULL) {
+            // When using persistent surface, we are only interested in the
+            // consumer, but have to use PersistentSurface as a wrapper to
+            // pass consumer over messages (similar to BufferProducerWrapper)
+            err = mEncoder->setInputSurface(
+                    new PersistentSurface(NULL, mGraphicBufferConsumer));
+        } else {
+            err = mEncoder->createInputSurface(&mGraphicBufferProducer);
+        }
 
         if (err != OK) {
             return err;
         }
     }
 
-    mEncoderActivityNotify = new AMessage(
-            kWhatEncoderActivity, mReflector->id());
-    mEncoder->setCallback(mEncoderActivityNotify);
+    sp<AMessage> inputFormat;
+    int32_t usingSwReadOften;
+    mSetEncoderFormat = false;
+    if (mEncoder->getInputFormat(&inputFormat) == OK
+            && inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
+            && usingSwReadOften) {
+        // this is a SW encoder; signal source to allocate SW readable buffers
+        mSetEncoderFormat = true;
+        mEncoderFormat = kDefaultSwVideoEncoderFormat;
+        mEncoderDataSpace = kDefaultSwVideoEncoderDataSpace;
+    }
 
     err = mEncoder->start();
 
@@ -492,7 +522,7 @@
     if (mStopping && mEncoderReachedEOS) {
         ALOGI("encoder (%s) stopped", mIsVideo ? "video" : "audio");
         // posting reply to everyone that's waiting
-        List<uint32_t>::iterator it;
+        List<sp<AReplyToken>>::iterator it;
         for (it = mStopReplyIDQueue.begin();
                 it != mStopReplyIDQueue.end(); it++) {
             (new AMessage)->postReply(*it);
@@ -620,9 +650,17 @@
         resume(startTimeUs);
     } else {
         CHECK(mPuller != NULL);
-        sp<AMessage> notify = new AMessage(
-                kWhatPullerNotify, mReflector->id());
-        err = mPuller->start(params, notify);
+        sp<MetaData> meta = params;
+        if (mSetEncoderFormat) {
+            if (meta == NULL) {
+                meta = new MetaData;
+            }
+            meta->setInt32(kKeyPixelFormat, mEncoderFormat);
+            meta->setInt32(kKeyColorSpace, mEncoderDataSpace);
+        }
+
+        sp<AMessage> notify = new AMessage(kWhatPullerNotify, mReflector);
+        err = mPuller->start(meta.get(), notify);
         if (err != OK) {
             return err;
         }
@@ -684,7 +722,6 @@
             size_t size;
             int64_t timeUs;
             int32_t flags;
-            native_handle_t* handle = NULL;
 
             CHECK(msg->findInt32("index", &index));
             CHECK(msg->findSize("offset", &offset));
@@ -768,7 +805,7 @@
     }
     case kWhatStart:
     {
-        uint32_t replyID;
+        sp<AReplyToken> replyID;
         CHECK(msg->senderAwaitsResponse(&replyID));
 
         sp<RefBase> obj;
@@ -784,7 +821,7 @@
     {
         ALOGI("encoder (%s) stopping", mIsVideo ? "video" : "audio");
 
-        uint32_t replyID;
+        sp<AReplyToken> replyID;
         CHECK(msg->senderAwaitsResponse(&replyID));
 
         if (mEncoderReachedEOS) {
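
The MediaCodecSource hunks above query the encoder's input format for the "using-sw-read-often"
hint and, for software encoders, forward kKeyPixelFormat / kKeyColorSpace to the puller's source
so it allocates CPU-readable buffers. A minimal sketch of how a source could read those hints
from its start() parameters (the helper below is illustrative, not part of this change):

    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Returns true and fills |format| / |dataSpace| when the encoder requested
    // SW-readable buffers via the MetaData hints set in onStart() above
    // (HAL_PIXEL_FORMAT_YCbCr_420_888 / HAL_DATASPACE_BT709 by default).
    static bool getEncoderBufferHints(
            const MetaData *params, int32_t *format, int32_t *dataSpace) {
        return params != NULL
                && params->findInt32(kKeyPixelFormat, format)
                && params->findInt32(kKeyColorSpace, dataSpace);
    }
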
diff --git a/media/libstagefright/MediaDefs.cpp b/media/libstagefright/MediaDefs.cpp
index c48a5ae..2a50692 100644
--- a/media/libstagefright/MediaDefs.cpp
+++ b/media/libstagefright/MediaDefs.cpp
@@ -62,5 +62,6 @@
 const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
 const char *MEDIA_MIMETYPE_TEXT_VTT = "text/vtt";
 const char *MEDIA_MIMETYPE_TEXT_CEA_608 = "text/cea-608";
+const char *MEDIA_MIMETYPE_DATA_TIMED_ID3 = "application/x-id3v4";
 
 }  // namespace android
diff --git a/media/libstagefright/MediaMuxer.cpp b/media/libstagefright/MediaMuxer.cpp
index c7c6f34..b13877d 100644
--- a/media/libstagefright/MediaMuxer.cpp
+++ b/media/libstagefright/MediaMuxer.cpp
@@ -38,21 +38,6 @@
 
 namespace android {
 
-MediaMuxer::MediaMuxer(const char *path, OutputFormat format)
-    : mFormat(format),
-      mState(UNINITIALIZED) {
-    if (format == OUTPUT_FORMAT_MPEG_4) {
-        mWriter = new MPEG4Writer(path);
-    } else if (format == OUTPUT_FORMAT_WEBM) {
-        mWriter = new WebmWriter(path);
-    }
-
-    if (mWriter != NULL) {
-        mFileMeta = new MetaData;
-        mState = INITIALIZED;
-    }
-}
-
 MediaMuxer::MediaMuxer(int fd, OutputFormat format)
     : mFormat(format),
       mState(UNINITIALIZED) {
diff --git a/media/libstagefright/MediaSync.cpp b/media/libstagefright/MediaSync.cpp
new file mode 100644
index 0000000..3a45e25
--- /dev/null
+++ b/media/libstagefright/MediaSync.cpp
@@ -0,0 +1,861 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaSync"
+#include <inttypes.h>
+
+#include <gui/BufferQueue.h>
+#include <gui/IGraphicBufferConsumer.h>
+#include <gui/IGraphicBufferProducer.h>
+
+#include <media/AudioTrack.h>
+#include <media/stagefright/MediaClock.h>
+#include <media/stagefright/MediaSync.h>
+#include <media/stagefright/VideoFrameScheduler.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <ui/GraphicBuffer.h>
+
+// Maximum late time allowed for a video frame to be rendered. When a video
+// frame arrives later than this number, it will be discarded without rendering.
+static const int64_t kMaxAllowedVideoLateTimeUs = 40000ll;
+
+namespace android {
+
+// static
+sp<MediaSync> MediaSync::create() {
+    sp<MediaSync> sync = new MediaSync();
+    sync->mLooper->registerHandler(sync);
+    return sync;
+}
+
+MediaSync::MediaSync()
+      : mIsAbandoned(false),
+        mMutex(),
+        mReleaseCondition(),
+        mNumOutstandingBuffers(0),
+        mUsageFlagsFromOutput(0),
+        mMaxAcquiredBufferCount(1),
+        mReturnPendingInputFrame(false),
+        mNativeSampleRateInHz(0),
+        mNumFramesWritten(0),
+        mHasAudio(false),
+        mNextBufferItemMediaUs(-1),
+        mPlaybackRate(0.0) {
+    mMediaClock = new MediaClock;
+
+    // initialize settings
+    mPlaybackSettings = AUDIO_PLAYBACK_RATE_DEFAULT;
+    mPlaybackSettings.mSpeed = mPlaybackRate;
+
+    mLooper = new ALooper;
+    mLooper->setName("MediaSync");
+    mLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+}
+
+MediaSync::~MediaSync() {
+    if (mInput != NULL) {
+        mInput->consumerDisconnect();
+    }
+    if (mOutput != NULL) {
+        mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+    }
+
+    if (mLooper != NULL) {
+        mLooper->unregisterHandler(id());
+        mLooper->stop();
+    }
+}
+
+status_t MediaSync::setSurface(const sp<IGraphicBufferProducer> &output) {
+    Mutex::Autolock lock(mMutex);
+
+    if (output == mOutput) {
+        return NO_ERROR;  // same output surface.
+    }
+
+    if (output == NULL && mSyncSettings.mSource == AVSYNC_SOURCE_VSYNC) {
+        ALOGE("setSurface: output surface is used as sync source and cannot be removed.");
+        return INVALID_OPERATION;
+    }
+
+    if (output != NULL) {
+        int newUsage = 0;
+        output->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS, &newUsage);
+
+        // Check usage flags only when current output surface has been used to create input surface.
+        if (mOutput != NULL && mInput != NULL) {
+            int ignoredFlags = (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_COMPOSER
+                    | GRALLOC_USAGE_EXTERNAL_DISP);
+            // The new output surface must not add usage flags other than the ignored ones.
+            if ((newUsage & ~(mUsageFlagsFromOutput | ignoredFlags)) != 0) {
+                ALOGE("setSurface: new output surface has new usage flag not used by current one.");
+                return BAD_VALUE;
+            }
+        }
+
+        // Try to connect to new output surface. If failed, current output surface will not
+        // be changed.
+        IGraphicBufferProducer::QueueBufferOutput queueBufferOutput;
+        sp<OutputListener> listener(new OutputListener(this, output));
+        IInterface::asBinder(output)->linkToDeath(listener);
+        status_t status =
+            output->connect(listener,
+                            NATIVE_WINDOW_API_MEDIA,
+                            true /* producerControlledByApp */,
+                            &queueBufferOutput);
+        if (status != NO_ERROR) {
+            ALOGE("setSurface: failed to connect (%d)", status);
+            return status;
+        }
+
+        if (mFrameScheduler == NULL) {
+            mFrameScheduler = new VideoFrameScheduler();
+            mFrameScheduler->init();
+        }
+    }
+
+    if (mOutput != NULL) {
+        mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+        while (!mBuffersSentToOutput.isEmpty()) {
+            returnBufferToInput_l(mBuffersSentToOutput.valueAt(0), Fence::NO_FENCE);
+            mBuffersSentToOutput.removeItemsAt(0);
+        }
+    }
+
+    mOutput = output;
+
+    return NO_ERROR;
+}
+
+// |audioTrack| is used only for querying information.
+status_t MediaSync::setAudioTrack(const sp<AudioTrack> &audioTrack) {
+    Mutex::Autolock lock(mMutex);
+
+    // TODO: support audio track change.
+    if (mAudioTrack != NULL) {
+        ALOGE("setAudioTrack: audioTrack has already been configured.");
+        return INVALID_OPERATION;
+    }
+
+    if (audioTrack == NULL && mSyncSettings.mSource == AVSYNC_SOURCE_AUDIO) {
+        ALOGE("setAudioTrack: audioTrack is used as sync source and cannot be removed.");
+        return INVALID_OPERATION;
+    }
+
+    if (audioTrack != NULL) {
+        // check if audio track supports the playback settings
+        if (mPlaybackSettings.mSpeed != 0.f
+                && audioTrack->setPlaybackRate(mPlaybackSettings) != OK) {
+            ALOGE("playback settings are not supported by the audio track");
+            return INVALID_OPERATION;
+        }
+        uint32_t nativeSampleRateInHz = audioTrack->getOriginalSampleRate();
+        if (nativeSampleRateInHz <= 0) {
+            ALOGE("setAudioTrack: native sample rate should be positive.");
+            return BAD_VALUE;
+        }
+        mAudioTrack = audioTrack;
+        mNativeSampleRateInHz = nativeSampleRateInHz;
+        (void)setPlaybackSettings_l(mPlaybackSettings);
+    }
+    else {
+        mAudioTrack = NULL;
+        mNativeSampleRateInHz = 0;
+    }
+
+    // potentially resync to new source
+    resync_l();
+    return OK;
+}
+
+status_t MediaSync::createInputSurface(
+        sp<IGraphicBufferProducer> *outBufferProducer) {
+    if (outBufferProducer == NULL) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    if (mOutput == NULL) {
+        return NO_INIT;
+    }
+
+    if (mInput != NULL) {
+        return INVALID_OPERATION;
+    }
+
+    sp<IGraphicBufferProducer> bufferProducer;
+    sp<IGraphicBufferConsumer> bufferConsumer;
+    BufferQueue::createBufferQueue(&bufferProducer, &bufferConsumer);
+
+    sp<InputListener> listener(new InputListener(this));
+    IInterface::asBinder(bufferConsumer)->linkToDeath(listener);
+    status_t status =
+        bufferConsumer->consumerConnect(listener, false /* controlledByApp */);
+    if (status == NO_ERROR) {
+        bufferConsumer->setConsumerName(String8("MediaSync"));
+        // propagate usage bits from output surface
+        mUsageFlagsFromOutput = 0;
+        mOutput->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS, &mUsageFlagsFromOutput);
+        bufferConsumer->setConsumerUsageBits(mUsageFlagsFromOutput);
+        *outBufferProducer = bufferProducer;
+        mInput = bufferConsumer;
+
+        // set undequeued buffer count
+        int minUndequeuedBuffers;
+        mOutput->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
+        mMaxAcquiredBufferCount = minUndequeuedBuffers;
+        bufferConsumer->setMaxAcquiredBufferCount(mMaxAcquiredBufferCount);
+    }
+    return status;
+}
+
+void MediaSync::resync_l() {
+    AVSyncSource src = mSyncSettings.mSource;
+    if (src == AVSYNC_SOURCE_DEFAULT) {
+        if (mAudioTrack != NULL) {
+            src = AVSYNC_SOURCE_AUDIO;
+        } else {
+            src = AVSYNC_SOURCE_SYSTEM_CLOCK;
+        }
+    }
+
+    // TODO: resync ourselves to the current clock (e.g. on sync source change)
+    updatePlaybackRate_l(mPlaybackRate);
+}
+
+void MediaSync::updatePlaybackRate_l(float rate) {
+    if (rate > mPlaybackRate) {
+        mNextBufferItemMediaUs = -1;
+    }
+    mPlaybackRate = rate;
+    // TODO: update frame scheduler with this info
+    mMediaClock->setPlaybackRate(rate);
+    onDrainVideo_l();
+}
+
+sp<const MediaClock> MediaSync::getMediaClock() {
+    return mMediaClock;
+}
+
+status_t MediaSync::getPlayTimeForPendingAudioFrames(int64_t *outTimeUs) {
+    Mutex::Autolock lock(mMutex);
+    // The caller should check the playback rate if it doesn't want to receive
+    // a huge number for the play time.
+    if (mPlaybackRate == 0.0f) {
+        *outTimeUs = INT64_MAX;
+        return OK;
+    }
+
+    uint32_t numFramesPlayed = 0;
+    if (mAudioTrack != NULL) {
+        status_t res = mAudioTrack->getPosition(&numFramesPlayed);
+        if (res != OK) {
+            return res;
+        }
+    }
+
+    int64_t numPendingFrames = mNumFramesWritten - numFramesPlayed;
+    if (numPendingFrames < 0) {
+        numPendingFrames = 0;
+        ALOGW("getPlayTimeForPendingAudioFrames: pending frame count is negative.");
+    }
+    double timeUs = numPendingFrames * 1000000.0
+            / (mNativeSampleRateInHz * (double)mPlaybackRate);
+    if (timeUs > (double)INT64_MAX) {
+        // Overflow.
+        *outTimeUs = INT64_MAX;
+        ALOGW("getPlayTimeForPendingAudioFrames: play time for pending audio frames "
+              "is too high, possibly due to super low playback rate(%f)", mPlaybackRate);
+    } else {
+        *outTimeUs = (int64_t)timeUs;
+    }
+
+    return OK;
+}
+
+status_t MediaSync::updateQueuedAudioData(
+        size_t sizeInBytes, int64_t presentationTimeUs) {
+    if (sizeInBytes == 0) {
+        return OK;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    if (mAudioTrack == NULL) {
+        ALOGW("updateQueuedAudioData: audioTrack has NOT been configured.");
+        return INVALID_OPERATION;
+    }
+
+    int64_t numFrames = sizeInBytes / mAudioTrack->frameSize();
+    int64_t maxMediaTimeUs = presentationTimeUs
+            + getDurationIfPlayedAtNativeSampleRate_l(numFrames);
+
+    int64_t nowUs = ALooper::GetNowUs();
+    int64_t nowMediaUs = presentationTimeUs
+            - getDurationIfPlayedAtNativeSampleRate_l(mNumFramesWritten)
+            + getPlayedOutAudioDurationMedia_l(nowUs);
+
+    mNumFramesWritten += numFrames;
+
+    int64_t oldRealTime = -1;
+    if (mNextBufferItemMediaUs != -1) {
+        oldRealTime = getRealTime(mNextBufferItemMediaUs, nowUs);
+    }
+
+    mMediaClock->updateAnchor(nowMediaUs, nowUs, maxMediaTimeUs);
+    mHasAudio = true;
+
+    if (oldRealTime != -1) {
+        int64_t newRealTime = getRealTime(mNextBufferItemMediaUs, nowUs);
+        if (newRealTime >= oldRealTime) {
+            return OK;
+        }
+    }
+
+    mNextBufferItemMediaUs = -1;
+    onDrainVideo_l();
+    return OK;
+}
+
+void MediaSync::setName(const AString &name) {
+    Mutex::Autolock lock(mMutex);
+    mInput->setConsumerName(String8(name.c_str()));
+}
+
+void MediaSync::flush() {
+    Mutex::Autolock lock(mMutex);
+    if (mFrameScheduler != NULL) {
+        mFrameScheduler->restart();
+    }
+    while (!mBufferItems.empty()) {
+        BufferItem *bufferItem = &*mBufferItems.begin();
+        returnBufferToInput_l(bufferItem->mGraphicBuffer, bufferItem->mFence);
+        mBufferItems.erase(mBufferItems.begin());
+    }
+    mNextBufferItemMediaUs = -1;
+    mNumFramesWritten = 0;
+    mReturnPendingInputFrame = true;
+    mReleaseCondition.signal();
+    mMediaClock->clearAnchor();
+}
+
+status_t MediaSync::setVideoFrameRateHint(float rate) {
+    Mutex::Autolock lock(mMutex);
+    if (rate < 0.f) {
+        return BAD_VALUE;
+    }
+    if (mFrameScheduler != NULL) {
+        mFrameScheduler->init(rate);
+    }
+    return OK;
+}
+
+float MediaSync::getVideoFrameRate() {
+    Mutex::Autolock lock(mMutex);
+    if (mFrameScheduler != NULL) {
+        float fps = mFrameScheduler->getFrameRate();
+        if (fps > 0.f) {
+            return fps;
+        }
+    }
+
+    // we don't have or know the frame rate
+    return -1.f;
+}
+
+status_t MediaSync::setSyncSettings(const AVSyncSettings &syncSettings) {
+    // validate settings
+    if (syncSettings.mSource >= AVSYNC_SOURCE_MAX
+            || syncSettings.mAudioAdjustMode >= AVSYNC_AUDIO_ADJUST_MODE_MAX
+            || syncSettings.mTolerance < 0.f
+            || syncSettings.mTolerance >= AVSYNC_TOLERANCE_MAX) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock lock(mMutex);
+
+    // verify that we have the sync source
+    switch (syncSettings.mSource) {
+        case AVSYNC_SOURCE_AUDIO:
+            if (mAudioTrack == NULL) {
+                ALOGE("setSyncSettings: audio sync source requires an audio track");
+                return BAD_VALUE;
+            }
+            break;
+        case AVSYNC_SOURCE_VSYNC:
+            if (mOutput == NULL) {
+                ALOGE("setSyncSettings: vsync sync source requires an output surface");
+                return BAD_VALUE;
+            }
+            break;
+        default:
+            break;
+    }
+
+    mSyncSettings = syncSettings;
+    resync_l();
+    return OK;
+}
+
+void MediaSync::getSyncSettings(AVSyncSettings *syncSettings) {
+    Mutex::Autolock lock(mMutex);
+    *syncSettings = mSyncSettings;
+}
+
+status_t MediaSync::setPlaybackSettings(const AudioPlaybackRate &rate) {
+    Mutex::Autolock lock(mMutex);
+
+    status_t err = setPlaybackSettings_l(rate);
+    if (err == OK) {
+        // TODO: adjust rate if using VSYNC as source
+        updatePlaybackRate_l(rate.mSpeed);
+    }
+    return err;
+}
+
+status_t MediaSync::setPlaybackSettings_l(const AudioPlaybackRate &rate) {
+    if (rate.mSpeed < 0.f || rate.mPitch < 0.f) {
+        // We don't validate other audio settings.
+        // They will be validated when/if audiotrack is set.
+        return BAD_VALUE;
+    }
+
+    if (mAudioTrack != NULL) {
+        if (rate.mSpeed == 0.f) {
+            mAudioTrack->pause();
+        } else {
+            status_t err = mAudioTrack->setPlaybackRate(rate);
+            if (err != OK) {
+                return BAD_VALUE;
+            }
+
+            // ignore errors
+            (void)mAudioTrack->start();
+        }
+    }
+    mPlaybackSettings = rate;
+    return OK;
+}
+
+void MediaSync::getPlaybackSettings(AudioPlaybackRate *rate) {
+    Mutex::Autolock lock(mMutex);
+    *rate = mPlaybackSettings;
+}
+
+int64_t MediaSync::getRealTime(int64_t mediaTimeUs, int64_t nowUs) {
+    int64_t realUs;
+    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
+        // If we failed to get the current position, e.g. because the audio
+        // clock is not ready, just play out video immediately without delay.
+        return nowUs;
+    }
+    return realUs;
+}
+
+int64_t MediaSync::getDurationIfPlayedAtNativeSampleRate_l(int64_t numFrames) {
+    return (numFrames * 1000000LL / mNativeSampleRateInHz);
+}
+
+int64_t MediaSync::getPlayedOutAudioDurationMedia_l(int64_t nowUs) {
+    CHECK(mAudioTrack != NULL);
+
+    uint32_t numFramesPlayed;
+    int64_t numFramesPlayedAt;
+    AudioTimestamp ts;
+    static const int64_t kStaleTimestamp100ms = 100000;
+
+    status_t res = mAudioTrack->getTimestamp(ts);
+    if (res == OK) {
+        // case 1: mixing audio tracks.
+        numFramesPlayed = ts.mPosition;
+        numFramesPlayedAt =
+            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
+        const int64_t timestampAge = nowUs - numFramesPlayedAt;
+        if (timestampAge > kStaleTimestamp100ms) {
+            // This is an audio FIXME.
+            // getTimestamp returns a timestamp which may come from audio
+            // mixing threads. After pausing, the MixerThread may go idle,
+            // thus the mTime estimate may become stale. Assuming that the
+            // MixerThread runs 20ms, with FastMixer at 5ms, the max latency
+            // should be about 25ms with an average around 12ms (to be
+            // verified). For safety we use 100ms.
+            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) "
+                  "numFramesPlayedAt(%lld)",
+                  (long long)nowUs, (long long)numFramesPlayedAt);
+            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
+        }
+        //ALOGD("getTimestamp: OK %d %lld",
+        //      numFramesPlayed, (long long)numFramesPlayedAt);
+    } else if (res == WOULD_BLOCK) {
+        // case 2: transitory state on start of a new track
+        numFramesPlayed = 0;
+        numFramesPlayedAt = nowUs;
+        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
+        //      numFramesPlayed, (long long)numFramesPlayedAt);
+    } else {
+        // case 3: transitory at new track or audio fast tracks.
+        res = mAudioTrack->getPosition(&numFramesPlayed);
+        CHECK_EQ(res, (status_t)OK);
+        numFramesPlayedAt = nowUs;
+        numFramesPlayedAt += 1000LL * mAudioTrack->latency() / 2; /* XXX */
+        //ALOGD("getPosition: %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
+    }
+
+    // Can't be negative until 12.4 hrs, test.
+    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);
+    int64_t durationUs =
+        getDurationIfPlayedAtNativeSampleRate_l(numFramesPlayed)
+            + nowUs - numFramesPlayedAt;
+    if (durationUs < 0) {
+        // Occurs when numFramesPlayed position is very small and the following:
+        // (1) In case 1, the time nowUs is computed before getTimestamp() is
+        //     called and numFramesPlayedAt is greater than nowUs by time more
+        //     than numFramesPlayed.
+        // (2) In case 3, using getPosition and adding mAudioTrack->latency()
+        //     to numFramesPlayedAt, by a time amount greater than
+        //     numFramesPlayed.
+        //
+        // Both of these are transitory conditions.
+        ALOGV("getPlayedOutAudioDurationMedia_l: negative duration %lld "
+              "set to zero", (long long)durationUs);
+        durationUs = 0;
+    }
+    ALOGV("getPlayedOutAudioDurationMedia_l(%lld) nowUs(%lld) frames(%u) "
+          "framesAt(%lld)",
+          (long long)durationUs, (long long)nowUs, numFramesPlayed,
+          (long long)numFramesPlayedAt);
+    return durationUs;
+}
+
+void MediaSync::onDrainVideo_l() {
+    if (!isPlaying()) {
+        return;
+    }
+
+    while (!mBufferItems.empty()) {
+        int64_t nowUs = ALooper::GetNowUs();
+        BufferItem *bufferItem = &*mBufferItems.begin();
+        int64_t itemMediaUs = bufferItem->mTimestamp / 1000;
+        int64_t itemRealUs = getRealTime(itemMediaUs, nowUs);
+
+        // adjust video frame PTS based on vsync
+        itemRealUs = mFrameScheduler->schedule(itemRealUs * 1000) / 1000;
+        int64_t twoVsyncsUs = 2 * (mFrameScheduler->getVsyncPeriod() / 1000);
+
+        // post 2 display refreshes before rendering is due
+        if (itemRealUs <= nowUs + twoVsyncsUs) {
+            ALOGV("adjusting PTS from %lld to %lld",
+                    (long long)bufferItem->mTimestamp / 1000, (long long)itemRealUs);
+            bufferItem->mTimestamp = itemRealUs * 1000;
+            bufferItem->mIsAutoTimestamp = false;
+
+            if (mHasAudio) {
+                if (nowUs - itemRealUs <= kMaxAllowedVideoLateTimeUs) {
+                    renderOneBufferItem_l(*bufferItem);
+                } else {
+                    // too late.
+                    returnBufferToInput_l(
+                            bufferItem->mGraphicBuffer, bufferItem->mFence);
+                    mFrameScheduler->restart();
+                }
+            } else {
+                // always render video buffer in video-only mode.
+                renderOneBufferItem_l(*bufferItem);
+
+                // smooth out videos >= 10fps
+                mMediaClock->updateAnchor(
+                        itemMediaUs, nowUs, itemMediaUs + 100000);
+            }
+
+            mBufferItems.erase(mBufferItems.begin());
+            mNextBufferItemMediaUs = -1;
+        } else {
+            if (mNextBufferItemMediaUs == -1
+                    || mNextBufferItemMediaUs > itemMediaUs) {
+                sp<AMessage> msg = new AMessage(kWhatDrainVideo, this);
+                msg->post(itemRealUs - nowUs - twoVsyncsUs);
+                mNextBufferItemMediaUs = itemMediaUs;
+            }
+            break;
+        }
+    }
+}
+
+void MediaSync::onFrameAvailableFromInput() {
+    Mutex::Autolock lock(mMutex);
+
+    const static nsecs_t kAcquireWaitTimeout = 2000000000; // 2 seconds
+
+    mReturnPendingInputFrame = false;
+
+    // If there are too many outstanding buffers, wait until a buffer is
+    // released back to the input in onBufferReleased.
+    // NOTE: BufferQueue allows dequeuing maxAcquiredBufferCount + 1 buffers
+    while (mNumOutstandingBuffers > mMaxAcquiredBufferCount
+            && !mIsAbandoned && !mReturnPendingInputFrame) {
+        if (mReleaseCondition.waitRelative(mMutex, kAcquireWaitTimeout) != OK) {
+            ALOGI_IF(mPlaybackRate != 0.f, "still waiting to release a buffer before acquire");
+        }
+
+        // If the sync is abandoned while we are waiting, the release
+        // condition variable will be broadcast, and we should just return
+        // without attempting to do anything more (since the input queue will
+        // also be abandoned).
+        if (mIsAbandoned) {
+            return;
+        }
+    }
+
+    // Acquire and detach the buffer from the input.
+    BufferItem bufferItem;
+    status_t status = mInput->acquireBuffer(&bufferItem, 0 /* presentWhen */);
+    if (status != NO_ERROR) {
+        ALOGE("acquiring buffer from input failed (%d)", status);
+        return;
+    }
+    ++mNumOutstandingBuffers;
+
+    ALOGV("acquired buffer %#llx from input", (long long)bufferItem.mGraphicBuffer->getId());
+
+    status = mInput->detachBuffer(bufferItem.mBuf);
+    if (status != NO_ERROR) {
+        ALOGE("detaching buffer from input failed (%d)", status);
+        if (status == NO_INIT) {
+            // If the input has been abandoned, move on.
+            onAbandoned_l(true /* isInput */);
+        }
+        return;
+    }
+
+    if (mBuffersFromInput.indexOfKey(bufferItem.mGraphicBuffer->getId()) >= 0) {
+        // Something is wrong since this buffer should be in our hands, bail.
+        ALOGE("received buffer multiple times from input");
+        mInput->consumerDisconnect();
+        onAbandoned_l(true /* isInput */);
+        return;
+    }
+    mBuffersFromInput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
+
+    // If flush happened while waiting for a buffer to be released, simply return it
+    // TRICKY: do it here after it is detached so that we don't have to cache mGraphicBuffer.
+    if (mReturnPendingInputFrame) {
+        mReturnPendingInputFrame = false;
+        returnBufferToInput_l(bufferItem.mGraphicBuffer, bufferItem.mFence);
+        return;
+    }
+
+    mBufferItems.push_back(bufferItem);
+
+    if (mBufferItems.size() == 1) {
+        onDrainVideo_l();
+    }
+}
+
+void MediaSync::renderOneBufferItem_l(const BufferItem &bufferItem) {
+    IGraphicBufferProducer::QueueBufferInput queueInput(
+            bufferItem.mTimestamp,
+            bufferItem.mIsAutoTimestamp,
+            bufferItem.mDataSpace,
+            bufferItem.mCrop,
+            static_cast<int32_t>(bufferItem.mScalingMode),
+            bufferItem.mTransform,
+            bufferItem.mIsDroppable,
+            bufferItem.mFence);
+
+    // Attach and queue the buffer to the output.
+    int slot;
+    mOutput->setGenerationNumber(bufferItem.mGraphicBuffer->getGenerationNumber());
+    status_t status = mOutput->attachBuffer(&slot, bufferItem.mGraphicBuffer);
+    ALOGE_IF(status != NO_ERROR, "attaching buffer to output failed (%d)", status);
+    if (status == NO_ERROR) {
+        IGraphicBufferProducer::QueueBufferOutput queueOutput;
+        status = mOutput->queueBuffer(slot, queueInput, &queueOutput);
+        ALOGE_IF(status != NO_ERROR, "queueing buffer to output failed (%d)", status);
+    }
+
+    if (status != NO_ERROR) {
+        returnBufferToInput_l(bufferItem.mGraphicBuffer, bufferItem.mFence);
+        if (status == NO_INIT) {
+            // If the output has been abandoned, move on.
+            onAbandoned_l(false /* isInput */);
+        }
+        return;
+    }
+
+    if (mBuffersSentToOutput.indexOfKey(bufferItem.mGraphicBuffer->getId()) >= 0) {
+        // Something is wrong since this buffer should be held by output now, bail.
+        mInput->consumerDisconnect();
+        onAbandoned_l(true /* isInput */);
+        return;
+    }
+    mBuffersSentToOutput.add(bufferItem.mGraphicBuffer->getId(), bufferItem.mGraphicBuffer);
+
+    ALOGV("queued buffer %#llx to output", (long long)bufferItem.mGraphicBuffer->getId());
+}
+
+void MediaSync::onBufferReleasedByOutput(sp<IGraphicBufferProducer> &output) {
+    Mutex::Autolock lock(mMutex);
+
+    if (output != mOutput) {
+        return;  // This is not the current output, ignore.
+    }
+
+    sp<GraphicBuffer> buffer;
+    sp<Fence> fence;
+    status_t status = mOutput->detachNextBuffer(&buffer, &fence);
+    ALOGE_IF(status != NO_ERROR, "detaching buffer from output failed (%d)", status);
+
+    if (status == NO_INIT) {
+        // If the output has been abandoned, we can't do anything else,
+        // since buffer is invalid.
+        onAbandoned_l(false /* isInput */);
+        return;
+    }
+
+    ALOGV("detached buffer %#llx from output", (long long)buffer->getId());
+
+    // If we've been abandoned, we can't return the buffer to the input, so just
+    // move on.
+    if (mIsAbandoned) {
+        return;
+    }
+
+    ssize_t ix = mBuffersSentToOutput.indexOfKey(buffer->getId());
+    if (ix < 0) {
+        // The buffer is unknown, maybe leftover, ignore.
+        return;
+    }
+    mBuffersSentToOutput.removeItemsAt(ix);
+
+    returnBufferToInput_l(buffer, fence);
+}
+
+void MediaSync::returnBufferToInput_l(
+        const sp<GraphicBuffer> &buffer, const sp<Fence> &fence) {
+    ssize_t ix = mBuffersFromInput.indexOfKey(buffer->getId());
+    if (ix < 0) {
+        // The buffer is unknown, something is wrong, bail.
+        ALOGE("output returned unknown buffer");
+        mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+        onAbandoned_l(false /* isInput */);
+        return;
+    }
+    sp<GraphicBuffer> oldBuffer = mBuffersFromInput.valueAt(ix);
+    mBuffersFromInput.removeItemsAt(ix);
+
+    // Attach and release the buffer back to the input.
+    int consumerSlot;
+    status_t status = mInput->attachBuffer(&consumerSlot, oldBuffer);
+    ALOGE_IF(status != NO_ERROR, "attaching buffer to input failed (%d)", status);
+    if (status == NO_ERROR) {
+        status = mInput->releaseBuffer(consumerSlot, 0 /* frameNumber */,
+                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
+        ALOGE_IF(status != NO_ERROR, "releasing buffer to input failed (%d)", status);
+    }
+
+    // Notify any waiting onFrameAvailable calls.
+    --mNumOutstandingBuffers;
+    mReleaseCondition.signal();
+
+    if (status == NO_ERROR) {
+        ALOGV("released buffer %#llx to input", (long long)oldBuffer->getId());
+    }
+}
+
+void MediaSync::onAbandoned_l(bool isInput) {
+    ALOGE("the %s has abandoned me", (isInput ? "input" : "output"));
+    if (!mIsAbandoned) {
+        if (isInput) {
+            mOutput->disconnect(NATIVE_WINDOW_API_MEDIA);
+        } else {
+            mInput->consumerDisconnect();
+        }
+        mIsAbandoned = true;
+    }
+    mReleaseCondition.broadcast();
+}
+
+void MediaSync::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatDrainVideo:
+        {
+            Mutex::Autolock lock(mMutex);
+            if (mNextBufferItemMediaUs != -1) {
+                int64_t nowUs = ALooper::GetNowUs();
+                int64_t itemRealUs = getRealTime(mNextBufferItemMediaUs, nowUs);
+
+                // The message could arrive earlier than expected due to
+                // various reasons, e.g., media clock has been changed because
+                // of new anchor time or playback rate. In such cases, the
+                // message needs to be re-posted.
+                if (itemRealUs > nowUs) {
+                    msg->post(itemRealUs - nowUs);
+                    break;
+                }
+            }
+
+            onDrainVideo_l();
+            break;
+        }
+
+        default:
+            TRESPASS();
+            break;
+    }
+}
+
+MediaSync::InputListener::InputListener(const sp<MediaSync> &sync)
+      : mSync(sync) {}
+
+MediaSync::InputListener::~InputListener() {}
+
+void MediaSync::InputListener::onFrameAvailable(const BufferItem &/* item */) {
+    mSync->onFrameAvailableFromInput();
+}
+
+// We don't care about sideband streams, since we won't relay them.
+void MediaSync::InputListener::onSidebandStreamChanged() {
+    ALOGE("onSidebandStreamChanged: got sideband stream unexpectedly.");
+}
+
+
+void MediaSync::InputListener::binderDied(const wp<IBinder> &/* who */) {
+    Mutex::Autolock lock(mSync->mMutex);
+    mSync->onAbandoned_l(true /* isInput */);
+}
+
+MediaSync::OutputListener::OutputListener(const sp<MediaSync> &sync,
+        const sp<IGraphicBufferProducer> &output)
+      : mSync(sync),
+        mOutput(output) {}
+
+MediaSync::OutputListener::~OutputListener() {}
+
+void MediaSync::OutputListener::onBufferReleased() {
+    mSync->onBufferReleasedByOutput(mOutput);
+}
+
+void MediaSync::OutputListener::binderDied(const wp<IBinder> &/* who */) {
+    Mutex::Autolock lock(mSync->mMutex);
+    mSync->onAbandoned_l(false /* isInput */);
+}
+
+} // namespace android
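
A worked example of the pending-audio arithmetic in MediaSync::getPlayTimeForPendingAudioFrames()
above, using made-up numbers (illustrative only):

    #include <cstdint>
    #include <cstdio>

    int main() {
        int64_t numPendingFrames = 4800;        // frames written but not yet played
        uint32_t nativeSampleRateInHz = 48000;  // AudioTrack::getOriginalSampleRate()
        float playbackRate = 0.5f;              // mPlaybackSettings.mSpeed

        // Same formula as in getPlayTimeForPendingAudioFrames(): at half speed,
        // 4800 pending frames of 48 kHz audio take 200 ms to drain.
        double timeUs = numPendingFrames * 1000000.0
                / (nativeSampleRateInHz * (double)playbackRate);
        printf("pending audio drains in %.0f us\n", timeUs);  // prints 200000 us
        return 0;
    }
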
diff --git a/media/libstagefright/MidiExtractor.cpp b/media/libstagefright/MidiExtractor.cpp
index 66fab77..f6b8c84 100644
--- a/media/libstagefright/MidiExtractor.cpp
+++ b/media/libstagefright/MidiExtractor.cpp
@@ -217,7 +217,7 @@
 }
 
 status_t MidiEngine::seekTo(int64_t positionUs) {
-    ALOGV("seekTo %lld", positionUs);
+    ALOGV("seekTo %lld", (long long)positionUs);
     EAS_RESULT result = EAS_Locate(mEasData, mEasHandle, positionUs / 1000, false);
     return result == EAS_SUCCESS ? OK : UNKNOWN_ERROR;
 }
diff --git a/media/libstagefright/NuCachedSource2.cpp b/media/libstagefright/NuCachedSource2.cpp
index 7d7d631..f82636b 100644
--- a/media/libstagefright/NuCachedSource2.cpp
+++ b/media/libstagefright/NuCachedSource2.cpp
@@ -226,7 +226,7 @@
     mLooper->start(false /* runOnCallingThread */, true /* canCallJava */);
 
     Mutex::Autolock autoLock(mLock);
-    (new AMessage(kWhatFetchMore, mReflector->id()))->post();
+    (new AMessage(kWhatFetchMore, mReflector))->post();
 }
 
 NuCachedSource2::~NuCachedSource2() {
@@ -433,7 +433,7 @@
         delayUs = 100000ll;
     }
 
-    (new AMessage(kWhatFetchMore, mReflector->id()))->post(delayUs);
+    (new AMessage(kWhatFetchMore, mReflector))->post(delayUs);
 }
 
 void NuCachedSource2::onRead(const sp<AMessage> &msg) {
@@ -503,7 +503,7 @@
 ssize_t NuCachedSource2::readAt(off64_t offset, void *data, size_t size) {
     Mutex::Autolock autoSerializer(mSerializer);
 
-    ALOGV("readAt offset %lld, size %zu", offset, size);
+    ALOGV("readAt offset %lld, size %zu", (long long)offset, size);
 
     Mutex::Autolock autoLock(mLock);
     if (mDisconnecting) {
@@ -522,7 +522,7 @@
         return size;
     }
 
-    sp<AMessage> msg = new AMessage(kWhatRead, mReflector->id());
+    sp<AMessage> msg = new AMessage(kWhatRead, mReflector);
     msg->setInt64("offset", offset);
     msg->setPointer("data", data);
     msg->setSize("size", size);
@@ -579,10 +579,17 @@
 ssize_t NuCachedSource2::readInternal(off64_t offset, void *data, size_t size) {
     CHECK_LE(size, (size_t)mHighwaterThresholdBytes);
 
-    ALOGV("readInternal offset %lld size %zu", offset, size);
+    ALOGV("readInternal offset %lld size %zu", (long long)offset, size);
 
     Mutex::Autolock autoLock(mLock);
 
+    // If we're disconnecting, return EOS and don't access *data pointer.
+    // data could be on the stack of the caller to NuCachedSource2::readAt(),
+    // which may have exited already.
+    if (mDisconnecting) {
+        return ERROR_END_OF_STREAM;
+    }
+
     if (!mFetching) {
         mLastAccessPos = offset;
         restartPrefetcherIfNecessary_l(
@@ -640,7 +647,7 @@
         return OK;
     }
 
-    ALOGI("new range: offset= %lld", offset);
+    ALOGI("new range: offset= %lld", (long long)offset);
 
     mCacheOffset = offset;
 
@@ -719,10 +726,10 @@
         mKeepAliveIntervalUs = kDefaultKeepAliveIntervalUs;
     }
 
-    ALOGV("lowwater = %zu bytes, highwater = %zu bytes, keepalive = %" PRId64 " us",
+    ALOGV("lowwater = %zu bytes, highwater = %zu bytes, keepalive = %lld us",
          mLowwaterThresholdBytes,
          mHighwaterThresholdBytes,
-         mKeepAliveIntervalUs);
+         (long long)mKeepAliveIntervalUs);
 }
 
 // static
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 230c1f7..e69890d 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -72,7 +72,7 @@
             node_id node, OMX_STATETYPE* state);
 
     virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable);
+            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type);
 
     virtual status_t prepareForAdaptivePlayback(
             node_id node, OMX_U32 port_index, OMX_BOOL enable,
@@ -90,7 +90,7 @@
 
     virtual status_t useBuffer(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer);
+            buffer_id *buffer, OMX_U32 allottedSize);
 
     virtual status_t useGraphicBuffer(
             node_id node, OMX_U32 port_index,
@@ -102,7 +102,15 @@
 
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
-            sp<IGraphicBufferProducer> *bufferProducer);
+            sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
+
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer);
+
+    virtual status_t setInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type);
 
     virtual status_t signalEndOfInputStream(node_id node);
 
@@ -112,18 +120,18 @@
 
     virtual status_t allocateBufferWithBackup(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer);
+            buffer_id *buffer, OMX_U32 allottedSize);
 
     virtual status_t freeBuffer(
             node_id node, OMX_U32 port_index, buffer_id buffer);
 
-    virtual status_t fillBuffer(node_id node, buffer_id buffer);
+    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd);
 
     virtual status_t emptyBuffer(
             node_id node,
             buffer_id buffer,
             OMX_U32 range_offset, OMX_U32 range_length,
-            OMX_U32 flags, OMX_TICKS timestamp);
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
     virtual status_t getExtensionIndex(
             node_id node,
@@ -284,8 +292,8 @@
 }
 
 status_t MuxOMX::storeMetaDataInBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable) {
-    return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable);
+        node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
+    return getOMX(node)->storeMetaDataInBuffers(node, port_index, enable, type);
 }
 
 status_t MuxOMX::prepareForAdaptivePlayback(
@@ -314,8 +322,8 @@
 
 status_t MuxOMX::useBuffer(
         node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer) {
-    return getOMX(node)->useBuffer(node, port_index, params, buffer);
+        buffer_id *buffer, OMX_U32 allottedSize) {
+    return getOMX(node)->useBuffer(node, port_index, params, buffer, allottedSize);
 }
 
 status_t MuxOMX::useGraphicBuffer(
@@ -334,12 +342,26 @@
 
 status_t MuxOMX::createInputSurface(
         node_id node, OMX_U32 port_index,
-        sp<IGraphicBufferProducer> *bufferProducer) {
+        sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
     status_t err = getOMX(node)->createInputSurface(
-            node, port_index, bufferProducer);
+            node, port_index, bufferProducer, type);
     return err;
 }
 
+status_t MuxOMX::createPersistentInputSurface(
+        sp<IGraphicBufferProducer> *bufferProducer,
+        sp<IGraphicBufferConsumer> *bufferConsumer) {
+    // TODO: local or remote? Always use remote for now
+    return mRemoteOMX->createPersistentInputSurface(
+            bufferProducer, bufferConsumer);
+}
+
+status_t MuxOMX::setInputSurface(
+        node_id node, OMX_U32 port_index,
+        const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+    return getOMX(node)->setInputSurface(node, port_index, bufferConsumer, type);
+}
+
 status_t MuxOMX::signalEndOfInputStream(node_id node) {
     return getOMX(node)->signalEndOfInputStream(node);
 }
@@ -353,9 +375,9 @@
 
 status_t MuxOMX::allocateBufferWithBackup(
         node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer) {
+        buffer_id *buffer, OMX_U32 allottedSize) {
     return getOMX(node)->allocateBufferWithBackup(
-            node, port_index, params, buffer);
+            node, port_index, params, buffer, allottedSize);
 }
 
 status_t MuxOMX::freeBuffer(
@@ -363,17 +385,17 @@
     return getOMX(node)->freeBuffer(node, port_index, buffer);
 }
 
-status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer) {
-    return getOMX(node)->fillBuffer(node, buffer);
+status_t MuxOMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
+    return getOMX(node)->fillBuffer(node, buffer, fenceFd);
 }
 
 status_t MuxOMX::emptyBuffer(
         node_id node,
         buffer_id buffer,
         OMX_U32 range_offset, OMX_U32 range_length,
-        OMX_U32 flags, OMX_TICKS timestamp) {
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
     return getOMX(node)->emptyBuffer(
-            node, buffer, range_offset, range_length, flags, timestamp);
+            node, buffer, range_offset, range_length, flags, timestamp, fenceFd);
 }
 
 status_t MuxOMX::getExtensionIndex(
@@ -400,10 +422,16 @@
     sp<IBinder> binder = sm->getService(String16("media.player"));
     sp<IMediaPlayerService> service = interface_cast<IMediaPlayerService>(binder);
 
-    CHECK(service.get() != NULL);
+    if (service.get() == NULL) {
+        ALOGE("Cannot obtain IMediaPlayerService");
+        return NO_INIT;
+    }
 
     mOMX = service->getOMX();
-    CHECK(mOMX.get() != NULL);
+    if (mOMX.get() == NULL) {
+        ALOGE("Cannot obtain IOMX");
+        return NO_INIT;
+    }
 
     if (!mOMX->livesLocally(0 /* node */, getpid())) {
         ALOGI("Using client-side OMX mux.");
diff --git a/media/libstagefright/OMXCodec.cpp b/media/libstagefright/OMXCodec.cpp
index f41bbc2..b1dde80 100644
--- a/media/libstagefright/OMXCodec.cpp
+++ b/media/libstagefright/OMXCodec.cpp
@@ -43,6 +43,7 @@
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
+#include <media/stagefright/SurfaceUtils.h>
 #include <media/stagefright/Utils.h>
 #include <media/stagefright/SkipCutBuffer.h>
 #include <utils/Vector.h>
@@ -115,12 +116,15 @@
     }
 
     // from IOMXObserver
-    virtual void onMessage(const omx_message &msg) {
+    virtual void onMessages(const std::list<omx_message> &messages) {
         sp<OMXCodec> codec = mTarget.promote();
 
         if (codec.get() != NULL) {
             Mutex::Autolock autoLock(codec->mLock);
-            codec->on_message(msg);
+            for (std::list<omx_message>::const_iterator it = messages.cbegin();
+                  it != messages.cend(); ++it) {
+                codec->on_message(*it);
+            }
             codec.clear();
         }
     }
@@ -1057,7 +1061,7 @@
         const sp<MetaData>& meta,
         const CodecProfileLevel& defaultProfileLevel,
         CodecProfileLevel &profileLevel) {
-    CODEC_LOGV("Default profile: %ld, level %ld",
+    CODEC_LOGV("Default profile: %u, level #x%x",
             defaultProfileLevel.mProfile, defaultProfileLevel.mLevel);
 
     // Are the default profile and level overwritten?
@@ -1283,7 +1287,7 @@
     success = success && meta->findInt32(kKeyHeight, &height);
     CHECK(success);
 
-    CODEC_LOGV("setVideoOutputFormat width=%ld, height=%ld", width, height);
+    CODEC_LOGV("setVideoOutputFormat width=%d, height=%d", width, height);
 
     OMX_VIDEO_CODINGTYPE compressionFormat = OMX_VIDEO_CodingUnused;
     if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime)) {
@@ -1650,7 +1654,7 @@
         return err;
     }
 
-    CODEC_LOGV("allocating %lu buffers of size %lu on %s port",
+    CODEC_LOGV("allocating %u buffers of size %u on %s port",
             def.nBufferCountActual, def.nBufferSize,
             portIndex == kPortIndexInput ? "input" : "output");
 
@@ -1680,7 +1684,7 @@
                         &info.mData);
             } else {
                 err = mOMX->allocateBufferWithBackup(
-                        mNode, portIndex, mem, &buffer);
+                        mNode, portIndex, mem, &buffer, mem->size());
             }
         } else if (portIndex == kPortIndexOutput
                 && (mQuirks & kRequiresAllocateBufferOnOutputPorts)) {
@@ -1692,10 +1696,10 @@
                         &info.mData);
             } else {
                 err = mOMX->allocateBufferWithBackup(
-                        mNode, portIndex, mem, &buffer);
+                        mNode, portIndex, mem, &buffer, mem->size());
             }
         } else {
-            err = mOMX->useBuffer(mNode, portIndex, mem, &buffer);
+            err = mOMX->useBuffer(mNode, portIndex, mem, &buffer, mem->size());
         }
 
         if (err != OK) {
@@ -1726,7 +1730,7 @@
 
         mPortBuffers[portIndex].push(info);
 
-        CODEC_LOGV("allocated buffer %p on %s port", buffer,
+        CODEC_LOGV("allocated buffer %u on %s port", buffer,
              portIndex == kPortIndexInput ? "input" : "output");
     }
 
@@ -1748,7 +1752,7 @@
                 if (mSkipCutBuffer != NULL) {
                     size_t prevbuffersize = mSkipCutBuffer->size();
                     if (prevbuffersize != 0) {
-                        ALOGW("Replacing SkipCutBuffer holding %d bytes", prevbuffersize);
+                        ALOGW("Replacing SkipCutBuffer holding %zu bytes", prevbuffersize);
                     }
                 }
                 mSkipCutBuffer = new SkipCutBuffer(delay * frameSize, padding * frameSize);
@@ -1786,35 +1790,6 @@
     return OK;
 }
 
-status_t OMXCodec::applyRotation() {
-    sp<MetaData> meta = mSource->getFormat();
-
-    int32_t rotationDegrees;
-    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
-        rotationDegrees = 0;
-    }
-
-    uint32_t transform;
-    switch (rotationDegrees) {
-        case 0: transform = 0; break;
-        case 90: transform = HAL_TRANSFORM_ROT_90; break;
-        case 180: transform = HAL_TRANSFORM_ROT_180; break;
-        case 270: transform = HAL_TRANSFORM_ROT_270; break;
-        default: transform = 0; break;
-    }
-
-    status_t err = OK;
-
-    if (transform) {
-        err = native_window_set_buffers_transform(
-                mNativeWindow.get(), transform);
-        ALOGE("native_window_set_buffers_transform failed: %s (%d)",
-                strerror(-err), -err);
-    }
-
-    return err;
-}
-
 status_t OMXCodec::allocateOutputBuffersFromNativeWindow() {
     // Get the number of buffers needed.
     OMX_PARAM_PORTDEFINITIONTYPE def;
@@ -1828,21 +1803,11 @@
         return err;
     }
 
-    err = native_window_set_buffers_geometry(
-            mNativeWindow.get(),
-            def.format.video.nFrameWidth,
-            def.format.video.nFrameHeight,
-            def.format.video.eColorFormat);
+    sp<MetaData> meta = mSource->getFormat();
 
-    if (err != 0) {
-        ALOGE("native_window_set_buffers_geometry failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = applyRotation();
-    if (err != OK) {
-        return err;
+    int32_t rotationDegrees;
+    if (!meta->findInt32(kKeyRotation, &rotationDegrees)) {
+        rotationDegrees = 0;
     }
 
     // Set up the native window.
@@ -1853,34 +1818,19 @@
         // XXX: Currently this error is logged, but not fatal.
         usage = 0;
     }
+
     if (mFlags & kEnableGrallocUsageProtected) {
         usage |= GRALLOC_USAGE_PROTECTED;
     }
 
-    // Make sure to check whether either Stagefright or the video decoder
-    // requested protected buffers.
-    if (usage & GRALLOC_USAGE_PROTECTED) {
-        // Verify that the ANativeWindow sends images directly to
-        // SurfaceFlinger.
-        int queuesToNativeWindow = 0;
-        err = mNativeWindow->query(
-                mNativeWindow.get(), NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER,
-                &queuesToNativeWindow);
-        if (err != 0) {
-            ALOGE("error authenticating native window: %d", err);
-            return err;
-        }
-        if (queuesToNativeWindow != 1) {
-            ALOGE("native window could not be authenticated");
-            return PERMISSION_DENIED;
-        }
-    }
-
-    ALOGV("native_window_set_usage usage=0x%lx", usage);
-    err = native_window_set_usage(
-            mNativeWindow.get(), usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
+    err = setNativeWindowSizeFormatAndUsage(
+            mNativeWindow.get(),
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            def.format.video.eColorFormat,
+            rotationDegrees,
+            usage | GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_EXTERNAL_DISP);
     if (err != 0) {
-        ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
         return err;
     }
 
@@ -2047,150 +1997,6 @@
     return bufInfo;
 }
 
-status_t OMXCodec::pushBlankBuffersToNativeWindow() {
-    status_t err = NO_ERROR;
-    ANativeWindowBuffer* anb = NULL;
-    int numBufs = 0;
-    int minUndequeuedBufs = 0;
-
-    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
-    // no frames get dropped by SurfaceFlinger assuming that these are video
-    // frames.
-    err = native_window_api_disconnect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_MEDIA);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_api_connect(mNativeWindow.get(),
-            NATIVE_WINDOW_API_CPU);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                strerror(-err), -err);
-        return err;
-    }
-
-    err = native_window_set_buffers_geometry(mNativeWindow.get(), 1, 1,
-            HAL_PIXEL_FORMAT_RGBX_8888);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffers_geometry failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_usage(mNativeWindow.get(),
-            GRALLOC_USAGE_SW_WRITE_OFTEN);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_usage failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = native_window_set_scaling_mode(mNativeWindow.get(),
-            NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
-    if (err != OK) {
-        ALOGE("error pushing blank frames: set_scaling_mode failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    err = mNativeWindow->query(mNativeWindow.get(),
-            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
-                "failed: %s (%d)", strerror(-err), -err);
-        goto error;
-    }
-
-    numBufs = minUndequeuedBufs + 1;
-    err = native_window_set_buffer_count(mNativeWindow.get(), numBufs);
-    if (err != NO_ERROR) {
-        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)",
-                strerror(-err), -err);
-        goto error;
-    }
-
-    // We  push numBufs + 1 buffers to ensure that we've drawn into the same
-    // buffer twice.  This should guarantee that the buffer has been displayed
-    // on the screen and then been replaced, so an previous video frames are
-    // guaranteed NOT to be currently displayed.
-    for (int i = 0; i < numBufs + 1; i++) {
-        err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(), &anb);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
-
-        // Fill the buffer with the a 1x1 checkerboard pattern ;)
-        uint32_t* img = NULL;
-        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: lock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        *img = 0;
-
-        err = buf->unlock();
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: unlock failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        err = mNativeWindow->queueBuffer(mNativeWindow.get(),
-                buf->getNativeBuffer(), -1);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)",
-                    strerror(-err), -err);
-            goto error;
-        }
-
-        anb = NULL;
-    }
-
-error:
-
-    if (err != NO_ERROR) {
-        // Clean up after an error.
-        if (anb != NULL) {
-            mNativeWindow->cancelBuffer(mNativeWindow.get(), anb, -1);
-        }
-
-        native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-
-        return err;
-    } else {
-        // Clean up after success.
-        err = native_window_api_disconnect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_CPU);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        err = native_window_api_connect(mNativeWindow.get(),
-                NATIVE_WINDOW_API_MEDIA);
-        if (err != NO_ERROR) {
-            ALOGE("error pushing blank frames: api_connect failed: %s (%d)",
-                    strerror(-err), -err);
-            return err;
-        }
-
-        return NO_ERROR;
-    }
-}
-
 int64_t OMXCodec::getDecodingTimeUs() {
     CHECK(mIsEncoder && mIsVideo);
 
@@ -2711,7 +2517,7 @@
 
         default:
         {
-            CODEC_LOGV("CMD_COMPLETE(%d, %ld)", cmd, data);
+            CODEC_LOGV("CMD_COMPLETE(%d, %u)", cmd, data);
             break;
         }
     }
@@ -2737,7 +2543,7 @@
                 if (countBuffersWeOwn(mPortBuffers[kPortIndexInput]) !=
                     mPortBuffers[kPortIndexInput].size()) {
                     ALOGE("Codec did not return all input buffers "
-                          "(received %d / %d)",
+                          "(received %zu / %zu)",
                             countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
                             mPortBuffers[kPortIndexInput].size());
                     TRESPASS();
@@ -2746,7 +2552,7 @@
                 if (countBuffersWeOwn(mPortBuffers[kPortIndexOutput]) !=
                     mPortBuffers[kPortIndexOutput].size()) {
                     ALOGE("Codec did not return all output buffers "
-                          "(received %d / %d)",
+                          "(received %zu / %zu)",
                             countBuffersWeOwn(mPortBuffers[kPortIndexOutput]),
                             mPortBuffers[kPortIndexOutput].size());
                     TRESPASS();
@@ -2772,7 +2578,7 @@
                     // them has made it to the display.  This allows the OMX
                     // component teardown to zero out any protected buffers
                     // without the risk of scanning out one of those buffers.
-                    pushBlankBuffersToNativeWindow();
+                    pushBlankBuffersToNativeWindow(mNativeWindow.get());
                 }
 
                 setState(IDLE_TO_LOADED);
@@ -2850,7 +2656,7 @@
         CHECK(info->mStatus == OWNED_BY_US
                 || info->mStatus == OWNED_BY_NATIVE_WINDOW);
 
-        CODEC_LOGV("freeing buffer %p on port %ld", info->mBuffer, portIndex);
+        CODEC_LOGV("freeing buffer %u on port %u", info->mBuffer, portIndex);
 
         status_t err = freeBuffer(portIndex, i);
 
@@ -2897,7 +2703,7 @@
 }
 
 void OMXCodec::onPortSettingsChanged(OMX_U32 portIndex) {
-    CODEC_LOGV("PORT_SETTINGS_CHANGED(%ld)", portIndex);
+    CODEC_LOGV("PORT_SETTINGS_CHANGED(%u)", portIndex);
 
     CHECK(mState == EXECUTING || mState == EXECUTING_TO_IDLE);
     CHECK_EQ(portIndex, (OMX_U32)kPortIndexOutput);
@@ -2924,7 +2730,7 @@
     CHECK(mState == EXECUTING || mState == RECONFIGURING
             || mState == EXECUTING_TO_IDLE);
 
-    CODEC_LOGV("flushPortAsync(%ld): we own %d out of %d buffers already.",
+    CODEC_LOGV("flushPortAsync(%u): we own %zu out of %zu buffers already.",
          portIndex, countBuffersWeOwn(mPortBuffers[portIndex]),
          mPortBuffers[portIndex].size());
 
@@ -2953,7 +2759,7 @@
     CHECK_EQ((int)mPortStatus[portIndex], (int)ENABLED);
     mPortStatus[portIndex] = DISABLING;
 
-    CODEC_LOGV("sending OMX_CommandPortDisable(%ld)", portIndex);
+    CODEC_LOGV("sending OMX_CommandPortDisable(%u)", portIndex);
     status_t err =
         mOMX->sendCommand(mNode, OMX_CommandPortDisable, portIndex);
     CHECK_EQ(err, (status_t)OK);
@@ -2967,7 +2773,7 @@
     CHECK_EQ((int)mPortStatus[portIndex], (int)DISABLED);
     mPortStatus[portIndex] = ENABLING;
 
-    CODEC_LOGV("sending OMX_CommandPortEnable(%ld)", portIndex);
+    CODEC_LOGV("sending OMX_CommandPortEnable(%u)", portIndex);
     return mOMX->sendCommand(mNode, OMX_CommandPortEnable, portIndex);
 }
 
@@ -3040,7 +2846,7 @@
 
         if (info->mData == ptr) {
             CODEC_LOGV(
-                    "input buffer data ptr = %p, buffer_id = %p",
+                    "input buffer data ptr = %p, buffer_id = %u",
                     ptr,
                     info->mBuffer);
 
@@ -3150,7 +2956,7 @@
                 if (srcBuffer->meta_data()->findInt64(
                             kKeyTargetTime, &targetTimeUs)
                         && targetTimeUs >= 0) {
-                    CODEC_LOGV("targetTimeUs = %lld us", targetTimeUs);
+                    CODEC_LOGV("targetTimeUs = %lld us", (long long)targetTimeUs);
                     mTargetTimeUs = targetTimeUs;
                 } else {
                     mTargetTimeUs = -1;
@@ -3184,7 +2990,7 @@
             if (offset == 0) {
                 CODEC_LOGE(
                      "Codec's input buffers are too small to accomodate "
-                     "buffer read from source (info->mSize = %d, srcLength = %d)",
+                     "buffer read from source (info->mSize = %zu, srcLength = %zu)",
                      info->mSize, srcBuffer->range_length());
 
                 srcBuffer->release();
@@ -3290,10 +3096,10 @@
         info = findEmptyInputBuffer();
     }
 
-    CODEC_LOGV("Calling emptyBuffer on buffer %p (length %d), "
+    CODEC_LOGV("Calling emptyBuffer on buffer %u (length %zu), "
                "timestamp %lld us (%.2f secs)",
                info->mBuffer, offset,
-               timestampUs, timestampUs / 1E6);
+               (long long)timestampUs, timestampUs / 1E6);
 
     err = mOMX->emptyBuffer(
             mNode, info->mBuffer, 0, offset,
@@ -3318,7 +3124,7 @@
         return;
     }
 
-    CODEC_LOGV("Calling fillBuffer on buffer %p", info->mBuffer);
+    CODEC_LOGV("Calling fillBuffer on buffer %u", info->mBuffer);
     status_t err = mOMX->fillBuffer(mNode, info->mBuffer);
 
     if (err != OK) {
@@ -3375,7 +3181,7 @@
     }
     status_t err = mBufferFilled.waitRelative(mLock, kBufferFilledEventTimeOutNs);
     if (err != OK) {
-        CODEC_LOGE("Timed out waiting for output buffers: %d/%d",
+        CODEC_LOGE("Timed out waiting for output buffers: %zu/%zu",
             countBuffersWeOwn(mPortBuffers[kPortIndexInput]),
             countBuffersWeOwn(mPortBuffers[kPortIndexOutput]));
     }
@@ -3630,7 +3436,7 @@
 
 void OMXCodec::setImageOutputFormat(
         OMX_COLOR_FORMATTYPE format, OMX_U32 width, OMX_U32 height) {
-    CODEC_LOGV("setImageOutputFormat(%ld, %ld)", width, height);
+    CODEC_LOGV("setImageOutputFormat(%u, %u)", width, height);
 
 #if 0
     OMX_INDEXTYPE index;
@@ -4284,14 +4090,14 @@
                 if ((OMX_U32)numChannels != params.nChannels) {
                     ALOGV("Codec outputs a different number of channels than "
                          "the input stream contains (contains %d channels, "
-                         "codec outputs %ld channels).",
+                         "codec outputs %u channels).",
                          numChannels, params.nChannels);
                 }
 
                 if (sampleRate != (int32_t)params.nSamplingRate) {
                     ALOGV("Codec outputs at different sampling rate than "
                          "what the input stream contains (contains data at "
-                         "%d Hz, codec outputs %lu Hz)",
+                         "%d Hz, codec outputs %u Hz)",
                          sampleRate, params.nSamplingRate);
                 }
 
@@ -4393,8 +4199,7 @@
                             mNode, OMX_IndexConfigCommonOutputCrop,
                             &rect, sizeof(rect));
 
-                CODEC_LOGI(
-                        "video dimensions are %ld x %ld",
+                CODEC_LOGI("video dimensions are %u x %u",
                         video_def->nFrameWidth, video_def->nFrameHeight);
 
                 if (err == OK) {
@@ -4412,8 +4217,7 @@
                             rect.nLeft + rect.nWidth - 1,
                             rect.nTop + rect.nHeight - 1);
 
-                    CODEC_LOGI(
-                            "Crop rect is %ld x %ld @ (%ld, %ld)",
+                    CODEC_LOGI("Crop rect is %u x %u @ (%d, %d)",
                             rect.nWidth, rect.nHeight, rect.nLeft, rect.nTop);
                 } else {
                     mOutputFormat->setRect(
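
The native window setup that OMXCodec used to do inline (geometry, usage, and the rotation handling in the deleted applyRotation()) now goes through the setNativeWindowSizeFormatAndUsage() helper added in SurfaceUtils.cpp further down. A minimal sketch of the degrees-to-transform mapping the deleted code performed, assuming the new helper applies the same mapping internally (only the start of its implementation appears below, so this placement is an assumption):

    // Hedged sketch: rotation-degrees-to-HAL-transform mapping, as done by the
    // removed applyRotation(). Variable names mirror the deleted code.
    uint32_t transform;
    switch (rotationDegrees) {
        case 0:   transform = 0;                     break;
        case 90:  transform = HAL_TRANSFORM_ROT_90;  break;
        case 180: transform = HAL_TRANSFORM_ROT_180; break;
        case 270: transform = HAL_TRANSFORM_ROT_270; break;
        default:  transform = 0;                     break;
    }
    if (transform) {
        // Applied to the same ANativeWindow the caller passes in.
        err = native_window_set_buffers_transform(nativeWindow, transform);
    }
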
diff --git a/media/libstagefright/OggExtractor.cpp b/media/libstagefright/OggExtractor.cpp
index 976763c..6fba8e1 100644
--- a/media/libstagefright/OggExtractor.cpp
+++ b/media/libstagefright/OggExtractor.cpp
@@ -21,6 +21,7 @@
 #include "include/OggExtractor.h"
 
 #include <cutils/properties.h>
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaBuffer.h>
@@ -65,24 +66,28 @@
     OggSource &operator=(const OggSource &);
 };
 
-struct MyVorbisExtractor {
-    MyVorbisExtractor(const sp<DataSource> &source);
-    virtual ~MyVorbisExtractor();
+struct MyOggExtractor {
+    MyOggExtractor(
+            const sp<DataSource> &source,
+            const char *mimeType,
+            size_t numHeaders,
+            int64_t seekPreRollUs);
+    virtual ~MyOggExtractor();
 
     sp<MetaData> getFormat() const;
 
     // Returns an approximate bitrate in bits per second.
-    uint64_t approxBitrate();
+    virtual uint64_t approxBitrate() const = 0;
 
     status_t seekToTime(int64_t timeUs);
     status_t seekToOffset(off64_t offset);
-    status_t readNextPacket(MediaBuffer **buffer, bool conf);
+    virtual status_t readNextPacket(MediaBuffer **buffer) = 0;
 
     status_t init();
 
     sp<MetaData> getFileMetaData() { return mFileMeta; }
 
-private:
+protected:
     struct Page {
         uint64_t mGranulePosition;
         int32_t mPrevPacketSize;
@@ -102,12 +107,17 @@
     sp<DataSource> mSource;
     off64_t mOffset;
     Page mCurrentPage;
+    uint64_t mCurGranulePosition;
     uint64_t mPrevGranulePosition;
     size_t mCurrentPageSize;
     bool mFirstPacketInPage;
     uint64_t mCurrentPageSamples;
     size_t mNextLaceIndex;
 
+    const char *mMimeType;
+    size_t mNumHeaders;
+    int64_t mSeekPreRollUs;
+
     off64_t mFirstDataOffset;
 
     vorbis_info mVi;
@@ -121,10 +131,26 @@
     ssize_t readPage(off64_t offset, Page *page);
     status_t findNextPage(off64_t startOffset, off64_t *pageOffset);
 
-    status_t verifyHeader(
-            MediaBuffer *buffer, uint8_t type);
+    virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const = 0;
 
-    int32_t packetBlockSize(MediaBuffer *buffer);
+    // Extract codec format, metadata tags, and various codec-specific data;
+    // the format and CSDs are required to set up the decoders for the enclosed media content.
+    //
+    // Valid values for `type` are:
+    // 1 - bitstream identification header
+    // 3 - comment header
+    // 5 - codec setup header (Vorbis only)
+    virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type) = 0;
+
+    // Read the next ogg packet from the underlying data source; optionally
+    // calculate the timestamp for the output packet whilst pretending
+    // that we are parsing an Ogg Vorbis stream.
+    //
+    // *buffer is NULL'ed out immediately upon entry and, if successful, a new buffer is allocated;
+    // clients are responsible for releasing the original buffer.
+    status_t _readNextPacket(MediaBuffer **buffer, bool calcVorbisTimestamp);
+
+    int32_t getPacketBlockSize(MediaBuffer *buffer);
 
     void parseFileMetaData();
 
@@ -132,8 +158,61 @@
 
     void buildTableOfContents();
 
-    MyVorbisExtractor(const MyVorbisExtractor &);
-    MyVorbisExtractor &operator=(const MyVorbisExtractor &);
+    MyOggExtractor(const MyOggExtractor &);
+    MyOggExtractor &operator=(const MyOggExtractor &);
+};
+
+struct MyVorbisExtractor : public MyOggExtractor {
+    MyVorbisExtractor(const sp<DataSource> &source)
+        : MyOggExtractor(source,
+                MEDIA_MIMETYPE_AUDIO_VORBIS,
+                /* numHeaders */ 3,
+                /* seekPreRollUs */ 0) {
+    }
+
+    virtual uint64_t approxBitrate() const;
+
+    virtual status_t readNextPacket(MediaBuffer **buffer) {
+        return _readNextPacket(buffer, /* calcVorbisTimestamp = */ true);
+    }
+
+protected:
+    virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const {
+        return granulePos * 1000000ll / mVi.rate;
+    }
+
+    virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type);
+};
+
+struct MyOpusExtractor : public MyOggExtractor {
+    static const int32_t kOpusSampleRate = 48000;
+    static const int64_t kOpusSeekPreRollUs = 80000; // 80 ms
+
+    MyOpusExtractor(const sp<DataSource> &source)
+        : MyOggExtractor(source, MEDIA_MIMETYPE_AUDIO_OPUS, /*numHeaders*/ 2, kOpusSeekPreRollUs),
+          mChannelCount(0),
+          mCodecDelay(0),
+          mStartGranulePosition(-1) {
+    }
+
+    virtual uint64_t approxBitrate() const {
+        return 0;
+    }
+
+    virtual status_t readNextPacket(MediaBuffer **buffer);
+
+protected:
+    virtual int64_t getTimeUsOfGranule(uint64_t granulePos) const;
+    virtual status_t verifyHeader(MediaBuffer *buffer, uint8_t type);
+
+private:
+    status_t verifyOpusHeader(MediaBuffer *buffer);
+    status_t verifyOpusComments(MediaBuffer *buffer);
+    uint32_t getNumSamplesInPacket(MediaBuffer *buffer) const;
+
+    uint8_t mChannelCount;
+    uint16_t mCodecDelay;
+    int64_t mStartGranulePosition;
 };
 
 static void extractAlbumArt(
@@ -179,13 +258,14 @@
     int64_t seekTimeUs;
     ReadOptions::SeekMode mode;
     if (options && options->getSeekTo(&seekTimeUs, &mode)) {
-        if (mExtractor->mImpl->seekToTime(seekTimeUs) != OK) {
-            return ERROR_END_OF_STREAM;
+        status_t err = mExtractor->mImpl->seekToTime(seekTimeUs);
+        if (err != OK) {
+            return err;
         }
     }
 
     MediaBuffer *packet;
-    status_t err = mExtractor->mImpl->readNextPacket(&packet, /* conf = */ false);
+    status_t err = mExtractor->mImpl->readNextPacket(&packet);
 
     if (err != OK) {
         return err;
@@ -209,14 +289,22 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-MyVorbisExtractor::MyVorbisExtractor(const sp<DataSource> &source)
+MyOggExtractor::MyOggExtractor(
+        const sp<DataSource> &source,
+        const char *mimeType,
+        size_t numHeaders,
+        int64_t seekPreRollUs)
     : mSource(source),
       mOffset(0),
+      mCurGranulePosition(0),
       mPrevGranulePosition(0),
       mCurrentPageSize(0),
       mFirstPacketInPage(true),
       mCurrentPageSamples(0),
       mNextLaceIndex(0),
+      mMimeType(mimeType),
+      mNumHeaders(numHeaders),
+      mSeekPreRollUs(seekPreRollUs),
       mFirstDataOffset(-1) {
     mCurrentPage.mNumSegments = 0;
 
@@ -224,16 +312,16 @@
     vorbis_comment_init(&mVc);
 }
 
-MyVorbisExtractor::~MyVorbisExtractor() {
+MyOggExtractor::~MyOggExtractor() {
     vorbis_comment_clear(&mVc);
     vorbis_info_clear(&mVi);
 }
 
-sp<MetaData> MyVorbisExtractor::getFormat() const {
+sp<MetaData> MyOggExtractor::getFormat() const {
     return mMeta;
 }
 
-status_t MyVorbisExtractor::findNextPage(
+status_t MyOggExtractor::findNextPage(
         off64_t startOffset, off64_t *pageOffset) {
     *pageOffset = startOffset;
 
@@ -250,7 +338,7 @@
         if (!memcmp(signature, "OggS", 4)) {
             if (*pageOffset > startOffset) {
                 ALOGV("skipped %lld bytes of junk to reach next frame",
-                     *pageOffset - startOffset);
+                     (long long)(*pageOffset - startOffset));
             }
 
             return OK;
@@ -264,7 +352,7 @@
 // it (if any) and return its granule position.
 // To do this we back up from the "current" page's offset until we find any
 // page preceding it and then scan forward to just before the current page.
-status_t MyVorbisExtractor::findPrevGranulePosition(
+status_t MyOggExtractor::findPrevGranulePosition(
         off64_t pageOffset, uint64_t *granulePos) {
     *granulePos = 0;
 
@@ -277,10 +365,14 @@
             prevGuess = 0;
         }
 
-        ALOGV("backing up %lld bytes", pageOffset - prevGuess);
+        ALOGV("backing up %lld bytes", (long long)(pageOffset - prevGuess));
 
         status_t err = findNextPage(prevGuess, &prevPageOffset);
-        if (err != OK) {
+        if (err == ERROR_END_OF_STREAM) {
+            // We are at the last page and didn't back off enough;
+            // back off 5000 bytes more and try again.
+            continue;
+        } else if (err != OK) {
             return err;
         }
 
@@ -295,7 +387,7 @@
     }
 
     ALOGV("prevPageOffset at %lld, pageOffset at %lld",
-         prevPageOffset, pageOffset);
+            (long long)prevPageOffset, (long long)pageOffset);
 
     for (;;) {
         Page prevPage;
@@ -314,13 +406,22 @@
     }
 }
 
-status_t MyVorbisExtractor::seekToTime(int64_t timeUs) {
+status_t MyOggExtractor::seekToTime(int64_t timeUs) {
+    timeUs -= mSeekPreRollUs;
+    if (timeUs < 0) {
+        timeUs = 0;
+    }
+
     if (mTableOfContents.isEmpty()) {
         // Perform approximate seeking based on avg. bitrate.
+        uint64_t bps = approxBitrate();
+        if (bps <= 0) {
+            return INVALID_OPERATION;
+        }
 
-        off64_t pos = timeUs * approxBitrate() / 8000000ll;
+        off64_t pos = timeUs * bps / 8000000ll;
 
-        ALOGV("seeking to offset %lld", pos);
+        ALOGV("seeking to offset %lld", (long long)pos);
         return seekToOffset(pos);
     }
 
@@ -348,12 +449,12 @@
     const TOCEntry &entry = mTableOfContents.itemAt(left);
 
     ALOGV("seeking to entry %zu / %zu at offset %lld",
-         left, mTableOfContents.size(), entry.mPageOffset);
+         left, mTableOfContents.size(), (long long)entry.mPageOffset);
 
     return seekToOffset(entry.mPageOffset);
 }
 
-status_t MyVorbisExtractor::seekToOffset(off64_t offset) {
+status_t MyOggExtractor::seekToOffset(off64_t offset) {
     if (mFirstDataOffset >= 0 && offset < mFirstDataOffset) {
         // Once we know where the actual audio data starts (past the headers)
         // don't ever seek to anywhere before that.
@@ -386,13 +487,13 @@
     return OK;
 }
 
-ssize_t MyVorbisExtractor::readPage(off64_t offset, Page *page) {
+ssize_t MyOggExtractor::readPage(off64_t offset, Page *page) {
     uint8_t header[27];
     ssize_t n;
     if ((n = mSource->readAt(offset, header, sizeof(header)))
             < (ssize_t)sizeof(header)) {
-        ALOGV("failed to read %zu bytes at offset 0x%016llx, got %zd bytes",
-             sizeof(header), offset, n);
+        ALOGV("failed to read %zu bytes at offset %#016llx, got %zd bytes",
+                sizeof(header), (long long)offset, n);
 
         if (n < 0) {
             return n;
@@ -457,7 +558,110 @@
     return sizeof(header) + page->mNumSegments + totalSize;
 }
 
-status_t MyVorbisExtractor::readNextPacket(MediaBuffer **out, bool conf) {
+status_t MyOpusExtractor::readNextPacket(MediaBuffer **out) {
+    if (mOffset <= mFirstDataOffset && mStartGranulePosition < 0) {
+        // The first sample might not start at time 0; find out where by subtracting
+        // the number of samples on the first page from the granule position
+        // (position of last complete sample) of the first page. This happens
+        // the first time before we attempt to read a packet from the first page.
+        MediaBuffer *mBuf;
+        uint32_t numSamples = 0;
+        uint64_t curGranulePosition = 0;
+        while (true) {
+            status_t err = _readNextPacket(&mBuf, /* calcVorbisTimestamp = */false);
+            if (err != OK && err != ERROR_END_OF_STREAM) {
+                return err;
+            }
+            // First two pages are header pages.
+            if (err == ERROR_END_OF_STREAM || mCurrentPage.mPageNo > 2) {
+                break;
+            }
+            curGranulePosition = mCurrentPage.mGranulePosition;
+            numSamples += getNumSamplesInPacket(mBuf);
+            mBuf->release();
+            mBuf = NULL;
+        }
+
+        if (curGranulePosition > numSamples) {
+            mStartGranulePosition = curGranulePosition - numSamples;
+        } else {
+            mStartGranulePosition = 0;
+        }
+        seekToOffset(0);
+    }
+
+    status_t err = _readNextPacket(out, /* calcVorbisTimestamp = */false);
+    if (err != OK) {
+        return err;
+    }
+
+    int32_t currentPageSamples;
+    // Calculate timestamps by accumulating durations starting from the first sample of a page;
+    // we assume that we only seek to page boundaries.
+    if ((*out)->meta_data()->findInt32(kKeyValidSamples, &currentPageSamples)) {
+        // first packet in page
+        if (mOffset == mFirstDataOffset) {
+            currentPageSamples -= mStartGranulePosition;
+            (*out)->meta_data()->setInt32(kKeyValidSamples, currentPageSamples);
+        }
+        mCurGranulePosition = mCurrentPage.mGranulePosition - currentPageSamples;
+    }
+
+    int64_t timeUs = getTimeUsOfGranule(mCurGranulePosition);
+    (*out)->meta_data()->setInt64(kKeyTime, timeUs);
+
+    uint32_t frames = getNumSamplesInPacket(*out);
+    mCurGranulePosition += frames;
+    return OK;
+}
+
+uint32_t MyOpusExtractor::getNumSamplesInPacket(MediaBuffer *buffer) const {
+    if (buffer == NULL || buffer->range_length() < 1) {
+        return 0;
+    }
+
+    uint8_t *data = (uint8_t *)buffer->data() + buffer->range_offset();
+    uint8_t toc = data[0];
+    uint8_t config = (toc >> 3) & 0x1f;
+    uint32_t frameSizesUs[] = {
+        10000, 20000, 40000, 60000, // 0...3
+        10000, 20000, 40000, 60000, // 4...7
+        10000, 20000, 40000, 60000, // 8...11
+        10000, 20000,               // 12...13
+        10000, 20000,               // 14...15
+        2500, 5000, 10000, 20000,   // 16...19
+        2500, 5000, 10000, 20000,   // 20...23
+        2500, 5000, 10000, 20000,   // 24...27
+        2500, 5000, 10000, 20000    // 28...31
+    };
+    uint32_t frameSizeUs = frameSizesUs[config];
+
+    uint32_t numFrames;
+    uint8_t c = toc & 3;
+    switch (c) {
+    case 0:
+        numFrames = 1;
+        break;
+    case 1:
+    case 2:
+        numFrames = 2;
+        break;
+    case 3:
+        if (buffer->range_length() < 3) {
+            numFrames = 0;
+        } else {
+            numFrames = data[2] & 0x3f;
+        }
+        break;
+    default:
+        TRESPASS();
+    }
+
+    uint32_t numSamples = frameSizeUs * numFrames * kOpusSampleRate / 1000000;
+    return numSamples;
+}
+
+status_t MyOggExtractor::_readNextPacket(MediaBuffer **out, bool calcVorbisTimestamp) {
     *out = NULL;
 
     MediaBuffer *buffer = NULL;
@@ -505,8 +709,8 @@
                     packetSize);
 
             if (n < (ssize_t)packetSize) {
-                ALOGV("failed to read %zu bytes at 0x%016llx, got %zd bytes",
-                     packetSize, dataOffset, n);
+                ALOGV("failed to read %zu bytes at %#016llx, got %zd bytes",
+                        packetSize, (long long)dataOffset, n);
                 return ERROR_IO;
             }
 
@@ -523,9 +727,8 @@
                     mFirstPacketInPage = false;
                 }
 
-                // ignore timestamp for configuration packets
-                if (!conf) {
-                    int32_t curBlockSize = packetBlockSize(buffer);
+                if (calcVorbisTimestamp) {
+                    int32_t curBlockSize = getPacketBlockSize(buffer);
                     if (mCurrentPage.mPrevPacketSize < 0) {
                         mCurrentPage.mPrevPacketSize = curBlockSize;
                         mCurrentPage.mPrevPacketPos =
@@ -597,43 +800,24 @@
     }
 }
 
-status_t MyVorbisExtractor::init() {
+status_t MyOggExtractor::init() {
     mMeta = new MetaData;
-    mMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_VORBIS);
+    mMeta->setCString(kKeyMIMEType, mMimeType);
 
-    MediaBuffer *packet;
     status_t err;
-    if ((err = readNextPacket(&packet, /* conf = */ true)) != OK) {
-        return err;
-    }
-    ALOGV("read packet of size %zu\n", packet->range_length());
-    err = verifyHeader(packet, 1);
-    packet->release();
-    packet = NULL;
-    if (err != OK) {
-        return err;
-    }
-
-    if ((err = readNextPacket(&packet, /* conf = */ true)) != OK) {
-        return err;
-    }
-    ALOGV("read packet of size %zu\n", packet->range_length());
-    err = verifyHeader(packet, 3);
-    packet->release();
-    packet = NULL;
-    if (err != OK) {
-        return err;
-    }
-
-    if ((err = readNextPacket(&packet, /* conf = */ true)) != OK) {
-        return err;
-    }
-    ALOGV("read packet of size %zu\n", packet->range_length());
-    err = verifyHeader(packet, 5);
-    packet->release();
-    packet = NULL;
-    if (err != OK) {
-        return err;
+    MediaBuffer *packet;
+    for (size_t i = 0; i < mNumHeaders; ++i) {
+        // ignore timestamp for configuration packets
+        if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != OK) {
+            return err;
+        }
+        ALOGV("read packet of size %zu\n", packet->range_length());
+        err = verifyHeader(packet, /* type = */ i * 2 + 1);
+        packet->release();
+        packet = NULL;
+        if (err != OK) {
+            return err;
+        }
     }
 
     mFirstDataOffset = mOffset + mCurrentPageSize;
@@ -649,7 +833,7 @@
         // we can only approximate using avg. bitrate if seeking to
         // the end is too expensive or impossible (live streaming).
 
-        int64_t durationUs = lastGranulePosition * 1000000ll / mVi.rate;
+        int64_t durationUs = getTimeUsOfGranule(lastGranulePosition);
 
         mMeta->setInt64(kKeyDuration, durationUs);
 
@@ -659,7 +843,7 @@
     return OK;
 }
 
-void MyVorbisExtractor::buildTableOfContents() {
+void MyOggExtractor::buildTableOfContents() {
     off64_t offset = mFirstDataOffset;
     Page page;
     ssize_t pageSize;
@@ -670,7 +854,7 @@
             mTableOfContents.editItemAt(mTableOfContents.size() - 1);
 
         entry.mPageOffset = offset;
-        entry.mTimeUs = page.mGranulePosition * 1000000ll / mVi.rate;
+        entry.mTimeUs = getTimeUsOfGranule(page.mGranulePosition);
 
         offset += (size_t)pageSize;
     }
@@ -698,7 +882,7 @@
     }
 }
 
-int32_t MyVorbisExtractor::packetBlockSize(MediaBuffer *buffer) {
+int32_t MyOggExtractor::getPacketBlockSize(MediaBuffer *buffer) {
     const uint8_t *data =
         (const uint8_t *)buffer->data() + buffer->range_offset();
 
@@ -727,6 +911,144 @@
     return vorbis_packet_blocksize(&mVi, &pack);
 }
 
+int64_t MyOpusExtractor::getTimeUsOfGranule(uint64_t granulePos) const {
+    uint64_t pcmSamplePosition = 0;
+    if (granulePos > mCodecDelay) {
+        pcmSamplePosition = granulePos - mCodecDelay;
+    }
+    return pcmSamplePosition * 1000000ll / kOpusSampleRate;
+}
+
+status_t MyOpusExtractor::verifyHeader(MediaBuffer *buffer, uint8_t type) {
+    switch (type) {
+        // there are actually no header types defined in the Opus spec; we choose 1 and 3 to mean
+        // header and comments such that we can share code with MyVorbisExtractor.
+        case 1:
+            return verifyOpusHeader(buffer);
+        case 3:
+            return verifyOpusComments(buffer);
+        default:
+            return INVALID_OPERATION;
+    }
+}
+
+status_t MyOpusExtractor::verifyOpusHeader(MediaBuffer *buffer) {
+    const size_t kOpusHeaderSize = 19;
+    const uint8_t *data =
+        (const uint8_t *)buffer->data() + buffer->range_offset();
+
+    size_t size = buffer->range_length();
+
+    if (size < kOpusHeaderSize
+            || memcmp(data, "OpusHead", 8)
+            || /* version = */ data[8] != 1) {
+        return ERROR_MALFORMED;
+    }
+
+    mChannelCount = data[9];
+    mCodecDelay = U16LE_AT(&data[10]);
+
+    mMeta->setData(kKeyOpusHeader, 0, data, size);
+    mMeta->setInt32(kKeySampleRate, kOpusSampleRate);
+    mMeta->setInt32(kKeyChannelCount, mChannelCount);
+    mMeta->setInt64(kKeyOpusSeekPreRoll /* ns */, kOpusSeekPreRollUs * 1000 /* = 80 ms*/);
+    mMeta->setInt64(kKeyOpusCodecDelay /* ns */,
+            mCodecDelay /* sample/s */ * 1000000000 / kOpusSampleRate);
+
+    return OK;
+}
+
+status_t MyOpusExtractor::verifyOpusComments(MediaBuffer *buffer) {
+    // add artificial framing bit so we can reuse _vorbis_unpack_comment
+    int32_t commentSize = buffer->range_length() + 1;
+    sp<ABuffer> aBuf = new ABuffer(commentSize);
+    if (aBuf->capacity() <= buffer->range_length()) {
+        return ERROR_MALFORMED;
+    }
+
+    uint8_t* commentData = aBuf->data();
+    memcpy(commentData,
+            (uint8_t *)buffer->data() + buffer->range_offset(),
+            buffer->range_length());
+
+    ogg_buffer buf;
+    buf.data = commentData;
+    buf.size = commentSize;
+    buf.refcount = 1;
+    buf.ptr.owner = NULL;
+
+    ogg_reference ref;
+    ref.buffer = &buf;
+    ref.begin = 0;
+    ref.length = commentSize;
+    ref.next = NULL;
+
+    oggpack_buffer bits;
+    oggpack_readinit(&bits, &ref);
+
+    // skip 'OpusTags'
+    const char *OpusTags = "OpusTags";
+    const int32_t headerLen = strlen(OpusTags);
+    int32_t framingBitOffset = headerLen;
+    for (int i = 0; i < headerLen; ++i) {
+        char chr = oggpack_read(&bits, 8);
+        if (chr != OpusTags[i]) {
+            return ERROR_MALFORMED;
+        }
+    }
+
+    int32_t vendorLen = oggpack_read(&bits, 32);
+    framingBitOffset += 4;
+    if (vendorLen < 0 || vendorLen > commentSize - 8) {
+        return ERROR_MALFORMED;
+    }
+    // skip vendor string
+    framingBitOffset += vendorLen;
+    for (int i = 0; i < vendorLen; ++i) {
+        oggpack_read(&bits, 8);
+    }
+
+    int32_t n = oggpack_read(&bits, 32);
+    framingBitOffset += 4;
+    if (n < 0 || n > ((commentSize - oggpack_bytes(&bits)) >> 2)) {
+        return ERROR_MALFORMED;
+    }
+    for (int i = 0; i < n; ++i) {
+        int32_t len = oggpack_read(&bits, 32);
+        framingBitOffset += 4;
+        if (len < 0 || len > (commentSize - oggpack_bytes(&bits))) {
+            return ERROR_MALFORMED;
+        }
+        framingBitOffset += len;
+        for (int j = 0; j < len; ++j) {
+            oggpack_read(&bits, 8);
+        }
+    }
+    if (framingBitOffset < 0 || framingBitOffset >= commentSize) {
+        return ERROR_MALFORMED;
+    }
+    commentData[framingBitOffset] = 1;
+
+    buf.data = commentData + headerLen;
+    buf.size = commentSize - headerLen;
+    buf.refcount = 1;
+    buf.ptr.owner = NULL;
+
+    ref.buffer = &buf;
+    ref.begin = 0;
+    ref.length = commentSize - headerLen;
+    ref.next = NULL;
+
+    oggpack_readinit(&bits, &ref);
+    int err = _vorbis_unpack_comment(&mVc, &bits);
+    if (0 != err) {
+        return ERROR_MALFORMED;
+    }
+
+    parseFileMetaData();
+    return OK;
+}
+
 status_t MyVorbisExtractor::verifyHeader(
         MediaBuffer *buffer, uint8_t type) {
     const uint8_t *data =
@@ -753,7 +1075,9 @@
     oggpack_buffer bits;
     oggpack_readinit(&bits, &ref);
 
-    CHECK_EQ(oggpack_read(&bits, 8), type);
+    if (oggpack_read(&bits, 8) != type) {
+        return ERROR_MALFORMED;
+    }
     for (size_t i = 0; i < 6; ++i) {
         oggpack_read(&bits, 8);  // skip 'vorbis'
     }
@@ -761,7 +1085,9 @@
     switch (type) {
         case 1:
         {
-            CHECK_EQ(0, _vorbis_unpack_info(&mVi, &bits));
+            if (0 != _vorbis_unpack_info(&mVi, &bits)) {
+                return ERROR_MALFORMED;
+            }
 
             mMeta->setData(kKeyVorbisInfo, 0, data, size);
             mMeta->setInt32(kKeySampleRate, mVi.rate);
@@ -810,7 +1136,7 @@
     return OK;
 }
 
-uint64_t MyVorbisExtractor::approxBitrate() {
+uint64_t MyVorbisExtractor::approxBitrate() const {
     if (mVi.bitrate_nominal != 0) {
         return mVi.bitrate_nominal;
     }
@@ -818,7 +1144,7 @@
     return (mVi.bitrate_lower + mVi.bitrate_upper) / 2;
 }
 
-void MyVorbisExtractor::parseFileMetaData() {
+void MyOggExtractor::parseFileMetaData() {
     mFileMeta = new MetaData;
     mFileMeta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_OGG);
 
@@ -1027,11 +1353,23 @@
     : mDataSource(source),
       mInitCheck(NO_INIT),
       mImpl(NULL) {
-    mImpl = new MyVorbisExtractor(mDataSource);
-    mInitCheck = mImpl->seekToOffset(0);
+    for (int i = 0; i < 2; ++i) {
+        if (mImpl != NULL) {
+            delete mImpl;
+        }
+        if (i == 0) {
+            mImpl = new MyVorbisExtractor(mDataSource);
+        } else {
+            mImpl = new MyOpusExtractor(mDataSource);
+        }
+        mInitCheck = mImpl->seekToOffset(0);
 
-    if (mInitCheck == OK) {
-        mInitCheck = mImpl->init();
+        if (mInitCheck == OK) {
+            mInitCheck = mImpl->init();
+            if (mInitCheck == OK) {
+                break;
+            }
+        }
     }
 }
 
diff --git a/media/libstagefright/ProcessInfo.cpp b/media/libstagefright/ProcessInfo.cpp
new file mode 100644
index 0000000..b4172b3
--- /dev/null
+++ b/media/libstagefright/ProcessInfo.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ProcessInfo"
+#include <utils/Log.h>
+
+#include <media/stagefright/ProcessInfo.h>
+
+#include <binder/IProcessInfoService.h>
+#include <binder/IServiceManager.h>
+
+namespace android {
+
+ProcessInfo::ProcessInfo() {}
+
+bool ProcessInfo::getPriority(int pid, int* priority) {
+    sp<IBinder> binder = defaultServiceManager()->getService(String16("processinfo"));
+    sp<IProcessInfoService> service = interface_cast<IProcessInfoService>(binder);
+
+    size_t length = 1;
+    int32_t states;
+    status_t err = service->getProcessStatesFromPids(length, &pid, &states);
+    if (err != OK) {
+        ALOGE("getProcessStatesFromPids failed");
+        return false;
+    }
+    ALOGV("pid %d states %d", pid, states);
+    if (states < 0) {
+        return false;
+    }
+
+    // Use the process state as the priority: the lower the value, the higher the priority.
+    *priority = states;
+    return true;
+}
+
+ProcessInfo::~ProcessInfo() {}
+
+}  // namespace android
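
A minimal usage sketch for the new ProcessInfo class, assuming the "processinfo" binder service is available (it needs <media/stagefright/ProcessInfo.h> and <unistd.h> for getpid()); getPriority() returns false on any failure, and lower values mean higher priority:

    // Hedged example; not part of the patch.
    ProcessInfo info;
    int priority = -1;
    if (info.getPriority(getpid(), &priority)) {
        // The raw process state is used directly as the priority value.
        ALOGV("process state / priority: %d", priority);
    }
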
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index d7251f4..97dff43 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -235,11 +235,13 @@
         return ERROR_MALFORMED;
     }
 
-    if (SIZE_MAX / sizeof(SampleToChunkEntry) <= mNumSampleToChunkOffsets)
+    if (SIZE_MAX / sizeof(SampleToChunkEntry) <= (size_t)mNumSampleToChunkOffsets)
         return ERROR_OUT_OF_RANGE;
 
     mSampleToChunkEntries =
-        new SampleToChunkEntry[mNumSampleToChunkOffsets];
+        new (std::nothrow) SampleToChunkEntry[mNumSampleToChunkOffsets];
+    if (!mSampleToChunkEntries)
+        return ERROR_OUT_OF_RANGE;
 
     for (uint32_t i = 0; i < mNumSampleToChunkOffsets; ++i) {
         uint8_t buffer[12];
@@ -342,10 +344,12 @@
 
     mTimeToSampleCount = U32_AT(&header[4]);
     uint64_t allocSize = (uint64_t)mTimeToSampleCount * 2 * sizeof(uint32_t);
-    if (allocSize > SIZE_MAX) {
+    if (allocSize > UINT32_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
-    mTimeToSample = new uint32_t[mTimeToSampleCount * 2];
+    mTimeToSample = new (std::nothrow) uint32_t[mTimeToSampleCount * 2];
+    if (!mTimeToSample)
+        return ERROR_OUT_OF_RANGE;
 
     size_t size = sizeof(uint32_t) * mTimeToSampleCount * 2;
     if (mDataSource->readAt(
@@ -388,11 +392,13 @@
 
     mNumCompositionTimeDeltaEntries = numEntries;
     uint64_t allocSize = (uint64_t)numEntries * 2 * sizeof(uint32_t);
-    if (allocSize > SIZE_MAX) {
+    if (allocSize > UINT32_MAX) {
         return ERROR_OUT_OF_RANGE;
     }
 
-    mCompositionTimeDeltaEntries = new uint32_t[2 * numEntries];
+    mCompositionTimeDeltaEntries = new (std::nothrow) uint32_t[2 * numEntries];
+    if (!mCompositionTimeDeltaEntries)
+        return ERROR_OUT_OF_RANGE;
 
     if (mDataSource->readAt(
                 data_offset + 8, mCompositionTimeDeltaEntries, numEntries * 8)
@@ -442,7 +448,10 @@
         return ERROR_OUT_OF_RANGE;
     }
 
-    mSyncSamples = new uint32_t[mNumSyncSamples];
+    mSyncSamples = new (std::nothrow) uint32_t[mNumSyncSamples];
+    if (!mSyncSamples)
+        return ERROR_OUT_OF_RANGE;
+
     size_t size = mNumSyncSamples * sizeof(uint32_t);
     if (mDataSource->readAt(mSyncSampleOffset + 8, mSyncSamples, size)
             != (ssize_t)size) {
@@ -510,7 +519,9 @@
         return;
     }
 
-    mSampleTimeEntries = new SampleTimeEntry[mNumSampleSizes];
+    mSampleTimeEntries = new (std::nothrow) SampleTimeEntry[mNumSampleSizes];
+    if (!mSampleTimeEntries)
+        return;
 
     uint32_t sampleIndex = 0;
     uint32_t sampleTime = 0;
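
Each table allocation above follows the same hardening pattern: bound the 64-bit byte count first, then allocate with std::nothrow and treat a NULL result as ERROR_OUT_OF_RANGE instead of letting operator new abort on a hostile size. A condensed sketch of the pattern (the function name and parameters are illustrative, not from the patch):

    #include <new>       // std::nothrow
    #include <stdint.h>  // UINT32_MAX

    // Illustrative helper showing the overflow check + nothrow allocation pattern.
    status_t allocEntries(uint32_t count, uint32_t **outEntries) {
        uint64_t allocSize = (uint64_t)count * 2 * sizeof(uint32_t);
        if (allocSize > UINT32_MAX) {
            return ERROR_OUT_OF_RANGE;   // size would overflow downstream 32-bit math
        }
        uint32_t *entries = new (std::nothrow) uint32_t[2 * count];
        if (entries == NULL) {
            return ERROR_OUT_OF_RANGE;   // allocation failed; report instead of aborting
        }
        *outEntries = entries;
        return OK;
    }
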
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 101fc8a..7c554db 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -20,22 +20,35 @@
 #include <inttypes.h>
 
 #include <utils/Log.h>
+#include <gui/Surface.h>
 
 #include "include/StagefrightMetadataRetriever.h"
 
+#include <media/ICrypto.h>
 #include <media/IMediaHTTPService.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
 #include <media/stagefright/ColorConverter.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/FileSource.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaExtractor.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/OMXCodec.h>
-#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/Utils.h>
+
 #include <CharacterEncodingDetector.h>
 
 namespace android {
 
+static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
+static const size_t kRetryCount = 20; // must be >0
+
 StagefrightMetadataRetriever::StagefrightMetadataRetriever()
     : mParsedMetaData(false),
       mAlbumArt(NULL) {
@@ -47,10 +60,7 @@
 
 StagefrightMetadataRetriever::~StagefrightMetadataRetriever() {
     ALOGV("~StagefrightMetadataRetriever()");
-
-    delete mAlbumArt;
-    mAlbumArt = NULL;
-
+    clearMetadata();
     mClient.disconnect();
 }
 
@@ -60,11 +70,7 @@
         const KeyedVector<String8, String8> *headers) {
     ALOGV("setDataSource(%s)", uri);
 
-    mParsedMetaData = false;
-    mMetaData.clear();
-    delete mAlbumArt;
-    mAlbumArt = NULL;
-
+    clearMetadata();
     mSource = DataSource::CreateFromURI(httpService, uri, headers);
 
     if (mSource == NULL) {
@@ -92,11 +98,7 @@
 
     ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
 
-    mParsedMetaData = false;
-    mMetaData.clear();
-    delete mAlbumArt;
-    mAlbumArt = NULL;
-
+    clearMetadata();
     mSource = new FileSource(fd, offset, length);
 
     status_t err;
@@ -117,73 +119,69 @@
     return OK;
 }
 
-static bool isYUV420PlanarSupported(
-            OMXClient *client,
-            const sp<MetaData> &trackMeta) {
+status_t StagefrightMetadataRetriever::setDataSource(
+        const sp<DataSource>& source) {
+    ALOGV("setDataSource(DataSource)");
 
-    const char *mime;
-    CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
+    clearMetadata();
+    mSource = source;
+    mExtractor = MediaExtractor::Create(mSource);
 
-    Vector<CodecCapabilities> caps;
-    if (QueryCodecs(client->interface(), mime,
-                    true, /* queryDecoders */
-                    true, /* hwCodecOnly */
-                    &caps) == OK) {
-
-        for (size_t j = 0; j < caps.size(); ++j) {
-            CodecCapabilities cap = caps[j];
-            for (size_t i = 0; i < cap.mColorFormats.size(); ++i) {
-                if (cap.mColorFormats[i] == OMX_COLOR_FormatYUV420Planar) {
-                    return true;
-                }
-            }
-        }
+    if (mExtractor == NULL) {
+        ALOGE("Failed to instantiate a MediaExtractor.");
+        mSource.clear();
+        return UNKNOWN_ERROR;
     }
-    return false;
+
+    return OK;
 }
 
-static VideoFrame *extractVideoFrameWithCodecFlags(
-        OMXClient *client,
+static VideoFrame *extractVideoFrame(
+        const char *componentName,
         const sp<MetaData> &trackMeta,
         const sp<MediaSource> &source,
-        uint32_t flags,
         int64_t frameTimeUs,
         int seekMode) {
 
     sp<MetaData> format = source->getFormat();
 
-    // XXX:
-    // Once all vendors support OMX_COLOR_FormatYUV420Planar, we can
-    // remove this check and always set the decoder output color format
-    if (isYUV420PlanarSupported(client, trackMeta)) {
-        format->setInt32(kKeyColorFormat, OMX_COLOR_FormatYUV420Planar);
-    }
+    sp<AMessage> videoFormat;
+    convertMetaDataToMessage(trackMeta, &videoFormat);
 
-    sp<MediaSource> decoder =
-        OMXCodec::Create(
-                client->interface(), format, false, source,
-                NULL, flags | OMXCodec::kClientNeedsFramebuffer);
+    // TODO: Use Flexible color instead
+    videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
 
-    if (decoder.get() == NULL) {
-        ALOGV("unable to instantiate video decoder.");
+    status_t err;
+    sp<ALooper> looper = new ALooper;
+    looper->start();
+    sp<MediaCodec> decoder = MediaCodec::CreateByComponentName(
+            looper, componentName, &err);
 
+    if (decoder.get() == NULL || err != OK) {
+        ALOGW("Failed to instantiate decoder [%s]", componentName);
         return NULL;
     }
 
-    status_t err = decoder->start();
+    err = decoder->configure(videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
     if (err != OK) {
-        ALOGW("OMXCodec::start returned error %d (0x%08x)\n", err, err);
+        ALOGW("configure returned error %d (%s)", err, asString(err));
+        decoder->release();
         return NULL;
     }
 
-    // Read one output buffer, ignore format change notifications
-    // and spurious empty buffers.
+    err = decoder->start();
+    if (err != OK) {
+        ALOGW("start returned error %d (%s)", err, asString(err));
+        decoder->release();
+        return NULL;
+    }
 
     MediaSource::ReadOptions options;
     if (seekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
         seekMode > MediaSource::ReadOptions::SEEK_CLOSEST) {
 
         ALOGE("Unknown seek mode: %d", seekMode);
+        decoder->release();
         return NULL;
     }
 
@@ -202,64 +200,155 @@
         options.setSeekTo(frameTimeUs, mode);
     }
 
-    MediaBuffer *buffer = NULL;
-    do {
-        if (buffer != NULL) {
-            buffer->release();
-            buffer = NULL;
-        }
-        err = decoder->read(&buffer, &options);
-        options.clearSeekTo();
-    } while (err == INFO_FORMAT_CHANGED
-             || (buffer != NULL && buffer->range_length() == 0));
-
+    err = source->start();
     if (err != OK) {
-        CHECK(buffer == NULL);
+        ALOGW("source failed to start: %d (%s)", err, asString(err));
+        decoder->release();
+        return NULL;
+    }
 
-        ALOGV("decoding frame failed.");
+    Vector<sp<ABuffer> > inputBuffers;
+    err = decoder->getInputBuffers(&inputBuffers);
+    if (err != OK) {
+        ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
+        decoder->release();
+        return NULL;
+    }
+
+    Vector<sp<ABuffer> > outputBuffers;
+    err = decoder->getOutputBuffers(&outputBuffers);
+    if (err != OK) {
+        ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
+        decoder->release();
+        return NULL;
+    }
+
+    sp<AMessage> outputFormat = NULL;
+    bool haveMoreInputs = true;
+    size_t index, offset, size;
+    int64_t timeUs;
+    size_t retriesLeft = kRetryCount;
+    bool done = false;
+
+    do {
+        size_t inputIndex = -1;
+        int64_t ptsUs = 0ll;
+        uint32_t flags = 0;
+        sp<ABuffer> codecBuffer = NULL;
+
+        while (haveMoreInputs) {
+            err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
+            if (err != OK) {
+                ALOGW("Timed out waiting for input");
+                if (retriesLeft) {
+                    err = OK;
+                }
+                break;
+            }
+            codecBuffer = inputBuffers[inputIndex];
+
+            MediaBuffer *mediaBuffer = NULL;
+
+            err = source->read(&mediaBuffer, &options);
+            options.clearSeekTo();
+            if (err != OK) {
+                ALOGW("Input Error or EOS");
+                haveMoreInputs = false;
+                break;
+            }
+
+            if (mediaBuffer->range_length() > codecBuffer->capacity()) {
+                ALOGE("buffer size (%zu) too large for codec input size (%zu)",
+                        mediaBuffer->range_length(), codecBuffer->capacity());
+                err = BAD_VALUE;
+            } else {
+                codecBuffer->setRange(0, mediaBuffer->range_length());
+
+                CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &ptsUs));
+                memcpy(codecBuffer->data(),
+                        (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
+                        mediaBuffer->range_length());
+            }
+
+            mediaBuffer->release();
+            break;
+        }
+
+        if (err == OK && inputIndex < inputBuffers.size()) {
+            ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
+                    codecBuffer->size(), ptsUs, flags);
+            err = decoder->queueInputBuffer(
+                    inputIndex,
+                    codecBuffer->offset(),
+                    codecBuffer->size(),
+                    ptsUs,
+                    flags);
+
+            // we don't expect an output from a codec config buffer
+            if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
+                continue;
+            }
+        }
+
+        while (err == OK) {
+            // wait for a decoded buffer
+            err = decoder->dequeueOutputBuffer(
+                    &index,
+                    &offset,
+                    &size,
+                    &timeUs,
+                    &flags,
+                    kBufferTimeOutUs);
+
+            if (err == INFO_FORMAT_CHANGED) {
+                ALOGV("Received format change");
+                err = decoder->getOutputFormat(&outputFormat);
+            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+                ALOGV("Output buffers changed");
+                err = decoder->getOutputBuffers(&outputBuffers);
+            } else {
+                if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
+                    ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
+                    err = OK;
+                } else if (err == OK) {
+                    ALOGV("Received an output buffer");
+                    done = true;
+                } else {
+                    ALOGW("Received error %d (%s) instead of output", err, asString(err));
+                    done = true;
+                }
+                break;
+            }
+        }
+    } while (err == OK && !done);
+
+    if (err != OK || size <= 0 || outputFormat == NULL) {
+        ALOGE("Failed to decode thumbnail frame");
+        source->stop();
         decoder->stop();
-
+        decoder->release();
         return NULL;
     }
 
     ALOGV("successfully decoded video frame.");
+    sp<ABuffer> videoFrameBuffer = outputBuffers.itemAt(index);
 
-    int32_t unreadable;
-    if (buffer->meta_data()->findInt32(kKeyIsUnreadable, &unreadable)
-            && unreadable != 0) {
-        ALOGV("video frame is unreadable, decoder does not give us access "
-             "to the video data.");
-
-        buffer->release();
-        buffer = NULL;
-
-        decoder->stop();
-
-        return NULL;
-    }
-
-    int64_t timeUs;
-    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
     if (thumbNailTime >= 0) {
         if (timeUs != thumbNailTime) {
-            const char *mime;
-            CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
+            AString mime;
+            CHECK(outputFormat->findString("mime", &mime));
 
-            ALOGV("thumbNailTime = %" PRId64 " us, timeUs = %" PRId64 " us, mime = %s",
-                 thumbNailTime, timeUs, mime);
+            ALOGV("thumbNailTime = %lld us, timeUs = %lld us, mime = %s",
+                    (long long)thumbNailTime, (long long)timeUs, mime.c_str());
         }
     }
 
-    sp<MetaData> meta = decoder->getFormat();
-
     int32_t width, height;
-    CHECK(meta->findInt32(kKeyWidth, &width));
-    CHECK(meta->findInt32(kKeyHeight, &height));
+    CHECK(outputFormat->findInt32("width", &width));
+    CHECK(outputFormat->findInt32("height", &height));
 
     int32_t crop_left, crop_top, crop_right, crop_bottom;
-    if (!meta->findRect(
-                kKeyCropRect,
-                &crop_left, &crop_top, &crop_right, &crop_bottom)) {
+    if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
         crop_left = crop_top = 0;
         crop_right = width - 1;
         crop_bottom = height - 1;
@@ -279,23 +368,21 @@
     frame->mData = new uint8_t[frame->mSize];
     frame->mRotationAngle = rotationAngle;
 
-    int32_t displayWidth, displayHeight;
-    if (meta->findInt32(kKeyDisplayWidth, &displayWidth)) {
-        frame->mDisplayWidth = displayWidth;
-    }
-    if (meta->findInt32(kKeyDisplayHeight, &displayHeight)) {
-        frame->mDisplayHeight = displayHeight;
+    int32_t sarWidth, sarHeight;
+    if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
+            && trackMeta->findInt32(kKeySARHeight, &sarHeight)
+            && sarHeight != 0) {
+        frame->mDisplayWidth = (frame->mDisplayWidth * sarWidth) / sarHeight;
     }
 
     int32_t srcFormat;
-    CHECK(meta->findInt32(kKeyColorFormat, &srcFormat));
+    CHECK(outputFormat->findInt32("color-format", &srcFormat));
 
-    ColorConverter converter(
-            (OMX_COLOR_FORMATTYPE)srcFormat, OMX_COLOR_Format16bitRGB565);
+    ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, OMX_COLOR_Format16bitRGB565);
 
     if (converter.isValid()) {
         err = converter.convert(
-                (const uint8_t *)buffer->data() + buffer->range_offset(),
+                (const uint8_t *)videoFrameBuffer->data(),
                 width, height,
                 crop_left, crop_top, crop_right, crop_bottom,
                 frame->mData,
@@ -303,17 +390,16 @@
                 frame->mHeight,
                 0, 0, frame->mWidth - 1, frame->mHeight - 1);
     } else {
-        ALOGE("Unable to instantiate color conversion from format 0x%08x to "
-              "RGB565",
-              srcFormat);
+        ALOGE("Unable to convert from format 0x%08x to RGB565", srcFormat);
 
         err = ERROR_UNSUPPORTED;
     }
 
-    buffer->release();
-    buffer = NULL;
-
+    videoFrameBuffer.clear();
+    source->stop();
+    decoder->releaseOutputBuffer(index);
     decoder->stop();
+    decoder->release();
 
     if (err != OK) {
         ALOGE("Colorconverter failed to convert frame.");
@@ -384,20 +470,29 @@
         mAlbumArt = MediaAlbumArt::fromData(dataSize, data);
     }
 
-    VideoFrame *frame =
-        extractVideoFrameWithCodecFlags(
-                &mClient, trackMeta, source, OMXCodec::kPreferSoftwareCodecs,
-                timeUs, option);
+    const char *mime;
+    CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
 
-    if (frame == NULL) {
-        ALOGV("Software decoder failed to extract thumbnail, "
-             "trying hardware decoder.");
+    Vector<OMXCodec::CodecNameAndQuirks> matchingCodecs;
+    OMXCodec::findMatchingCodecs(
+            mime,
+            false, /* encoder */
+            NULL, /* matchComponentName */
+            OMXCodec::kPreferSoftwareCodecs,
+            &matchingCodecs);
 
-        frame = extractVideoFrameWithCodecFlags(&mClient, trackMeta, source, 0,
-                        timeUs, option);
+    for (size_t i = 0; i < matchingCodecs.size(); ++i) {
+        const char *componentName = matchingCodecs[i].mName.string();
+        VideoFrame *frame =
+            extractVideoFrame(componentName, trackMeta, source, timeUs, option);
+
+        if (frame != NULL) {
+            return frame;
+        }
+        ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName);
     }
 
-    return frame;
+    return NULL;
 }
 
 MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
@@ -519,6 +614,12 @@
 
     mMetaData.add(METADATA_KEY_NUM_TRACKS, String8(tmp));
 
+    float captureFps;
+    if (meta->findFloat(kKeyCaptureFramerate, &captureFps)) {
+        sprintf(tmp, "%f", captureFps);
+        mMetaData.add(METADATA_KEY_CAPTURE_FRAMERATE, String8(tmp));
+    }
+
     bool hasAudio = false;
     bool hasVideo = false;
     int32_t videoWidth = -1;
@@ -629,4 +730,11 @@
     }
 }
 
+void StagefrightMetadataRetriever::clearMetadata() {
+    mParsedMetaData = false;
+    mMetaData.clear();
+    delete mAlbumArt;
+    mAlbumArt = NULL;
+}
+
 }  // namespace android
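
For reference, the reworked getFrameAtTime() path above drops the fixed software-then-hardware retry and instead walks every matching component, returning the first decoder that actually produces a frame. A minimal standalone sketch of that fallback pattern; Frame, tryCandidates and decodeWith are illustrative placeholders, not stagefright APIs:

#include <functional>
#include <string>
#include <vector>

struct Frame { int width, height; };

// Try each candidate decoder in preference order; the first one that
// returns a frame wins, and a null result means every candidate failed.
static Frame *tryCandidates(
        const std::vector<std::string> &candidates,
        const std::function<Frame *(const std::string &)> &decodeWith) {
    for (const std::string &name : candidates) {
        if (Frame *frame = decodeWith(name)) {
            return frame;
        }
        // log "<name> failed, trying next decoder" and fall through
    }
    return nullptr;
}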
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
new file mode 100644
index 0000000..6b62e43
--- /dev/null
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SurfaceUtils"
+#include <utils/Log.h>
+
+#include <media/stagefright/SurfaceUtils.h>
+
+#include <gui/Surface.h>
+
+namespace android {
+
+status_t setNativeWindowSizeFormatAndUsage(
+        ANativeWindow *nativeWindow /* nonnull */,
+        int width, int height, int format, int rotation, int usage) {
+    status_t err = native_window_set_buffers_dimensions(nativeWindow, width, height);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_buffers_dimensions failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    err = native_window_set_buffers_format(nativeWindow, format);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_buffers_format failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    int transform = 0;
+    if ((rotation % 90) == 0) {
+        switch ((rotation / 90) & 3) {
+            case 1:  transform = HAL_TRANSFORM_ROT_90;  break;
+            case 2:  transform = HAL_TRANSFORM_ROT_180; break;
+            case 3:  transform = HAL_TRANSFORM_ROT_270; break;
+            default: transform = 0;                     break;
+        }
+    }
+
+    err = native_window_set_buffers_transform(nativeWindow, transform);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_buffers_transform failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    // Make sure to check whether either Stagefright or the video decoder
+    // requested protected buffers.
+    if (usage & GRALLOC_USAGE_PROTECTED) {
+        // Verify that the ANativeWindow sends images directly to
+        // SurfaceFlinger.
+        int queuesToNativeWindow = 0;
+        err = nativeWindow->query(
+                nativeWindow, NATIVE_WINDOW_QUEUES_TO_WINDOW_COMPOSER, &queuesToNativeWindow);
+        if (err != NO_ERROR) {
+            ALOGE("error authenticating native window: %s (%d)", strerror(-err), -err);
+            return err;
+        }
+        if (queuesToNativeWindow != 1) {
+            ALOGE("native window could not be authenticated");
+            return PERMISSION_DENIED;
+        }
+    }
+
+    int consumerUsage = 0;
+    err = nativeWindow->query(nativeWindow, NATIVE_WINDOW_CONSUMER_USAGE_BITS, &consumerUsage);
+    if (err != NO_ERROR) {
+        ALOGW("failed to get consumer usage bits. ignoring");
+        err = NO_ERROR;
+    }
+
+    int finalUsage = usage | consumerUsage;
+    ALOGV("gralloc usage: %#x(producer) + %#x(consumer) = %#x", usage, consumerUsage, finalUsage);
+    err = native_window_set_usage(nativeWindow, finalUsage);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_usage failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    err = native_window_set_scaling_mode(
+            nativeWindow, NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
+    if (err != NO_ERROR) {
+        ALOGE("native_window_set_scaling_mode failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    ALOGD("set up nativeWindow %p for %dx%d, color %#x, rotation %d, usage %#x",
+            nativeWindow, width, height, format, rotation, finalUsage);
+    return NO_ERROR;
+}
+
+status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
+    status_t err = NO_ERROR;
+    ANativeWindowBuffer* anb = NULL;
+    int numBufs = 0;
+    int minUndequeuedBufs = 0;
+
+    // We need to reconnect to the ANativeWindow as a CPU client to ensure that
+    // no frames get dropped by SurfaceFlinger assuming that these are video
+    // frames.
+    err = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err), -err);
+        return err;
+    }
+
+    err = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_CPU);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
+        (void)native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+        return err;
+    }
+
+    err = setNativeWindowSizeFormatAndUsage(
+            nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN);
+    if (err != NO_ERROR) {
+        goto error;
+    }
+
+    static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->allowAllocation(true);
+
+    err = nativeWindow->query(nativeWindow,
+            NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
+                "failed: %s (%d)", strerror(-err), -err);
+        goto error;
+    }
+
+    numBufs = minUndequeuedBufs + 1;
+    err = native_window_set_buffer_count(nativeWindow, numBufs);
+    if (err != NO_ERROR) {
+        ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)", strerror(-err), -err);
+        goto error;
+    }
+
+    // We push numBufs + 1 buffers to ensure that we've drawn into the same
+    // buffer twice.  This should guarantee that the buffer has been displayed
+    // on the screen and then been replaced, so any previous video frames are
+    // guaranteed NOT to be currently displayed.
+    for (int i = 0; i < numBufs + 1; i++) {
+        err = native_window_dequeue_buffer_and_wait(nativeWindow, &anb);
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: dequeueBuffer failed: %s (%d)",
+                    strerror(-err), -err);
+            break;
+        }
+
+        sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+
+        // Fill the buffer with a 1x1 checkerboard pattern ;)
+        uint32_t *img = NULL;
+        err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: lock failed: %s (%d)", strerror(-err), -err);
+            break;
+        }
+
+        *img = 0;
+
+        err = buf->unlock();
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: unlock failed: %s (%d)", strerror(-err), -err);
+            break;
+        }
+
+        err = nativeWindow->queueBuffer(nativeWindow, buf->getNativeBuffer(), -1);
+        if (err != NO_ERROR) {
+            ALOGE("error pushing blank frames: queueBuffer failed: %s (%d)", strerror(-err), -err);
+            break;
+        }
+
+        anb = NULL;
+    }
+
+error:
+
+    if (anb != NULL) {
+        nativeWindow->cancelBuffer(nativeWindow, anb, -1);
+        anb = NULL;
+    }
+
+    // Clean up after success or error.
+    status_t err2 = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_CPU);
+    if (err2 != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err2), -err2);
+        if (err == NO_ERROR) {
+            err = err2;
+        }
+    }
+
+    err2 = native_window_api_connect(nativeWindow, NATIVE_WINDOW_API_MEDIA);
+    if (err2 != NO_ERROR) {
+        ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
+        if (err == NO_ERROR) {
+            err = err2;
+        }
+    }
+
+    return err;
+}
+
+}  // namespace android
+
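
For reference, a minimal sketch of how a caller might use the new setNativeWindowSizeFormatAndUsage() helper; it only builds inside the AOSP tree, and the wrapper name, pixel format and usage bits are illustrative choices rather than something this patch prescribes:

#include <gui/Surface.h>
#include <hardware/gralloc.h>
#include <media/stagefright/SurfaceUtils.h>
#include <system/window.h>

using namespace android;

// Configure a Surface-backed ANativeWindow for software reads of decoded
// frames; rotation is forwarded so the helper picks the matching transform.
status_t prepareVideoOutput(
        const sp<Surface> &surface, int width, int height, int rotationDegrees) {
    ANativeWindow *window = surface.get();  // Surface is-a ANativeWindow
    return setNativeWindowSizeFormatAndUsage(
            window, width, height, HAL_PIXEL_FORMAT_YV12,
            rotationDegrees, GRALLOC_USAGE_SW_READ_OFTEN);
}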
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 73f23f0..f0a7277 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -183,11 +183,26 @@
         msg->setInt32("max-input-size", maxInputSize);
     }
 
+    int32_t maxWidth;
+    if (meta->findInt32(kKeyMaxWidth, &maxWidth)) {
+        msg->setInt32("max-width", maxWidth);
+    }
+
+    int32_t maxHeight;
+    if (meta->findInt32(kKeyMaxHeight, &maxHeight)) {
+        msg->setInt32("max-height", maxHeight);
+    }
+
     int32_t rotationDegrees;
     if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
         msg->setInt32("rotation-degrees", rotationDegrees);
     }
 
+    int32_t fps;
+    if (meta->findInt32(kKeyFrameRate, &fps)) {
+        msg->setInt32("frame-rate", fps);
+    }
+
     uint32_t type;
     const void *data;
     size_t size;
@@ -388,6 +403,34 @@
         buffer->meta()->setInt32("csd", true);
         buffer->meta()->setInt64("timeUs", 0);
         msg->setBuffer("csd-0", buffer);
+
+        if (!meta->findData(kKeyOpusCodecDelay, &type, &data, &size)) {
+            return -EINVAL;
+        }
+
+        buffer = new (std::nothrow) ABuffer(size);
+        if (buffer.get() == NULL || buffer->base() == NULL) {
+            return NO_MEMORY;
+        }
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-1", buffer);
+
+        if (!meta->findData(kKeyOpusSeekPreRoll, &type, &data, &size)) {
+            return -EINVAL;
+        }
+
+        buffer = new (std::nothrow) ABuffer(size);
+        if (buffer.get() == NULL || buffer->base() == NULL) {
+            return NO_MEMORY;
+        }
+        memcpy(buffer->data(), data, size);
+
+        buffer->meta()->setInt32("csd", true);
+        buffer->meta()->setInt64("timeUs", 0);
+        msg->setBuffer("csd-2", buffer);
     }
 
     *format = msg;
@@ -590,19 +633,36 @@
         meta->setInt32(kKeyMaxInputSize, maxInputSize);
     }
 
+    int32_t maxWidth;
+    if (msg->findInt32("max-width", &maxWidth)) {
+        meta->setInt32(kKeyMaxWidth, maxWidth);
+    }
+
+    int32_t maxHeight;
+    if (msg->findInt32("max-height", &maxHeight)) {
+        meta->setInt32(kKeyMaxHeight, maxHeight);
+    }
+
+    int32_t fps;
+    if (msg->findInt32("frame-rate", &fps)) {
+        meta->setInt32(kKeyFrameRate, fps);
+    }
+
     // reassemble the csd data into its original form
     sp<ABuffer> csd0;
     if (msg->findBuffer("csd-0", &csd0)) {
-        if (mime.startsWith("video/")) { // do we need to be stricter than this?
+        if (mime == MEDIA_MIMETYPE_VIDEO_AVC) {
             sp<ABuffer> csd1;
             if (msg->findBuffer("csd-1", &csd1)) {
                 char avcc[1024]; // that oughta be enough, right?
                 size_t outsize = reassembleAVCC(csd0, csd1, avcc);
                 meta->setData(kKeyAVCC, kKeyAVCC, avcc, outsize);
             }
-        } else if (mime.startsWith("audio/")) {
+        } else if (mime == MEDIA_MIMETYPE_AUDIO_AAC || mime == MEDIA_MIMETYPE_VIDEO_MPEG4) {
             int csd0size = csd0->size();
             char esds[csd0size + 31];
+            // The written ESDS is actually for an audio stream, but it's enough
+            // for transporting the CSD to muxers.
             reassembleESDS(csd0, esds);
             meta->setData(kKeyESDS, kKeyESDS, esds, sizeof(esds));
         }
@@ -844,5 +904,88 @@
     return AString("<no-scheme URI suppressed>");
 }
 
+HLSTime::HLSTime(const sp<AMessage>& meta) :
+    mSeq(-1),
+    mTimeUs(-1ll),
+    mMeta(meta) {
+    if (meta != NULL) {
+        CHECK(meta->findInt32("discontinuitySeq", &mSeq));
+        CHECK(meta->findInt64("timeUs", &mTimeUs));
+    }
+}
+
+int64_t HLSTime::getSegmentTimeUs() const {
+    int64_t segmentStartTimeUs = -1ll;
+    if (mMeta != NULL) {
+        CHECK(mMeta->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
+
+        int64_t segmentFirstTimeUs;
+        if (mMeta->findInt64("segmentFirstTimeUs", &segmentFirstTimeUs)) {
+            segmentStartTimeUs += mTimeUs - segmentFirstTimeUs;
+        }
+
+        // adjust segment time by playlist age (for live streaming)
+        int64_t playlistTimeUs;
+        if (mMeta->findInt64("playlistTimeUs", &playlistTimeUs)) {
+            int64_t playlistAgeUs = ALooper::GetNowUs() - playlistTimeUs;
+
+            int64_t durationUs;
+            CHECK(mMeta->findInt64("segmentDurationUs", &durationUs));
+
+            // round to nearest whole segment
+            playlistAgeUs = (playlistAgeUs + durationUs / 2)
+                    / durationUs * durationUs;
+
+            segmentStartTimeUs -= playlistAgeUs;
+            if (segmentStartTimeUs < 0) {
+                segmentStartTimeUs = 0;
+            }
+        }
+    }
+    return segmentStartTimeUs;
+}
+
+bool operator <(const HLSTime &t0, const HLSTime &t1) {
+    // we can only compare discontinuity sequence and timestamp.
+    // (mSegmentTimeUs is not reliable in the live streaming case: it's the
+    // time from the beginning of the playlist, but the playlist can change.)
+    return t0.mSeq < t1.mSeq
+            || (t0.mSeq == t1.mSeq && t0.mTimeUs < t1.mTimeUs);
+}
+
+void writeToAMessage(sp<AMessage> msg, const AudioPlaybackRate &rate) {
+    msg->setFloat("speed", rate.mSpeed);
+    msg->setFloat("pitch", rate.mPitch);
+    msg->setInt32("audio-fallback-mode", rate.mFallbackMode);
+    msg->setInt32("audio-stretch-mode", rate.mStretchMode);
+}
+
+void readFromAMessage(const sp<AMessage> &msg, AudioPlaybackRate *rate /* nonnull */) {
+    *rate = AUDIO_PLAYBACK_RATE_DEFAULT;
+    CHECK(msg->findFloat("speed", &rate->mSpeed));
+    CHECK(msg->findFloat("pitch", &rate->mPitch));
+    CHECK(msg->findInt32("audio-fallback-mode", (int32_t *)&rate->mFallbackMode));
+    CHECK(msg->findInt32("audio-stretch-mode", (int32_t *)&rate->mStretchMode));
+}
+
+void writeToAMessage(sp<AMessage> msg, const AVSyncSettings &sync, float videoFpsHint) {
+    msg->setInt32("sync-source", sync.mSource);
+    msg->setInt32("audio-adjust-mode", sync.mAudioAdjustMode);
+    msg->setFloat("tolerance", sync.mTolerance);
+    msg->setFloat("video-fps", videoFpsHint);
+}
+
+void readFromAMessage(
+        const sp<AMessage> &msg,
+        AVSyncSettings *sync /* nonnull */,
+        float *videoFps /* nonnull */) {
+    AVSyncSettings settings;
+    CHECK(msg->findInt32("sync-source", (int32_t *)&settings.mSource));
+    CHECK(msg->findInt32("audio-adjust-mode", (int32_t *)&settings.mAudioAdjustMode));
+    CHECK(msg->findFloat("tolerance", &settings.mTolerance));
+    CHECK(msg->findFloat("video-fps", videoFps));
+    *sync = settings;
+}
+
 }  // namespace android
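
For reference, a minimal sketch of the AudioPlaybackRate round-trip enabled by the writeToAMessage()/readFromAMessage() pair added above; it assumes the AOSP tree, with the declarations exposed via media/stagefright/Utils.h and AudioPlaybackRate coming from media/AudioResamplerPublic.h (both header paths are assumptions), and the rate values are arbitrary:

#include <media/AudioResamplerPublic.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

void roundTripPlaybackRate() {
    AudioPlaybackRate out = AUDIO_PLAYBACK_RATE_DEFAULT;
    out.mSpeed = 1.5f;   // play 1.5x faster
    out.mPitch = 1.0f;   // keep the original pitch

    sp<AMessage> msg = new AMessage;
    writeToAMessage(msg, out);   // speed/pitch/fallback/stretch -> message fields

    AudioPlaybackRate in;
    readFromAMessage(msg, &in);  // message fields -> AudioPlaybackRate
    // in.mSpeed == 1.5f and in.mPitch == 1.0f after the round trip
}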
 
diff --git a/media/libstagefright/VBRISeeker.cpp b/media/libstagefright/VBRISeeker.cpp
index e988f6d..8a0fcac 100644
--- a/media/libstagefright/VBRISeeker.cpp
+++ b/media/libstagefright/VBRISeeker.cpp
@@ -122,7 +122,7 @@
 
         seeker->mSegments.push(numBytes);
 
-        ALOGV("entry #%zu: %u offset 0x%016llx", i, numBytes, offset);
+        ALOGV("entry #%zu: %u offset %#016llx", i, numBytes, (long long)offset);
         offset += numBytes;
     }
 
@@ -163,7 +163,7 @@
         *pos += mSegments.itemAt(segmentIndex++);
     }
 
-    ALOGV("getOffsetForTime %" PRId64 " us => 0x%016llx", *timeUs, *pos);
+    ALOGV("getOffsetForTime %lld us => 0x%016llx", (long long)*timeUs, (long long)*pos);
 
     *timeUs = nowUs;
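
For reference, the %lld changes in this file follow the convention used throughout the patch: cast the 64-bit value to long long so the format string matches on both 32-bit and 64-bit builds. A standalone illustration in plain C++ (not stagefright code):

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
    int64_t offsetBytes = INT64_C(0x123456789);
    // Either cast to long long so %lld/%llx always match ...
    std::printf("offset %#016llx\n", (long long)offsetBytes);
    // ... or use the PRIx64/PRId64 macros from <cinttypes>.
    std::printf("offset %#016" PRIx64 "\n", offsetBytes);
    return 0;
}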
 
diff --git a/media/libmediaplayerservice/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
similarity index 96%
rename from media/libmediaplayerservice/VideoFrameScheduler.cpp
rename to media/libstagefright/VideoFrameScheduler.cpp
index ce5f5fe..5fe9bf9 100644
--- a/media/libmediaplayerservice/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -28,8 +28,7 @@
 
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AUtils.h>
-
-#include "VideoFrameScheduler.h"
+#include <media/stagefright/VideoFrameScheduler.h>
 
 namespace android {
 
@@ -56,7 +55,7 @@
 static const size_t kMaxSamplesToEstimatePeriod = VideoFrameScheduler::kHistorySize;
 
 static const size_t kPrecision = 12;
-static const size_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
+static const int64_t kErrorThreshold = (1 << (kPrecision * 2)) / 10;
 static const int64_t kMultiplesThresholdDiv = 4;            // 25%
 static const int64_t kReFitThresholdDiv = 100;              // 1%
 static const nsecs_t kMaxAllowedFrameSkip = kNanosIn1s;     // 1 sec
@@ -258,7 +257,8 @@
             mPhase = firstTime;
         }
     }
-    ALOGV("priming[%zu] phase:%lld period:%lld", numSamplesToUse, mPhase, mPeriod);
+    ALOGV("priming[%zu] phase:%lld period:%lld",
+            numSamplesToUse, (long long)mPhase, (long long)mPeriod);
 }
 
 nsecs_t VideoFrameScheduler::PLL::addSample(nsecs_t time) {
@@ -316,6 +316,10 @@
     return mPeriod;
 }
 
+nsecs_t VideoFrameScheduler::PLL::getPeriod() const {
+    return mPrimed ? mPeriod : 0;
+}
+
 /* ======================================================================= */
 /*                             Frame Scheduler                             */
 /* ======================================================================= */
@@ -382,6 +386,14 @@
     return kDefaultVsyncPeriod;
 }
 
+float VideoFrameScheduler::getFrameRate() {
+    nsecs_t videoPeriod = mPll.getPeriod();
+    if (videoPeriod > 0) {
+        return 1e9 / videoPeriod;
+    }
+    return 0.f;
+}
+
 nsecs_t VideoFrameScheduler::schedule(nsecs_t renderTime) {
     nsecs_t origRenderTime = renderTime;
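
For reference, the new getFrameRate() just inverts the PLL's detected frame period (in nanoseconds) into frames per second, returning 0 when the PLL is not primed. The same arithmetic in standalone form; periodToFps is an illustrative name:

#include <cstdint>
#include <cstdio>

typedef int64_t nsecs_t;

static float periodToFps(nsecs_t periodNs) {
    if (periodNs > 0) {
        return 1e9f / periodNs;   // e.g. 16666667 ns per frame -> ~60 fps
    }
    return 0.f;                   // period unknown / PLL not primed
}

int main() {
    std::printf("%.2f fps\n", periodToFps(16666667));  // prints 60.00
    return 0;
}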
 
diff --git a/media/libstagefright/avc_utils.cpp b/media/libstagefright/avc_utils.cpp
index 5ec3438..8ef2dca 100644
--- a/media/libstagefright/avc_utils.cpp
+++ b/media/libstagefright/avc_utils.cpp
@@ -26,6 +26,7 @@
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MetaData.h>
+#include <utils/misc.h>
 
 namespace android {
 
@@ -186,17 +187,31 @@
             if (aspect_ratio_idc == 255 /* extendedSAR */) {
                 sar_width = br.getBits(16);
                 sar_height = br.getBits(16);
-            } else if (aspect_ratio_idc > 0 && aspect_ratio_idc < 14) {
-                static const int32_t kFixedSARWidth[] = {
-                    1, 12, 10, 16, 40, 24, 20, 32, 80, 18, 15, 64, 160
+            } else {
+                static const struct { unsigned width, height; } kFixedSARs[] = {
+                        {   0,  0 }, // Invalid
+                        {   1,  1 },
+                        {  12, 11 },
+                        {  10, 11 },
+                        {  16, 11 },
+                        {  40, 33 },
+                        {  24, 11 },
+                        {  20, 11 },
+                        {  32, 11 },
+                        {  80, 33 },
+                        {  18, 11 },
+                        {  15, 11 },
+                        {  64, 33 },
+                        { 160, 99 },
+                        {   4,  3 },
+                        {   3,  2 },
+                        {   2,  1 },
                 };
 
-                static const int32_t kFixedSARHeight[] = {
-                    1, 11, 11, 11, 33, 11, 11, 11, 33, 11, 11, 33, 99
-                };
-
-                sar_width = kFixedSARWidth[aspect_ratio_idc - 1];
-                sar_height = kFixedSARHeight[aspect_ratio_idc - 1];
+                if (aspect_ratio_idc > 0 && aspect_ratio_idc < NELEM(kFixedSARs)) {
+                    sar_width = kFixedSARs[aspect_ratio_idc].width;
+                    sar_height = kFixedSARs[aspect_ratio_idc].height;
+                }
             }
         }
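
For reference, the expanded table above carries the fixed sample aspect ratios defined by the H.264 spec (Table E-1), and the metadata retriever change earlier in this patch applies the SAR to the display width in the same way. A standalone sketch of that scaling; applySar is an illustrative name:

#include <cstdint>
#include <cstdio>

static int32_t applySar(int32_t codedWidth, int32_t sarWidth, int32_t sarHeight) {
    if (sarWidth > 0 && sarHeight > 0) {
        return (codedWidth * sarWidth) / sarHeight;  // stretch for display
    }
    return codedWidth;  // unknown SAR: leave the coded width untouched
}

int main() {
    // A 720-pixel-wide frame with SAR 16:11 (aspect_ratio_idc 4) maps to a
    // 1047-pixel display width.
    std::printf("display width = %d\n", applySar(720, 16, 11));
    return 0;
}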
 
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 495bad0..965c55e 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -75,7 +75,7 @@
 
 SoftAAC2::~SoftAAC2() {
     aacDecoder_Close(mAACDecoder);
-    delete mOutputDelayRingBuffer;
+    delete[] mOutputDelayRingBuffer;
 }
 
 void SoftAAC2::initPorts() {
@@ -623,7 +623,7 @@
                 } else {
                     int64_t currentTime = mBufferTimestamps.top();
                     currentTime += mStreamInfo->aacSamplesPerFrame *
-                            1000000ll / mStreamInfo->sampleRate;
+                            1000000ll / mStreamInfo->aacSampleRate;
                     mBufferTimestamps.add(currentTime);
                 }
             } else {
@@ -874,7 +874,7 @@
                         // adjust/interpolate next time stamp
                         *currentBufLeft -= decodedSize;
                         *nextTimeStamp += mStreamInfo->aacSamplesPerFrame *
-                                1000000ll / mStreamInfo->sampleRate;
+                                1000000ll / mStreamInfo->aacSampleRate;
                         ALOGV("adjusted nextTimeStamp/size to %lld/%d",
                                 (long long) *nextTimeStamp, *currentBufLeft);
                     } else {
@@ -975,6 +975,7 @@
         mBufferSizes.clear();
         mDecodedSizes.clear();
         mLastInHeader = NULL;
+        mEndOfInput = false;
     } else {
         int avail;
         while ((avail = outputDelayRingBufferSamplesAvailable()) > 0) {
@@ -989,6 +990,7 @@
             mOutputBufferCount++;
         }
         mOutputDelayRingBufferReadPos = mOutputDelayRingBufferWritePos;
+        mEndOfOutput = false;
     }
 }
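
For reference, the timestamp fix above advances each interpolated timestamp by one frame duration derived from the stream's aacSampleRate rather than the output sample rate. The same arithmetic in standalone form:

#include <cstdint>
#include <cstdio>

static int64_t frameDurationUs(int32_t samplesPerFrame, int32_t sampleRate) {
    return (int64_t)samplesPerFrame * 1000000ll / sampleRate;
}

int main() {
    // 1024 samples per AAC frame at 44100 Hz -> 23219 us (~23.2 ms) per frame
    // with integer division.
    std::printf("%lld us\n", (long long)frameDurationUs(1024, 44100));
    return 0;
}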
 
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index d1b0f76..a9723ea 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -428,7 +428,15 @@
     }
 }
 
-void SoftAMR::onPortFlushCompleted(OMX_U32 /* portIndex */) {
+void SoftAMR::onPortFlushCompleted(OMX_U32 portIndex) {
+    ALOGV("onPortFlushCompleted portindex %d, resetting frame ", portIndex);
+    if (portIndex == 0) {
+        if (mMode == MODE_NARROW) {
+           Speech_Decode_Frame_reset(mState);
+        } else {
+           pvDecoder_AmrWb_Reset(mState, 0 /* reset_all */);
+        }
+    }
 }
 
 void SoftAMR::onPortEnableCompleted(OMX_U32 portIndex, bool enabled) {
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index 8388472..e083315 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -21,7 +21,6 @@
 #include "ih264_typedefs.h"
 #include "iv.h"
 #include "ivd.h"
-#include "ithread.h"
 #include "ih264d.h"
 #include "SoftAVCDec.h"
 
@@ -115,13 +114,16 @@
             kProfileLevels, ARRAY_SIZE(kProfileLevels),
             320 /* width */, 240 /* height */, callbacks,
             appData, component),
+      mCodecCtx(NULL),
       mMemRecords(NULL),
       mFlushOutBuffer(NULL),
       mOmxColorFormat(OMX_COLOR_FormatYUV420Planar),
       mIvColorFormat(IV_YUV_420P),
       mNewWidth(mWidth),
       mNewHeight(mHeight),
-      mChangingResolution(false) {
+      mNewLevel(0),
+      mChangingResolution(false),
+      mSignalledError(false) {
     initPorts(
             kNumBuffers, INPUT_BUF_SIZE, kNumBuffers, CODEC_MIME_TYPE);
 
@@ -131,7 +133,7 @@
     GENERATE_FILE_NAMES();
     CREATE_DUMP_FILE(mInFile);
 
-    CHECK_EQ(initDecoder(), (status_t)OK);
+    CHECK_EQ(initDecoder(mWidth, mHeight), (status_t)OK);
 }
 
 SoftAVC::~SoftAVC() {
@@ -191,7 +193,7 @@
     s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
     s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
 
-    ALOGV("Set the run-time (dynamic) parameters stride = %u", stride);
+    ALOGV("Set the run-time (dynamic) parameters stride = %zu", stride);
     status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip, (void *)&s_ctl_op);
 
     if (status != IV_SUCCESS) {
@@ -231,6 +233,7 @@
         ALOGE("Error in reset: 0x%x", s_ctl_op.u4_error_code);
         return UNKNOWN_ERROR;
     }
+    mSignalledError = false;
 
     /* Set the run-time (dynamic) parameters */
     setParams(outputBufferWidth());
@@ -284,7 +287,7 @@
     return OK;
 }
 
-status_t SoftAVC::initDecoder() {
+status_t SoftAVC::initDecoder(uint32_t width, uint32_t height) {
     IV_API_CALL_STATUS_T status;
 
     UWORD32 u4_num_reorder_frames;
@@ -293,28 +296,33 @@
     WORD32 i4_level;
 
     mNumCores = GetCPUCoreCount();
+    mCodecCtx = NULL;
 
     /* Initialize number of ref and reorder modes (for H264) */
     u4_num_reorder_frames = 16;
     u4_num_ref_frames = 16;
     u4_share_disp_buf = 0;
 
-    uint32_t displayStride = outputBufferWidth();
-    uint32_t displayHeight = outputBufferHeight();
+    uint32_t displayStride = mIsAdaptive ? mAdaptiveMaxWidth : width;
+    uint32_t displayHeight = mIsAdaptive ? mAdaptiveMaxHeight : height;
     uint32_t displaySizeY = displayStride * displayHeight;
 
-    if (displaySizeY > (1920 * 1088)) {
-        i4_level = 50;
-    } else if (displaySizeY > (1280 * 720)) {
-        i4_level = 40;
-    } else if (displaySizeY > (720 * 576)) {
-        i4_level = 31;
-    } else if (displaySizeY > (624 * 320)) {
-        i4_level = 30;
-    } else if (displaySizeY > (352 * 288)) {
-        i4_level = 21;
+    if (mNewLevel == 0) {
+        if (displaySizeY > (1920 * 1088)) {
+            i4_level = 50;
+        } else if (displaySizeY > (1280 * 720)) {
+            i4_level = 40;
+        } else if (displaySizeY > (720 * 576)) {
+            i4_level = 31;
+        } else if (displaySizeY > (624 * 320)) {
+            i4_level = 30;
+        } else if (displaySizeY > (352 * 288)) {
+            i4_level = 21;
+        } else {
+            i4_level = 20;
+        }
     } else {
-        i4_level = 20;
+        i4_level = mNewLevel;
     }
 
     {
@@ -430,6 +438,7 @@
 
         status = ivdec_api_function(mCodecCtx, (void *)&s_init_ip, (void *)&s_init_op);
         if (status != IV_SUCCESS) {
+            mCodecCtx = NULL;
             ALOGE("Error in init: 0x%x",
                     s_init_op.s_ivd_init_op_t.u4_error_code);
             return UNKNOWN_ERROR;
@@ -452,7 +461,7 @@
     uint32_t bufferSize = displaySizeY * 3 / 2;
     mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize);
     if (NULL == mFlushOutBuffer) {
-        ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+        ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
         return NO_MEMORY;
     }
 
@@ -489,12 +498,12 @@
     return OK;
 }
 
-status_t SoftAVC::reInitDecoder() {
+status_t SoftAVC::reInitDecoder(uint32_t width, uint32_t height) {
     status_t ret;
 
     deInitDecoder();
 
-    ret = initDecoder();
+    ret = initDecoder(width, height);
     if (OK != ret) {
         ALOGE("Create failure");
         deInitDecoder();
@@ -506,6 +515,7 @@
 void SoftAVC::onReset() {
     SoftVideoDecoderOMXComponent::onReset();
 
+    mSignalledError = false;
     resetDecoder();
     resetPlugin();
 }
@@ -515,7 +525,14 @@
     const uint32_t oldHeight = mHeight;
     OMX_ERRORTYPE ret = SoftVideoDecoderOMXComponent::internalSetParameter(index, params);
     if (mWidth != oldWidth || mHeight != oldHeight) {
-        reInitDecoder();
+        mNewWidth = mWidth;
+        mNewHeight = mHeight;
+        status_t err = reInitDecoder(mNewWidth, mNewHeight);
+        if (err != OK) {
+            notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
+            mSignalledError = true;
+            return OMX_ErrorUnsupportedSetting;
+        }
     }
     return ret;
 }
@@ -590,6 +607,9 @@
 void SoftAVC::onQueueFilled(OMX_U32 portIndex) {
     UNUSED(portIndex);
 
+    if (mSignalledError) {
+        return;
+    }
     if (mOutputPortSettingsChange != NONE) {
         return;
     }
@@ -622,6 +642,11 @@
             if (!inQueue.empty()) {
                 inInfo = *inQueue.begin();
                 inHeader = inInfo->mHeader;
+                if (inHeader == NULL) {
+                    inQueue.erase(inQueue.begin());
+                    inInfo->mOwnedByUs = false;
+                    continue;
+                }
             } else {
                 break;
             }
@@ -633,14 +658,21 @@
         outHeader->nTimeStamp = 0;
         outHeader->nOffset = 0;
 
-        if (inHeader != NULL && (inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
-            mReceivedEOS = true;
+        if (inHeader != NULL) {
             if (inHeader->nFilledLen == 0) {
                 inQueue.erase(inQueue.begin());
                 inInfo->mOwnedByUs = false;
                 notifyEmptyBufferDone(inHeader);
+
+                if (!(inHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
+                    continue;
+                }
+
+                mReceivedEOS = true;
                 inHeader = NULL;
                 setFlushMode();
+            } else if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                mReceivedEOS = true;
             }
         }
 
@@ -648,9 +680,15 @@
         // update output port's definition and reinitialize decoder.
         if (mInitNeeded && !mIsInFlush) {
             bool portWillReset = false;
-            handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
 
-            CHECK_EQ(reInitDecoder(), (status_t)OK);
+            status_t err = reInitDecoder(mNewWidth, mNewHeight);
+            if (err != OK) {
+                notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
+                mSignalledError = true;
+                return;
+            }
+
+            handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
             return;
         }
 
@@ -691,6 +729,7 @@
             bool unsupportedDimensions =
                 (IVD_STREAM_WIDTH_HEIGHT_NOT_SUPPORTED == (s_dec_op.u4_error_code & 0xFF));
             bool resChanged = (IVD_RES_CHANGED == (s_dec_op.u4_error_code & 0xFF));
+            bool unsupportedLevel = (IH264D_UNSUPPORTED_LEVEL == (s_dec_op.u4_error_code & 0xFF));
 
             GETTIME(&mTimeEnd, NULL);
             /* Compute time taken for decode() */
@@ -708,13 +747,39 @@
                 mTimeStampsValid[timeStampIx] = false;
             }
 
+
             // This is needed to handle CTS DecoderTest testCodecResetsH264WithoutSurface,
             // which is not sending SPS/PPS after port reconfiguration and flush to the codec.
             if (unsupportedDimensions && !mFlushNeeded) {
                 bool portWillReset = false;
-                handlePortSettingsChange(&portWillReset, s_dec_op.u4_pic_wd, s_dec_op.u4_pic_ht);
+                mNewWidth = s_dec_op.u4_pic_wd;
+                mNewHeight = s_dec_op.u4_pic_ht;
 
-                CHECK_EQ(reInitDecoder(), (status_t)OK);
+                status_t err = reInitDecoder(mNewWidth, mNewHeight);
+                if (err != OK) {
+                    notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
+                    mSignalledError = true;
+                    return;
+                }
+
+                handlePortSettingsChange(&portWillReset, mNewWidth, mNewHeight);
+
+                setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
+
+                ivdec_api_function(mCodecCtx, (void *)&s_dec_ip, (void *)&s_dec_op);
+                return;
+            }
+
+            if (unsupportedLevel && !mFlushNeeded) {
+
+                mNewLevel = 51;
+
+                status_t err = reInitDecoder(mNewWidth, mNewHeight);
+                if (err != OK) {
+                    notify(OMX_EventError, OMX_ErrorUnsupportedSetting, err, NULL);
+                    mSignalledError = true;
+                    return;
+                }
 
                 setDecodeArgs(&s_dec_ip, &s_dec_op, inHeader, outHeader, timeStampIx);
 
@@ -745,6 +810,17 @@
                 continue;
             }
 
+            if (unsupportedLevel) {
+
+                if (mFlushNeeded) {
+                    setFlushMode();
+                }
+
+                mNewLevel = 51;
+                mInitNeeded = true;
+                continue;
+            }
+
             if ((0 < s_dec_op.u4_pic_wd) && (0 < s_dec_op.u4_pic_ht)) {
                 uint32_t width = s_dec_op.u4_pic_wd;
                 uint32_t height = s_dec_op.u4_pic_ht;
@@ -758,7 +834,7 @@
             }
 
             if (s_dec_op.u4_output_present) {
-                outHeader->nFilledLen = (mWidth * mHeight * 3) / 2;
+                outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
 
                 outHeader->nTimeStamp = mTimeStamps[s_dec_op.u4_ts];
                 mTimeStampsValid[s_dec_op.u4_ts] = false;
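
For reference, initDecoder() now derives a default H.264 level from the display luma size only when no level has been forced (mNewLevel stays 0 until, for example, an unsupported-level error bumps it to 51). The same threshold ladder in standalone form; defaultAvcLevel is an illustrative name:

#include <cstdint>
#include <cstdio>

static int32_t defaultAvcLevel(uint32_t width, uint32_t height, int32_t forcedLevel) {
    if (forcedLevel != 0) {
        return forcedLevel;          // caller already knows the level it needs
    }
    uint32_t lumaSize = width * height;
    if (lumaSize > 1920u * 1088u) return 50;
    if (lumaSize > 1280u * 720u)  return 40;
    if (lumaSize > 720u * 576u)   return 31;
    if (lumaSize > 624u * 320u)   return 30;
    if (lumaSize > 352u * 288u)   return 21;
    return 20;
}

int main() {
    std::printf("1080p default level: %d\n", defaultAvcLevel(1920, 1080, 0));  // 40
    return 0;
}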
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.h b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
index 191a71d..1ec8991 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.h
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.h
@@ -100,12 +100,14 @@
     bool mInitNeeded;
     uint32_t mNewWidth;
     uint32_t mNewHeight;
+    uint32_t mNewLevel;
     // The input stream has changed to a different resolution, which is still supported by the
     // codec. So the codec is switching to decode the new resolution.
     bool mChangingResolution;
     bool mFlushNeeded;
+    bool mSignalledError;
 
-    status_t initDecoder();
+    status_t initDecoder(uint32_t width, uint32_t height);
     status_t deInitDecoder();
     status_t setFlushMode();
     status_t setParams(size_t stride);
@@ -113,7 +115,7 @@
     status_t setNumCores();
     status_t resetDecoder();
     status_t resetPlugin();
-    status_t reInitDecoder();
+    status_t reInitDecoder(uint32_t width, uint32_t height);
 
     void setDecodeArgs(
             ivd_video_decode_ip_t *ps_dec_ip,
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index bf5e353..6e55034 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -49,10 +49,10 @@
     params->nVersion.s.nStep = 0;
 }
 
-typedef struct LevelConversion {
+struct LevelConversion {
     OMX_VIDEO_AVCLEVELTYPE omxLevel;
     WORD32 avcLevel;
-} LevelConcersion;
+};
 
 static LevelConversion ConversionTable[] = {
     { OMX_VIDEO_AVCLevel1,  10 },
@@ -87,8 +87,21 @@
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel32 },
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel4  },
     { OMX_VIDEO_AVCProfileBaseline, OMX_VIDEO_AVCLevel41 },
-};
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel1  },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel1b },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel11 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel12 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel13 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel2  },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel21 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel22 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel3  },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel31 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel32 },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel4  },
+    { OMX_VIDEO_AVCProfileMain, OMX_VIDEO_AVCLevel41 },
 
+};
 
 static size_t GetCPUCoreCount() {
     long cpuCoreCount = 1;
@@ -99,7 +112,7 @@
     cpuCoreCount = sysconf(_SC_NPROC_ONLN);
 #endif
     CHECK(cpuCoreCount >= 1);
-    ALOGD("Number of CPU cores: %ld", cpuCoreCount);
+    ALOGV("Number of CPU cores: %ld", cpuCoreCount);
     return (size_t)cpuCoreCount;
 }
 
@@ -144,15 +157,15 @@
             kProfileLevels, NELEM(kProfileLevels),
             176 /* width */, 144 /* height */,
             callbacks, appData, component),
+      mBitrateUpdated(false),
+      mKeyFrameRequested(false),
       mIvVideoColorFormat(IV_YUV_420P),
-      mIDRFrameRefreshIntervalInSec(1),
       mAVCEncProfile(IV_PROFILE_BASE),
-      mAVCEncLevel(31),
-      mPrevTimestampUs(-1),
+      mAVCEncLevel(41),
       mStarted(false),
       mSawInputEOS(false),
+      mSawOutputEOS(false),
       mSignalledError(false),
-      mConversionBuffer(NULL),
       mCodecCtx(NULL) {
 
     initPorts(kNumBuffers, kNumBuffers, ((mWidth * mHeight * 3) >> 1),
@@ -162,6 +175,10 @@
     GENERATE_FILE_NAMES();
     CREATE_DUMP_FILE(mInFile);
     CREATE_DUMP_FILE(mOutFile);
+    memset(mConversionBuffers, 0, sizeof(mConversionBuffers));
+    memset(mInputBufferInfo, 0, sizeof(mInputBufferInfo));
+
+    initEncParams();
 
 }
 
@@ -173,7 +190,7 @@
     CHECK(inQueue.empty());
 }
 
-OMX_ERRORTYPE SoftAVC::initEncParams() {
+void  SoftAVC::initEncParams() {
     mCodecCtx = NULL;
     mMemRecords = NULL;
     mNumMemRecords = DEFAULT_MEM_REC_CNT;
@@ -186,7 +203,6 @@
     mIInterval = DEFAULT_I_INTERVAL;
     mIDRInterval = DEFAULT_IDR_INTERVAL;
     mDisableDeblkLevel = DEFAULT_DISABLE_DEBLK_LEVEL;
-    mFrameRate = DEFAULT_SRC_FRAME_RATE;
     mEnableFastSad = DEFAULT_ENABLE_FAST_SAD;
     mEnableAltRef = DEFAULT_ENABLE_ALT_REF;
     mEncSpeed = DEFAULT_ENC_SPEED;
@@ -195,11 +211,12 @@
     mAIRRefreshPeriod = DEFAULT_AIR_REFRESH_PERIOD;
     mPSNREnable = DEFAULT_PSNR_ENABLE;
     mReconEnable = DEFAULT_RECON_ENABLE;
+    mEntropyMode = DEFAULT_ENTROPY_MODE;
+    mBframes = DEFAULT_B_FRAMES;
 
     gettimeofday(&mTimeStart, NULL);
     gettimeofday(&mTimeEnd, NULL);
 
-    return OMX_ErrorNone;
 }
 
 
@@ -212,7 +229,6 @@
     s_dimensions_ip.e_sub_cmd = IVE_CMD_CTL_SET_DIMENSIONS;
     s_dimensions_ip.u4_ht = mHeight;
     s_dimensions_ip.u4_wd = mWidth;
-    s_dimensions_ip.u4_strd = mStride;
 
     s_dimensions_ip.u4_timestamp_high = -1;
     s_dimensions_ip.u4_timestamp_low = -1;
@@ -260,8 +276,8 @@
     s_frame_rate_ip.e_cmd = IVE_CMD_VIDEO_CTL;
     s_frame_rate_ip.e_sub_cmd = IVE_CMD_CTL_SET_FRAMERATE;
 
-    s_frame_rate_ip.u4_src_frame_rate = mFrameRate;
-    s_frame_rate_ip.u4_tgt_frame_rate = mFrameRate;
+    s_frame_rate_ip.u4_src_frame_rate = mFramerate >> 16;
+    s_frame_rate_ip.u4_tgt_frame_rate = mFramerate >> 16;
 
     s_frame_rate_ip.u4_timestamp_high = -1;
     s_frame_rate_ip.u4_timestamp_low = -1;
@@ -332,7 +348,6 @@
     ive_ctl_set_frame_type_ip_t s_frame_type_ip;
     ive_ctl_set_frame_type_op_t s_frame_type_op;
     IV_STATUS_T status;
-
     s_frame_type_ip.e_cmd = IVE_CMD_VIDEO_CTL;
     s_frame_type_ip.e_sub_cmd = IVE_CMD_CTL_SET_FRAMETYPE;
 
@@ -503,7 +518,6 @@
 
     s_gop_params_ip.u4_i_frm_interval = mIInterval;
     s_gop_params_ip.u4_idr_frm_interval = mIDRInterval;
-    s_gop_params_ip.u4_num_b_frames = DEFAULT_B_FRAMES;
 
     s_gop_params_ip.u4_timestamp_high = -1;
     s_gop_params_ip.u4_timestamp_low = -1;
@@ -529,7 +543,7 @@
     s_profile_params_ip.e_sub_cmd = IVE_CMD_CTL_SET_PROFILE_PARAMS;
 
     s_profile_params_ip.e_profile = DEFAULT_EPROFILE;
-
+    s_profile_params_ip.u4_entropy_coding_mode = mEntropyMode;
     s_profile_params_ip.u4_timestamp_high = -1;
     s_profile_params_ip.u4_timestamp_low = -1;
 
@@ -595,7 +609,6 @@
 
 OMX_ERRORTYPE SoftAVC::initEncoder() {
     IV_STATUS_T status;
-    size_t i;
     WORD32 level;
     uint32_t displaySizeY;
     CHECK(!mStarted);
@@ -618,27 +631,26 @@
     }
     mAVCEncLevel = MAX(level, mAVCEncLevel);
 
-    if (OMX_ErrorNone != (errType = initEncParams())) {
-        ALOGE("Failed to initialize encoder params");
-        mSignalledError = true;
-        notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
-        return errType;
-    }
-
-    mStride = ALIGN16(mWidth);
+    mStride = mWidth;
 
     if (mInputDataIsMeta) {
-        if (mConversionBuffer) {
-            free(mConversionBuffer);
-            mConversionBuffer = NULL;
-        }
+        for (size_t i = 0; i < MAX_CONVERSION_BUFFERS; i++) {
+            if (mConversionBuffers[i] != NULL) {
+                free(mConversionBuffers[i]);
+            }
 
-        if (mConversionBuffer == NULL) {
-            mConversionBuffer = (uint8_t *)malloc(mStride * mHeight * 3 / 2);
-            if (mConversionBuffer == NULL) {
+            if (((uint64_t)mStride * mHeight) > ((uint64_t)INT32_MAX / 3)) {
+                ALOGE("Buffer size is too big.");
+                return OMX_ErrorUndefined;
+            }
+            mConversionBuffers[i] = (uint8_t *)malloc(mStride * mHeight * 3 / 2);
+
+            if (mConversionBuffers[i] == NULL) {
                 ALOGE("Allocating conversion buffer failed.");
                 return OMX_ErrorUndefined;
             }
+
+            mConversionBuffersFree[i] = 1;
         }
     }
 
@@ -654,7 +666,7 @@
             break;
     }
 
-    ALOGV("Params width %d height %d level %d colorFormat %d", mWidth,
+    ALOGD("Params width %d height %d level %d colorFormat %d", mWidth,
             mHeight, mAVCEncLevel, mIvVideoColorFormat);
 
     /* Getting Number of MemRecords */
@@ -679,9 +691,13 @@
     }
 
     /* Allocate array to hold memory records */
+    if (mNumMemRecords > SIZE_MAX / sizeof(iv_mem_rec_t)) {
+        ALOGE("requested memory size is too big.");
+        return OMX_ErrorUndefined;
+    }
     mMemRecords = (iv_mem_rec_t *)malloc(mNumMemRecords * sizeof(iv_mem_rec_t));
     if (NULL == mMemRecords) {
-        ALOGE("Unable to allocate memory for hold memory records: Size %d",
+        ALOGE("Unable to allocate memory for hold memory records: Size %zu",
                 mNumMemRecords * sizeof(iv_mem_rec_t));
         mSignalledError = true;
         notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
@@ -691,7 +707,7 @@
     {
         iv_mem_rec_t *ps_mem_rec;
         ps_mem_rec = mMemRecords;
-        for (i = 0; i < mNumMemRecords; i++) {
+        for (size_t i = 0; i < mNumMemRecords; i++) {
             ps_mem_rec->u4_size = sizeof(iv_mem_rec_t);
             ps_mem_rec->pv_base = NULL;
             ps_mem_rec->u4_mem_size = 0;
@@ -738,13 +754,13 @@
         WORD32 total_size;
         iv_mem_rec_t *ps_mem_rec;
         total_size = 0;
-
         ps_mem_rec = mMemRecords;
-        for (i = 0; i < mNumMemRecords; i++) {
+
+        for (size_t i = 0; i < mNumMemRecords; i++) {
             ps_mem_rec->pv_base = ive_aligned_malloc(
                     ps_mem_rec->u4_mem_alignment, ps_mem_rec->u4_mem_size);
             if (ps_mem_rec->pv_base == NULL) {
-                ALOGE("Allocation failure for mem record id %d size %d\n", i,
+                ALOGE("Allocation failure for mem record id %zu size %u\n", i,
                         ps_mem_rec->u4_mem_size);
                 mSignalledError = true;
                 notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
@@ -755,7 +771,6 @@
 
             ps_mem_rec++;
         }
-        printf("\nTotal memory for codec %d\n", total_size);
     }
 
     /* Codec Instance Creation */
@@ -789,7 +804,7 @@
         s_init_ip.e_rc_mode = DEFAULT_RC_MODE;
         s_init_ip.u4_max_framerate = DEFAULT_MAX_FRAMERATE;
         s_init_ip.u4_max_bitrate = DEFAULT_MAX_BITRATE;
-        s_init_ip.u4_max_num_bframes = DEFAULT_B_FRAMES;
+        s_init_ip.u4_num_bframes = mBframes;
         s_init_ip.e_content_type = IV_PROGRESSIVE;
         s_init_ip.u4_max_srch_rng_x = DEFAULT_MAX_SRCH_RANGE_X;
         s_init_ip.u4_max_srch_rng_y = DEFAULT_MAX_SRCH_RANGE_Y;
@@ -864,7 +879,6 @@
     iv_retrieve_mem_rec_ip_t s_retrieve_mem_ip;
     iv_retrieve_mem_rec_op_t s_retrieve_mem_op;
     iv_mem_rec_t *ps_mem_rec;
-    UWORD32 i;
 
     if (!mStarted) {
         return OMX_ErrorNone;
@@ -885,16 +899,18 @@
 
     /* Free memory records */
     ps_mem_rec = mMemRecords;
-    for (i = 0; i < s_retrieve_mem_op.u4_num_mem_rec_filled; i++) {
+    for (size_t i = 0; i < s_retrieve_mem_op.u4_num_mem_rec_filled; i++) {
         ive_aligned_free(ps_mem_rec->pv_base);
         ps_mem_rec++;
     }
 
     free(mMemRecords);
 
-    if (mConversionBuffer != NULL) {
-        free(mConversionBuffer);
-        mConversionBuffer = NULL;
+    for (size_t i = 0; i < MAX_CONVERSION_BUFFERS; i++) {
+        if (mConversionBuffers[i]) {
+            free(mConversionBuffers[i]);
+            mConversionBuffers[i] = NULL;
+        }
     }
 
     mStarted = false;
@@ -926,23 +942,21 @@
                 return OMX_ErrorUndefined;
             }
 
-            avcParams->eProfile = OMX_VIDEO_AVCProfileBaseline;
-            OMX_VIDEO_AVCLEVELTYPE omxLevel = OMX_VIDEO_AVCLevel31;
+            OMX_VIDEO_AVCLEVELTYPE omxLevel = OMX_VIDEO_AVCLevel41;
             if (OMX_ErrorNone
                     != ConvertAvcSpecLevelToOmxAvcLevel(mAVCEncLevel, &omxLevel)) {
                 return OMX_ErrorUndefined;
             }
 
+            avcParams->eProfile = OMX_VIDEO_AVCProfileBaseline;
             avcParams->eLevel = omxLevel;
             avcParams->nRefFrames = 1;
-            avcParams->nBFrames = 0;
             avcParams->bUseHadamard = OMX_TRUE;
             avcParams->nAllowedPictureTypes = (OMX_VIDEO_PictureTypeI
-                    | OMX_VIDEO_PictureTypeP);
+                    | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB);
             avcParams->nRefIdx10ActiveMinus1 = 0;
             avcParams->nRefIdx11ActiveMinus1 = 0;
             avcParams->bWeightedPPrediction = OMX_FALSE;
-            avcParams->bEntropyCodingCABAC = OMX_FALSE;
             avcParams->bconstIpred = OMX_FALSE;
             avcParams->bDirect8x8Inference = OMX_FALSE;
             avcParams->bDirectSpatialTemporal = OMX_FALSE;
@@ -973,14 +987,26 @@
                 return OMX_ErrorUndefined;
             }
 
-            if (avcType->eProfile != OMX_VIDEO_AVCProfileBaseline
-                    || avcType->nRefFrames != 1 || avcType->nBFrames != 0
+            mEntropyMode = 0;
+
+            if (OMX_TRUE == avcType->bEntropyCodingCABAC)
+                mEntropyMode = 1;
+
+            if ((avcType->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) &&
+                    avcType->nPFrames) {
+                mBframes = avcType->nBFrames / avcType->nPFrames;
+            }
+
+            mIInterval = avcType->nPFrames + avcType->nBFrames;
+
+            if (OMX_VIDEO_AVCLoopFilterDisable == avcType->eLoopFilterMode)
+                mDisableDeblkLevel = 4;
+
+            if (avcType->nRefFrames != 1
                     || avcType->bUseHadamard != OMX_TRUE
-                    || (avcType->nAllowedPictureTypes & OMX_VIDEO_PictureTypeB) != 0
                     || avcType->nRefIdx10ActiveMinus1 != 0
                     || avcType->nRefIdx11ActiveMinus1 != 0
                     || avcType->bWeightedPPrediction != OMX_FALSE
-                    || avcType->bEntropyCodingCABAC != OMX_FALSE
                     || avcType->bconstIpred != OMX_FALSE
                     || avcType->bDirect8x8Inference != OMX_FALSE
                     || avcType->bDirectSpatialTemporal != OMX_FALSE
@@ -1075,14 +1101,27 @@
 
     /* Initialize color formats */
     ps_inp_raw_buf->e_color_fmt = mIvVideoColorFormat;
-
     source = NULL;
-    if (inputBufferHeader) {
+    if ((inputBufferHeader != NULL) && inputBufferHeader->nFilledLen) {
         source = inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
 
         if (mInputDataIsMeta) {
+            uint8_t *conversionBuffer = NULL;
+            for (size_t i = 0; i < MAX_CONVERSION_BUFFERS; i++) {
+                if (mConversionBuffersFree[i]) {
+                    mConversionBuffersFree[i] = 0;
+                    conversionBuffer = mConversionBuffers[i];
+                    break;
+                }
+            }
+
+            if (NULL == conversionBuffer) {
+                ALOGE("No free buffers to hold conversion data");
+                return OMX_ErrorUndefined;
+            }
+
             source = extractGraphicBuffer(
-                    mConversionBuffer, (mWidth * mHeight * 3 / 2), source,
+                    conversionBuffer, (mWidth * mHeight * 3 / 2), source,
                     inputBufferHeader->nFilledLen, mWidth, mHeight);
 
             if (source == NULL) {
@@ -1091,6 +1130,18 @@
                 return OMX_ErrorUndefined;
             }
         }
+        ps_encode_ip->u4_is_last = 0;
+        ps_encode_ip->u4_timestamp_high = (inputBufferHeader->nTimeStamp) >> 32;
+        ps_encode_ip->u4_timestamp_low = (inputBufferHeader->nTimeStamp) & 0xFFFFFFFF;
+    }
+    else {
+        if (mSawInputEOS) {
+            ps_encode_ip->u4_is_last = 1;
+        }
+        memset(ps_inp_raw_buf, 0, sizeof(iv_raw_buf_t));
+        ps_inp_raw_buf->e_color_fmt = mIvVideoColorFormat;
+        ps_inp_raw_buf->u4_size = sizeof(iv_raw_buf_t);
+        return OMX_ErrorNone;
     }
 
     pu1_buf = (UWORD8 *)source;
@@ -1145,14 +1196,6 @@
             break;
         }
     }
-
-    ps_encode_ip->u4_is_last = 0;
-
-    if (inputBufferHeader) {
-        ps_encode_ip->u4_timestamp_high = (inputBufferHeader->nTimeStamp) >> 32;
-        ps_encode_ip->u4_timestamp_low = (inputBufferHeader->nTimeStamp) & 0xFFFFFFFF;
-    }
-
     return OMX_ErrorNone;
 }
 
@@ -1170,35 +1213,31 @@
             return;
         }
     }
-    if (mSignalledError || mSawInputEOS) {
+    if (mSignalledError) {
         return;
     }
 
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
 
-    while (!mSawInputEOS && !inQueue.empty() && !outQueue.empty()) {
+    while (!mSawOutputEOS && !outQueue.empty()) {
+
         OMX_ERRORTYPE error;
         ive_video_encode_ip_t s_encode_ip;
         ive_video_encode_op_t s_encode_op;
-
-        BufferInfo *inputBufferInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inputBufferHeader = inputBufferInfo->mHeader;
-
         BufferInfo *outputBufferInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outputBufferHeader = outputBufferInfo->mHeader;
 
-        if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            inQueue.erase(inQueue.begin());
-            inputBufferInfo->mOwnedByUs = false;
-            notifyEmptyBufferDone(inputBufferHeader);
+        BufferInfo *inputBufferInfo;
+        OMX_BUFFERHEADERTYPE *inputBufferHeader;
 
-            outputBufferHeader->nFilledLen = 0;
-            outputBufferHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
-            outQueue.erase(outQueue.begin());
-            outputBufferInfo->mOwnedByUs = false;
-            notifyFillBufferDone(outputBufferHeader);
+        if (mSawInputEOS) {
+            inputBufferHeader = NULL;
+            inputBufferInfo = NULL;
+        } else if (!inQueue.empty()) {
+            inputBufferInfo = *inQueue.begin();
+            inputBufferHeader = inputBufferInfo->mHeader;
+        } else {
             return;
         }
 
@@ -1208,6 +1247,10 @@
         outputBufferHeader->nFilledLen = 0;
         outputBufferHeader->nOffset = 0;
 
+        if (inputBufferHeader != NULL) {
+            outputBufferHeader->nFlags = inputBufferHeader->nFlags;
+        }
+
         uint8_t *outPtr = (uint8_t *)outputBufferHeader->pBuffer;
 
         if (!mSpsPpsHeaderReceived) {
@@ -1231,10 +1274,13 @@
 
             outputBufferHeader->nFlags = OMX_BUFFERFLAG_CODECCONFIG;
             outputBufferHeader->nFilledLen = s_encode_op.s_out_buf.u4_bytes;
-            outputBufferHeader->nTimeStamp = inputBufferHeader->nTimeStamp;
+            if (inputBufferHeader != NULL) {
+                outputBufferHeader->nTimeStamp = inputBufferHeader->nTimeStamp;
+            }
 
             outQueue.erase(outQueue.begin());
             outputBufferInfo->mOwnedByUs = false;
+
             DUMP_TO_FILE(
                     mOutFile, outputBufferHeader->pBuffer,
                     outputBufferHeader->nFilledLen);
@@ -1252,14 +1298,24 @@
             setFrameType(IV_IDR_FRAME);
         }
 
-        mPrevTimestampUs = inputBufferHeader->nTimeStamp;
-
-        if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+        if ((inputBufferHeader != NULL)
+                && (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS)) {
             mSawInputEOS = true;
         }
 
+        /* In normal mode, store inputBufferInfo; it will be returned
+           once the encoder consumes this input */
+        if (!mInputDataIsMeta && (inputBufferInfo != NULL)) {
+            for (size_t i = 0; i < MAX_INPUT_BUFFER_HEADERS; i++) {
+                if (NULL == mInputBufferInfo[i]) {
+                    mInputBufferInfo[i] = inputBufferInfo;
+                    break;
+                }
+            }
+        }
         error = setEncodeArgs(
                 &s_encode_ip, &s_encode_op, inputBufferHeader, outputBufferHeader);
+
         if (error != OMX_ErrorNone) {
             mSignalledError = true;
             notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
@@ -1269,14 +1325,11 @@
         DUMP_TO_FILE(
                 mInFile, s_encode_ip.s_inp_buf.apv_bufs[0],
                 (mHeight * mStride * 3 / 2));
-        //DUMP_TO_FILE(mInFile, inputBufferHeader->pBuffer + inputBufferHeader->nOffset,
-        //    inputBufferHeader->nFilledLen);
 
         GETTIME(&mTimeStart, NULL);
         /* Compute time elapsed between end of previous decode()
          * to start of current decode() */
         TIME_DIFF(mTimeEnd, mTimeStart, timeDelay);
-
         status = ive_api_function(mCodecCtx, &s_encode_ip, &s_encode_op);
 
         if (IV_SUCCESS != status) {
@@ -1294,38 +1347,77 @@
         ALOGV("timeTaken=%6d delay=%6d numBytes=%6d", timeTaken, timeDelay,
                 s_encode_op.s_out_buf.u4_bytes);
 
+        /* If the encoder frees up an input buffer, mark it as free */
+        if (s_encode_op.s_inp_buf.apv_bufs[0] != NULL) {
+            if (mInputDataIsMeta) {
+                for (size_t i = 0; i < MAX_CONVERSION_BUFFERS; i++) {
+                    if (mConversionBuffers[i] == s_encode_op.s_inp_buf.apv_bufs[0]) {
+                        mConversionBuffersFree[i] = 1;
+                        break;
+                    }
+                }
+            } else {
+                /* In normal mode, call EBD on the inputBufferHeader that is freed by the codec */
+                for (size_t i = 0; i < MAX_INPUT_BUFFER_HEADERS; i++) {
+                    uint8_t *buf = NULL;
+                    OMX_BUFFERHEADERTYPE *bufHdr = NULL;
+                    if (mInputBufferInfo[i] != NULL) {
+                        bufHdr = mInputBufferInfo[i]->mHeader;
+                        buf = bufHdr->pBuffer + bufHdr->nOffset;
+                    }
+                    if (s_encode_op.s_inp_buf.apv_bufs[0] == buf) {
+                        mInputBufferInfo[i]->mOwnedByUs = false;
+                        notifyEmptyBufferDone(bufHdr);
+                        mInputBufferInfo[i] = NULL;
+                        break;
+                    }
+                }
+            }
+        }
 
-        outputBufferHeader->nFlags = inputBufferHeader->nFlags;
         outputBufferHeader->nFilledLen = s_encode_op.s_out_buf.u4_bytes;
-        outputBufferHeader->nTimeStamp = inputBufferHeader->nTimeStamp;
 
-        if (IV_IDR_FRAME
-                == s_encode_op.u4_encoded_frame_type) {
+        if (IV_IDR_FRAME == s_encode_op.u4_encoded_frame_type) {
             outputBufferHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
         }
 
-        inQueue.erase(inQueue.begin());
-        inputBufferInfo->mOwnedByUs = false;
+        if (inputBufferHeader != NULL) {
+            inQueue.erase(inQueue.begin());
 
-        notifyEmptyBufferDone(inputBufferHeader);
-
-        if (mSawInputEOS) {
-            outputBufferHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+            /* In metadata mode, call EBD on the input now */
+            /* In normal mode, EBD will be done once the encoder
+            releases the input buffer */
+            if (mInputDataIsMeta) {
+                inputBufferInfo->mOwnedByUs = false;
+                notifyEmptyBufferDone(inputBufferHeader);
+            }
         }
 
-        outputBufferInfo->mOwnedByUs = false;
-        outQueue.erase(outQueue.begin());
+        if (s_encode_op.u4_is_last) {
+            outputBufferHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+            mSawOutputEOS = true;
+        } else {
+            outputBufferHeader->nFlags &= ~OMX_BUFFERFLAG_EOS;
+        }
 
-        DUMP_TO_FILE(
-                mOutFile, outputBufferHeader->pBuffer,
-                outputBufferHeader->nFilledLen);
-        notifyFillBufferDone(outputBufferHeader);
+        if (outputBufferHeader->nFilledLen || s_encode_op.u4_is_last) {
+            outputBufferHeader->nTimeStamp = s_encode_op.u4_timestamp_high;
+            outputBufferHeader->nTimeStamp <<= 32;
+            outputBufferHeader->nTimeStamp |= s_encode_op.u4_timestamp_low;
+            outputBufferInfo->mOwnedByUs = false;
+            outQueue.erase(outQueue.begin());
+            DUMP_TO_FILE(mOutFile, outputBufferHeader->pBuffer,
+                    outputBufferHeader->nFilledLen);
+            notifyFillBufferDone(outputBufferHeader);
+        }
 
+        if (s_encode_op.u4_is_last == 1) {
+            return;
+        }
     }
     return;
 }
 
-
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index c4e26a9..4418a7f 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -17,7 +17,7 @@
 #ifndef __SOFT_AVC_ENC_H__
 #define __SOFT_AVC_ENC_H__
 
-#include <media/stagefright/MediaBuffer.h>
+
 #include <media/stagefright/foundation/ABase.h>
 #include <utils/Vector.h>
 
@@ -25,14 +25,14 @@
 
 namespace android {
 
-struct MediaBuffer;
-
+#define MAX_INPUT_BUFFER_HEADERS 4
+#define MAX_CONVERSION_BUFFERS   4
 #define CODEC_MAX_CORES          4
 #define LEN_STATUS_BUFFER        (10  * 1024)
 #define MAX_VBV_BUFF_SIZE        (120 * 16384)
 #define MAX_NUM_IO_BUFS           3
 
-#define DEFAULT_MAX_REF_FRM         1
+#define DEFAULT_MAX_REF_FRM         2
 #define DEFAULT_MAX_REORDER_FRM     0
 #define DEFAULT_QP_MIN              10
 #define DEFAULT_QP_MAX              40
@@ -57,7 +57,7 @@
 #define DEFAULT_TGT_FRAME_RATE      30
 #define DEFAULT_MAX_WD              1920
 #define DEFAULT_MAX_HT              1920
-#define DEFAULT_MAX_LEVEL           40
+#define DEFAULT_MAX_LEVEL           41
 #define DEFAULT_STRIDE              0
 #define DEFAULT_WD                  1280
 #define DEFAULT_HT                  720
@@ -88,6 +88,7 @@
 #define DEFAULT_QPEL                1
 #define DEFAULT_I4                  1
 #define DEFAULT_EPROFILE            IV_PROFILE_BASE
+#define DEFAULT_ENTROPY_MODE        0
 #define DEFAULT_SLICE_MODE          IVE_SLICE_MODE_NONE
 #define DEFAULT_SLICE_PARAM         256
 #define DEFAULT_ARCH                ARCH_ARM_A9Q
@@ -149,8 +150,6 @@
 
     int32_t  mStride;
 
-    uint32_t mFrameRate;
-
     struct timeval mTimeStart;   // Time at the start of decode()
     struct timeval mTimeEnd;     // Time at the end of decode()
 
@@ -167,32 +166,32 @@
 
     IV_COLOR_FORMAT_T mIvVideoColorFormat;
 
-    int32_t  mIDRFrameRefreshIntervalInSec;
     IV_PROFILE_T mAVCEncProfile;
     WORD32   mAVCEncLevel;
-    int64_t  mNumInputFrames;
-    int64_t  mPrevTimestampUs;
     bool     mStarted;
     bool     mSpsPpsHeaderReceived;
 
     bool     mSawInputEOS;
+    bool     mSawOutputEOS;
     bool     mSignalledError;
     bool     mIntra4x4;
     bool     mEnableFastSad;
     bool     mEnableAltRef;
-    bool    mReconEnable;
-    bool    mPSNREnable;
+    bool     mReconEnable;
+    bool     mPSNREnable;
+    bool     mEntropyMode;
     IVE_SPEED_CONFIG     mEncSpeed;
 
-    uint8_t *mConversionBuffer;
-
+    uint8_t *mConversionBuffers[MAX_CONVERSION_BUFFERS];
+    bool     mConversionBuffersFree[MAX_CONVERSION_BUFFERS];
+    BufferInfo *mInputBufferInfo[MAX_INPUT_BUFFER_HEADERS];
     iv_obj_t *mCodecCtx;         // Codec context
     iv_mem_rec_t *mMemRecords;   // Memory records requested by the codec
     size_t mNumMemRecords;       // Number of memory records requested by codec
     size_t mNumCores;            // Number of cores used by the codec
 
     UWORD32 mHeaderGenerated;
-
+    UWORD32 mBframes;
     IV_ARCH_T mArch;
     IVE_SLICE_MODE_T mSliceMode;
     UWORD32 mSliceParam;
@@ -203,7 +202,7 @@
     IVE_AIR_MODE_T mAIRMode;
     UWORD32 mAIRRefreshPeriod;
 
-    OMX_ERRORTYPE initEncParams();
+    void initEncParams();
     OMX_ERRORTYPE initEncoder();
     OMX_ERRORTYPE releaseEncoder();
 
@@ -292,6 +291,8 @@
         fclose(fp);                                     \
     } else {                                            \
         ALOGD("Could not write to file %s", m_filename);\
+        if (fp != NULL)                                 \
+            fclose(fp);                                 \
     }                                                   \
 }
 #else /* FILE_DUMP_ENABLE */
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index cddd176..4b2ec1c 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -21,7 +21,6 @@
 #include "ihevc_typedefs.h"
 #include "iv.h"
 #include "ivd.h"
-#include "ithread.h"
 #include "ihevcd_cxa.h"
 #include "SoftHEVC.h"
 
@@ -82,7 +81,10 @@
     initPorts(
             kNumBuffers, max(kMaxOutputBufferSize / kMinCompressionRatio, (size_t)INPUT_BUF_SIZE),
             kNumBuffers, CODEC_MIME_TYPE, kMinCompressionRatio);
-    CHECK_EQ(initDecoder(), (status_t)OK);
+}
+
+status_t SoftHEVC::init() {
+    return initDecoder();
 }
 
 SoftHEVC::~SoftHEVC() {
@@ -143,7 +145,7 @@
     s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
     s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
 
-    ALOGV("Set the run-time (dynamic) parameters stride = %u", stride);
+    ALOGV("Set the run-time (dynamic) parameters stride = %zu", stride);
     status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip,
             (void *)&s_ctl_op);
 
@@ -408,7 +410,7 @@
     uint32_t bufferSize = displaySizeY * 3 / 2;
     mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize);
     if (NULL == mFlushOutBuffer) {
-        ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+        ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
         return NO_MEMORY;
     }
 
@@ -766,5 +768,10 @@
 android::SoftOMXComponent *createSoftOMXComponent(const char *name,
         const OMX_CALLBACKTYPE *callbacks, OMX_PTR appData,
         OMX_COMPONENTTYPE **component) {
-    return new android::SoftHEVC(name, callbacks, appData, component);
+    android::SoftHEVC *codec = new android::SoftHEVC(name, callbacks, appData, component);
+    if (codec->init() != android::OK) {
+        android::sp<android::SoftOMXComponent> release = codec;
+        return NULL;
+    }
+    return codec;
 }
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.h b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
index a91f528..c6344cf 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.h
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.h
@@ -56,6 +56,8 @@
     SoftHEVC(const char *name, const OMX_CALLBACKTYPE *callbacks,
             OMX_PTR appData, OMX_COMPONENTTYPE **component);
 
+    status_t init();
+
 protected:
     virtual ~SoftHEVC();
 
diff --git a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
index ede645c..0c1a149 100644
--- a/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
+++ b/media/libstagefright/codecs/m4v_h263/dec/SoftMPEG4.cpp
@@ -103,34 +103,41 @@
     while (!inQueue.empty() && outQueue.size() == kNumOutputBuffers) {
         BufferInfo *inInfo = *inQueue.begin();
         OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+        if (inHeader == NULL) {
+            inQueue.erase(inQueue.begin());
+            inInfo->mOwnedByUs = false;
+            continue;
+        }
 
         PortInfo *port = editPortInfo(1);
 
         OMX_BUFFERHEADERTYPE *outHeader =
             port->mBuffers.editItemAt(mNumSamplesOutput & 1).mHeader;
 
-        if ((inHeader->nFlags & OMX_BUFFERFLAG_EOS) && inHeader->nFilledLen == 0) {
+        if (inHeader->nFilledLen == 0) {
             inQueue.erase(inQueue.begin());
             inInfo->mOwnedByUs = false;
             notifyEmptyBufferDone(inHeader);
 
             ++mInputBufferCount;
 
-            outHeader->nFilledLen = 0;
-            outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+            if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                outHeader->nFilledLen = 0;
+                outHeader->nFlags = OMX_BUFFERFLAG_EOS;
 
-            List<BufferInfo *>::iterator it = outQueue.begin();
-            while ((*it)->mHeader != outHeader) {
-                ++it;
+                List<BufferInfo *>::iterator it = outQueue.begin();
+                while ((*it)->mHeader != outHeader) {
+                    ++it;
+                }
+
+                BufferInfo *outInfo = *it;
+                outInfo->mOwnedByUs = false;
+                outQueue.erase(it);
+                outInfo = NULL;
+
+                notifyFillBufferDone(outHeader);
+                outHeader = NULL;
             }
-
-            BufferInfo *outInfo = *it;
-            outInfo->mOwnedByUs = false;
-            outQueue.erase(it);
-            outInfo = NULL;
-
-            notifyFillBufferDone(outHeader);
-            outHeader = NULL;
             return;
         }
 
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index 5396022..f743b1c 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -283,6 +283,11 @@
             } else {
                 // This is recoverable, just ignore the current frame and
                 // play silence instead.
+
+                // TODO: should we skip silence (and consume input data)
+                // if mIsFirst is true as we may not have a valid
+                // mConfig->samplingRate and mConfig->num_channels?
+                ALOGV_IF(mIsFirst, "insufficient data for first frame, sending silence");
                 memset(outHeader->pBuffer,
                        0,
                        mConfig->outputFrameSize * sizeof(int16_t));
@@ -317,8 +322,7 @@
         }
 
         outHeader->nTimeStamp =
-            mAnchorTimeUs
-                + (mNumFramesOutput * 1000000ll) / mConfig->samplingRate;
+            mAnchorTimeUs + (mNumFramesOutput * 1000000ll) / mSamplingRate;
 
         if (inHeader) {
             CHECK_GE(inHeader->nFilledLen, mConfig->inputBufferUsedLength);
diff --git a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
index 7e98928..32e5da7 100644
--- a/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
+++ b/media/libstagefright/codecs/mpeg2dec/SoftMPEG2.cpp
@@ -21,7 +21,6 @@
 #include "iv_datatypedef.h"
 #include "iv.h"
 #include "ivd.h"
-#include "ithread.h"
 #include "impeg2d.h"
 #include "SoftMPEG2.h"
 
@@ -156,7 +155,7 @@
     s_ctl_ip.u4_size = sizeof(ivd_ctl_set_config_ip_t);
     s_ctl_op.u4_size = sizeof(ivd_ctl_set_config_op_t);
 
-    ALOGV("Set the run-time (dynamic) parameters stride = %u", stride);
+    ALOGV("Set the run-time (dynamic) parameters stride = %zu", stride);
     status = ivdec_api_function(mCodecCtx, (void *)&s_ctl_ip, (void *)&s_ctl_op);
 
     if (status != IV_SUCCESS) {
@@ -396,7 +395,7 @@
     uint32_t bufferSize = displaySizeY * 3 / 2;
     mFlushOutBuffer = (uint8_t *)ivd_aligned_malloc(128, bufferSize);
     if (NULL == mFlushOutBuffer) {
-        ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+        ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
         return NO_MEMORY;
     }
 
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index f026761..e161fb8 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -38,7 +38,10 @@
             NULL /* profileLevels */, 0 /* numProfileLevels */,
             320 /* width */, 240 /* height */, callbacks, appData, component),
       mMode(codingType == OMX_VIDEO_CodingVP8 ? MODE_VP8 : MODE_VP9),
+      mEOSStatus(INPUT_DATA_AVAILABLE),
       mCtx(NULL),
+      mFrameParallelMode(false),
+      mTimeStampIdx(0),
       mImg(NULL) {
     // arbitrary from avc/hevc as vpx does not specify a min compression ratio
     const size_t kMinCompressionRatio = mMode == MODE_VP8 ? 2 : 4;
@@ -51,9 +54,7 @@
 }
 
 SoftVPX::~SoftVPX() {
-    vpx_codec_destroy((vpx_codec_ctx_t *)mCtx);
-    delete (vpx_codec_ctx_t *)mCtx;
-    mCtx = NULL;
+    destroyDecoder();
 }
 
 static int GetCPUCoreCount() {
@@ -73,12 +74,19 @@
     mCtx = new vpx_codec_ctx_t;
     vpx_codec_err_t vpx_err;
     vpx_codec_dec_cfg_t cfg;
+    vpx_codec_flags_t flags;
     memset(&cfg, 0, sizeof(vpx_codec_dec_cfg_t));
+    memset(&flags, 0, sizeof(vpx_codec_flags_t));
     cfg.threads = GetCPUCoreCount();
+
+    if (mFrameParallelMode) {
+        flags |= VPX_CODEC_USE_FRAME_THREADING;
+    }
+
     if ((vpx_err = vpx_codec_dec_init(
                 (vpx_codec_ctx_t *)mCtx,
                  mMode == MODE_VP8 ? &vpx_codec_vp8_dx_algo : &vpx_codec_vp9_dx_algo,
-                 &cfg, 0))) {
+                 &cfg, flags))) {
         ALOGE("on2 decoder failed to initialize. (%d)", vpx_err);
         return UNKNOWN_ERROR;
     }
@@ -86,86 +94,155 @@
     return OK;
 }
 
+status_t SoftVPX::destroyDecoder() {
+    vpx_codec_destroy((vpx_codec_ctx_t *)mCtx);
+    delete (vpx_codec_ctx_t *)mCtx;
+    mCtx = NULL;
+    return OK;
+}
+
+bool SoftVPX::outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset) {
+    List<BufferInfo *> &inQueue = getPortQueue(0);
+    List<BufferInfo *> &outQueue = getPortQueue(1);
+    BufferInfo *outInfo = NULL;
+    OMX_BUFFERHEADERTYPE *outHeader = NULL;
+    vpx_codec_iter_t iter = NULL;
+
+    if (flushDecoder && mFrameParallelMode) {
+        // Flush decoder by passing NULL data ptr and 0 size.
+        // Ideally, this should never fail.
+        if (vpx_codec_decode((vpx_codec_ctx_t *)mCtx, NULL, 0, NULL, 0)) {
+            ALOGE("Failed to flush on2 decoder.");
+            return false;
+        }
+    }
+
+    if (!display) {
+        if (!flushDecoder) {
+            ALOGE("Invalid operation.");
+            return false;
+        }
+        // Drop all the decoded frames in decoder.
+        while ((mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter))) {
+        }
+        return true;
+    }
+
+    while (!outQueue.empty()) {
+        if (mImg == NULL) {
+            mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
+            if (mImg == NULL) {
+                break;
+            }
+        }
+        uint32_t width = mImg->d_w;
+        uint32_t height = mImg->d_h;
+        outInfo = *outQueue.begin();
+        outHeader = outInfo->mHeader;
+        CHECK_EQ(mImg->fmt, VPX_IMG_FMT_I420);
+        handlePortSettingsChange(portWillReset, width, height);
+        if (*portWillReset) {
+            return true;
+        }
+
+        outHeader->nOffset = 0;
+        outHeader->nFlags = 0;
+        outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
+        outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
+
+        uint8_t *dst = outHeader->pBuffer;
+        const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
+        const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
+        const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
+        size_t srcYStride = mImg->stride[VPX_PLANE_Y];
+        size_t srcUStride = mImg->stride[VPX_PLANE_U];
+        size_t srcVStride = mImg->stride[VPX_PLANE_V];
+        copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+
+        mImg = NULL;
+        outInfo->mOwnedByUs = false;
+        outQueue.erase(outQueue.begin());
+        outInfo = NULL;
+        notifyFillBufferDone(outHeader);
+        outHeader = NULL;
+    }
+
+    if (!eos) {
+        return true;
+    }
+
+    if (!outQueue.empty()) {
+        outInfo = *outQueue.begin();
+        outQueue.erase(outQueue.begin());
+        outHeader = outInfo->mHeader;
+        outHeader->nTimeStamp = 0;
+        outHeader->nFilledLen = 0;
+        outHeader->nFlags = OMX_BUFFERFLAG_EOS;
+        outInfo->mOwnedByUs = false;
+        notifyFillBufferDone(outHeader);
+        mEOSStatus = OUTPUT_FRAMES_FLUSHED;
+    }
+    return true;
+}
+
 void SoftVPX::onQueueFilled(OMX_U32 /* portIndex */) {
-    if (mOutputPortSettingsChange != NONE) {
+    if (mOutputPortSettingsChange != NONE || mEOSStatus == OUTPUT_FRAMES_FLUSHED) {
         return;
     }
 
     List<BufferInfo *> &inQueue = getPortQueue(0);
     List<BufferInfo *> &outQueue = getPortQueue(1);
     bool EOSseen = false;
+    vpx_codec_err_t err;
+    bool portWillReset = false;
 
-    while (!inQueue.empty() && !outQueue.empty()) {
-        BufferInfo *inInfo = *inQueue.begin();
-        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
-
-        BufferInfo *outInfo = *outQueue.begin();
-        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
-
-        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
-            EOSseen = true;
-            if (inHeader->nFilledLen == 0) {
-                inQueue.erase(inQueue.begin());
-                inInfo->mOwnedByUs = false;
-                notifyEmptyBufferDone(inHeader);
-
-                outHeader->nFilledLen = 0;
-                outHeader->nFlags = OMX_BUFFERFLAG_EOS;
-
-                outQueue.erase(outQueue.begin());
-                outInfo->mOwnedByUs = false;
-                notifyFillBufferDone(outHeader);
-                return;
-            }
-        }
-
-        if (mImg == NULL) {
-            if (vpx_codec_decode(
-                        (vpx_codec_ctx_t *)mCtx,
-                        inHeader->pBuffer + inHeader->nOffset,
-                        inHeader->nFilledLen,
-                        NULL,
-                        0)) {
-                ALOGE("on2 decoder failed to decode frame.");
-
+    while ((mEOSStatus == INPUT_EOS_SEEN || !inQueue.empty())
+            && !outQueue.empty()) {
+        // Output the pending frames left over from the last port reset or decoder flush.
+        if (mEOSStatus == INPUT_EOS_SEEN || mImg != NULL) {
+            if (!outputBuffers(
+                     mEOSStatus == INPUT_EOS_SEEN, true /* display */,
+                     mEOSStatus == INPUT_EOS_SEEN, &portWillReset)) {
+                ALOGE("on2 decoder failed to output frame.");
                 notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
                 return;
             }
-            vpx_codec_iter_t iter = NULL;
-            mImg = vpx_codec_get_frame((vpx_codec_ctx_t *)mCtx, &iter);
-        }
-
-        if (mImg != NULL) {
-            CHECK_EQ(mImg->fmt, IMG_FMT_I420);
-
-            uint32_t width = mImg->d_w;
-            uint32_t height = mImg->d_h;
-            bool portWillReset = false;
-            handlePortSettingsChange(&portWillReset, width, height);
-            if (portWillReset) {
+            if (portWillReset || mEOSStatus == OUTPUT_FRAMES_FLUSHED ||
+                    mEOSStatus == INPUT_EOS_SEEN) {
                 return;
             }
+        }
 
-            outHeader->nOffset = 0;
-            outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
-            outHeader->nFlags = EOSseen ? OMX_BUFFERFLAG_EOS : 0;
-            outHeader->nTimeStamp = inHeader->nTimeStamp;
+        BufferInfo *inInfo = *inQueue.begin();
+        OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
+        mTimeStamps[mTimeStampIdx] = inHeader->nTimeStamp;
 
-            uint8_t *dst = outHeader->pBuffer;
-            const uint8_t *srcY = (const uint8_t *)mImg->planes[PLANE_Y];
-            const uint8_t *srcU = (const uint8_t *)mImg->planes[PLANE_U];
-            const uint8_t *srcV = (const uint8_t *)mImg->planes[PLANE_V];
-            size_t srcYStride = mImg->stride[PLANE_Y];
-            size_t srcUStride = mImg->stride[PLANE_U];
-            size_t srcVStride = mImg->stride[PLANE_V];
-            copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+        BufferInfo *outInfo = *outQueue.begin();
+        OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
+        if (inHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+            mEOSStatus = INPUT_EOS_SEEN;
+            EOSseen = true;
+        }
 
-            mImg = NULL;
-            outInfo->mOwnedByUs = false;
-            outQueue.erase(outQueue.begin());
-            outInfo = NULL;
-            notifyFillBufferDone(outHeader);
-            outHeader = NULL;
+        if (inHeader->nFilledLen > 0 &&
+            vpx_codec_decode((vpx_codec_ctx_t *)mCtx,
+                              inHeader->pBuffer + inHeader->nOffset,
+                              inHeader->nFilledLen,
+                              &mTimeStamps[mTimeStampIdx], 0)) {
+            ALOGE("on2 decoder failed to decode frame.");
+            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            return;
+        }
+        mTimeStampIdx = (mTimeStampIdx + 1) % kNumBuffers;
+
+        if (!outputBuffers(
+                 EOSseen /* flushDecoder */, true /* display */, EOSseen, &portWillReset)) {
+            ALOGE("on2 decoder failed to output frame.");
+            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            return;
+        }
+        if (portWillReset) {
+            return;
         }
 
         inInfo->mOwnedByUs = false;
@@ -176,6 +253,30 @@
     }
 }
 
+void SoftVPX::onPortFlushCompleted(OMX_U32 portIndex) {
+    if (portIndex == kInputPortIndex) {
+        bool portWillReset = false;
+        if (!outputBuffers(
+                 true /* flushDecoder */, false /* display */, false /* eos */, &portWillReset)) {
+            ALOGE("Failed to flush decoder.");
+            notify(OMX_EventError, OMX_ErrorUndefined, 0, NULL);
+            return;
+        }
+        mEOSStatus = INPUT_DATA_AVAILABLE;
+    }
+}
+
+void SoftVPX::onReset() {
+    bool portWillReset = false;
+    if (!outputBuffers(
+             true /* flushDecoder */, false /* display */, false /* eos */, &portWillReset)) {
+        ALOGW("Failed to flush decoder. Try to hard reset decoder");
+        destroyDecoder();
+        initDecoder();
+    }
+    mEOSStatus = INPUT_DATA_AVAILABLE;
+}
+
 }  // namespace android
 
 android::SoftOMXComponent *createSoftOMXComponent(
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.h b/media/libstagefright/codecs/on2/dec/SoftVPX.h
index 8f68693..8ccbae2 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.h
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.h
@@ -38,6 +38,8 @@
     virtual ~SoftVPX();
 
     virtual void onQueueFilled(OMX_U32 portIndex);
+    virtual void onPortFlushCompleted(OMX_U32 portIndex);
+    virtual void onReset();
 
 private:
     enum {
@@ -49,11 +51,21 @@
         MODE_VP9
     } mMode;
 
-    void *mCtx;
+    enum {
+        INPUT_DATA_AVAILABLE,  // VPX component is ready to decode data.
+        INPUT_EOS_SEEN,        // VPX component saw EOS and is flushing On2 decoder.
+        OUTPUT_FRAMES_FLUSHED  // VPX component finished flushing On2 decoder.
+    } mEOSStatus;
 
+    void *mCtx;
+    bool mFrameParallelMode;  // Frame parallel is only supported by VP9 decoder.
+    OMX_TICKS mTimeStamps[kNumBuffers];
+    uint8_t mTimeStampIdx;
     vpx_image_t *mImg;
 
     status_t initDecoder();
+    status_t destroyDecoder();
+    bool outputBuffers(bool flushDecoder, bool display, bool eos, bool *portWillReset);
 
     DISALLOW_EVIL_CONSTRUCTORS(SoftVPX);
 };
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 970acf3..e654843 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -661,7 +661,8 @@
         BufferInfo *outputBufferInfo = *outputBufferInfoQueue.begin();
         OMX_BUFFERHEADERTYPE *outputBufferHeader = outputBufferInfo->mHeader;
 
-        if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+        if ((inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) &&
+                inputBufferHeader->nFilledLen == 0) {
             inputBufferInfoQueue.erase(inputBufferInfoQueue.begin());
             inputBufferInfo->mOwnedByUs = false;
             notifyEmptyBufferDone(inputBufferHeader);
@@ -762,6 +763,9 @@
                        encoded_packet->data.frame.sz);
                 outputBufferInfo->mOwnedByUs = false;
                 outputBufferInfoQueue.erase(outputBufferInfoQueue.begin());
+                if (inputBufferHeader->nFlags & OMX_BUFFERFLAG_EOS) {
+                    outputBufferHeader->nFlags |= OMX_BUFFERFLAG_EOS;
+                }
                 notifyFillBufferDone(outputBufferHeader);
             }
         }
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index ed52a37..cb10bce 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -345,9 +345,15 @@
             }
 
             uint8_t channel_mapping[kMaxChannels] = {0};
-            memcpy(&channel_mapping,
-                   kDefaultOpusChannelLayout,
-                   kMaxChannelsWithDefaultLayout);
+            if (mHeader->channels <= kMaxChannelsWithDefaultLayout) {
+                memcpy(&channel_mapping,
+                       kDefaultOpusChannelLayout,
+                       kMaxChannelsWithDefaultLayout);
+            } else {
+                memcpy(&channel_mapping,
+                       mHeader->stream_map,
+                       mHeader->channels);
+            }
 
             int status = OPUS_INVALID_STATE;
             mDecoder = opus_multistream_decoder_create(kRate,
@@ -397,6 +403,14 @@
         BufferInfo *inInfo = *inQueue.begin();
         OMX_BUFFERHEADERTYPE *inHeader = inInfo->mHeader;
 
+        // Ignore CSD re-submissions.
+        if (inHeader->nFlags & OMX_BUFFERFLAG_CODECCONFIG) {
+            inQueue.erase(inQueue.begin());
+            inInfo->mOwnedByUs = false;
+            notifyEmptyBufferDone(inHeader);
+            return;
+        }
+
         BufferInfo *outInfo = *outQueue.begin();
         OMX_BUFFERHEADERTYPE *outHeader = outInfo->mHeader;
 
diff --git a/media/libstagefright/colorconversion/Android.mk b/media/libstagefright/colorconversion/Android.mk
index 59a64ba..4f7c48f 100644
--- a/media/libstagefright/colorconversion/Android.mk
+++ b/media/libstagefright/colorconversion/Android.mk
@@ -9,6 +9,9 @@
         $(TOP)/frameworks/native/include/media/openmax \
         $(TOP)/hardware/msm7k
 
+LOCAL_CFLAGS += -Werror
+LOCAL_CLANG := true
+
 LOCAL_MODULE:= libstagefright_color_conversion
 
 include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 04467b9..e92c192 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -38,7 +38,8 @@
     return (x + y - 1) & ~(y - 1);
 }
 
-SoftwareRenderer::SoftwareRenderer(const sp<ANativeWindow> &nativeWindow)
+SoftwareRenderer::SoftwareRenderer(
+        const sp<ANativeWindow> &nativeWindow, int32_t rotation)
     : mColorFormat(OMX_COLOR_FormatUnused),
       mConverter(NULL),
       mYUVMode(None),
@@ -50,7 +51,8 @@
       mCropRight(0),
       mCropBottom(0),
       mCropWidth(0),
-      mCropHeight(0) {
+      mCropHeight(0),
+      mRotationDegrees(rotation) {
 }
 
 SoftwareRenderer::~SoftwareRenderer() {
@@ -98,33 +100,49 @@
     mCropWidth = mCropRight - mCropLeft + 1;
     mCropHeight = mCropBottom - mCropTop + 1;
 
-    int halFormat;
-    size_t bufWidth, bufHeight;
+    // by default convert everything to RGB565
+    int halFormat = HAL_PIXEL_FORMAT_RGB_565;
+    size_t bufWidth = mCropWidth;
+    size_t bufHeight = mCropHeight;
 
-    switch (mColorFormat) {
-        case OMX_COLOR_FormatYUV420Planar:
-        case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
-        case OMX_COLOR_FormatYUV420SemiPlanar:
-        {
-            if (!runningInEmulator()) {
+    // hardware has YV12 and RGBA8888 support, so convert known formats
+    if (!runningInEmulator()) {
+        switch (mColorFormat) {
+            case OMX_COLOR_FormatYUV420Planar:
+            case OMX_COLOR_FormatYUV420SemiPlanar:
+            case OMX_TI_COLOR_FormatYUV420PackedSemiPlanar:
+            {
                 halFormat = HAL_PIXEL_FORMAT_YV12;
                 bufWidth = (mCropWidth + 1) & ~1;
                 bufHeight = (mCropHeight + 1) & ~1;
                 break;
             }
-
-            // fall through.
+            case OMX_COLOR_Format24bitRGB888:
+            {
+                halFormat = HAL_PIXEL_FORMAT_RGB_888;
+                bufWidth = (mCropWidth + 1) & ~1;
+                bufHeight = (mCropHeight + 1) & ~1;
+                break;
+            }
+            case OMX_COLOR_Format32bitARGB8888:
+            case OMX_COLOR_Format32BitRGBA8888:
+            {
+                halFormat = HAL_PIXEL_FORMAT_RGBA_8888;
+                bufWidth = (mCropWidth + 1) & ~1;
+                bufHeight = (mCropHeight + 1) & ~1;
+                break;
+            }
+            default:
+            {
+                break;
+            }
         }
+    }
 
-        default:
-            halFormat = HAL_PIXEL_FORMAT_RGB_565;
-            bufWidth = mCropWidth;
-            bufHeight = mCropHeight;
-
-            mConverter = new ColorConverter(
-                    mColorFormat, OMX_COLOR_Format16bitRGB565);
-            CHECK(mConverter->isValid());
-            break;
+    if (halFormat == HAL_PIXEL_FORMAT_RGB_565) {
+        mConverter = new ColorConverter(
+                mColorFormat, OMX_COLOR_Format16bitRGB565);
+        CHECK(mConverter->isValid());
     }
 
     CHECK(mNativeWindow != NULL);
@@ -165,7 +183,7 @@
 
     int32_t rotationDegrees;
     if (!format->findInt32("rotation-degrees", &rotationDegrees)) {
-        rotationDegrees = 0;
+        rotationDegrees = mRotationDegrees;
     }
     uint32_t transform;
     switch (rotationDegrees) {
@@ -180,17 +198,29 @@
                 mNativeWindow.get(), transform));
 }
 
-void SoftwareRenderer::render(
-        const void *data, size_t size, int64_t timestampNs,
+void SoftwareRenderer::clearTracker() {
+    mRenderTracker.clear(-1 /* lastRenderTimeNs */);
+}
+
+std::list<FrameRenderTracker::Info> SoftwareRenderer::render(
+        const void *data, size_t size, int64_t mediaTimeUs, nsecs_t renderTimeNs,
         void* /*platformPrivate*/, const sp<AMessage>& format) {
     resetFormatIfChanged(format);
+    FrameRenderTracker::Info *info = NULL;
 
     ANativeWindowBuffer *buf;
-    int err;
-    if ((err = native_window_dequeue_buffer_and_wait(mNativeWindow.get(),
-            &buf)) != 0) {
+    int fenceFd = -1;
+    int err = mNativeWindow->dequeueBuffer(mNativeWindow.get(), &buf, &fenceFd);
+    if (err == 0 && fenceFd >= 0) {
+        info = mRenderTracker.updateInfoForDequeuedBuffer(buf, fenceFd, 0);
+        sp<Fence> fence = new Fence(fenceFd);
+        err = fence->waitForever("SoftwareRenderer::render");
+    }
+    if (err != 0) {
         ALOGW("Surface::dequeueBuffer returned error %d", err);
-        return;
+        // complete (drop) the dequeued frame if the fence wait failed; otherwise,
+        // this returns an empty list, as no frames should have been rendered but not yet returned.
+        return mRenderTracker.checkFencesAndGetRenderedFrames(info, false /* dropIncomplete */);
     }
 
     GraphicBufferMapper &mapper = GraphicBufferMapper::get();
@@ -201,6 +231,8 @@
     CHECK_EQ(0, mapper.lock(
                 buf->handle, GRALLOC_USAGE_SW_WRITE_OFTEN, bounds, &dst));
 
+    // TODO: move the other conversions into ColorConverter as well, and
+    // fix cropping issues (when mCropLeft/Top != 0 or mWidth != mCropWidth)
     if (mConverter) {
         mConverter->convert(
                 data,
@@ -214,7 +246,8 @@
             goto skip_copying;
         }
         const uint8_t *src_y = (const uint8_t *)data;
-        const uint8_t *src_u = (const uint8_t *)data + mWidth * mHeight;
+        const uint8_t *src_u =
+                (const uint8_t *)data + mWidth * mHeight;
         const uint8_t *src_v = src_u + (mWidth / 2 * mHeight / 2);
 
         uint8_t *dst_y = (uint8_t *)dst;
@@ -245,12 +278,9 @@
         if ((size_t)mWidth * mHeight * 3 / 2 > size) {
             goto skip_copying;
         }
-
-        const uint8_t *src_y =
-            (const uint8_t *)data;
-
-        const uint8_t *src_uv =
-            (const uint8_t *)data + mWidth * (mHeight - mCropTop / 2);
+        const uint8_t *src_y = (const uint8_t *)data;
+        const uint8_t *src_uv = (const uint8_t *)data
+                + mWidth * (mHeight - mCropTop / 2);
 
         uint8_t *dst_y = (uint8_t *)dst;
 
@@ -278,6 +308,47 @@
             dst_u += dst_c_stride;
             dst_v += dst_c_stride;
         }
+    } else if (mColorFormat == OMX_COLOR_Format24bitRGB888) {
+        if ((size_t)mWidth * mHeight * 3 > size) {
+            goto skip_copying;
+        }
+        uint8_t* srcPtr = (uint8_t*)data;
+        uint8_t* dstPtr = (uint8_t*)dst;
+
+        for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
+            memcpy(dstPtr, srcPtr, mCropWidth * 3);
+            srcPtr += mWidth * 3;
+            dstPtr += buf->stride * 3;
+        }
+    } else if (mColorFormat == OMX_COLOR_Format32bitARGB8888) {
+        if ((size_t)mWidth * mHeight * 4 > size) {
+            goto skip_copying;
+        }
+        uint8_t *srcPtr, *dstPtr;
+
+        for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
+            srcPtr = (uint8_t*)data + mWidth * 4 * y;
+            dstPtr = (uint8_t*)dst + buf->stride * 4 * y;
+            for (size_t x = 0; x < (size_t)mCropWidth; ++x) {
+                uint8_t a = *srcPtr++;
+                for (size_t i = 0; i < 3; ++i) {   // copy RGB
+                    *dstPtr++ = *srcPtr++;
+                }
+                *dstPtr++ = a;  // alpha last (ARGB to RGBA)
+            }
+        }
+    } else if (mColorFormat == OMX_COLOR_Format32BitRGBA8888) {
+        if ((size_t)mWidth * mHeight * 4 > size) {
+            goto skip_copying;
+        }
+        uint8_t* srcPtr = (uint8_t*)data;
+        uint8_t* dstPtr = (uint8_t*)dst;
+
+        for (size_t y = 0; y < (size_t)mCropHeight; ++y) {
+            memcpy(dstPtr, srcPtr, mCropWidth * 4);
+            srcPtr += mWidth * 4;
+            dstPtr += buf->stride * 4;
+        }
     } else {
         LOG_ALWAYS_FATAL("bad color format %#x", mColorFormat);
     }
@@ -285,16 +356,21 @@
 skip_copying:
     CHECK_EQ(0, mapper.unlock(buf->handle));
 
-    if ((err = native_window_set_buffers_timestamp(mNativeWindow.get(),
-            timestampNs)) != 0) {
-        ALOGW("Surface::set_buffers_timestamp returned error %d", err);
+    if (renderTimeNs >= 0) {
+        if ((err = native_window_set_buffers_timestamp(mNativeWindow.get(),
+                renderTimeNs)) != 0) {
+            ALOGW("Surface::set_buffers_timestamp returned error %d", err);
+        }
     }
 
-    if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf,
-            -1)) != 0) {
+    if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf, -1)) != 0) {
         ALOGW("Surface::queueBuffer returned error %d", err);
+    } else {
+        mRenderTracker.onFrameQueued(mediaTimeUs, (GraphicBuffer *)buf, Fence::NO_FENCE);
     }
+
     buf = NULL;
+    return mRenderTracker.checkFencesAndGetRenderedFrames(info, info != NULL /* dropIncomplete */);
 }
 
 }  // namespace android
diff --git a/media/libstagefright/filters/Android.mk b/media/libstagefright/filters/Android.mk
new file mode 100644
index 0000000..179f054
--- /dev/null
+++ b/media/libstagefright/filters/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+        ColorConvert.cpp          \
+        GraphicBufferListener.cpp \
+        IntrinsicBlurFilter.cpp   \
+        MediaFilter.cpp           \
+        RSFilter.cpp              \
+        SaturationFilter.cpp      \
+        saturationARGB.rs         \
+        SimpleFilter.cpp          \
+        ZeroFilter.cpp
+
+LOCAL_C_INCLUDES := \
+        $(TOP)/frameworks/native/include/media/openmax \
+        $(TOP)/frameworks/rs/cpp \
+        $(TOP)/frameworks/rs \
+
+intermediates := $(call intermediates-dir-for,STATIC_LIBRARIES,libRS,TARGET,)
+LOCAL_C_INCLUDES += $(intermediates)
+
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
+
+LOCAL_MODULE:= libstagefright_mediafilter
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libstagefright/filters/ColorConvert.cpp b/media/libstagefright/filters/ColorConvert.cpp
new file mode 100644
index 0000000..a8d5dd2
--- /dev/null
+++ b/media/libstagefright/filters/ColorConvert.cpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ColorConvert.h"
+
+#ifndef max
+#define max(a,b) ((a) > (b) ? (a) : (b))
+#endif
+#ifndef min
+#define min(a,b) ((a) < (b) ? (a) : (b))
+#endif
+
+namespace android {
+
+void YUVToRGB(
+        int32_t y, int32_t u, int32_t v,
+        int32_t* r, int32_t* g, int32_t* b) {
+    y -= 16;
+    u -= 128;
+    v -= 128;
+
+    *b = 1192 * y + 2066 * u;
+    *g = 1192 * y - 833 * v - 400 * u;
+    *r = 1192 * y + 1634 * v;
+
+    *r = min(262143, max(0, *r));
+    *g = min(262143, max(0, *g));
+    *b = min(262143, max(0, *b));
+
+    *r >>= 10;
+    *g >>= 10;
+    *b >>= 10;
+}
+
+void convertYUV420spToARGB(
+        uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+        uint8_t *dest) {
+    const int32_t bytes_per_pixel = 2;
+
+    for (int32_t i = 0; i < height; i++) {
+        for (int32_t j = 0; j < width; j++) {
+            int32_t y = *(pY + i * width + j);
+            int32_t u = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
+            int32_t v = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
+
+            int32_t r, g, b;
+            YUVToRGB(y, u, v, &r, &g, &b);
+
+            *dest++ = 0xFF;
+            *dest++ = r;
+            *dest++ = g;
+            *dest++ = b;
+        }
+    }
+}
+
+void convertYUV420spToRGB888(
+        uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+        uint8_t *dest) {
+    const int32_t bytes_per_pixel = 2;
+
+    for (int32_t i = 0; i < height; i++) {
+        for (int32_t j = 0; j < width; j++) {
+            int32_t y = *(pY + i * width + j);
+            int32_t u = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
+            int32_t v = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
+
+            int32_t r, g, b;
+            YUVToRGB(y, u, v, &r, &g, &b);
+
+            *dest++ = r;
+            *dest++ = g;
+            *dest++ = b;
+        }
+    }
+}
+
+// HACK - not even slightly optimized
+// TODO: remove when RGBA support is added to SoftwareRenderer
+void convertRGBAToARGB(
+        uint8_t *src, int32_t width, int32_t height, uint32_t stride,
+        uint8_t *dest) {
+    for (int32_t i = 0; i < height; ++i) {
+        for (int32_t j = 0; j < width; ++j) {
+            uint8_t r = *src++;
+            uint8_t g = *src++;
+            uint8_t b = *src++;
+            uint8_t a = *src++;
+            *dest++ = a;
+            *dest++ = r;
+            *dest++ = g;
+            *dest++ = b;
+        }
+        src += (stride - width) * 4;
+    }
+}
+
+}   // namespace android
diff --git a/media/libstagefright/filters/ColorConvert.h b/media/libstagefright/filters/ColorConvert.h
new file mode 100644
index 0000000..13faa02
--- /dev/null
+++ b/media/libstagefright/filters/ColorConvert.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COLOR_CONVERT_H_
+#define COLOR_CONVERT_H_
+
+#include <inttypes.h>
+
+namespace android {
+
+void YUVToRGB(
+        int32_t y, int32_t u, int32_t v,
+        int32_t* r, int32_t* g, int32_t* b);
+
+void convertYUV420spToARGB(
+        uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+        uint8_t *dest);
+
+void convertYUV420spToRGB888(
+        uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
+        uint8_t *dest);
+
+// TODO: remove when RGBA support is added to SoftwareRenderer
+void convertRGBAToARGB(
+        uint8_t *src, int32_t width, int32_t height, uint32_t stride,
+        uint8_t *dest);
+
+}   // namespace android
+
+#endif  // COLOR_CONVERT_H_
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
new file mode 100644
index 0000000..a606315
--- /dev/null
+++ b/media/libstagefright/filters/GraphicBufferListener.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "GraphicBufferListener"
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaErrors.h>
+
+#include <gui/BufferItem.h>
+
+#include "GraphicBufferListener.h"
+
+namespace android {
+
+status_t GraphicBufferListener::init(
+        const sp<AMessage> &notify,
+        size_t bufferWidth, size_t bufferHeight, size_t bufferCount) {
+    mNotify = notify;
+
+    String8 name("GraphicBufferListener");
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+    mConsumer->setConsumerName(name);
+    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
+    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_SW_READ_OFTEN);
+
+    status_t err = mConsumer->setMaxAcquiredBufferCount(bufferCount);
+    if (err != NO_ERROR) {
+        ALOGE("Unable to set BQ max acquired buffer count to %zu: %d",
+                bufferCount, err);
+        return err;
+    }
+
+    wp<BufferQueue::ConsumerListener> listener =
+        static_cast<BufferQueue::ConsumerListener*>(this);
+    sp<BufferQueue::ProxyConsumerListener> proxy =
+        new BufferQueue::ProxyConsumerListener(listener);
+
+    err = mConsumer->consumerConnect(proxy, false);
+    if (err != NO_ERROR) {
+        ALOGE("Error connecting to BufferQueue: %s (%d)",
+                strerror(-err), err);
+        return err;
+    }
+
+    ALOGV("init() successful.");
+
+    return OK;
+}
+
+void GraphicBufferListener::onFrameAvailable(const BufferItem& /* item */) {
+    ALOGV("onFrameAvailable() called");
+
+    {
+        Mutex::Autolock autoLock(mMutex);
+        mNumFramesAvailable++;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    mNotify->setWhat(kWhatFrameAvailable);
+    mNotify->post();
+}
+
+void GraphicBufferListener::onBuffersReleased() {
+    ALOGV("onBuffersReleased() called");
+    // nothing to do
+}
+
+void GraphicBufferListener::onSidebandStreamChanged() {
+    ALOGW("GraphicBufferListener cannot consume sideband streams.");
+    // nothing to do
+}
+
+BufferItem GraphicBufferListener::getBufferItem() {
+    BufferItem item;
+
+    {
+        Mutex::Autolock autoLock(mMutex);
+        if (mNumFramesAvailable <= 0) {
+            ALOGE("getBuffer() called with no frames available");
+            return item;
+        }
+        mNumFramesAvailable--;
+    }
+
+    status_t err = mConsumer->acquireBuffer(&item, 0);
+    if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
+        // shouldn't happen, since we track num frames available
+        ALOGE("frame was not available");
+        item.mBuf = -1;
+        return item;
+    } else if (err != OK) {
+        ALOGE("acquireBuffer returned err=%d", err);
+        item.mBuf = -1;
+        return item;
+    }
+
+    // Wait for it to become available.
+    err = item.mFence->waitForever("GraphicBufferListener::getBufferItem");
+    if (err != OK) {
+        ALOGW("failed to wait for buffer fence: %d", err);
+        // keep going
+    }
+
+    // If this is the first time we're seeing this buffer, add it to our
+    // slot table.
+    if (item.mGraphicBuffer != NULL) {
+        ALOGV("setting mBufferSlot %d", item.mBuf);
+        mBufferSlot[item.mBuf] = item.mGraphicBuffer;
+    }
+
+    return item;
+}
+
+sp<GraphicBuffer> GraphicBufferListener::getBuffer(BufferItem item) {
+    sp<GraphicBuffer> buf;
+    if (item.mBuf < 0 || item.mBuf >= BufferQueue::NUM_BUFFER_SLOTS) {
+        ALOGE("getBuffer() received invalid BufferItem: mBuf==%d", item.mBuf);
+        return buf;
+    }
+
+    buf = mBufferSlot[item.mBuf];
+    CHECK(buf.get() != NULL);
+
+    return buf;
+}
+
+status_t GraphicBufferListener::releaseBuffer(BufferItem item) {
+    if (item.mBuf < 0 || item.mBuf >= BufferQueue::NUM_BUFFER_SLOTS) {
+        ALOGE("getBuffer() received invalid BufferItem: mBuf==%d", item.mBuf);
+        return ERROR_OUT_OF_RANGE;
+    }
+
+    mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
+            EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+
+    return OK;
+}
+
+}   // namespace android
diff --git a/media/libstagefright/filters/GraphicBufferListener.h b/media/libstagefright/filters/GraphicBufferListener.h
new file mode 100644
index 0000000..586bf65
--- /dev/null
+++ b/media/libstagefright/filters/GraphicBufferListener.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef GRAPHIC_BUFFER_LISTENER_H_
+#define GRAPHIC_BUFFER_LISTENER_H_
+
+#include <gui/BufferQueue.h>
+
+namespace android {
+
+struct AMessage;
+
+struct GraphicBufferListener : public BufferQueue::ConsumerListener {
+public:
+    GraphicBufferListener() {};
+
+    status_t init(
+            const sp<AMessage> &notify,
+            size_t bufferWidth, size_t bufferHeight, size_t bufferCount);
+
+    virtual void onFrameAvailable(const BufferItem& item);
+    virtual void onBuffersReleased();
+    virtual void onSidebandStreamChanged();
+
+    // Returns the handle to the producer side of the BufferQueue.  Buffers
+    // queued on this will be received by GraphicBufferListener.
+    sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
+        return mProducer;
+    }
+
+    BufferItem getBufferItem();
+    sp<GraphicBuffer> getBuffer(BufferItem item);
+    status_t releaseBuffer(BufferItem item);
+
+    enum {
+        kWhatFrameAvailable = 'frav',
+    };
+
+private:
+    sp<AMessage> mNotify;
+    size_t mNumFramesAvailable;
+
+    mutable Mutex mMutex;
+
+    // Our BufferQueue interfaces. mProducer is passed to the producer through
+    // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
+    // the buffers queued by the producer.
+    sp<IGraphicBufferProducer> mProducer;
+    sp<IGraphicBufferConsumer> mConsumer;
+
+    // Cache of GraphicBuffers from the buffer queue.
+    sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
+};
+
+}   // namespace android
+
+#endif  // GRAPHIC_BUFFER_LISTENER_H_
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.cpp b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
new file mode 100644
index 0000000..cbcf699
--- /dev/null
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "IntrinsicBlurFilter"
+
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "IntrinsicBlurFilter.h"
+
+namespace android {
+
+status_t IntrinsicBlurFilter::configure(const sp<AMessage> &msg) {
+    status_t err = SimpleFilter::configure(msg);
+    if (err != OK) {
+        return err;
+    }
+
+    if (!msg->findString("cacheDir", &mCacheDir)) {
+        ALOGE("Failed to find cache directory in config message.");
+        return NAME_NOT_FOUND;
+    }
+
+    return OK;
+}
+
+status_t IntrinsicBlurFilter::start() {
+    // TODO: use a single RS context object for entire application
+    mRS = new RSC::RS();
+
+    if (!mRS->init(mCacheDir.c_str())) {
+        ALOGE("Failed to initialize RenderScript context.");
+        return NO_INIT;
+    }
+
+    // 32-bit elements for ARGB8888
+    RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
+
+    RSC::Type::Builder tb(mRS, e);
+    tb.setX(mWidth);
+    tb.setY(mHeight);
+    RSC::sp<const RSC::Type> t = tb.create();
+
+    mAllocIn = RSC::Allocation::createTyped(mRS, t);
+    mAllocOut = RSC::Allocation::createTyped(mRS, t);
+
+    mBlur = RSC::ScriptIntrinsicBlur::create(mRS, e);
+    mBlur->setRadius(mBlurRadius);
+    mBlur->setInput(mAllocIn);
+
+    return OK;
+}
+
+void IntrinsicBlurFilter::reset() {
+    mBlur.clear();
+    mAllocOut.clear();
+    mAllocIn.clear();
+    mRS.clear();
+}
+
+status_t IntrinsicBlurFilter::setParameters(const sp<AMessage> &msg) {
+    sp<AMessage> params;
+    CHECK(msg->findMessage("params", &params));
+
+    float blurRadius;
+    if (params->findFloat("blur-radius", &blurRadius)) {
+        mBlurRadius = blurRadius;
+    }
+
+    return OK;
+}
+
+status_t IntrinsicBlurFilter::processBuffers(
+        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+    mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
+    mBlur->forEach(mAllocOut);
+    mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
+
+    return OK;
+}
+
+}   // namespace android
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/IntrinsicBlurFilter.h
new file mode 100644
index 0000000..4707ab7
--- /dev/null
+++ b/media/libstagefright/filters/IntrinsicBlurFilter.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef INTRINSIC_BLUR_FILTER_H_
+#define INTRINSIC_BLUR_FILTER_H_
+
+#include "RenderScript.h"
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct IntrinsicBlurFilter : public SimpleFilter {
+public:
+    IntrinsicBlurFilter() : mBlurRadius(1.f) {};
+
+    virtual status_t configure(const sp<AMessage> &msg);
+    virtual status_t start();
+    virtual void reset();
+    virtual status_t setParameters(const sp<AMessage> &msg);
+    virtual status_t processBuffers(
+            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+    virtual ~IntrinsicBlurFilter() {};
+
+private:
+    AString mCacheDir;
+    RSC::sp<RSC::RS> mRS;
+    RSC::sp<RSC::Allocation> mAllocIn;
+    RSC::sp<RSC::Allocation> mAllocOut;
+    RSC::sp<RSC::ScriptIntrinsicBlur> mBlur;
+    float mBlurRadius;
+};
+
+}   // namespace android
+
+#endif  // INTRINSIC_BLUR_FILTER_H_
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
new file mode 100644
index 0000000..0cf6b06
--- /dev/null
+++ b/media/libstagefright/filters/MediaFilter.cpp
@@ -0,0 +1,823 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaFilter"
+
+#include <inttypes.h>
+#include <utils/Trace.h>
+
+#include <binder/MemoryDealer.h>
+
+#include <media/stagefright/BufferProducerWrapper.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/MediaFilter.h>
+
+#include <gui/BufferItem.h>
+
+#include "ColorConvert.h"
+#include "GraphicBufferListener.h"
+#include "IntrinsicBlurFilter.h"
+#include "RSFilter.h"
+#include "SaturationFilter.h"
+#include "ZeroFilter.h"
+
+namespace android {
+
+// parameter: number of input and output buffers
+static const size_t kBufferCountActual = 4;
+
+MediaFilter::MediaFilter()
+    : mState(UNINITIALIZED),
+      mGeneration(0),
+      mGraphicBufferListener(NULL) {
+}
+
+MediaFilter::~MediaFilter() {
+}
+
+//////////////////// PUBLIC FUNCTIONS //////////////////////////////////////////
+
+void MediaFilter::setNotificationMessage(const sp<AMessage> &msg) {
+    mNotify = msg;
+}
+
+void MediaFilter::initiateAllocateComponent(const sp<AMessage> &msg) {
+    msg->setWhat(kWhatAllocateComponent);
+    msg->setTarget(this);
+    msg->post();
+}
+
+void MediaFilter::initiateConfigureComponent(const sp<AMessage> &msg) {
+    msg->setWhat(kWhatConfigureComponent);
+    msg->setTarget(this);
+    msg->post();
+}
+
+void MediaFilter::initiateCreateInputSurface() {
+    (new AMessage(kWhatCreateInputSurface, this))->post();
+}
+
+void MediaFilter::initiateSetInputSurface(
+        const sp<PersistentSurface> & /* surface */) {
+    ALOGW("initiateSetInputSurface() unsupported");
+}
+
+void MediaFilter::initiateStart() {
+    (new AMessage(kWhatStart, this))->post();
+}
+
+void MediaFilter::initiateShutdown(bool keepComponentAllocated) {
+    sp<AMessage> msg = new AMessage(kWhatShutdown, this);
+    msg->setInt32("keepComponentAllocated", keepComponentAllocated);
+    msg->post();
+}
+
+void MediaFilter::signalFlush() {
+    (new AMessage(kWhatFlush, this))->post();
+}
+
+void MediaFilter::signalResume() {
+    (new AMessage(kWhatResume, this))->post();
+}
+
+// nothing to do
+void MediaFilter::signalRequestIDRFrame() {
+    return;
+}
+
+void MediaFilter::signalSetParameters(const sp<AMessage> &params) {
+    sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
+    msg->setMessage("params", params);
+    msg->post();
+}
+
+void MediaFilter::signalEndOfInputStream() {
+    (new AMessage(kWhatSignalEndOfInputStream, this))->post();
+}
+
+void MediaFilter::onMessageReceived(const sp<AMessage> &msg) {
+    switch (msg->what()) {
+        case kWhatAllocateComponent:
+        {
+            onAllocateComponent(msg);
+            break;
+        }
+        case kWhatConfigureComponent:
+        {
+            onConfigureComponent(msg);
+            break;
+        }
+        case kWhatStart:
+        {
+            onStart();
+            break;
+        }
+        case kWhatProcessBuffers:
+        {
+            processBuffers();
+            break;
+        }
+        case kWhatInputBufferFilled:
+        {
+            onInputBufferFilled(msg);
+            break;
+        }
+        case kWhatOutputBufferDrained:
+        {
+            onOutputBufferDrained(msg);
+            break;
+        }
+        case kWhatShutdown:
+        {
+            onShutdown(msg);
+            break;
+        }
+        case kWhatFlush:
+        {
+            onFlush();
+            break;
+        }
+        case kWhatResume:
+        {
+            // nothing to do
+            break;
+        }
+        case kWhatSetParameters:
+        {
+            onSetParameters(msg);
+            break;
+        }
+        case kWhatCreateInputSurface:
+        {
+            onCreateInputSurface();
+            break;
+        }
+        case GraphicBufferListener::kWhatFrameAvailable:
+        {
+            onInputFrameAvailable();
+            break;
+        }
+        case kWhatSignalEndOfInputStream:
+        {
+            onSignalEndOfInputStream();
+            break;
+        }
+        default:
+        {
+            ALOGE("Message not handled:\n%s", msg->debugString().c_str());
+            break;
+        }
+    }
+}
+
+//////////////////// PORT DESCRIPTION //////////////////////////////////////////
+
+MediaFilter::PortDescription::PortDescription() {
+}
+
+void MediaFilter::PortDescription::addBuffer(
+        IOMX::buffer_id id, const sp<ABuffer> &buffer) {
+    mBufferIDs.push_back(id);
+    mBuffers.push_back(buffer);
+}
+
+size_t MediaFilter::PortDescription::countBuffers() {
+    return mBufferIDs.size();
+}
+
+IOMX::buffer_id MediaFilter::PortDescription::bufferIDAt(size_t index) const {
+    return mBufferIDs.itemAt(index);
+}
+
+sp<ABuffer> MediaFilter::PortDescription::bufferAt(size_t index) const {
+    return mBuffers.itemAt(index);
+}
+
+//////////////////// HELPER FUNCTIONS //////////////////////////////////////////
+
+void MediaFilter::signalProcessBuffers() {
+    (new AMessage(kWhatProcessBuffers, this))->post();
+}
+
+void MediaFilter::signalError(status_t error) {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatError);
+    notify->setInt32("err", error);
+    notify->post();
+}
+
+status_t MediaFilter::allocateBuffersOnPort(OMX_U32 portIndex) {
+    CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
+    const bool isInput = portIndex == kPortIndexInput;
+    const size_t bufferSize = isInput ? mMaxInputSize : mMaxOutputSize;
+
+    CHECK(mDealer[portIndex] == NULL);
+    CHECK(mBuffers[portIndex].isEmpty());
+
+    ALOGV("Allocating %zu buffers of size %zu on %s port",
+            kBufferCountActual, bufferSize,
+            isInput ? "input" : "output");
+
+    size_t totalSize = kBufferCountActual * bufferSize;
+
+    mDealer[portIndex] = new MemoryDealer(totalSize, "MediaFilter");
+
+    for (size_t i = 0; i < kBufferCountActual; ++i) {
+        sp<IMemory> mem = mDealer[portIndex]->allocate(bufferSize);
+        CHECK(mem.get() != NULL);
+
+        BufferInfo info;
+        info.mStatus = BufferInfo::OWNED_BY_US;
+        info.mBufferID = i;
+        info.mGeneration = mGeneration;
+        info.mOutputFlags = 0;
+        info.mData = new ABuffer(mem->pointer(), bufferSize);
+        info.mData->meta()->setInt64("timeUs", 0);
+
+        mBuffers[portIndex].push_back(info);
+
+        if (!isInput) {
+            mAvailableOutputBuffers.push(
+                    &mBuffers[portIndex].editItemAt(i));
+        }
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatBuffersAllocated);
+
+    notify->setInt32("portIndex", portIndex);
+
+    sp<PortDescription> desc = new PortDescription;
+
+    for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+        const BufferInfo &info = mBuffers[portIndex][i];
+
+        desc->addBuffer(info.mBufferID, info.mData);
+    }
+
+    notify->setObject("portDesc", desc);
+    notify->post();
+
+    return OK;
+}
+
+MediaFilter::BufferInfo* MediaFilter::findBufferByID(
+        uint32_t portIndex, IOMX::buffer_id bufferID,
+        ssize_t *index) {
+    for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
+        BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
+
+        if (info->mBufferID == bufferID) {
+            if (index != NULL) {
+                *index = i;
+            }
+            return info;
+        }
+    }
+
+    TRESPASS();
+
+    return NULL;
+}
+
+void MediaFilter::postFillThisBuffer(BufferInfo *info) {
+    ALOGV("postFillThisBuffer on buffer %d", info->mBufferID);
+    if (mPortEOS[kPortIndexInput]) {
+        return;
+    }
+
+    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
+
+    info->mGeneration = mGeneration;
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatFillThisBuffer);
+    notify->setInt32("buffer-id", info->mBufferID);
+
+    info->mData->meta()->clear();
+    notify->setBuffer("buffer", info->mData);
+
+    sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, this);
+    reply->setInt32("buffer-id", info->mBufferID);
+
+    notify->setMessage("reply", reply);
+
+    info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
+    notify->post();
+}
+
+void MediaFilter::postDrainThisBuffer(BufferInfo *info) {
+    CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
+
+    info->mGeneration = mGeneration;
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatDrainThisBuffer);
+    notify->setInt32("buffer-id", info->mBufferID);
+    notify->setInt32("flags", info->mOutputFlags);
+    notify->setBuffer("buffer", info->mData);
+
+    sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, this);
+    reply->setInt32("buffer-id", info->mBufferID);
+
+    notify->setMessage("reply", reply);
+
+    notify->post();
+
+    info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
+}
+
+void MediaFilter::postEOS() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatEOS);
+    notify->setInt32("err", ERROR_END_OF_STREAM);
+    notify->post();
+
+    ALOGV("Sent kWhatEOS.");
+}
+
+void MediaFilter::sendFormatChange() {
+    sp<AMessage> notify = mNotify->dup();
+
+    notify->setInt32("what", kWhatOutputFormatChanged);
+
+    AString mime;
+    CHECK(mOutputFormat->findString("mime", &mime));
+    notify->setString("mime", mime.c_str());
+
+    notify->setInt32("stride", mStride);
+    notify->setInt32("slice-height", mSliceHeight);
+    notify->setInt32("color-format", mColorFormatOut);
+    notify->setRect("crop", 0, 0, mStride - 1, mSliceHeight - 1);
+    notify->setInt32("width", mWidth);
+    notify->setInt32("height", mHeight);
+
+    notify->post();
+}
+
+void MediaFilter::requestFillEmptyInput() {
+    if (mPortEOS[kPortIndexInput]) {
+        return;
+    }
+
+    for (size_t i = 0; i < mBuffers[kPortIndexInput].size(); ++i) {
+        BufferInfo *info = &mBuffers[kPortIndexInput].editItemAt(i);
+
+        if (info->mStatus == BufferInfo::OWNED_BY_US) {
+            postFillThisBuffer(info);
+        }
+    }
+}
+
+void MediaFilter::processBuffers() {
+    if (mAvailableInputBuffers.empty() || mAvailableOutputBuffers.empty()) {
+        ALOGV("Skipping process (buffers unavailable)");
+        return;
+    }
+
+    if (mPortEOS[kPortIndexOutput]) {
+        // TODO notify caller of queueInput error when it is supported
+        // in MediaCodec
+        ALOGW("Tried to process a buffer after EOS.");
+        return;
+    }
+
+    BufferInfo *inputInfo = mAvailableInputBuffers[0];
+    mAvailableInputBuffers.removeAt(0);
+    BufferInfo *outputInfo = mAvailableOutputBuffers[0];
+    mAvailableOutputBuffers.removeAt(0);
+
+    status_t err;
+    err = mFilter->processBuffers(inputInfo->mData, outputInfo->mData);
+    if (err != (status_t)OK) {
+        outputInfo->mData->meta()->setInt32("err", err);
+    }
+
+    int64_t timeUs;
+    CHECK(inputInfo->mData->meta()->findInt64("timeUs", &timeUs));
+    outputInfo->mData->meta()->setInt64("timeUs", timeUs);
+    outputInfo->mOutputFlags = 0;
+    int32_t eos = 0;
+    if (inputInfo->mData->meta()->findInt32("eos", &eos) && eos != 0) {
+        outputInfo->mOutputFlags |= OMX_BUFFERFLAG_EOS;
+        mPortEOS[kPortIndexOutput] = true;
+        outputInfo->mData->meta()->setInt32("eos", eos);
+        postEOS();
+        ALOGV("Output stream saw EOS.");
+    }
+
+    ALOGV("Processed input buffer %u [%zu], output buffer %u [%zu]",
+                inputInfo->mBufferID, inputInfo->mData->size(),
+                outputInfo->mBufferID, outputInfo->mData->size());
+
+    if (mGraphicBufferListener != NULL) {
+        delete inputInfo;
+    } else {
+        postFillThisBuffer(inputInfo);
+    }
+    postDrainThisBuffer(outputInfo);
+
+    // prevent any corner case where buffers could get stuck in queue
+    signalProcessBuffers();
+}
+
+void MediaFilter::onAllocateComponent(const sp<AMessage> &msg) {
+    CHECK_EQ(mState, UNINITIALIZED);
+
+    CHECK(msg->findString("componentName", &mComponentName));
+    const char* name = mComponentName.c_str();
+    if (!strcasecmp(name, "android.filter.zerofilter")) {
+        mFilter = new ZeroFilter;
+    } else if (!strcasecmp(name, "android.filter.saturation")) {
+        mFilter = new SaturationFilter;
+    } else if (!strcasecmp(name, "android.filter.intrinsicblur")) {
+        mFilter = new IntrinsicBlurFilter;
+    } else if (!strcasecmp(name, "android.filter.RenderScript")) {
+        mFilter = new RSFilter;
+    } else {
+        ALOGE("Unrecognized filter name: %s", name);
+        signalError(NAME_NOT_FOUND);
+        return;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatComponentAllocated);
+    // HACK - need "OMX.google" to use MediaCodec's software renderer
+    notify->setString("componentName", "OMX.google.MediaFilter");
+    notify->post();
+    mState = INITIALIZED;
+    ALOGV("Handled kWhatAllocateComponent.");
+}
+
+void MediaFilter::onConfigureComponent(const sp<AMessage> &msg) {
+    // TODO: generalize to allow audio filters as well as video
+
+    CHECK_EQ(mState, INITIALIZED);
+
+    // get params - at least mime, width & height
+    AString mime;
+    CHECK(msg->findString("mime", &mime));
+    if (strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_RAW)) {
+        ALOGE("Bad mime: %s", mime.c_str());
+        signalError(BAD_VALUE);
+        return;
+    }
+
+    CHECK(msg->findInt32("width", &mWidth));
+    CHECK(msg->findInt32("height", &mHeight));
+    if (!msg->findInt32("stride", &mStride)) {
+        mStride = mWidth;
+    }
+    if (!msg->findInt32("slice-height", &mSliceHeight)) {
+        mSliceHeight = mHeight;
+    }
+
+    mMaxInputSize = mWidth * mHeight * 4;   // room for ARGB8888
+    int32_t maxInputSize;
+    if (msg->findInt32("max-input-size", &maxInputSize)
+            && (size_t)maxInputSize > mMaxInputSize) {
+        mMaxInputSize = maxInputSize;
+    }
+
+    if (!msg->findInt32("color-format", &mColorFormatIn)) {
+        // default to OMX_COLOR_Format32bitARGB8888
+        mColorFormatIn = OMX_COLOR_Format32bitARGB8888;
+        msg->setInt32("color-format", mColorFormatIn);
+    }
+    mColorFormatOut = mColorFormatIn;
+
+    mMaxOutputSize = mWidth * mHeight * 4;  // room for ARGB8888
+
+    AString cacheDir;
+    if (!msg->findString("cacheDir", &cacheDir)) {
+        ALOGE("Failed to find cache directory in config message.");
+        signalError(NAME_NOT_FOUND);
+        return;
+    }
+
+    status_t err;
+    err = mFilter->configure(msg);
+    if (err != (status_t)OK) {
+        ALOGE("Failed to configure filter component, err %d", err);
+        signalError(err);
+        return;
+    }
+
+    mInputFormat = new AMessage();
+    mInputFormat->setString("mime", mime.c_str());
+    mInputFormat->setInt32("stride", mStride);
+    mInputFormat->setInt32("slice-height", mSliceHeight);
+    mInputFormat->setInt32("color-format", mColorFormatIn);
+    mInputFormat->setRect("crop", 0, 0, mStride, mSliceHeight);
+    mInputFormat->setInt32("width", mWidth);
+    mInputFormat->setInt32("height", mHeight);
+
+    mOutputFormat = new AMessage();
+    mOutputFormat->setString("mime", mime.c_str());
+    mOutputFormat->setInt32("stride", mStride);
+    mOutputFormat->setInt32("slice-height", mSliceHeight);
+    mOutputFormat->setInt32("color-format", mColorFormatOut);
+    mOutputFormat->setRect("crop", 0, 0, mStride, mSliceHeight);
+    mOutputFormat->setInt32("width", mWidth);
+    mOutputFormat->setInt32("height", mHeight);
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatComponentConfigured);
+    notify->setString("componentName", "MediaFilter");
+    notify->setMessage("input-format", mInputFormat);
+    notify->setMessage("output-format", mOutputFormat);
+    notify->post();
+    mState = CONFIGURED;
+    ALOGV("Handled kWhatConfigureComponent.");
+
+    sendFormatChange();
+}
+
+void MediaFilter::onStart() {
+    CHECK_EQ(mState, CONFIGURED);
+
+    allocateBuffersOnPort(kPortIndexInput);
+
+    allocateBuffersOnPort(kPortIndexOutput);
+
+    status_t err = mFilter->start();
+    if (err != (status_t)OK) {
+        ALOGE("Failed to start filter component, err %d", err);
+        signalError(err);
+        return;
+    }
+
+    mPortEOS[kPortIndexInput] = false;
+    mPortEOS[kPortIndexOutput] = false;
+    mInputEOSResult = OK;
+    mState = STARTED;
+
+    requestFillEmptyInput();
+    ALOGV("Handled kWhatStart.");
+}
+
+void MediaFilter::onInputBufferFilled(const sp<AMessage> &msg) {
+    IOMX::buffer_id bufferID;
+    CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+    BufferInfo *info = findBufferByID(kPortIndexInput, bufferID);
+
+    if (mState != STARTED) {
+        // we're not running, so we'll just keep that buffer...
+        info->mStatus = BufferInfo::OWNED_BY_US;
+        return;
+    }
+
+    if (info->mGeneration != mGeneration) {
+        ALOGV("Caught a stale input buffer [ID %d]", bufferID);
+        // buffer is stale (taken before a flush/shutdown) - repost it
+        CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
+        postFillThisBuffer(info);
+        return;
+    }
+
+    CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
+    info->mStatus = BufferInfo::OWNED_BY_US;
+
+    sp<ABuffer> buffer;
+    int32_t err = OK;
+    bool eos = false;
+
+    if (!msg->findBuffer("buffer", &buffer)) {
+        // these are unfilled buffers returned by client
+        CHECK(msg->findInt32("err", &err));
+
+        if (err == OK) {
+            // buffers with no errors are returned on MediaCodec.flush
+            ALOGV("saw unfilled buffer (MediaCodec.flush)");
+            postFillThisBuffer(info);
+            return;
+        } else {
+            ALOGV("saw error %d instead of an input buffer", err);
+            eos = true;
+        }
+
+        buffer.clear();
+    }
+
+    int32_t isCSD;
+    if (buffer != NULL && buffer->meta()->findInt32("csd", &isCSD)
+            && isCSD != 0) {
+        // ignore codec-specific data buffers
+        ALOGW("MediaFilter received a codec-specific data buffer");
+        postFillThisBuffer(info);
+        return;
+    }
+
+    int32_t tmp;
+    if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
+        eos = true;
+        err = ERROR_END_OF_STREAM;
+    }
+
+    mAvailableInputBuffers.push_back(info);
+    processBuffers();
+
+    if (eos) {
+        mPortEOS[kPortIndexInput] = true;
+        mInputEOSResult = err;
+    }
+
+    ALOGV("Handled kWhatInputBufferFilled. [ID %u]", bufferID);
+}
+
+void MediaFilter::onOutputBufferDrained(const sp<AMessage> &msg) {
+    IOMX::buffer_id bufferID;
+    CHECK(msg->findInt32("buffer-id", (int32_t*)&bufferID));
+    BufferInfo *info = findBufferByID(kPortIndexOutput, bufferID);
+
+    if (mState != STARTED) {
+        // we're not running, so we'll just keep that buffer...
+        info->mStatus = BufferInfo::OWNED_BY_US;
+        return;
+    }
+
+    if (info->mGeneration != mGeneration) {
+        ALOGV("Caught a stale output buffer [ID %d]", bufferID);
+        // buffer is stale (taken before a flush/shutdown) - keep it
+        CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
+        return;
+    }
+
+    CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
+    info->mStatus = BufferInfo::OWNED_BY_US;
+
+    mAvailableOutputBuffers.push_back(info);
+
+    processBuffers();
+
+    ALOGV("Handled kWhatOutputBufferDrained. [ID %u]",
+            bufferID);
+}
+
+void MediaFilter::onShutdown(const sp<AMessage> &msg) {
+    mGeneration++;
+
+    if (mState != UNINITIALIZED) {
+        mFilter->reset();
+    }
+
+    int32_t keepComponentAllocated;
+    CHECK(msg->findInt32("keepComponentAllocated", &keepComponentAllocated));
+    if (!keepComponentAllocated || mState == UNINITIALIZED) {
+        mState = UNINITIALIZED;
+    } else {
+        mState = INITIALIZED;
+    }
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatShutdownCompleted);
+    notify->post();
+}
+
+void MediaFilter::onFlush() {
+    mGeneration++;
+
+    mAvailableInputBuffers.clear();
+    for (size_t i = 0; i < mBuffers[kPortIndexInput].size(); ++i) {
+        BufferInfo *info = &mBuffers[kPortIndexInput].editItemAt(i);
+        info->mStatus = BufferInfo::OWNED_BY_US;
+    }
+    mAvailableOutputBuffers.clear();
+    for (size_t i = 0; i < mBuffers[kPortIndexOutput].size(); ++i) {
+        BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
+        info->mStatus = BufferInfo::OWNED_BY_US;
+        mAvailableOutputBuffers.push_back(info);
+    }
+
+    mPortEOS[kPortIndexInput] = false;
+    mPortEOS[kPortIndexOutput] = false;
+    mInputEOSResult = OK;
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatFlushCompleted);
+    notify->post();
+    ALOGV("Posted kWhatFlushCompleted");
+
+    // MediaCodec returns all input buffers after flush, so in
+    // onInputBufferFilled we call postFillThisBuffer on them
+}
+
+void MediaFilter::onSetParameters(const sp<AMessage> &msg) {
+    CHECK(mState != STARTED);
+
+    status_t err = mFilter->setParameters(msg);
+    if (err != (status_t)OK) {
+        ALOGE("setParameters returned err %d", err);
+    }
+}
+
+void MediaFilter::onCreateInputSurface() {
+    CHECK(mState == CONFIGURED);
+
+    mGraphicBufferListener = new GraphicBufferListener;
+
+    sp<AMessage> notify = new AMessage();
+    notify->setTarget(this);
+    status_t err = mGraphicBufferListener->init(
+            notify, mStride, mSliceHeight, kBufferCountActual);
+
+    if (err != OK) {
+        ALOGE("Failed to init mGraphicBufferListener: %d", err);
+        signalError(err);
+        return;
+    }
+
+    sp<AMessage> reply = mNotify->dup();
+    reply->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
+    reply->setObject(
+            "input-surface",
+            new BufferProducerWrapper(
+                    mGraphicBufferListener->getIGraphicBufferProducer()));
+    reply->post();
+}
+
+void MediaFilter::onInputFrameAvailable() {
+    BufferItem item = mGraphicBufferListener->getBufferItem();
+    sp<GraphicBuffer> buf = mGraphicBufferListener->getBuffer(item);
+
+    // get pointer to graphic buffer
+    void* bufPtr;
+    buf->lock(GraphicBuffer::USAGE_SW_READ_OFTEN, &bufPtr);
+
+    // HACK - there is no OMX_COLOR_FORMATTYPE value for RGBA, so the format
+    // conversion is hardcoded here until such a value is added.
+    // TODO: check the input format and convert only if necessary
+    // copy the RGBA graphic buffer into a temporary ARGB input buffer
+    BufferInfo *inputInfo = new BufferInfo;
+    inputInfo->mData = new ABuffer(buf->getWidth() * buf->getHeight() * 4);
+    ALOGV("Copying surface data into temp buffer.");
+    convertRGBAToARGB(
+            (uint8_t*)bufPtr, buf->getWidth(), buf->getHeight(),
+            buf->getStride(), inputInfo->mData->data());
+    inputInfo->mBufferID = item.mBuf;
+    inputInfo->mGeneration = mGeneration;
+    inputInfo->mOutputFlags = 0;
+    inputInfo->mStatus = BufferInfo::OWNED_BY_US;
+    inputInfo->mData->meta()->setInt64("timeUs", item.mTimestamp / 1000);
+
+    mAvailableInputBuffers.push_back(inputInfo);
+
+    mGraphicBufferListener->releaseBuffer(item);
+
+    signalProcessBuffers();
+}
+
+void MediaFilter::onSignalEndOfInputStream() {
+    // if using input surface, need to send an EOS output buffer
+    if (mGraphicBufferListener != NULL) {
+        Vector<BufferInfo> *outputBufs = &mBuffers[kPortIndexOutput];
+        BufferInfo* eosBuf;
+        bool foundBuf = false;
+        for (size_t i = 0; i < kBufferCountActual; i++) {
+            eosBuf = &outputBufs->editItemAt(i);
+            if (eosBuf->mStatus == BufferInfo::OWNED_BY_US) {
+                foundBuf = true;
+                break;
+            }
+        }
+
+        if (!foundBuf) {
+            ALOGE("onSignalEndOfInputStream failed to find an output buffer");
+            return;
+        }
+
+        eosBuf->mOutputFlags = OMX_BUFFERFLAG_EOS;
+        eosBuf->mGeneration = mGeneration;
+        eosBuf->mData->setRange(0, 0);
+        postDrainThisBuffer(eosBuf);
+        ALOGV("Posted EOS on output buffer %u", eosBuf->mBufferID);
+    }
+
+    mPortEOS[kPortIndexOutput] = true;
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", CodecBase::kWhatSignaledInputEOS);
+    notify->post();
+
+    ALOGV("Output stream saw EOS.");
+}
+
+}   // namespace android
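As a reading aid, here is the other half of the fill/drain handshake driven by postFillThisBuffer()/postDrainThisBuffer() above. The sketch assumes the client follows the same convention MediaCodec uses with ACodec: it attaches the filled buffer to the "reply" message so onInputBufferFilled()/onOutputBufferDrained() can match it against "buffer-id". Names here are illustrative, not part of the patch.

    void onFilterNotify(const sp<AMessage> &notify) {
        int32_t what;
        CHECK(notify->findInt32("what", &what));

        if (what == CodecBase::kWhatFillThisBuffer) {
            sp<ABuffer> buffer;
            sp<AMessage> reply;
            CHECK(notify->findBuffer("buffer", &buffer));
            CHECK(notify->findMessage("reply", &reply));

            int64_t frameTimeUs = 0;                // hypothetical timestamp
            // ...fill 'buffer' with one raw video frame here...
            buffer->meta()->setInt64("timeUs", frameTimeUs);
            reply->setBuffer("buffer", buffer);
            reply->post();                          // delivered as kWhatInputBufferFilled
        } else if (what == CodecBase::kWhatDrainThisBuffer) {
            sp<ABuffer> buffer;
            sp<AMessage> reply;
            CHECK(notify->findBuffer("buffer", &buffer));
            CHECK(notify->findMessage("reply", &reply));

            // ...render or consume the filtered frame in 'buffer' here...
            reply->post();                          // delivered as kWhatOutputBufferDrained
        }
    }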
diff --git a/media/libstagefright/filters/RSFilter.cpp b/media/libstagefright/filters/RSFilter.cpp
new file mode 100644
index 0000000..b569945
--- /dev/null
+++ b/media/libstagefright/filters/RSFilter.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "RSFilter"
+
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "RSFilter.h"
+
+namespace android {
+
+RSFilter::RSFilter() {
+}
+
+RSFilter::~RSFilter() {
+}
+
+status_t RSFilter::configure(const sp<AMessage> &msg) {
+    status_t err = SimpleFilter::configure(msg);
+    if (err != OK) {
+        return err;
+    }
+
+    if (!msg->findString("cacheDir", &mCacheDir)) {
+        ALOGE("Failed to find cache directory in config message.");
+        return NAME_NOT_FOUND;
+    }
+
+    sp<RenderScriptWrapper> wrapper;
+    if (!msg->findObject("rs-wrapper", (sp<RefBase>*)&wrapper)) {
+        ALOGE("Failed to find RenderScriptWrapper in config message.");
+        return NAME_NOT_FOUND;
+    }
+
+    mRS = wrapper->mContext;
+    mCallback = wrapper->mCallback;
+
+    return OK;
+}
+
+status_t RSFilter::start() {
+    // 32-bit elements for ARGB8888
+    RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
+
+    RSC::Type::Builder tb(mRS, e);
+    tb.setX(mWidth);
+    tb.setY(mHeight);
+    RSC::sp<const RSC::Type> t = tb.create();
+
+    mAllocIn = RSC::Allocation::createTyped(mRS, t);
+    mAllocOut = RSC::Allocation::createTyped(mRS, t);
+
+    return OK;
+}
+
+void RSFilter::reset() {
+    mCallback.clear();
+    mAllocOut.clear();
+    mAllocIn.clear();
+    mRS.clear();
+}
+
+status_t RSFilter::setParameters(const sp<AMessage> &msg) {
+    return mCallback->handleSetParameters(msg);
+}
+
+status_t RSFilter::processBuffers(
+        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+    mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
+    mCallback->processBuffers(mAllocIn.get(), mAllocOut.get());
+    mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
+
+    return OK;
+}
+
+}   // namespace android
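The RenderScriptWrapper::RSFilterCallback interface is declared elsewhere; judging only from the two calls above, it is assumed to expose processBuffers(RSC::Allocation*, RSC::Allocation*) and handleSetParameters(const sp<AMessage>&). A hypothetical application-supplied callback running an intrinsic blur on the shared context could look like this sketch:

    // Illustrative only; the exact RSFilterCallback signatures are an assumption.
    struct BlurCallback : public RenderScriptWrapper::RSFilterCallback {
        RSC::sp<RSC::ScriptIntrinsicBlur> mBlur;    // created against wrapper->mContext

        virtual status_t processBuffers(RSC::Allocation *in, RSC::Allocation *out) {
            mBlur->setInput(in);                    // RSC::sp wraps the raw pointers
            mBlur->forEach(out);
            return OK;
        }

        virtual status_t handleSetParameters(const sp<AMessage> &msg) {
            sp<AMessage> params;
            float radius;
            if (msg->findMessage("params", &params)
                    && params->findFloat("blur-radius", &radius)) {
                mBlur->setRadius(radius);
            }
            return OK;
        }
    };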
diff --git a/media/libstagefright/filters/RSFilter.h b/media/libstagefright/filters/RSFilter.h
new file mode 100644
index 0000000..c5b5074
--- /dev/null
+++ b/media/libstagefright/filters/RSFilter.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef RS_FILTER_H_
+#define RS_FILTER_H_
+
+#include <media/stagefright/RenderScriptWrapper.h>
+#include <RenderScript.h>
+
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct AString;
+
+struct RSFilter : public SimpleFilter {
+public:
+    RSFilter();
+
+    virtual status_t configure(const sp<AMessage> &msg);
+    virtual status_t start();
+    virtual void reset();
+    virtual status_t setParameters(const sp<AMessage> &msg);
+    virtual status_t processBuffers(
+            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+    virtual ~RSFilter();
+
+private:
+    AString mCacheDir;
+    sp<RenderScriptWrapper::RSFilterCallback> mCallback;
+    RSC::sp<RSC::RS> mRS;
+    RSC::sp<RSC::Allocation> mAllocIn;
+    RSC::sp<RSC::Allocation> mAllocOut;
+};
+
+}   // namespace android
+
+#endif  // RS_FILTER_H_
diff --git a/media/libstagefright/filters/SaturationFilter.cpp b/media/libstagefright/filters/SaturationFilter.cpp
new file mode 100644
index 0000000..ba5f75a
--- /dev/null
+++ b/media/libstagefright/filters/SaturationFilter.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "SaturationFilter"
+
+#include <utils/Log.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "SaturationFilter.h"
+
+namespace android {
+
+status_t SaturationFilter::configure(const sp<AMessage> &msg) {
+    status_t err = SimpleFilter::configure(msg);
+    if (err != OK) {
+        return err;
+    }
+
+    if (!msg->findString("cacheDir", &mCacheDir)) {
+        ALOGE("Failed to find cache directory in config message.");
+        return NAME_NOT_FOUND;
+    }
+
+    return OK;
+}
+
+status_t SaturationFilter::start() {
+    // TODO: use a single RS context object for the entire application
+    mRS = new RSC::RS();
+
+    if (!mRS->init(mCacheDir.c_str())) {
+        ALOGE("Failed to initialize RenderScript context.");
+        return NO_INIT;
+    }
+
+    // 32-bit elements for ARGB8888
+    RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
+
+    RSC::Type::Builder tb(mRS, e);
+    tb.setX(mWidth);
+    tb.setY(mHeight);
+    RSC::sp<const RSC::Type> t = tb.create();
+
+    mAllocIn = RSC::Allocation::createTyped(mRS, t);
+    mAllocOut = RSC::Allocation::createTyped(mRS, t);
+
+    mScript = new ScriptC_saturationARGB(mRS);
+
+    mScript->set_gSaturation(mSaturation);
+
+    return OK;
+}
+
+void SaturationFilter::reset() {
+    mScript.clear();
+    mAllocOut.clear();
+    mAllocIn.clear();
+    mRS.clear();
+}
+
+status_t SaturationFilter::setParameters(const sp<AMessage> &msg) {
+    sp<AMessage> params;
+    CHECK(msg->findMessage("params", &params));
+
+    float saturation;
+    if (params->findFloat("saturation", &saturation)) {
+        mSaturation = saturation;
+    }
+
+    return OK;
+}
+
+status_t SaturationFilter::processBuffers(
+        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+    mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
+    mScript->forEach_root(mAllocIn, mAllocOut);
+    mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
+
+    return OK;
+}
+
+}   // namespace android
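On the TODO in start() about a single RS context: each filter instance currently creates and tears down its own RSC::RS, so several concurrent filters each pay the init(cacheDir) cost. One possible direction (a sketch under the assumption that all callers run on MediaFilter's single looper thread, so no extra locking is shown) is a lazily created shared context:

    static RSC::sp<RSC::RS> getSharedRSContext(const char *cacheDir) {
        static RSC::sp<RSC::RS> sContext;           // process-wide, created on first use
        if (sContext.get() == NULL) {
            RSC::sp<RSC::RS> rs = new RSC::RS();
            if (!rs->init(cacheDir)) {
                return NULL;                        // caller keeps returning NO_INIT
            }
            sContext = rs;
        }
        return sContext;
    }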
diff --git a/media/libstagefright/filters/SaturationFilter.h b/media/libstagefright/filters/SaturationFilter.h
new file mode 100644
index 0000000..0545021
--- /dev/null
+++ b/media/libstagefright/filters/SaturationFilter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SATURATION_FILTER_H_
+#define SATURATION_FILTER_H_
+
+#include <RenderScript.h>
+
+#include "ScriptC_saturationARGB.h"
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct SaturationFilter : public SimpleFilter {
+public:
+    SaturationFilter() : mSaturation(1.f) {};
+
+    virtual status_t configure(const sp<AMessage> &msg);
+    virtual status_t start();
+    virtual void reset();
+    virtual status_t setParameters(const sp<AMessage> &msg);
+    virtual status_t processBuffers(
+            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+    virtual ~SaturationFilter() {};
+
+private:
+    AString mCacheDir;
+    RSC::sp<RSC::RS> mRS;
+    RSC::sp<RSC::Allocation> mAllocIn;
+    RSC::sp<RSC::Allocation> mAllocOut;
+    RSC::sp<ScriptC_saturationARGB> mScript;
+    float mSaturation;
+};
+
+}   // namespace android
+
+#endif  // SATURATION_FILTER_H_
diff --git a/media/libstagefright/filters/SimpleFilter.cpp b/media/libstagefright/filters/SimpleFilter.cpp
new file mode 100644
index 0000000..6c1ca2c
--- /dev/null
+++ b/media/libstagefright/filters/SimpleFilter.cpp
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "SimpleFilter.h"
+
+namespace android {
+
+status_t SimpleFilter::configure(const sp<AMessage> &msg) {
+    CHECK(msg->findInt32("width", &mWidth));
+    CHECK(msg->findInt32("height", &mHeight));
+    if (!msg->findInt32("stride", &mStride)) {
+        mStride = mWidth;
+    }
+    if (!msg->findInt32("slice-height", &mSliceHeight)) {
+        mSliceHeight = mHeight;
+    }
+    CHECK(msg->findInt32("color-format", &mColorFormatIn));
+    mColorFormatOut = mColorFormatIn;
+
+    return OK;
+}
+
+}   // namespace android
diff --git a/media/libstagefright/filters/SimpleFilter.h b/media/libstagefright/filters/SimpleFilter.h
new file mode 100644
index 0000000..4cd37ef
--- /dev/null
+++ b/media/libstagefright/filters/SimpleFilter.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef SIMPLE_FILTER_H_
+#define SIMPLE_FILTER_H_
+
+#include <stdint.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+struct ABuffer;
+struct AMessage;
+
+namespace android {
+
+struct SimpleFilter : public RefBase {
+public:
+    SimpleFilter() : mWidth(0), mHeight(0), mStride(0), mSliceHeight(0),
+            mColorFormatIn(0), mColorFormatOut(0) {};
+
+    virtual status_t configure(const sp<AMessage> &msg);
+
+    virtual status_t start() = 0;
+    virtual void reset() = 0;
+    virtual status_t setParameters(const sp<AMessage> &msg) = 0;
+    virtual status_t processBuffers(
+            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) = 0;
+
+protected:
+    int32_t mWidth, mHeight;
+    int32_t mStride, mSliceHeight;
+    int32_t mColorFormatIn, mColorFormatOut;
+
+    virtual ~SimpleFilter() {};
+};
+
+}   // namespace android
+
+#endif  // SIMPLE_FILTER_H_
diff --git a/media/libstagefright/filters/ZeroFilter.cpp b/media/libstagefright/filters/ZeroFilter.cpp
new file mode 100644
index 0000000..3f1243c
--- /dev/null
+++ b/media/libstagefright/filters/ZeroFilter.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ZeroFilter"
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+
+#include "ZeroFilter.h"
+
+namespace android {
+
+status_t ZeroFilter::setParameters(const sp<AMessage> &msg) {
+    sp<AMessage> params;
+    CHECK(msg->findMessage("params", &params));
+
+    int32_t invert;
+    if (params->findInt32("invert", &invert)) {
+        mInvertData = (invert != 0);
+    }
+
+    return OK;
+}
+
+status_t ZeroFilter::processBuffers(
+        const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer) {
+    // assuming input & output buffers are identically sized, since this is a copy filter
+    if (mInvertData) {
+        uint32_t* src = (uint32_t*)srcBuffer->data();
+        uint32_t* dest = (uint32_t*)outBuffer->data();
+        for (size_t i = 0; i < srcBuffer->size() / 4; ++i) {
+            *(dest++) = *(src++) ^ 0xFFFFFFFF;
+        }
+    } else {
+        memcpy(outBuffer->data(), srcBuffer->data(), srcBuffer->size());
+    }
+    outBuffer->setRange(0, srcBuffer->size());
+
+    return OK;
+}
+
+}   // namespace android
diff --git a/media/libstagefright/filters/ZeroFilter.h b/media/libstagefright/filters/ZeroFilter.h
new file mode 100644
index 0000000..bd34dfb
--- /dev/null
+++ b/media/libstagefright/filters/ZeroFilter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ZERO_FILTER_H_
+#define ZERO_FILTER_H_
+
+#include "SimpleFilter.h"
+
+namespace android {
+
+struct ZeroFilter : public SimpleFilter {
+public:
+    ZeroFilter() : mInvertData(false) {};
+
+    virtual status_t start() { return OK; };
+    virtual void reset() {};
+    virtual status_t setParameters(const sp<AMessage> &msg);
+    virtual status_t processBuffers(
+            const sp<ABuffer> &srcBuffer, const sp<ABuffer> &outBuffer);
+
+protected:
+    virtual ~ZeroFilter() {};
+
+private:
+    bool mInvertData;
+};
+
+}   // namespace android
+
+#endif  // ZERO_FILTER_H_
diff --git a/media/libstagefright/filters/saturation.rs b/media/libstagefright/filters/saturation.rs
new file mode 100644
index 0000000..2c867ac
--- /dev/null
+++ b/media/libstagefright/filters/saturation.rs
@@ -0,0 +1,40 @@
+// Sample script for RGB888 support (compare to saturationARGB.rs)
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+
+// global variables (parameters accessible to application code)
+float gSaturation = 1.0f;
+
+void root(const uchar3 *v_in, uchar3 *v_out) {
+    // scale 0-255 uchar to 0-1.0 float
+    float3 in = {v_in->r * 0.003921569f, v_in->g * 0.003921569f,
+            v_in->b * 0.003921569f};
+
+    // apply saturation filter
+    float3 result = dot(in, gMonoMult);
+    result = mix(result, in, gSaturation);
+
+    // convert to uchar, copied from rsPackColorTo8888
+    v_out->x = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+    v_out->y = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+    v_out->z = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
diff --git a/media/libstagefright/filters/saturationARGB.rs b/media/libstagefright/filters/saturationARGB.rs
new file mode 100644
index 0000000..1de9dd8
--- /dev/null
+++ b/media/libstagefright/filters/saturationARGB.rs
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma version(1)
+#pragma rs java_package_name(com.android.rs.cppbasic)
+#pragma rs_fp_relaxed
+
+const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
+
+// global variables (parameters accessible to application code)
+float gSaturation = 1.0f;
+
+void root(const uchar4 *v_in, uchar4 *v_out) {
+    v_out->x = v_in->x; // don't modify A
+
+    // get RGB, scale 0-255 uchar to 0-1.0 float
+    float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
+            v_in->w * 0.003921569f};
+
+    // apply saturation filter
+    float3 result = dot(rgb, gMonoMult);
+    result = mix(result, rgb, gSaturation);
+
+    v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
+    v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
+    v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
+}
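For readers unfamiliar with RenderScript, the kernel above is equivalent to the following per-pixel C++ (a reference translation only, not part of the patch; the pixel uses the same A, R, G, B byte order the script assumes, and the RS runtime applies root() across the whole allocation):

    #include <algorithm>
    #include <stdint.h>

    static void saturateARGB(const uint8_t in[4], uint8_t out[4], float saturation) {
        out[0] = in[0];                                     // alpha passes through
        const float r = in[1] / 255.0f;
        const float g = in[2] / 255.0f;
        const float b = in[3] / 255.0f;
        // grayscale value using the same weights as gMonoMult
        const float mono = 0.299f * r + 0.587f * g + 0.114f * b;
        // mix(mono, color, s) == mono + (color - mono) * s
        const float rr = mono + (r - mono) * saturation;
        const float gg = mono + (g - mono) * saturation;
        const float bb = mono + (b - mono) * saturation;
        out[1] = (uint8_t)std::min(std::max(rr * 255.0f + 0.5f, 0.0f), 255.0f);
        out[2] = (uint8_t)std::min(std::max(gg * 255.0f + 0.5f, 0.0f), 255.0f);
        out[3] = (uint8_t)std::min(std::max(bb * 255.0f + 0.5f, 0.0f), 255.0f);
    }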
diff --git a/media/libstagefright/foundation/ABuffer.cpp b/media/libstagefright/foundation/ABuffer.cpp
index 4913fd4..a5b81a8 100644
--- a/media/libstagefright/foundation/ABuffer.cpp
+++ b/media/libstagefright/foundation/ABuffer.cpp
@@ -52,6 +52,9 @@
 sp<ABuffer> ABuffer::CreateAsCopy(const void *data, size_t capacity)
 {
     sp<ABuffer> res = new ABuffer(capacity);
+    if (res->base() == NULL) {
+        return NULL;
+    }
     memcpy(res->data(), data, capacity);
     return res;
 }
diff --git a/media/libstagefright/foundation/ADebug.cpp b/media/libstagefright/foundation/ADebug.cpp
index ec4a960..2c5f544 100644
--- a/media/libstagefright/foundation/ADebug.cpp
+++ b/media/libstagefright/foundation/ADebug.cpp
@@ -19,6 +19,7 @@
 #include <ctype.h>
 
 #define LOG_TAG "ADebug"
+#include <cutils/atomic.h>
 #include <utils/Log.h>
 #include <utils/misc.h>
 
@@ -28,14 +29,15 @@
 #include <AStringUtils.h>
 #include <AUtils.h>
 
+#define UNUSED(x) ((void)(x))
+
 namespace android {
 
 //static
-ADebug::Level ADebug::GetDebugLevelFromString(
-        const char *name, const char *value, ADebug::Level def) {
+long ADebug::GetLevelFromSettingsString(
+        const char *name, const char *value, long def) {
     // split on ,
     const char *next = value, *current;
-    const unsigned long maxLevel = (unsigned long)kDebugMax;
     while (next != NULL) {
         current = next;
         next = strchr(current, ',');
@@ -51,8 +53,8 @@
 
         // get level
         char *end;
-        errno = 0;  // strtoul does not clear errno, but it can be set for any return value
-        unsigned long level = strtoul(current, &end, 10);
+        errno = 0;  // strtol does not clear errno, but it can be set for any return value
+        long level = strtol(current, &end, 10);
         while (isspace(*end)) {
             ++end;
         }
@@ -76,8 +78,18 @@
             }
         }
 
-        // update debug level
-        def = (Level)min(level, maxLevel);
+        // update value
+        def = level;
+    }
+    return def;
+}
+
+//static
+long ADebug::GetLevelFromProperty(
+        const char *name, const char *propertyName, long def) {
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get(propertyName, value, NULL)) {
+        def = GetLevelFromSettingsString(name, value, def);
     }
     return def;
 }
@@ -85,11 +97,8 @@
 //static
 ADebug::Level ADebug::GetDebugLevelFromProperty(
         const char *name, const char *propertyName, ADebug::Level def) {
-    char value[PROPERTY_VALUE_MAX];
-    if (property_get(propertyName, value, NULL)) {
-        return GetDebugLevelFromString(name, value, def);
-    }
-    return def;
+    long level = GetLevelFromProperty(name, propertyName, (long)def);
+    return (Level)min(max(level, (long)kDebugNone), (long)kDebugMax);
 }
 
 //static
@@ -113,5 +122,69 @@
     return debugName;
 }
 
+//static
+bool ADebug::getExperimentFlag(
+        bool allow, const char *name, uint64_t modulo,
+        uint64_t limit, uint64_t plus, uint64_t timeDivisor) {
+    // see if this experiment should be disabled/enabled based on properties.
+    // default to 2 to allow 0/1 specification
+    const int undefined = 2;
+    long level = GetLevelFromProperty(name, "debug.stagefright.experiments", undefined);
+    if (level != undefined) {
+        ALOGI("experiment '%s': %s from property", name, level ? "ENABLED" : "disabled");
+        return allow && (level != 0);
+    }
+
+#ifndef ENABLE_STAGEFRIGHT_AUTO_EXPERIMENTS
+    UNUSED(modulo);
+    UNUSED(limit);
+    UNUSED(plus);
+    UNUSED(timeDivisor);
+    return false;
+#else
+    // Disable automatic experiments in "non-experimental" builds - that is, in all
+    // builds, since no "experimental" build type exists.
+    // TODO: add a build type that enables automatic experiments for some builds
+    char value[PROPERTY_VALUE_MAX];
+    if (property_get("ro.build.type", value, NULL)) {
+        if (strcmp(value, "experimental")) {
+            return false;
+        }
+    }
+
+    static volatile int32_t haveSerial = 0;
+    static uint64_t serialNum;
+    if (!android_atomic_acquire_load(&haveSerial)) {
+        // calculate initial counter value based on serial number
+        static char serial[PROPERTY_VALUE_MAX];
+        property_get("ro.serialno", serial, "0");
+        uint64_t num = 0; // it is okay for this number to overflow
+        for (size_t i = 0; i < NELEM(serial) && serial[i] != '\0'; ++i) {
+            const char &c = serial[i];
+            // try to use most letters of serialno
+            if (isdigit(c)) {
+                num = num * 10 + (c - '0');
+            } else if (islower(c)) {
+                num = num * 26 + (c - 'a');
+            } else if (isupper(c)) {
+                num = num * 26 + (c - 'A');
+            } else {
+                num = num * 256 + c;
+            }
+        }
+        serialNum = num;
+        android_atomic_release_store(1, &haveSerial);
+    }
+    ALOGD("serial: %llu, time: %lld", (long long unsigned)serialNum, (long long)time(NULL));
+    // MINOR: take counter and time modulo `modulo` so that their sum does not
+    // roll over and mess up the correlation between related experiments,
+    // e.g. to keep "(a mod 2N) == 0 implies (a mod N) == 0".
+    time_t counter = (time(NULL) / timeDivisor) % modulo + plus + serialNum % modulo;
+    bool enable = allow && (counter % modulo < limit);
+    ALOGI("experiment '%s': %s", name, enable ? "ENABLED" : "disabled");
+    return enable;
+#endif
+}
+
 }  // namespace android
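getExperimentFlag() is intended to be evaluated once per experiment, typically into a static flag; a hedged usage sketch follows (the experiment name and the tuning numbers are hypothetical, not taken from this patch):

    // When automatic experiments are compiled in, enroll roughly half of all
    // devices and rotate the enrolled half weekly; otherwise the flag stays off
    // unless a "new-drain-logic" entry in debug.stagefright.experiments sets it.
    static const bool kUseNewDrainLogic = ADebug::getExperimentFlag(
            true /* allow */, "new-drain-logic" /* name */,
            2 /* modulo */, 1 /* limit: counter % 2 < 1, i.e. ~50% */,
            0 /* plus */, 7 * 24 * 3600 /* timeDivisor: one week */);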
 
diff --git a/media/libstagefright/foundation/AHandler.cpp b/media/libstagefright/foundation/AHandler.cpp
index bd5f7e9..7dbbe54 100644
--- a/media/libstagefright/foundation/AHandler.cpp
+++ b/media/libstagefright/foundation/AHandler.cpp
@@ -19,15 +19,23 @@
 #include <utils/Log.h>
 
 #include <media/stagefright/foundation/AHandler.h>
-
-#include <media/stagefright/foundation/ALooperRoster.h>
+#include <media/stagefright/foundation/AMessage.h>
 
 namespace android {
 
-sp<ALooper> AHandler::looper() {
-    extern ALooperRoster gLooperRoster;
+void AHandler::deliverMessage(const sp<AMessage> &msg) {
+    onMessageReceived(msg);
+    mMessageCounter++;
 
-    return gLooperRoster.findLooper(id());
+    if (mVerboseStats) {
+        uint32_t what = msg->what();
+        ssize_t idx = mMessages.indexOfKey(what);
+        if (idx < 0) {
+            mMessages.add(what, 1);
+        } else {
+            mMessages.editValueAt(idx)++;
+        }
+    }
 }
 
 }  // namespace android
diff --git a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
index 5f7c70d..b837f66 100644
--- a/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
+++ b/media/libstagefright/foundation/AHierarchicalStateMachine.cpp
@@ -109,7 +109,8 @@
         A.editItemAt(i)->stateExited();
     }
 
-    for (size_t i = B.size(); i-- > 0;) {
+    for (size_t i = B.size(); i > 0;) {
+        i--;
         B.editItemAt(i)->stateEntered();
     }
 }
diff --git a/media/libstagefright/foundation/ALooper.cpp b/media/libstagefright/foundation/ALooper.cpp
index 88b1c92..90b5f68 100644
--- a/media/libstagefright/foundation/ALooper.cpp
+++ b/media/libstagefright/foundation/ALooper.cpp
@@ -16,6 +16,9 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "ALooper"
+
+#include <media/stagefright/foundation/ADebug.h>
+
 #include <utils/Log.h>
 
 #include <sys/time.h>
@@ -210,7 +213,7 @@
         mEventQueue.erase(mEventQueue.begin());
     }
 
-    gLooperRoster.deliverMessage(event.mMessage);
+    event.mMessage->deliver();
 
     // NOTE: It's important to note that at this point our "ALooper" object
     // may no longer exist (its final reference may have gone away while
@@ -220,4 +223,29 @@
     return true;
 }
 
+// to be called by AMessage::postAndAwaitResponse only
+sp<AReplyToken> ALooper::createReplyToken() {
+    return new AReplyToken(this);
+}
+
+// to be called by AMessage::postAndAwaitResponse only
+status_t ALooper::awaitResponse(const sp<AReplyToken> &replyToken, sp<AMessage> *response) {
+    // return status in case we want to handle an interrupted wait
+    Mutex::Autolock autoLock(mRepliesLock);
+    CHECK(replyToken != NULL);
+    while (!replyToken->retrieveReply(response)) {
+        mRepliesCondition.wait(mRepliesLock);
+    }
+    return OK;
+}
+
+status_t ALooper::postReply(const sp<AReplyToken> &replyToken, const sp<AMessage> &reply) {
+    Mutex::Autolock autoLock(mRepliesLock);
+    status_t err = replyToken->setReply(reply);
+    if (err == OK) {
+        mRepliesCondition.broadcast();
+    }
+    return err;
+}
+
 }  // namespace android
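The per-reply state that ALooperRoster used to keep (see the removals just below) now lives in AReplyToken, but the calling convention is unchanged: a synchronous request still goes through AMessage::postAndAwaitResponse(), which is the only caller of createReplyToken()/awaitResponse() above. A sketch, with kWhatQuery and handler as hypothetical names:

    sp<AMessage> msg = new AMessage(kWhatQuery, handler);  // handler: sp<AHandler>
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    // Internally the message carries an AReplyToken from createReplyToken();
    // the target handler posts its reply through ALooper::postReply(token, reply),
    // and awaitResponse() wakes up and returns that reply in 'response'.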
diff --git a/media/libstagefright/foundation/ALooperRoster.cpp b/media/libstagefright/foundation/ALooperRoster.cpp
index 2d57aee..9ed53e7 100644
--- a/media/libstagefright/foundation/ALooperRoster.cpp
+++ b/media/libstagefright/foundation/ALooperRoster.cpp
@@ -30,8 +30,7 @@
 static bool verboseStats = false;
 
 ALooperRoster::ALooperRoster()
-    : mNextHandlerID(1),
-      mNextReplyID(1) {
+    : mNextHandlerID(1) {
 }
 
 ALooper::handler_id ALooperRoster::registerHandler(
@@ -49,7 +48,7 @@
     ALooper::handler_id handlerID = mNextHandlerID++;
     mHandlers.add(handlerID, info);
 
-    handler->setID(handlerID);
+    handler->setID(handlerID, looper);
 
     return handlerID;
 }
@@ -68,7 +67,7 @@
     sp<AHandler> handler = info.mHandler.promote();
 
     if (handler != NULL) {
-        handler->setID(0);
+        handler->setID(0, NULL);
     }
 
     mHandlers.removeItemsAt(index);
@@ -80,7 +79,8 @@
     {
         Mutex::Autolock autoLock(mLock);
 
-        for (size_t i = mHandlers.size(); i-- > 0;) {
+        for (size_t i = mHandlers.size(); i > 0;) {
+            i--;
             const HandlerInfo &info = mHandlers.valueAt(i);
 
             sp<ALooper> looper = info.mLooper.promote();
@@ -100,116 +100,6 @@
     }
 }
 
-status_t ALooperRoster::postMessage(
-        const sp<AMessage> &msg, int64_t delayUs) {
-
-    sp<ALooper> looper = findLooper(msg->target());
-
-    if (looper == NULL) {
-        return -ENOENT;
-    }
-    looper->post(msg, delayUs);
-    return OK;
-}
-
-void ALooperRoster::deliverMessage(const sp<AMessage> &msg) {
-    sp<AHandler> handler;
-
-    {
-        Mutex::Autolock autoLock(mLock);
-
-        ssize_t index = mHandlers.indexOfKey(msg->target());
-
-        if (index < 0) {
-            ALOGW("failed to deliver message. Target handler not registered.");
-            return;
-        }
-
-        const HandlerInfo &info = mHandlers.valueAt(index);
-        handler = info.mHandler.promote();
-
-        if (handler == NULL) {
-            ALOGW("failed to deliver message. "
-                 "Target handler %d registered, but object gone.",
-                 msg->target());
-
-            mHandlers.removeItemsAt(index);
-            return;
-        }
-    }
-
-    handler->onMessageReceived(msg);
-    handler->mMessageCounter++;
-
-    if (verboseStats) {
-        uint32_t what = msg->what();
-        ssize_t idx = handler->mMessages.indexOfKey(what);
-        if (idx < 0) {
-            handler->mMessages.add(what, 1);
-        } else {
-            handler->mMessages.editValueAt(idx)++;
-        }
-    }
-}
-
-sp<ALooper> ALooperRoster::findLooper(ALooper::handler_id handlerID) {
-    Mutex::Autolock autoLock(mLock);
-
-    ssize_t index = mHandlers.indexOfKey(handlerID);
-
-    if (index < 0) {
-        return NULL;
-    }
-
-    sp<ALooper> looper = mHandlers.valueAt(index).mLooper.promote();
-
-    if (looper == NULL) {
-        mHandlers.removeItemsAt(index);
-        return NULL;
-    }
-
-    return looper;
-}
-
-status_t ALooperRoster::postAndAwaitResponse(
-        const sp<AMessage> &msg, sp<AMessage> *response) {
-    sp<ALooper> looper = findLooper(msg->target());
-
-    if (looper == NULL) {
-        ALOGW("failed to post message. "
-                "Target handler %d still registered, but object gone.",
-                msg->target());
-        response->clear();
-        return -ENOENT;
-    }
-
-    Mutex::Autolock autoLock(mLock);
-
-    uint32_t replyID = mNextReplyID++;
-
-    msg->setInt32("replyID", replyID);
-
-    looper->post(msg, 0 /* delayUs */);
-
-    ssize_t index;
-    while ((index = mReplies.indexOfKey(replyID)) < 0) {
-        mRepliesCondition.wait(mLock);
-    }
-
-    *response = mReplies.valueAt(index);
-    mReplies.removeItemsAt(index);
-
-    return OK;
-}
-
-void ALooperRoster::postReply(uint32_t replyID, const sp<AMessage> &reply) {
-    Mutex::Autolock autoLock(mLock);
-
-    CHECK(mReplies.indexOfKey(replyID) < 0);
-    mReplies.add(replyID, reply);
-    mRepliesCondition.broadcast();
-}
-
 static void makeFourCC(uint32_t fourcc, char *s) {
     s[0] = (fourcc >> 24) & 0xff;
     if (s[0]) {
@@ -225,7 +115,7 @@
 void ALooperRoster::dump(int fd, const Vector<String16>& args) {
     bool clear = false;
     bool oldVerbose = verboseStats;
-    for (size_t i = 0;i < args.size(); i++) {
+    for (size_t i = 0; i < args.size(); i++) {
         if (args[i] == String16("-c")) {
             clear = true;
         } else if (args[i] == String16("-von")) {
@@ -241,22 +131,23 @@
 
     Mutex::Autolock autoLock(mLock);
     size_t n = mHandlers.size();
-    s.appendFormat(" %zd registered handlers:\n", n);
+    s.appendFormat(" %zu registered handlers:\n", n);
 
     for (size_t i = 0; i < n; i++) {
-        s.appendFormat("  %zd: ", i);
+        s.appendFormat("  %d: ", mHandlers.keyAt(i));
         HandlerInfo &info = mHandlers.editValueAt(i);
         sp<ALooper> looper = info.mLooper.promote();
         if (looper != NULL) {
-            s.append(looper->mName.c_str());
+            s.append(looper->getName());
             sp<AHandler> handler = info.mHandler.promote();
             if (handler != NULL) {
+                handler->mVerboseStats = verboseStats;
                 s.appendFormat(": %u messages processed", handler->mMessageCounter);
                 if (verboseStats) {
                     for (size_t j = 0; j < handler->mMessages.size(); j++) {
                         char fourcc[15];
                         makeFourCC(handler->mMessages.keyAt(j), fourcc);
-                        s.appendFormat("\n    %s: %d",
+                        s.appendFormat("\n    %s: %u",
                                 fourcc,
                                 handler->mMessages.valueAt(j));
                     }
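
The dump() output above renders each handler's message "what" codes as four printable characters via makeFourCC(). A minimal standalone sketch of that unpacking (illustrative only, not part of this patch; the multichar constant 'seek' is just an example value, and on common compilers such as GCC/Clang it packs bytes most-significant first, matching the unpacking below):

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t what = 'seek';  // multichar constant (hence -Wno-multichar in these makefiles)
        char s[5];
        s[0] = (what >> 24) & 0xff;  // most significant byte first
        s[1] = (what >> 16) & 0xff;
        s[2] = (what >> 8) & 0xff;
        s[3] = what & 0xff;
        s[4] = '\0';
        printf("0x%08x -> \"%s\"\n", what, s);  // prints: 0x7365656b -> "seek"
        return 0;
    }
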
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 1f46bc9..e549ff6 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -27,6 +27,7 @@
 #include "ABuffer.h"
 #include "ADebug.h"
 #include "ALooperRoster.h"
+#include "AHandler.h"
 #include "AString.h"
 
 #include <binder/Parcel.h>
@@ -36,12 +37,29 @@
 
 extern ALooperRoster gLooperRoster;
 
-AMessage::AMessage(uint32_t what, ALooper::handler_id target)
-    : mWhat(what),
-      mTarget(target),
+status_t AReplyToken::setReply(const sp<AMessage> &reply) {
+    if (mReplied) {
+        ALOGE("trying to post a duplicate reply");
+        return -EBUSY;
+    }
+    CHECK(mReply == NULL);
+    mReply = reply;
+    mReplied = true;
+    return OK;
+}
+
+AMessage::AMessage(void)
+    : mWhat(0),
+      mTarget(0),
       mNumItems(0) {
 }
 
+AMessage::AMessage(uint32_t what, const sp<const AHandler> &handler)
+    : mWhat(what),
+      mNumItems(0) {
+    setTarget(handler);
+}
+
 AMessage::~AMessage() {
     clear();
 }
@@ -54,12 +72,16 @@
     return mWhat;
 }
 
-void AMessage::setTarget(ALooper::handler_id handlerID) {
-    mTarget = handlerID;
-}
-
-ALooper::handler_id AMessage::target() const {
-    return mTarget;
+void AMessage::setTarget(const sp<const AHandler> &handler) {
+    if (handler == NULL) {
+        mTarget = 0;
+        mHandler.clear();
+        mLooper.clear();
+    } else {
+        mTarget = handler->id();
+        mHandler = handler->getHandler();
+        mLooper = handler->getLooper();
+    }
 }
 
 void AMessage::clear() {
@@ -322,33 +344,76 @@
     return true;
 }
 
-void AMessage::post(int64_t delayUs) {
-    gLooperRoster.postMessage(this, delayUs);
+void AMessage::deliver() {
+    sp<AHandler> handler = mHandler.promote();
+    if (handler == NULL) {
+        ALOGW("failed to deliver message as target handler %d is gone.", mTarget);
+        return;
+    }
+
+    handler->deliverMessage(this);
+}
+
+status_t AMessage::post(int64_t delayUs) {
+    sp<ALooper> looper = mLooper.promote();
+    if (looper == NULL) {
+        ALOGW("failed to post message as target looper for handler %d is gone.", mTarget);
+        return -ENOENT;
+    }
+
+    looper->post(this, delayUs);
+    return OK;
 }
 
 status_t AMessage::postAndAwaitResponse(sp<AMessage> *response) {
-    return gLooperRoster.postAndAwaitResponse(this, response);
+    sp<ALooper> looper = mLooper.promote();
+    if (looper == NULL) {
+        ALOGW("failed to post message as target looper for handler %d is gone.", mTarget);
+        return -ENOENT;
+    }
+
+    sp<AReplyToken> token = looper->createReplyToken();
+    if (token == NULL) {
+        ALOGE("failed to create reply token");
+        return -ENOMEM;
+    }
+    setObject("replyID", token);
+
+    looper->post(this, 0 /* delayUs */);
+    return looper->awaitResponse(token, response);
 }
 
-void AMessage::postReply(uint32_t replyID) {
-    gLooperRoster.postReply(replyID, this);
+status_t AMessage::postReply(const sp<AReplyToken> &replyToken) {
+    if (replyToken == NULL) {
+        ALOGW("failed to post reply to a NULL token");
+        return -ENOENT;
+    }
+    sp<ALooper> looper = replyToken->getLooper();
+    if (looper == NULL) {
+        ALOGW("failed to post reply as target looper is gone.");
+        return -ENOENT;
+    }
+    return looper->postReply(replyToken, this);
 }
 
-bool AMessage::senderAwaitsResponse(uint32_t *replyID) const {
-    int32_t tmp;
-    bool found = findInt32("replyID", &tmp);
+bool AMessage::senderAwaitsResponse(sp<AReplyToken> *replyToken) {
+    sp<RefBase> tmp;
+    bool found = findObject("replyID", &tmp);
 
     if (!found) {
         return false;
     }
 
-    *replyID = static_cast<uint32_t>(tmp);
+    *replyToken = static_cast<AReplyToken *>(tmp.get());
+    tmp.clear();
+    setObject("replyID", tmp);
+    // TODO: delete Object instead of setting it to NULL
 
-    return true;
+    return *replyToken != NULL;
 }
 
 sp<AMessage> AMessage::dup() const {
-    sp<AMessage> msg = new AMessage(mWhat, mTarget);
+    sp<AMessage> msg = new AMessage(mWhat, mHandler.promote());
     msg->mNumItems = mNumItems;
 
 #ifdef DUMP_STATS
@@ -532,7 +597,8 @@
 // static
 sp<AMessage> AMessage::FromParcel(const Parcel &parcel) {
     int32_t what = parcel.readInt32();
-    sp<AMessage> msg = new AMessage(what);
+    sp<AMessage> msg = new AMessage();
+    msg->setWhat(what);
 
     msg->mNumItems = static_cast<size_t>(parcel.readInt32());
     for (size_t i = 0; i < msg->mNumItems; ++i) {
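
With this change an AMessage is bound to its AHandler (and that handler's ALooper) at construction time, and synchronous requests are tracked with AReplyToken objects instead of numeric reply IDs routed through the global roster. A minimal sketch of the resulting request/response pattern (illustrative only; MyHandler, kWhatQuery and queryBlocking are made up for this example):

    #include <media/stagefright/foundation/ADebug.h>
    #include <media/stagefright/foundation/AHandler.h>
    #include <media/stagefright/foundation/ALooper.h>
    #include <media/stagefright/foundation/AMessage.h>

    using namespace android;

    struct MyHandler : public AHandler {
        enum { kWhatQuery = 'qery' };

    protected:
        virtual void onMessageReceived(const sp<AMessage> &msg) {
            switch (msg->what()) {
                case kWhatQuery:
                {
                    // The reply token travels inside the message as "replyID".
                    sp<AReplyToken> replyID;
                    CHECK(msg->senderAwaitsResponse(&replyID));

                    sp<AMessage> response = new AMessage;
                    response->setInt32("err", OK);
                    response->postReply(replyID);
                    break;
                }
                default:
                    TRESPASS();
            }
        }
    };

    static status_t queryBlocking() {
        sp<ALooper> looper = new ALooper;
        looper->setName("example");
        looper->start();

        sp<MyHandler> handler = new MyHandler;
        looper->registerHandler(handler);

        // Messages now target a handler object directly; no roster lookup by id.
        sp<AMessage> msg = new AMessage(MyHandler::kWhatQuery, handler);
        sp<AMessage> response;
        status_t err = msg->postAndAwaitResponse(&response);

        looper->stop();
        return err;
    }
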
diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk
index 08355c7..c68264c 100644
--- a/media/libstagefright/foundation/Android.mk
+++ b/media/libstagefright/foundation/Android.mk
@@ -29,7 +29,8 @@
         liblog            \
         libpowermanager
 
-LOCAL_CFLAGS += -Wno-multichar -Werror
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE:= libstagefright_foundation
 
diff --git a/media/libstagefright/http/Android.mk b/media/libstagefright/http/Android.mk
index 7f3307d..5fb51c1 100644
--- a/media/libstagefright/http/Android.mk
+++ b/media/libstagefright/http/Android.mk
@@ -21,7 +21,8 @@
 
 LOCAL_CFLAGS += -Wno-multichar
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/http/MediaHTTP.cpp b/media/libstagefright/http/MediaHTTP.cpp
index bb89567..801ff26 100644
--- a/media/libstagefright/http/MediaHTTP.cpp
+++ b/media/libstagefright/http/MediaHTTP.cpp
@@ -30,12 +30,11 @@
 namespace android {
 
 MediaHTTP::MediaHTTP(const sp<IMediaHTTPConnection> &conn)
-    : mInitCheck(NO_INIT),
+    : mInitCheck((conn != NULL) ? OK : NO_INIT),
       mHTTPConnection(conn),
       mCachedSizeValid(false),
       mCachedSize(0ll),
       mDrmManagerClient(NULL) {
-    mInitCheck = OK;
 }
 
 MediaHTTP::~MediaHTTP() {
@@ -54,7 +53,10 @@
     if (headers != NULL) {
         extHeaders = *headers;
     }
-    extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+
+    if (extHeaders.indexOfKey(String8("User-Agent")) < 0) {
+        extHeaders.add(String8("User-Agent"), String8(MakeUserAgent().c_str()));
+    }
 
     bool success = mHTTPConnection->connect(uri, &extHeaders);
 
@@ -171,6 +173,10 @@
 }
 
 String8 MediaHTTP::getUri() {
+    if (mInitCheck != OK) {
+        return String8::empty();
+    }
+
     String8 uri;
     if (OK == mHTTPConnection->getUri(&uri)) {
         return uri;
diff --git a/media/libstagefright/httplive/Android.mk b/media/libstagefright/httplive/Android.mk
index 93b7935..fc85835 100644
--- a/media/libstagefright/httplive/Android.mk
+++ b/media/libstagefright/httplive/Android.mk
@@ -3,6 +3,7 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:=               \
+        HTTPDownloader.cpp      \
         LiveDataSource.cpp      \
         LiveSession.cpp         \
         M3UParser.cpp           \
@@ -12,7 +13,8 @@
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_SHARED_LIBRARIES := \
         libbinder \
diff --git a/media/libstagefright/httplive/HTTPDownloader.cpp b/media/libstagefright/httplive/HTTPDownloader.cpp
new file mode 100644
index 0000000..3b44bae
--- /dev/null
+++ b/media/libstagefright/httplive/HTTPDownloader.cpp
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "HTTPDownloader"
+#include <utils/Log.h>
+
+#include "HTTPDownloader.h"
+#include "M3UParser.h"
+
+#include <media/IMediaHTTPConnection.h>
+#include <media/IMediaHTTPService.h>
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/FileSource.h>
+#include <openssl/aes.h>
+#include <openssl/md5.h>
+#include <utils/Mutex.h>
+
+namespace android {
+
+HTTPDownloader::HTTPDownloader(
+        const sp<IMediaHTTPService> &httpService,
+        const KeyedVector<String8, String8> &headers) :
+    mHTTPDataSource(new MediaHTTP(httpService->makeHTTPConnection())),
+    mExtraHeaders(headers),
+    mDisconnecting(false) {
+}
+
+void HTTPDownloader::reconnect() {
+    AutoMutex _l(mLock);
+    mDisconnecting = false;
+}
+
+void HTTPDownloader::disconnect() {
+    {
+        AutoMutex _l(mLock);
+        mDisconnecting = true;
+    }
+    mHTTPDataSource->disconnect();
+}
+
+bool HTTPDownloader::isDisconnecting() {
+    AutoMutex _l(mLock);
+    return mDisconnecting;
+}
+
+/*
+ * Illustration of parameters:
+ *
+ * 0      `range_offset`
+ * +------------+-------------------------------------------------------+--+--+
+ * |            |                                 | next block to fetch |  |  |
+ * |            | `source` handle => `out` buffer |                     |  |  |
+ * | `url` file |<--------- buffer size --------->|<--- `block_size` -->|  |  |
+ * |            |<----------- `range_length` / buffer capacity ----------->|  |
+ * |<------------------------------ file_size ------------------------------->|
+ *
+ * Special parameter values:
+ * - range_length == -1 means entire file
+ * - block_size == 0 means entire range
+ *
+ */
+ssize_t HTTPDownloader::fetchBlock(
+        const char *url, sp<ABuffer> *out,
+        int64_t range_offset, int64_t range_length,
+        uint32_t block_size, /* download block size */
+        String8 *actualUrl,
+        bool reconnect /* force connect HTTP when reusing source */) {
+    if (isDisconnecting()) {
+        return ERROR_NOT_CONNECTED;
+    }
+
+    off64_t size;
+
+    if (reconnect) {
+        if (!strncasecmp(url, "file://", 7)) {
+            mDataSource = new FileSource(url + 7);
+        } else if (strncasecmp(url, "http://", 7)
+                && strncasecmp(url, "https://", 8)) {
+            return ERROR_UNSUPPORTED;
+        } else {
+            KeyedVector<String8, String8> headers = mExtraHeaders;
+            if (range_offset > 0 || range_length >= 0) {
+                headers.add(
+                        String8("Range"),
+                        String8(
+                            AStringPrintf(
+                                "bytes=%lld-%s",
+                                range_offset,
+                                range_length < 0
+                                    ? "" : AStringPrintf("%lld",
+                                            range_offset + range_length - 1).c_str()).c_str()));
+            }
+
+            status_t err = mHTTPDataSource->connect(url, &headers);
+
+            if (isDisconnecting()) {
+                return ERROR_NOT_CONNECTED;
+            }
+
+            if (err != OK) {
+                return err;
+            }
+
+            mDataSource = mHTTPDataSource;
+        }
+    }
+
+    status_t getSizeErr = mDataSource->getSize(&size);
+
+    if (isDisconnecting()) {
+        return ERROR_NOT_CONNECTED;
+    }
+
+    if (getSizeErr != OK) {
+        size = 65536;
+    }
+
+    sp<ABuffer> buffer = *out != NULL ? *out : new ABuffer(size);
+    if (*out == NULL) {
+        buffer->setRange(0, 0);
+    }
+
+    ssize_t bytesRead = 0;
+    // adjust range_length if only reading partial block
+    if (block_size > 0 && (range_length == -1 || (int64_t)(buffer->size() + block_size) < range_length)) {
+        range_length = buffer->size() + block_size;
+    }
+    for (;;) {
+        // Only resize when we don't know the size.
+        size_t bufferRemaining = buffer->capacity() - buffer->size();
+        if (bufferRemaining == 0 && getSizeErr != OK) {
+            size_t bufferIncrement = buffer->size() / 2;
+            if (bufferIncrement < 32768) {
+                bufferIncrement = 32768;
+            }
+            bufferRemaining = bufferIncrement;
+
+            ALOGV("increasing download buffer to %zu bytes",
+                 buffer->size() + bufferRemaining);
+
+            sp<ABuffer> copy = new ABuffer(buffer->size() + bufferRemaining);
+            memcpy(copy->data(), buffer->data(), buffer->size());
+            copy->setRange(0, buffer->size());
+
+            buffer = copy;
+        }
+
+        size_t maxBytesToRead = bufferRemaining;
+        if (range_length >= 0) {
+            int64_t bytesLeftInRange = range_length - buffer->size();
+            if (bytesLeftInRange < (int64_t)maxBytesToRead) {
+                maxBytesToRead = bytesLeftInRange;
+
+                if (bytesLeftInRange == 0) {
+                    break;
+                }
+            }
+        }
+
+        // The DataSource is responsible for informing us of error (n < 0) or eof (n == 0)
+        // to help us break out of the loop.
+        ssize_t n = mDataSource->readAt(
+                buffer->size(), buffer->data() + buffer->size(),
+                maxBytesToRead);
+
+        if (isDisconnecting()) {
+            return ERROR_NOT_CONNECTED;
+        }
+
+        if (n < 0) {
+            return n;
+        }
+
+        if (n == 0) {
+            break;
+        }
+
+        buffer->setRange(0, buffer->size() + (size_t)n);
+        bytesRead += n;
+    }
+
+    *out = buffer;
+    if (actualUrl != NULL) {
+        *actualUrl = mDataSource->getUri();
+        if (actualUrl->isEmpty()) {
+            *actualUrl = url;
+        }
+    }
+
+    return bytesRead;
+}
+
+ssize_t HTTPDownloader::fetchFile(
+        const char *url, sp<ABuffer> *out, String8 *actualUrl) {
+    ssize_t err = fetchBlock(url, out, 0, -1, 0, actualUrl, true /* reconnect */);
+
+    // close off the connection after use
+    mHTTPDataSource->disconnect();
+
+    return err;
+}
+
+sp<M3UParser> HTTPDownloader::fetchPlaylist(
+        const char *url, uint8_t *curPlaylistHash, bool *unchanged) {
+    ALOGV("fetchPlaylist '%s'", url);
+
+    *unchanged = false;
+
+    sp<ABuffer> buffer;
+    String8 actualUrl;
+    ssize_t err = fetchFile(url, &buffer, &actualUrl);
+
+    // close off the connection after use
+    mHTTPDataSource->disconnect();
+
+    if (err <= 0) {
+        return NULL;
+    }
+
+    // MD5 functionality is not available on the simulator; treat all
+    // playlists as changed.
+
+#if defined(HAVE_ANDROID_OS)
+    uint8_t hash[16];
+
+    MD5_CTX m;
+    MD5_Init(&m);
+    MD5_Update(&m, buffer->data(), buffer->size());
+
+    MD5_Final(hash, &m);
+
+    if (curPlaylistHash != NULL && !memcmp(hash, curPlaylistHash, 16)) {
+        // playlist unchanged
+        *unchanged = true;
+
+        return NULL;
+    }
+
+    if (curPlaylistHash != NULL) {
+        memcpy(curPlaylistHash, hash, sizeof(hash));
+    }
+#endif
+
+    sp<M3UParser> playlist =
+        new M3UParser(actualUrl.string(), buffer->data(), buffer->size());
+
+    if (playlist->initCheck() != OK) {
+        ALOGE("failed to parse .m3u8 playlist");
+
+        return NULL;
+    }
+
+    return playlist;
+}
+
+}  // namespace android
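
When the server does not report a content length (getSize() fails), fetchBlock() above falls back to a 64 KiB buffer and grows it by half of the data already read, but by at least 32 KiB per step. A minimal standalone sketch of that growth policy (illustrative only, not part of the patch):

    #include <cstddef>
    #include <cstdio>

    static size_t nextCapacity(size_t currentSize) {
        size_t increment = currentSize / 2;   // grow by half of what we already have...
        if (increment < 32768) {
            increment = 32768;                // ...but never by less than 32 KiB
        }
        return currentSize + increment;
    }

    int main() {
        size_t cap = 65536;  // fallback size used when getSize() fails
        for (int step = 0; step < 5; ++step) {
            printf("step %d: %zu bytes\n", step, cap);
            cap = nextCapacity(cap);
        }
        return 0;
    }
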
diff --git a/media/libstagefright/httplive/HTTPDownloader.h b/media/libstagefright/httplive/HTTPDownloader.h
new file mode 100644
index 0000000..1db4a48
--- /dev/null
+++ b/media/libstagefright/httplive/HTTPDownloader.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef HTTP_DOWNLOADER_H_
+
+#define HTTP_DOWNLOADER_H_
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <utils/KeyedVector.h>
+#include <utils/Mutex.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+struct ABuffer;
+class DataSource;
+struct HTTPBase;
+struct IMediaHTTPService;
+struct M3UParser;
+
+struct HTTPDownloader : public RefBase {
+    HTTPDownloader(
+            const sp<IMediaHTTPService> &httpService,
+            const KeyedVector<String8, String8> &headers);
+
+    void reconnect();
+    void disconnect();
+    bool isDisconnecting();
+    // If given a non-zero block_size (default 0), it is used to cap the number of
+    // bytes read in from the DataSource. If given a non-NULL buffer, new content
+    // is read into the end.
+    //
+    // The DataSource we read from is responsible for signaling error or EOF to help us
+    // break out of the read loop. The DataSource can be returned to the caller, so
+    // that the caller can reuse it for subsequent fetches (within the initially
+    // requested range).
+    //
+    // For reused HTTP sources, the caller must download a file sequentially without
+    // any overlaps or gaps to prevent reconnection.
+    ssize_t fetchBlock(
+            const char *url,
+            sp<ABuffer> *out,
+            int64_t range_offset, /* open file at range_offset */
+            int64_t range_length, /* open file for range_length (-1: entire file) */
+            uint32_t block_size,  /* download block size (0: entire range) */
+            String8 *actualUrl,   /* returns actual URL */
+            bool reconnect        /* force connect http */
+            );
+
+    // simplified version to fetch a single file
+    ssize_t fetchFile(
+            const char *url,
+            sp<ABuffer> *out,
+            String8 *actualUrl = NULL);
+
+    // fetch a playlist file
+    sp<M3UParser> fetchPlaylist(
+            const char *url, uint8_t *curPlaylistHash, bool *unchanged);
+
+private:
+    sp<HTTPBase> mHTTPDataSource;
+    sp<DataSource> mDataSource;
+    KeyedVector<String8, String8> mExtraHeaders;
+
+    Mutex mLock;
+    bool mDisconnecting;
+
+    DISALLOW_EVIL_CONSTRUCTORS(HTTPDownloader);
+};
+
+}  // namespace android
+
+#endif  // HTTP_DOWNLOADER_H_
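
A minimal sketch of how a caller might use this class to poll a playlist, based only on the interface above. The httpService, headers, url and pollPlaylist names are assumptions for illustration; LiveSession creates its downloaders this way via getHTTPDownloader().

    // Sketch only; assumes the same includes and namespace as HTTPDownloader.cpp above.
    static sp<M3UParser> pollPlaylist(
            const sp<IMediaHTTPService> &httpService,
            const KeyedVector<String8, String8> &headers,
            const char *url,
            uint8_t playlistHash[16] /* in/out: MD5 of the last fetched playlist */) {
        sp<HTTPDownloader> downloader = new HTTPDownloader(httpService, headers);

        bool unchanged = false;
        sp<M3UParser> playlist =
                downloader->fetchPlaylist(url, playlistHash, &unchanged);

        if (playlist == NULL) {
            if (unchanged) {
                // Same MD5 as last time: keep using the previously parsed playlist.
            } else {
                // Download or parse error; the caller decides whether to retry.
            }
        }
        return playlist;
    }
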
diff --git a/media/libstagefright/httplive/LiveSession.cpp b/media/libstagefright/httplive/LiveSession.cpp
index d0f3bc2..1557401 100644
--- a/media/libstagefright/httplive/LiveSession.cpp
+++ b/media/libstagefright/httplive/LiveSession.cpp
@@ -19,24 +19,19 @@
 #include <utils/Log.h>
 
 #include "LiveSession.h"
-
+#include "HTTPDownloader.h"
 #include "M3UParser.h"
 #include "PlaylistFetcher.h"
 
-#include "include/HTTPBase.h"
 #include "mpeg2ts/AnotherPacketSource.h"
 
 #include <cutils/properties.h>
-#include <media/IMediaHTTPConnection.h>
 #include <media/IMediaHTTPService.h>
-#include <media/stagefright/foundation/hexdump.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/DataSource.h>
-#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaHTTP.h>
+#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
 
@@ -44,13 +39,239 @@
 
 #include <ctype.h>
 #include <inttypes.h>
-#include <openssl/aes.h>
-#include <openssl/md5.h>
 
 namespace android {
 
-// Number of recently-read bytes to use for bandwidth estimation
-const size_t LiveSession::kBandwidthHistoryBytes = 200 * 1024;
+// static
+// Bandwidth Switch Mark Defaults
+const int64_t LiveSession::kUpSwitchMarkUs = 15000000ll;
+const int64_t LiveSession::kDownSwitchMarkUs = 20000000ll;
+const int64_t LiveSession::kUpSwitchMarginUs = 5000000ll;
+const int64_t LiveSession::kResumeThresholdUs = 100000ll;
+
+// Buffer Prepare/Ready/Underflow Marks
+const int64_t LiveSession::kReadyMarkUs = 5000000ll;
+const int64_t LiveSession::kPrepareMarkUs = 1500000ll;
+const int64_t LiveSession::kUnderflowMarkUs = 1000000ll;
+
+struct LiveSession::BandwidthEstimator : public RefBase {
+    BandwidthEstimator();
+
+    void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
+    bool estimateBandwidth(
+            int32_t *bandwidth,
+            bool *isStable = NULL,
+            int32_t *shortTermBps = NULL);
+
+private:
+    // Bandwidth estimation parameters
+    static const int32_t kShortTermBandwidthItems = 3;
+    static const int32_t kMinBandwidthHistoryItems = 20;
+    static const int64_t kMinBandwidthHistoryWindowUs = 5000000ll; // 5 sec
+    static const int64_t kMaxBandwidthHistoryWindowUs = 30000000ll; // 30 sec
+    static const int64_t kMaxBandwidthHistoryAgeUs = 60000000ll; // 60 sec
+
+    struct BandwidthEntry {
+        int64_t mTimestampUs;
+        int64_t mDelayUs;
+        size_t mNumBytes;
+    };
+
+    Mutex mLock;
+    List<BandwidthEntry> mBandwidthHistory;
+    List<int32_t> mPrevEstimates;
+    int32_t mShortTermEstimate;
+    bool mHasNewSample;
+    bool mIsStable;
+    int64_t mTotalTransferTimeUs;
+    size_t mTotalTransferBytes;
+
+    DISALLOW_EVIL_CONSTRUCTORS(BandwidthEstimator);
+};
+
+LiveSession::BandwidthEstimator::BandwidthEstimator() :
+    mShortTermEstimate(0),
+    mHasNewSample(false),
+    mIsStable(true),
+    mTotalTransferTimeUs(0),
+    mTotalTransferBytes(0) {
+}
+
+void LiveSession::BandwidthEstimator::addBandwidthMeasurement(
+        size_t numBytes, int64_t delayUs) {
+    AutoMutex autoLock(mLock);
+
+    int64_t nowUs = ALooper::GetNowUs();
+    BandwidthEntry entry;
+    entry.mTimestampUs = nowUs;
+    entry.mDelayUs = delayUs;
+    entry.mNumBytes = numBytes;
+    mTotalTransferTimeUs += delayUs;
+    mTotalTransferBytes += numBytes;
+    mBandwidthHistory.push_back(entry);
+    mHasNewSample = true;
+
+    // Remove no more than 10% of total transfer time at a time
+    // to avoid a sudden jump in the bandwidth estimate. There might
+    // be long blocking reads that take up significant time;
+    // we have to keep a longer window in that case.
+    int64_t bandwidthHistoryWindowUs = mTotalTransferTimeUs * 9 / 10;
+    if (bandwidthHistoryWindowUs < kMinBandwidthHistoryWindowUs) {
+        bandwidthHistoryWindowUs = kMinBandwidthHistoryWindowUs;
+    } else if (bandwidthHistoryWindowUs > kMaxBandwidthHistoryWindowUs) {
+        bandwidthHistoryWindowUs = kMaxBandwidthHistoryWindowUs;
+    }
+    // trim old samples, keeping at least kMinBandwidthHistoryItems samples
+    // and roughly bandwidthHistoryWindowUs worth of total transfer time.
+    while (mBandwidthHistory.size() > kMinBandwidthHistoryItems) {
+        List<BandwidthEntry>::iterator it = mBandwidthHistory.begin();
+        // remove sample if either absolute age or total transfer time is
+        // over kMaxBandwidthHistoryWindowUs
+        if (nowUs - it->mTimestampUs < kMaxBandwidthHistoryAgeUs &&
+                mTotalTransferTimeUs - it->mDelayUs < bandwidthHistoryWindowUs) {
+            break;
+        }
+        mTotalTransferTimeUs -= it->mDelayUs;
+        mTotalTransferBytes -= it->mNumBytes;
+        mBandwidthHistory.erase(mBandwidthHistory.begin());
+    }
+}
+
+bool LiveSession::BandwidthEstimator::estimateBandwidth(
+        int32_t *bandwidthBps, bool *isStable, int32_t *shortTermBps) {
+    AutoMutex autoLock(mLock);
+
+    if (mBandwidthHistory.size() < 2) {
+        return false;
+    }
+
+    if (!mHasNewSample) {
+        *bandwidthBps = *(--mPrevEstimates.end());
+        if (isStable) {
+            *isStable = mIsStable;
+        }
+        if (shortTermBps) {
+            *shortTermBps = mShortTermEstimate;
+        }
+        return true;
+    }
+
+    *bandwidthBps = ((double)mTotalTransferBytes * 8E6 / mTotalTransferTimeUs);
+    mPrevEstimates.push_back(*bandwidthBps);
+    while (mPrevEstimates.size() > 3) {
+        mPrevEstimates.erase(mPrevEstimates.begin());
+    }
+    mHasNewSample = false;
+
+    int64_t totalTimeUs = 0;
+    size_t totalBytes = 0;
+    if (mBandwidthHistory.size() >= kShortTermBandwidthItems) {
+        List<BandwidthEntry>::iterator it = --mBandwidthHistory.end();
+        for (size_t i = 0; i < kShortTermBandwidthItems; i++, it--) {
+            totalTimeUs += it->mDelayUs;
+            totalBytes += it->mNumBytes;
+        }
+    }
+    mShortTermEstimate = totalTimeUs > 0 ?
+            (totalBytes * 8E6 / totalTimeUs) : *bandwidthBps;
+    if (shortTermBps) {
+        *shortTermBps = mShortTermEstimate;
+    }
+
+    int32_t minEstimate = -1, maxEstimate = -1;
+    List<int32_t>::iterator it;
+    for (it = mPrevEstimates.begin(); it != mPrevEstimates.end(); it++) {
+        int32_t estimate = *it;
+        if (minEstimate < 0 || minEstimate > estimate) {
+            minEstimate = estimate;
+        }
+        if (maxEstimate < 0 || maxEstimate < estimate) {
+            maxEstimate = estimate;
+        }
+    }
+    // consider it stable if long-term average is not jumping a lot
+    // and short-term average is not much lower than long-term average
+    mIsStable = (maxEstimate <= minEstimate * 4 / 3)
+            && mShortTermEstimate > minEstimate * 7 / 10;
+    if (isStable) {
+        *isStable = mIsStable;
+    }
+
+#if 0
+    {
+        char dumpStr[1024] = {0};
+        size_t itemIdx = 0;
+        size_t histSize = mBandwidthHistory.size();
+        sprintf(dumpStr, "estimate bps=%d stable=%d history (n=%d): {",
+            *bandwidthBps, mIsStable, histSize);
+        List<BandwidthEntry>::iterator it = mBandwidthHistory.begin();
+        for (; it != mBandwidthHistory.end(); ++it) {
+            if (itemIdx > 50) {
+                sprintf(dumpStr + strlen(dumpStr),
+                        "...(%zd more items)... }", histSize - itemIdx);
+                break;
+            }
+            sprintf(dumpStr + strlen(dumpStr), "%dk/%.3fs%s",
+                it->mNumBytes / 1024,
+                (double)it->mDelayUs * 1.0e-6,
+                (it == (--mBandwidthHistory.end())) ? "}" : ", ");
+            itemIdx++;
+        }
+        ALOGE(dumpStr);
+    }
+#endif
+    return true;
+}
+
+//static
+const char *LiveSession::getKeyForStream(StreamType type) {
+    switch (type) {
+        case STREAMTYPE_VIDEO:
+            return "timeUsVideo";
+        case STREAMTYPE_AUDIO:
+            return "timeUsAudio";
+        case STREAMTYPE_SUBTITLES:
+            return "timeUsSubtitle";
+        case STREAMTYPE_METADATA:
+            return "timeUsMetadata"; // unused
+        default:
+            TRESPASS();
+    }
+    return NULL;
+}
+
+//static
+const char *LiveSession::getNameForStream(StreamType type) {
+    switch (type) {
+        case STREAMTYPE_VIDEO:
+            return "video";
+        case STREAMTYPE_AUDIO:
+            return "audio";
+        case STREAMTYPE_SUBTITLES:
+            return "subs";
+        case STREAMTYPE_METADATA:
+            return "metadata";
+        default:
+            break;
+    }
+    return "unknown";
+}
+
+//static
+ATSParser::SourceType LiveSession::getSourceTypeForStream(StreamType type) {
+    switch (type) {
+        case STREAMTYPE_VIDEO:
+            return ATSParser::VIDEO;
+        case STREAMTYPE_AUDIO:
+            return ATSParser::AUDIO;
+        case STREAMTYPE_METADATA:
+            return ATSParser::META;
+        case STREAMTYPE_SUBTITLES:
+        default:
+            TRESPASS();
+    }
+    return ATSParser::NUM_SOURCE_TYPES; // should not reach here
+}
 
 LiveSession::LiveSession(
         const sp<AMessage> &notify, uint32_t flags,
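
The estimator above keeps the last three long-term estimates (mPrevEstimates) plus a short-term average over the most recent samples, and calls the result "stable" only if the long-term values stay within a 4/3 band and the short-term value is above 70% of the smallest recent estimate. A self-contained sketch of just that rule, with made-up numbers:

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    static bool isStable(const std::vector<int32_t> &prevEstimates, int32_t shortTermBps) {
        int32_t minEstimate = *std::min_element(prevEstimates.begin(), prevEstimates.end());
        int32_t maxEstimate = *std::max_element(prevEstimates.begin(), prevEstimates.end());
        // long-term average is not jumping a lot, and the short-term average is
        // not much lower than the long-term average
        return (maxEstimate <= minEstimate * 4 / 3)
                && shortTermBps > minEstimate * 7 / 10;
    }

    int main() {
        std::vector<int32_t> estimates = {2000000, 2200000, 2400000};  // last 3 long-term estimates
        printf("%d\n", isStable(estimates, 1800000));  // 1: within the 4/3 band, short-term above 70%
        printf("%d\n", isStable(estimates, 1200000));  // 0: short-term dipped below 70% of 2000000
        return 0;
    }
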
@@ -58,169 +279,95 @@
     : mNotify(notify),
       mFlags(flags),
       mHTTPService(httpService),
+      mBuffering(false),
       mInPreparationPhase(true),
-      mHTTPDataSource(new MediaHTTP(mHTTPService->makeHTTPConnection())),
+      mPollBufferingGeneration(0),
+      mPrevBufferPercentage(-1),
       mCurBandwidthIndex(-1),
+      mOrigBandwidthIndex(-1),
+      mLastBandwidthBps(-1ll),
+      mLastBandwidthStable(false),
+      mBandwidthEstimator(new BandwidthEstimator()),
+      mMaxWidth(720),
+      mMaxHeight(480),
       mStreamMask(0),
       mNewStreamMask(0),
       mSwapMask(0),
-      mCheckBandwidthGeneration(0),
       mSwitchGeneration(0),
       mSubtitleGeneration(0),
       mLastDequeuedTimeUs(0ll),
       mRealTimeBaseUs(0ll),
       mReconfigurationInProgress(false),
       mSwitchInProgress(false),
-      mDisconnectReplyID(0),
-      mSeekReplyID(0),
+      mUpSwitchMark(kUpSwitchMarkUs),
+      mDownSwitchMark(kDownSwitchMarkUs),
+      mUpSwitchMargin(kUpSwitchMarginUs),
       mFirstTimeUsValid(false),
       mFirstTimeUs(0),
-      mLastSeekTimeUs(0) {
-
+      mLastSeekTimeUs(0),
+      mHasMetadata(false) {
     mStreams[kAudioIndex] = StreamItem("audio");
     mStreams[kVideoIndex] = StreamItem("video");
     mStreams[kSubtitleIndex] = StreamItem("subtitles");
 
-    for (size_t i = 0; i < kMaxStreams; ++i) {
-        mDiscontinuities.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
+    for (size_t i = 0; i < kNumSources; ++i) {
         mPacketSources.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
         mPacketSources2.add(indexToType(i), new AnotherPacketSource(NULL /* meta */));
-        mBuffering[i] = false;
     }
-
-    size_t numHistoryItems = kBandwidthHistoryBytes /
-            PlaylistFetcher::kDownloadBlockSize + 1;
-    if (numHistoryItems < 5) {
-        numHistoryItems = 5;
-    }
-    mHTTPDataSource->setBandwidthHistorySize(numHistoryItems);
 }
 
 LiveSession::~LiveSession() {
+    if (mFetcherLooper != NULL) {
+        mFetcherLooper->stop();
+    }
 }
 
-sp<ABuffer> LiveSession::createFormatChangeBuffer(bool swap) {
-    ABuffer *discontinuity = new ABuffer(0);
-    discontinuity->meta()->setInt32("discontinuity", ATSParser::DISCONTINUITY_FORMATCHANGE);
-    discontinuity->meta()->setInt32("swapPacketSource", swap);
-    discontinuity->meta()->setInt32("switchGeneration", mSwitchGeneration);
-    discontinuity->meta()->setInt64("timeUs", -1);
-    return discontinuity;
-}
-
-void LiveSession::swapPacketSource(StreamType stream) {
-    sp<AnotherPacketSource> &aps = mPacketSources.editValueFor(stream);
-    sp<AnotherPacketSource> &aps2 = mPacketSources2.editValueFor(stream);
-    sp<AnotherPacketSource> tmp = aps;
-    aps = aps2;
-    aps2 = tmp;
-    aps2->clear();
+int64_t LiveSession::calculateMediaTimeUs(
+        int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq) {
+    if (timeUs >= firstTimeUs) {
+        timeUs -= firstTimeUs;
+    } else {
+        timeUs = 0;
+    }
+    timeUs += mLastSeekTimeUs;
+    if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
+        timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
+    }
+    return timeUs;
 }
 
 status_t LiveSession::dequeueAccessUnit(
         StreamType stream, sp<ABuffer> *accessUnit) {
-    if (!(mStreamMask & stream)) {
-        // return -EWOULDBLOCK to avoid halting the decoder
-        // when switching between audio/video and audio only.
-        return -EWOULDBLOCK;
-    }
-
-    status_t finalResult;
-    sp<AnotherPacketSource> discontinuityQueue  = mDiscontinuities.valueFor(stream);
-    if (discontinuityQueue->hasBufferAvailable(&finalResult)) {
-        discontinuityQueue->dequeueAccessUnit(accessUnit);
-        // seeking, track switching
-        sp<AMessage> extra;
-        int64_t timeUs;
-        if ((*accessUnit)->meta()->findMessage("extra", &extra)
-                && extra != NULL
-                && extra->findInt64("timeUs", &timeUs)) {
-            // seeking only
-            mLastSeekTimeUs = timeUs;
-            mDiscontinuityOffsetTimesUs.clear();
-            mDiscontinuityAbsStartTimesUs.clear();
-        }
-        return INFO_DISCONTINUITY;
-    }
-
+    status_t finalResult = OK;
     sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(stream);
 
-    ssize_t idx = typeToIndex(stream);
-    if (!packetSource->hasBufferAvailable(&finalResult)) {
+    ssize_t streamIdx = typeToIndex(stream);
+    if (streamIdx < 0) {
+        return BAD_VALUE;
+    }
+    const char *streamStr = getNameForStream(stream);
+    // Do not let the client pull data if we don't have data packets yet.
+    // We might only have a format discontinuity queued without data.
+    // When NuPlayerDecoder dequeues the format discontinuity, it will
+    // immediately try to getFormat. If we return NULL, NuPlayerDecoder
+    // thinks it can do a seamless change, so it will not shut down the decoder.
+    // When the actual format arrives, it can't handle it and gets stuck.
+    if (!packetSource->hasDataBufferAvailable(&finalResult)) {
+        ALOGV("[%s] dequeueAccessUnit: no buffer available (finalResult=%d)",
+                streamStr, finalResult);
+
         if (finalResult == OK) {
-            mBuffering[idx] = true;
             return -EAGAIN;
         } else {
             return finalResult;
         }
     }
 
-    int32_t targetDuration = 0;
-    sp<AMessage> meta = packetSource->getLatestEnqueuedMeta();
-    if (meta != NULL) {
-        meta->findInt32("targetDuration", &targetDuration);
-    }
-
-    int64_t targetDurationUs = targetDuration * 1000000ll;
-    if (targetDurationUs == 0 ||
-            targetDurationUs > PlaylistFetcher::kMinBufferedDurationUs) {
-        // Fetchers limit buffering to
-        // min(3 * targetDuration, kMinBufferedDurationUs)
-        targetDurationUs = PlaylistFetcher::kMinBufferedDurationUs;
-    }
-
-    if (mBuffering[idx]) {
-        if (mSwitchInProgress
-                || packetSource->isFinished(0)
-                || packetSource->getEstimatedDurationUs() > targetDurationUs) {
-            mBuffering[idx] = false;
-        }
-    }
-
-    if (mBuffering[idx]) {
-        return -EAGAIN;
-    }
-
-    // wait for counterpart
-    sp<AnotherPacketSource> otherSource;
-    uint32_t mask = mNewStreamMask & mStreamMask;
-    uint32_t fetchersMask  = 0;
-    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
-        uint32_t fetcherMask = mFetcherInfos.valueAt(i).mFetcher->getStreamTypeMask();
-        fetchersMask |= fetcherMask;
-    }
-    mask &= fetchersMask;
-    if (stream == STREAMTYPE_AUDIO && (mask & STREAMTYPE_VIDEO)) {
-        otherSource = mPacketSources.valueFor(STREAMTYPE_VIDEO);
-    } else if (stream == STREAMTYPE_VIDEO && (mask & STREAMTYPE_AUDIO)) {
-        otherSource = mPacketSources.valueFor(STREAMTYPE_AUDIO);
-    }
-    if (otherSource != NULL && !otherSource->hasBufferAvailable(&finalResult)) {
-        return finalResult == OK ? -EAGAIN : finalResult;
-    }
+    // Let the client dequeue as long as we have buffers available
+    // Do not make pause/resume decisions here.
 
     status_t err = packetSource->dequeueAccessUnit(accessUnit);
 
-    size_t streamIdx;
-    const char *streamStr;
-    switch (stream) {
-        case STREAMTYPE_AUDIO:
-            streamIdx = kAudioIndex;
-            streamStr = "audio";
-            break;
-        case STREAMTYPE_VIDEO:
-            streamIdx = kVideoIndex;
-            streamStr = "video";
-            break;
-        case STREAMTYPE_SUBTITLES:
-            streamIdx = kSubtitleIndex;
-            streamStr = "subs";
-            break;
-        default:
-            TRESPASS();
-    }
-
-    StreamItem& strm = mStreams[streamIdx];
     if (err == INFO_DISCONTINUITY) {
         // adaptive streaming, discontinuities in the playlist
         int32_t type;
@@ -235,50 +382,36 @@
               streamStr,
               type,
               extra == NULL ? "NULL" : extra->debugString().c_str());
-
-        int32_t swap;
-        if ((*accessUnit)->meta()->findInt32("swapPacketSource", &swap) && swap) {
-            int32_t switchGeneration;
-            CHECK((*accessUnit)->meta()->findInt32("switchGeneration", &switchGeneration));
-            {
-                Mutex::Autolock lock(mSwapMutex);
-                if (switchGeneration == mSwitchGeneration) {
-                    swapPacketSource(stream);
-                    sp<AMessage> msg = new AMessage(kWhatSwapped, id());
-                    msg->setInt32("stream", stream);
-                    msg->setInt32("switchGeneration", switchGeneration);
-                    msg->post();
-                }
-            }
-        } else {
-            size_t seq = strm.mCurDiscontinuitySeq;
-            int64_t offsetTimeUs;
-            if (mDiscontinuityOffsetTimesUs.indexOfKey(seq) >= 0) {
-                offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(seq);
-            } else {
-                offsetTimeUs = 0;
-            }
-
-            seq += 1;
-            if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
-                int64_t firstTimeUs;
-                firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
-                offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
-                offsetTimeUs += strm.mLastSampleDurationUs;
-            } else {
-                offsetTimeUs += strm.mLastSampleDurationUs;
-            }
-
-            mDiscontinuityOffsetTimesUs.add(seq, offsetTimeUs);
-        }
     } else if (err == OK) {
 
         if (stream == STREAMTYPE_AUDIO || stream == STREAMTYPE_VIDEO) {
-            int64_t timeUs;
+            int64_t timeUs, originalTimeUs;
             int32_t discontinuitySeq = 0;
+            StreamItem& strm = mStreams[streamIdx];
             CHECK((*accessUnit)->meta()->findInt64("timeUs",  &timeUs));
+            originalTimeUs = timeUs;
             (*accessUnit)->meta()->findInt32("discontinuitySeq", &discontinuitySeq);
-            strm.mCurDiscontinuitySeq = discontinuitySeq;
+            if (discontinuitySeq > (int32_t) strm.mCurDiscontinuitySeq) {
+                int64_t offsetTimeUs;
+                if (mDiscontinuityOffsetTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0) {
+                    offsetTimeUs = mDiscontinuityOffsetTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+                } else {
+                    offsetTimeUs = 0;
+                }
+
+                if (mDiscontinuityAbsStartTimesUs.indexOfKey(strm.mCurDiscontinuitySeq) >= 0
+                        && strm.mLastDequeuedTimeUs >= 0) {
+                    int64_t firstTimeUs;
+                    firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(strm.mCurDiscontinuitySeq);
+                    offsetTimeUs += strm.mLastDequeuedTimeUs - firstTimeUs;
+                    offsetTimeUs += strm.mLastSampleDurationUs;
+                } else {
+                    offsetTimeUs += strm.mLastSampleDurationUs;
+                }
+
+                mDiscontinuityOffsetTimesUs.add(discontinuitySeq, offsetTimeUs);
+                strm.mCurDiscontinuitySeq = discontinuitySeq;
+            }
 
             int32_t discard = 0;
             int64_t firstTimeUs;
@@ -299,17 +432,10 @@
             }
 
             strm.mLastDequeuedTimeUs = timeUs;
-            if (timeUs >= firstTimeUs) {
-                timeUs -= firstTimeUs;
-            } else {
-                timeUs = 0;
-            }
-            timeUs += mLastSeekTimeUs;
-            if (mDiscontinuityOffsetTimesUs.indexOfKey(discontinuitySeq) >= 0) {
-                timeUs += mDiscontinuityOffsetTimesUs.valueFor(discontinuitySeq);
-            }
+            timeUs = calculateMediaTimeUs(firstTimeUs, timeUs, discontinuitySeq);
 
-            ALOGV("[%s] read buffer at time %" PRId64 " us", streamStr, timeUs);
+            ALOGV("[%s] dequeueAccessUnit: time %lld us, original %lld us",
+                    streamStr, (long long)timeUs, (long long)originalTimeUs);
             (*accessUnit)->meta()->setInt64("timeUs",  timeUs);
             mLastDequeuedTimeUs = timeUs;
             mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
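
The remapping done by calculateMediaTimeUs() above can be followed with concrete numbers: the segment-local timestamp is rebased on the first timestamp of its discontinuity sequence, shifted by the last seek position, and then by the accumulated offset recorded for that sequence. A standalone sketch with illustrative values (the LiveSession member variables are passed in as parameters here):

    #include <cstdint>
    #include <cstdio>
    #include <map>

    static int64_t calculateMediaTimeUs(
            int64_t firstTimeUs, int64_t timeUs, int32_t seq,
            int64_t lastSeekTimeUs,
            const std::map<int32_t, int64_t> &discontinuityOffsetsUs) {
        if (timeUs >= firstTimeUs) {
            timeUs -= firstTimeUs;   // rebase on the start of this discontinuity
        } else {
            timeUs = 0;
        }
        timeUs += lastSeekTimeUs;    // shift by the last seek position
        auto it = discontinuityOffsetsUs.find(seq);
        if (it != discontinuityOffsetsUs.end()) {
            timeUs += it->second;    // add accumulated offset for this sequence
        }
        return timeUs;
    }

    int main() {
        // A sample stamped 12.5s into discontinuity #2, whose segment started at 10s,
        // after a seek to 30s and with 40s of earlier content accumulated:
        std::map<int32_t, int64_t> offsets = {{2, 40000000}};
        printf("%lld us\n", (long long)calculateMediaTimeUs(
                10000000, 12500000, 2, 30000000, offsets));  // prints: 72500000 us
        return 0;
    }
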
@@ -322,6 +448,17 @@
             (*accessUnit)->meta()->setInt32(
                     "trackIndex", mPlaylist->getSelectedIndex());
             (*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs);
+        } else if (stream == STREAMTYPE_METADATA) {
+            HLSTime mdTime((*accessUnit)->meta());
+            if (mDiscontinuityAbsStartTimesUs.indexOfKey(mdTime.mSeq) < 0) {
+                packetSource->requeueAccessUnit((*accessUnit));
+                return -EAGAIN;
+            } else {
+                int64_t firstTimeUs = mDiscontinuityAbsStartTimesUs.valueFor(mdTime.mSeq);
+                int64_t timeUs = calculateMediaTimeUs(firstTimeUs, mdTime.mTimeUs, mdTime.mSeq);
+                (*accessUnit)->meta()->setInt64("timeUs",  timeUs);
+                (*accessUnit)->meta()->setInt64("baseUs", mRealTimeBaseUs);
+            }
         }
     } else {
         ALOGI("[%s] encountered error %d", streamStr, err);
@@ -331,7 +468,6 @@
 }
 
 status_t LiveSession::getStreamFormat(StreamType stream, sp<AMessage> *format) {
-    // No swapPacketSource race condition; called from the same thread as dequeueAccessUnit.
     if (!(mStreamMask & stream)) {
         return UNKNOWN_ERROR;
     }
@@ -344,12 +480,24 @@
         return -EAGAIN;
     }
 
+    if (stream == STREAMTYPE_AUDIO) {
+        // set AAC input buffer size to 32K bytes (256kbps x 1sec)
+        meta->setInt32(kKeyMaxInputSize, 32 * 1024);
+    } else if (stream == STREAMTYPE_VIDEO) {
+        meta->setInt32(kKeyMaxWidth, mMaxWidth);
+        meta->setInt32(kKeyMaxHeight, mMaxHeight);
+    }
+
     return convertMetaDataToMessage(meta, format);
 }
 
+sp<HTTPDownloader> LiveSession::getHTTPDownloader() {
+    return new HTTPDownloader(mHTTPService, mExtraHeaders);
+}
+
 void LiveSession::connectAsync(
         const char *url, const KeyedVector<String8, String8> *headers) {
-    sp<AMessage> msg = new AMessage(kWhatConnect, id());
+    sp<AMessage> msg = new AMessage(kWhatConnect, this);
     msg->setString("url", url);
 
     if (headers != NULL) {
@@ -362,7 +510,7 @@
 }
 
 status_t LiveSession::disconnect() {
-    sp<AMessage> msg = new AMessage(kWhatDisconnect, id());
+    sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
 
     sp<AMessage> response;
     status_t err = msg->postAndAwaitResponse(&response);
@@ -371,7 +519,7 @@
 }
 
 status_t LiveSession::seekTo(int64_t timeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("timeUs", timeUs);
 
     sp<AMessage> response;
@@ -380,6 +528,95 @@
     return err;
 }
 
+bool LiveSession::checkSwitchProgress(
+        sp<AMessage> &stopParams, int64_t delayUs, bool *needResumeUntil) {
+    AString newUri;
+    CHECK(stopParams->findString("uri", &newUri));
+
+    *needResumeUntil = false;
+    sp<AMessage> firstNewMeta[kMaxStreams];
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        StreamType stream = indexToType(i);
+        if (!(mSwapMask & mNewStreamMask & stream)
+            || (mStreams[i].mNewUri != newUri)) {
+            continue;
+        }
+        if (stream == STREAMTYPE_SUBTITLES) {
+            continue;
+        }
+        sp<AnotherPacketSource> &source = mPacketSources.editValueAt(i);
+
+        // First, get latest dequeued meta, which is where the decoder is at.
+        // (when upswitching, we take the meta after a certain delay, so that
+        // the decoder is left with some cushion)
+        sp<AMessage> lastDequeueMeta, lastEnqueueMeta;
+        if (delayUs > 0) {
+            lastDequeueMeta = source->getMetaAfterLastDequeued(delayUs);
+            if (lastDequeueMeta == NULL) {
+                // this means we don't have enough cushion, try again later
+                ALOGV("[%s] up switching failed due to insufficient buffer",
+                        getNameForStream(stream));
+                return false;
+            }
+        } else {
+            // It's okay for lastDequeueMeta to be NULL here, it means the
+            // decoder hasn't even started dequeueing
+            lastDequeueMeta = source->getLatestDequeuedMeta();
+        }
+        // Then, trim off packets at beginning of mPacketSources2 that's before
+        // the latest dequeued time. These samples are definitely too late.
+        firstNewMeta[i] = mPacketSources2.editValueAt(i)
+                            ->trimBuffersBeforeMeta(lastDequeueMeta);
+
+        // Now firstNewMeta[i] is the first sample after the trim.
+        // If it's NULL, we failed because dequeue already past all samples
+        // in mPacketSource2, we have to try again.
+        if (firstNewMeta[i] == NULL) {
+            HLSTime dequeueTime(lastDequeueMeta);
+            ALOGV("[%s] dequeue time (%d, %lld) past start time",
+                    getNameForStream(stream),
+                    dequeueTime.mSeq, (long long) dequeueTime.mTimeUs);
+            return false;
+        }
+
+        // Otherwise, we check if mPacketSources2 overlaps with what old fetcher
+        // already fetched, and see if we need to resumeUntil
+        lastEnqueueMeta = source->getLatestEnqueuedMeta();
+        // lastEnqueueMeta == NULL means old fetcher stopped at a discontinuity
+        // boundary, no need to resume as the content will look different anyways
+        if (lastEnqueueMeta != NULL) {
+            HLSTime lastTime(lastEnqueueMeta), startTime(firstNewMeta[i]);
+
+            // no need to resume old fetcher if new fetcher started in different
+            // discontinuity sequence, as the content will look different.
+            *needResumeUntil |= (startTime.mSeq == lastTime.mSeq
+                    && startTime.mTimeUs - lastTime.mTimeUs > kResumeThresholdUs);
+
+            // update the stopTime for resumeUntil
+            stopParams->setInt32("discontinuitySeq", startTime.mSeq);
+            stopParams->setInt64(getKeyForStream(stream), startTime.mTimeUs);
+        }
+    }
+
+    // If we're here, dequeue progress hasn't passed some samples in
+    // mPacketSources2, so we can trim off the excess in mPacketSources.
+    // (The old fetcher might still need to resumeUntil the start time of the new fetcher.)
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        StreamType stream = indexToType(i);
+        if (!(mSwapMask & mNewStreamMask & stream)
+            || (newUri != mStreams[i].mNewUri)
+            || stream == STREAMTYPE_SUBTITLES) {
+            continue;
+        }
+        mPacketSources.valueFor(stream)->trimBuffersAfterMeta(firstNewMeta[i]);
+    }
+
+    // no resumeUntil if already underflow
+    *needResumeUntil &= !mBuffering;
+
+    return true;
+}
+
 void LiveSession::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatConnect:
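
checkSwitchProgress() above decides whether the old fetcher still needs to resumeUntil the new fetcher's start position: only when both are in the same discontinuity sequence and the new fetcher starts more than kResumeThresholdUs after the old fetcher's last enqueued sample. A minimal sketch of just that predicate, with illustrative timestamps:

    #include <cstdint>
    #include <cstdio>

    static const int64_t kResumeThresholdUs = 100000ll;  // 100 ms, as defined above

    static bool needResumeUntil(
            int32_t lastEnqueuedSeq, int64_t lastEnqueuedTimeUs,
            int32_t newStartSeq, int64_t newStartTimeUs) {
        // Resume only within the same discontinuity sequence, and only when
        // there is a real gap between old and new fetcher positions.
        return newStartSeq == lastEnqueuedSeq
                && newStartTimeUs - lastEnqueuedTimeUs > kResumeThresholdUs;
    }

    int main() {
        printf("%d\n", needResumeUntil(3, 5000000, 3, 5250000));  // 1: 250 ms gap in same sequence
        printf("%d\n", needResumeUntil(3, 5000000, 4, 1000000));  // 0: new discontinuity sequence
        return 0;
    }
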
@@ -402,16 +639,15 @@
 
         case kWhatSeek:
         {
-            uint32_t seekReplyID;
-            CHECK(msg->senderAwaitsResponse(&seekReplyID));
-            mSeekReplyID = seekReplyID;
+            if (mReconfigurationInProgress) {
+                msg->post(50000);
+                break;
+            }
+
+            CHECK(msg->senderAwaitsResponse(&mSeekReplyID));
             mSeekReply = new AMessage;
 
-            status_t err = onSeek(msg);
-
-            if (err != OK) {
-                msg->post(50000);
-            }
+            onSeek(msg);
             break;
         }
 
@@ -426,16 +662,30 @@
                 case PlaylistFetcher::kWhatPaused:
                 case PlaylistFetcher::kWhatStopped:
                 {
-                    if (what == PlaylistFetcher::kWhatStopped) {
-                        AString uri;
-                        CHECK(msg->findString("uri", &uri));
-                        if (mFetcherInfos.removeItem(uri) < 0) {
-                            // ignore duplicated kWhatStopped messages.
-                            break;
-                        }
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+                    ssize_t index = mFetcherInfos.indexOfKey(uri);
+                    if (index < 0) {
+                        // ignore msgs from fetchers that's already gone
+                        break;
+                    }
 
-                        if (mSwitchInProgress) {
-                            tryToFinishBandwidthSwitch();
+                    ALOGV("fetcher-%d %s",
+                            mFetcherInfos[index].mFetcher->getFetcherID(),
+                            what == PlaylistFetcher::kWhatPaused ?
+                                    "paused" : "stopped");
+
+                    if (what == PlaylistFetcher::kWhatStopped) {
+                        mFetcherLooper->unregisterHandler(
+                                mFetcherInfos[index].mFetcher->id());
+                        mFetcherInfos.removeItemsAt(index);
+                    } else if (what == PlaylistFetcher::kWhatPaused) {
+                        int32_t seekMode;
+                        CHECK(msg->findInt32("seekMode", &seekMode));
+                        for (size_t i = 0; i < kMaxStreams; ++i) {
+                            if (mStreams[i].mUri == uri) {
+                                mStreams[i].mSeekMode = (SeekMode) seekMode;
+                            }
                         }
                     }
 
@@ -443,15 +693,8 @@
                         CHECK_GT(mContinuationCounter, 0);
                         if (--mContinuationCounter == 0) {
                             mContinuation->post();
-
-                            if (mSeekReplyID != 0) {
-                                CHECK(mSeekReply != NULL);
-                                mSeekReply->setInt32("err", OK);
-                                mSeekReply->postReply(mSeekReplyID);
-                                mSeekReplyID = 0;
-                                mSeekReply.clear();
-                            }
                         }
+                        ALOGV("%zu fetcher(s) left", mContinuationCounter);
                     }
                     break;
                 }
@@ -464,8 +707,21 @@
                     int64_t durationUs;
                     CHECK(msg->findInt64("durationUs", &durationUs));
 
-                    FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
-                    info->mDurationUs = durationUs;
+                    ssize_t index = mFetcherInfos.indexOfKey(uri);
+                    if (index >= 0) {
+                        FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
+                        info->mDurationUs = durationUs;
+                    }
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatTargetDurationUpdate:
+                {
+                    int64_t targetDurationUs;
+                    CHECK(msg->findInt64("targetDurationUs", &targetDurationUs));
+                    mUpSwitchMark = min(kUpSwitchMarkUs, targetDurationUs * 7 / 4);
+                    mDownSwitchMark = min(kDownSwitchMarkUs, targetDurationUs * 9 / 4);
+                    mUpSwitchMargin = min(kUpSwitchMarginUs, targetDurationUs);
                     break;
                 }
 
@@ -493,6 +749,20 @@
                         }
                     }
 
+                    // remember the failure index (as mCurBandwidthIndex will be restored
+                    // after cancelBandwidthSwitch()), and record last fail time
+                    size_t failureIndex = mCurBandwidthIndex;
+                    mBandwidthItems.editItemAt(
+                            failureIndex).mLastFailureUs = ALooper::GetNowUs();
+
+                    if (mSwitchInProgress) {
+                        // if error happened when we switch to a variant, try fallback
+                        // to other variant to save the session
+                        if (tryBandwidthFallback()) {
+                            break;
+                        }
+                    }
+
                     if (mInPreparationPhase) {
                         postPrepared(err);
                     }
@@ -506,38 +776,23 @@
                     mPacketSources.valueFor(
                             STREAMTYPE_SUBTITLES)->signalEOS(err);
 
-                    sp<AMessage> notify = mNotify->dup();
-                    notify->setInt32("what", kWhatError);
-                    notify->setInt32("err", err);
-                    notify->post();
+                    postError(err);
                     break;
                 }
 
-                case PlaylistFetcher::kWhatTemporarilyDoneFetching:
+                case PlaylistFetcher::kWhatStopReached:
                 {
-                    AString uri;
-                    CHECK(msg->findString("uri", &uri));
+                    ALOGV("kWhatStopReached");
 
-                    if (mFetcherInfos.indexOfKey(uri) < 0) {
-                        ALOGE("couldn't find uri");
+                    AString oldUri;
+                    CHECK(msg->findString("uri", &oldUri));
+
+                    ssize_t index = mFetcherInfos.indexOfKey(oldUri);
+                    if (index < 0) {
                         break;
                     }
-                    FetcherInfo *info = &mFetcherInfos.editValueFor(uri);
-                    info->mIsPrepared = true;
 
-                    if (mInPreparationPhase) {
-                        bool allFetchersPrepared = true;
-                        for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
-                            if (!mFetcherInfos.valueAt(i).mIsPrepared) {
-                                allFetchersPrepared = false;
-                                break;
-                            }
-                        }
-
-                        if (allFetchersPrepared) {
-                            postPrepared(OK);
-                        }
-                    }
+                    tryToFinishBandwidthSwitch(oldUri);
                     break;
                 }
 
@@ -546,19 +801,97 @@
                     int32_t switchGeneration;
                     CHECK(msg->findInt32("switchGeneration", &switchGeneration));
 
+                    ALOGV("kWhatStartedAt: switchGen=%d, mSwitchGen=%d",
+                            switchGeneration, mSwitchGeneration);
+
                     if (switchGeneration != mSwitchGeneration) {
                         break;
                     }
 
-                    // Resume fetcher for the original variant; the resumed fetcher should
-                    // continue until the timestamps found in msg, which is stored by the
-                    // new fetcher to indicate where the new variant has started buffering.
-                    for (size_t i = 0; i < mFetcherInfos.size(); i++) {
-                        const FetcherInfo info = mFetcherInfos.valueAt(i);
-                        if (info.mToBeRemoved) {
-                            info.mFetcher->resumeUntilAsync(msg);
+                    AString uri;
+                    CHECK(msg->findString("uri", &uri));
+
+                    // mark new fetcher mToBeResumed
+                    ssize_t index = mFetcherInfos.indexOfKey(uri);
+                    if (index >= 0) {
+                        mFetcherInfos.editValueAt(index).mToBeResumed = true;
+                    }
+
+                    // temporarily disable packet sources to be swapped to prevent
+                    // NuPlayerDecoder from dequeuing while we check progress
+                    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+                        if ((mSwapMask & mPacketSources.keyAt(i))
+                                && uri == mStreams[i].mNewUri) {
+                            mPacketSources.editValueAt(i)->enable(false);
                         }
                     }
+                    bool switchUp = (mCurBandwidthIndex > mOrigBandwidthIndex);
+                    // If switching up, require a cushion bigger than kUnderflowMark
+                    // to avoid buffering immediately after the switch.
+                    // (If we don't have that cushion we'd rather cancel and try again.)
+                    int64_t delayUs = switchUp ? (kUnderflowMarkUs + 1000000ll) : 0;
+                    bool needResumeUntil = false;
+                    sp<AMessage> stopParams = msg;
+                    if (checkSwitchProgress(stopParams, delayUs, &needResumeUntil)) {
+                        // playback time hasn't passed startAt time
+                        if (!needResumeUntil) {
+                            ALOGV("finish switch");
+                            for (size_t i = 0; i < kMaxStreams; ++i) {
+                                if ((mSwapMask & indexToType(i))
+                                        && uri == mStreams[i].mNewUri) {
+                                    // have to make a copy of mStreams[i].mUri because
+                                    // tryToFinishBandwidthSwitch is modifying mStreams[]
+                                    AString oldURI = mStreams[i].mUri;
+                                    tryToFinishBandwidthSwitch(oldURI);
+                                    break;
+                                }
+                            }
+                        } else {
+                            // startAt time is after last enqueue time
+                            // Resume fetcher for the original variant; the resumed fetcher should
+                            // continue until the timestamps found in msg, which is stored by the
+                            // new fetcher to indicate where the new variant has started buffering.
+                            ALOGV("finish switch with resumeUntilAsync");
+                            for (size_t i = 0; i < mFetcherInfos.size(); i++) {
+                                const FetcherInfo &info = mFetcherInfos.valueAt(i);
+                                if (info.mToBeRemoved) {
+                                    info.mFetcher->resumeUntilAsync(stopParams);
+                                }
+                            }
+                        }
+                    } else {
+                        // playback time passed startAt time
+                        if (switchUp) {
+                            // if switching up, cancel and retry if condition satisfies again
+                            ALOGV("cancel up switch because we're too late");
+                            cancelBandwidthSwitch(true /* resume */);
+                        } else {
+                            ALOGV("retry down switch at next sample");
+                            resumeFetcher(uri, mSwapMask, -1, true /* newUri */);
+                        }
+                    }
+                    // re-enable all packet sources
+                    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+                        mPacketSources.editValueAt(i)->enable(true);
+                    }
+
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatPlaylistFetched:
+                {
+                    onMasterPlaylistFetched(msg);
+                    break;
+                }
+
+                case PlaylistFetcher::kWhatMetadataDetected:
+                {
+                    if (!mHasMetadata) {
+                        mHasMetadata = true;
+                        sp<AMessage> notify = mNotify->dup();
+                        notify->setInt32("what", kWhatMetadataDetected);
+                        notify->post();
+                    }
                     break;
                 }
 
@@ -569,19 +902,6 @@
             break;
         }
 
-        case kWhatCheckBandwidth:
-        {
-            int32_t generation;
-            CHECK(msg->findInt32("generation", &generation));
-
-            if (generation != mCheckBandwidthGeneration) {
-                break;
-            }
-
-            onCheckBandwidth(msg);
-            break;
-        }
-
         case kWhatChangeConfiguration:
         {
             onChangeConfiguration(msg);
@@ -600,27 +920,13 @@
             break;
         }
 
-        case kWhatFinishDisconnect2:
+        case kWhatPollBuffering:
         {
-            onFinishDisconnect2();
-            break;
-        }
-
-        case kWhatSwapped:
-        {
-            onSwapped(msg);
-            break;
-        }
-
-        case kWhatCheckSwitchDown:
-        {
-            onCheckSwitchDown();
-            break;
-        }
-
-        case kWhatSwitchDown:
-        {
-            onSwitchDown();
+            int32_t generation;
+            CHECK(msg->findInt32("generation", &generation));
+            if (generation == mPollBufferingGeneration) {
+                onPollBuffering();
+            }
             break;
         }
 
@@ -631,6 +937,13 @@
 }
 
 // static
+bool LiveSession::isBandwidthValid(const BandwidthItem &item) {
+    static const int64_t kBlacklistWindowUs = 300 * 1000000ll;
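+    // e.g. a variant that failed 2 minutes ago is still skipped, while one
+    // that failed 6 minutes ago becomes eligible again (the window is 5 min).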
+    return item.mLastFailureUs < 0
+            || ALooper::GetNowUs() - item.mLastFailureUs > kBlacklistWindowUs;
+}
+
+// static
 int LiveSession::SortByBandwidth(const BandwidthItem *a, const BandwidthItem *b) {
     if (a->mBandwidth < b->mBandwidth) {
         return -1;
@@ -643,7 +956,7 @@
 
 // static
 LiveSession::StreamType LiveSession::indexToType(int idx) {
-    CHECK(idx >= 0 && idx < kMaxStreams);
+    CHECK(idx >= 0 && idx < kNumSources);
     return (StreamType)(1 << idx);
 }
 
@@ -656,6 +969,8 @@
             return 1;
         case STREAMTYPE_SUBTITLES:
             return 2;
+        case STREAMTYPE_METADATA:
+            return 3;
         default:
             return -1;
     };
@@ -663,8 +978,10 @@
 }
 
 void LiveSession::onConnect(const sp<AMessage> &msg) {
-    AString url;
-    CHECK(msg->findString("url", &url));
+    CHECK(msg->findString("url", &mMasterURL));
+
+    // TODO currently we don't know if we are coming here from incognito mode
+    ALOGI("onConnect %s", uriDebugString(mMasterURL).c_str());
 
     KeyedVector<String8, String8> *headers = NULL;
     if (!msg->findPointer("headers", (void **)&headers)) {
@@ -676,21 +993,39 @@
         headers = NULL;
     }
 
-    // TODO currently we don't know if we are coming here from incognito mode
-    ALOGI("onConnect %s", uriDebugString(url).c_str());
+    // create looper for fetchers
+    if (mFetcherLooper == NULL) {
+        mFetcherLooper = new ALooper();
 
-    mMasterURL = url;
+        mFetcherLooper->setName("Fetcher");
+        mFetcherLooper->start(false, false);
+    }
 
-    bool dummy;
-    mPlaylist = fetchPlaylist(url.c_str(), NULL /* curPlaylistHash */, &dummy);
+    // create fetcher to fetch the master playlist
+    addFetcher(mMasterURL.c_str())->fetchPlaylistAsync();
+}
 
+void LiveSession::onMasterPlaylistFetched(const sp<AMessage> &msg) {
+    AString uri;
+    CHECK(msg->findString("uri", &uri));
+    ssize_t index = mFetcherInfos.indexOfKey(uri);
+    if (index < 0) {
+        ALOGW("fetcher for master playlist is gone.");
+        return;
+    }
+
+    // no longer useful, remove
+    mFetcherLooper->unregisterHandler(mFetcherInfos[index].mFetcher->id());
+    mFetcherInfos.removeItemsAt(index);
+
+    CHECK(msg->findObject("playlist", (sp<RefBase> *)&mPlaylist));
     if (mPlaylist == NULL) {
-        ALOGE("unable to fetch master playlist %s.", uriDebugString(url).c_str());
+        ALOGE("unable to fetch master playlist %s.",
+                uriDebugString(mMasterURL).c_str());
 
         postPrepared(ERROR_IO);
         return;
     }
-
     // We trust the content provider to make a reasonable choice of preferred
     // initial bandwidth by listing it first in the variant playlist.
     // At startup we really don't have a good estimate on the available
@@ -699,11 +1034,16 @@
     size_t initialBandwidth = 0;
     size_t initialBandwidthIndex = 0;
 
+    int32_t maxWidth = 0;
+    int32_t maxHeight = 0;
+
     if (mPlaylist->isVariantPlaylist()) {
+        Vector<BandwidthItem> itemsWithVideo;
         for (size_t i = 0; i < mPlaylist->size(); ++i) {
             BandwidthItem item;
 
             item.mPlaylistIndex = i;
+            item.mLastFailureUs = -1ll;
 
             sp<AMessage> meta;
             AString uri;
@@ -711,14 +1051,30 @@
 
             CHECK(meta->findInt32("bandwidth", (int32_t *)&item.mBandwidth));
 
-            if (initialBandwidth == 0) {
-                initialBandwidth = item.mBandwidth;
+            int32_t width, height;
+            if (meta->findInt32("width", &width)) {
+                maxWidth = max(maxWidth, width);
+            }
+            if (meta->findInt32("height", &height)) {
+                maxHeight = max(maxHeight, height);
             }
 
             mBandwidthItems.push(item);
+            if (mPlaylist->hasType(i, "video")) {
+                itemsWithVideo.push(item);
+            }
+        }
+        // remove the audio-only variants if we have at least one with video
+        if (!itemsWithVideo.empty()
+                && itemsWithVideo.size() < mBandwidthItems.size()) {
+            mBandwidthItems.clear();
+            for (size_t i = 0; i < itemsWithVideo.size(); ++i) {
+                mBandwidthItems.push(itemsWithVideo[i]);
+            }
         }
 
         CHECK_GT(mBandwidthItems.size(), 0u);
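+        // start from the bandwidth of the first-listed variant, i.e. the
+        // content provider's preferred default, before sorting by bandwidth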
+        initialBandwidth = mBandwidthItems[0].mBandwidth;
 
         mBandwidthItems.sort(SortByBandwidth);
 
@@ -736,39 +1092,44 @@
         mBandwidthItems.push(item);
     }
 
+    mMaxWidth = maxWidth > 0 ? maxWidth : mMaxWidth;
+    mMaxHeight = maxHeight > 0 ? maxHeight : mMaxHeight;
+
     mPlaylist->pickRandomMediaItems();
     changeConfiguration(
             0ll /* timeUs */, initialBandwidthIndex, false /* pickTrack */);
 }
 
 void LiveSession::finishDisconnect() {
+    ALOGV("finishDisconnect");
+
     // No reconfiguration is currently pending, make sure none will trigger
     // during disconnection either.
-    cancelCheckBandwidthEvent();
-
-    // Protect mPacketSources from a swapPacketSource race condition through disconnect.
-    // (finishDisconnect, onFinishDisconnect2)
     cancelBandwidthSwitch();
 
-    // cancel switch down monitor
-    mSwitchDownMonitor.clear();
+    // cancel buffer polling
+    cancelPollBuffering();
 
+    // TRICKY: don't wait for all fetchers to be stopped when disconnecting
+    //
+    // Some fetchers might be stuck in connect/getSize at this point. These
+    // operations will eventually time out (as we have a timeout set in
+    // MediaHTTPConnection), but we don't want to block the main UI thread
+    // until then. Here we just need to make sure we clear all references
+    // to the fetchers, so that when they finally exit from the blocking
+    // operation, they can be destructed.
+    //
+    // There is one very tricky point though. For this scheme to work, the
+    // fetcher must hold a reference to LiveSession, so that LiveSession is
+    // destroyed after the fetchers. Otherwise LiveSession would get stuck in
+    // its own destructor when it waits for mFetcherLooper to stop, which
+    // still blocks the main UI thread.
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+        mFetcherLooper->unregisterHandler(
+                mFetcherInfos.valueAt(i).mFetcher->id());
     }
-
-    sp<AMessage> msg = new AMessage(kWhatFinishDisconnect2, id());
-
-    mContinuationCounter = mFetcherInfos.size();
-    mContinuation = msg;
-
-    if (mContinuationCounter == 0) {
-        msg->post();
-    }
-}
-
-void LiveSession::onFinishDisconnect2() {
-    mContinuation.clear();
+    mFetcherInfos.clear();
 
     mPacketSources.valueFor(STREAMTYPE_AUDIO)->signalEOS(ERROR_END_OF_STREAM);
     mPacketSources.valueFor(STREAMTYPE_VIDEO)->signalEOS(ERROR_END_OF_STREAM);
@@ -780,7 +1141,7 @@
     response->setInt32("err", OK);
 
     response->postReply(mDisconnectReplyID);
-    mDisconnectReplyID = 0;
+    mDisconnectReplyID.clear();
 }
 
 sp<PlaylistFetcher> LiveSession::addFetcher(const char *uri) {
@@ -790,213 +1151,185 @@
         return NULL;
     }
 
-    sp<AMessage> notify = new AMessage(kWhatFetcherNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatFetcherNotify, this);
     notify->setString("uri", uri);
     notify->setInt32("switchGeneration", mSwitchGeneration);
 
     FetcherInfo info;
-    info.mFetcher = new PlaylistFetcher(notify, this, uri, mSubtitleGeneration);
+    info.mFetcher = new PlaylistFetcher(
+            notify, this, uri, mCurBandwidthIndex, mSubtitleGeneration);
     info.mDurationUs = -1ll;
-    info.mIsPrepared = false;
     info.mToBeRemoved = false;
-    looper()->registerHandler(info.mFetcher);
+    info.mToBeResumed = false;
+    mFetcherLooper->registerHandler(info.mFetcher);
 
     mFetcherInfos.add(uri, info);
 
     return info.mFetcher;
 }
 
-/*
- * Illustration of parameters:
- *
- * 0      `range_offset`
- * +------------+-------------------------------------------------------+--+--+
- * |            |                                 | next block to fetch |  |  |
- * |            | `source` handle => `out` buffer |                     |  |  |
- * | `url` file |<--------- buffer size --------->|<--- `block_size` -->|  |  |
- * |            |<----------- `range_length` / buffer capacity ----------->|  |
- * |<------------------------------ file_size ------------------------------->|
- *
- * Special parameter values:
- * - range_length == -1 means entire file
- * - block_size == 0 means entire range
- *
- */
-ssize_t LiveSession::fetchFile(
-        const char *url, sp<ABuffer> *out,
-        int64_t range_offset, int64_t range_length,
-        uint32_t block_size, /* download block size */
-        sp<DataSource> *source, /* to return and reuse source */
-        String8 *actualUrl) {
-    off64_t size;
-    sp<DataSource> temp_source;
-    if (source == NULL) {
-        source = &temp_source;
-    }
-
-    if (*source == NULL) {
-        if (!strncasecmp(url, "file://", 7)) {
-            *source = new FileSource(url + 7);
-        } else if (strncasecmp(url, "http://", 7)
-                && strncasecmp(url, "https://", 8)) {
-            return ERROR_UNSUPPORTED;
-        } else {
-            KeyedVector<String8, String8> headers = mExtraHeaders;
-            if (range_offset > 0 || range_length >= 0) {
-                headers.add(
-                        String8("Range"),
-                        String8(
-                            AStringPrintf(
-                                "bytes=%lld-%s",
-                                range_offset,
-                                range_length < 0
-                                    ? "" : AStringPrintf("%lld",
-                                            range_offset + range_length - 1).c_str()).c_str()));
-            }
-            status_t err = mHTTPDataSource->connect(url, &headers);
-
-            if (err != OK) {
-                return err;
-            }
-
-            *source = mHTTPDataSource;
-        }
-    }
-
-    status_t getSizeErr = (*source)->getSize(&size);
-    if (getSizeErr != OK) {
-        size = 65536;
-    }
-
-    sp<ABuffer> buffer = *out != NULL ? *out : new ABuffer(size);
-    if (*out == NULL) {
-        buffer->setRange(0, 0);
-    }
-
-    ssize_t bytesRead = 0;
-    // adjust range_length if only reading partial block
-    if (block_size > 0 && (range_length == -1 || (int64_t)(buffer->size() + block_size) < range_length)) {
-        range_length = buffer->size() + block_size;
-    }
-    for (;;) {
-        // Only resize when we don't know the size.
-        size_t bufferRemaining = buffer->capacity() - buffer->size();
-        if (bufferRemaining == 0 && getSizeErr != OK) {
-            size_t bufferIncrement = buffer->size() / 2;
-            if (bufferIncrement < 32768) {
-                bufferIncrement = 32768;
-            }
-            bufferRemaining = bufferIncrement;
-
-            ALOGV("increasing download buffer to %zu bytes",
-                 buffer->size() + bufferRemaining);
-
-            sp<ABuffer> copy = new ABuffer(buffer->size() + bufferRemaining);
-            memcpy(copy->data(), buffer->data(), buffer->size());
-            copy->setRange(0, buffer->size());
-
-            buffer = copy;
-        }
-
-        size_t maxBytesToRead = bufferRemaining;
-        if (range_length >= 0) {
-            int64_t bytesLeftInRange = range_length - buffer->size();
-            if (bytesLeftInRange < (int64_t)maxBytesToRead) {
-                maxBytesToRead = bytesLeftInRange;
-
-                if (bytesLeftInRange == 0) {
-                    break;
-                }
-            }
-        }
-
-        // The DataSource is responsible for informing us of error (n < 0) or eof (n == 0)
-        // to help us break out of the loop.
-        ssize_t n = (*source)->readAt(
-                buffer->size(), buffer->data() + buffer->size(),
-                maxBytesToRead);
-
-        if (n < 0) {
-            return n;
-        }
-
-        if (n == 0) {
-            break;
-        }
-
-        buffer->setRange(0, buffer->size() + (size_t)n);
-        bytesRead += n;
-    }
-
-    *out = buffer;
-    if (actualUrl != NULL) {
-        *actualUrl = (*source)->getUri();
-        if (actualUrl->isEmpty()) {
-            *actualUrl = url;
-        }
-    }
-
-    return bytesRead;
-}
-
-sp<M3UParser> LiveSession::fetchPlaylist(
-        const char *url, uint8_t *curPlaylistHash, bool *unchanged) {
-    ALOGV("fetchPlaylist '%s'", url);
-
-    *unchanged = false;
-
-    sp<ABuffer> buffer;
-    String8 actualUrl;
-    ssize_t  err = fetchFile(url, &buffer, 0, -1, 0, NULL, &actualUrl);
-
-    if (err <= 0) {
-        return NULL;
-    }
-
-    // MD5 functionality is not available on the simulator, treat all
-    // playlists as changed.
-
-#if defined(HAVE_ANDROID_OS)
-    uint8_t hash[16];
-
-    MD5_CTX m;
-    MD5_Init(&m);
-    MD5_Update(&m, buffer->data(), buffer->size());
-
-    MD5_Final(hash, &m);
-
-    if (curPlaylistHash != NULL && !memcmp(hash, curPlaylistHash, 16)) {
-        // playlist unchanged
-        *unchanged = true;
-
-        return NULL;
-    }
-
-    if (curPlaylistHash != NULL) {
-        memcpy(curPlaylistHash, hash, sizeof(hash));
-    }
-#endif
-
-    sp<M3UParser> playlist =
-        new M3UParser(actualUrl.string(), buffer->data(), buffer->size());
-
-    if (playlist->initCheck() != OK) {
-        ALOGE("failed to parse .m3u8 playlist");
-
-        return NULL;
-    }
-
-    return playlist;
-}
-
 #if 0
 static double uniformRand() {
     return (double)rand() / RAND_MAX;
 }
 #endif
 
-size_t LiveSession::getBandwidthIndex() {
-    if (mBandwidthItems.size() == 0) {
+bool LiveSession::UriIsSameAsIndex(const AString &uri, int32_t i, bool newUri) {
+    ALOGI("[timed_id3] i %d UriIsSameAsIndex newUri %s, %s", i,
+            newUri ? "true" : "false",
+            newUri ? mStreams[i].mNewUri.c_str() : mStreams[i].mUri.c_str());
+    return i >= 0
+            && ((!newUri && uri == mStreams[i].mUri)
+            || (newUri && uri == mStreams[i].mNewUri));
+}
+
+sp<AnotherPacketSource> LiveSession::getPacketSourceForStreamIndex(
+        size_t trackIndex, bool newUri) {
+    StreamType type = indexToType(trackIndex);
+    sp<AnotherPacketSource> source = NULL;
+    if (newUri) {
+        source = mPacketSources2.valueFor(type);
+        source->clear();
+    } else {
+        source = mPacketSources.valueFor(type);
+    }
+    return source;
+}
+
+sp<AnotherPacketSource> LiveSession::getMetadataSource(
+        sp<AnotherPacketSource> sources[kNumSources], uint32_t streamMask, bool newUri) {
+    // todo: One case where the following strategy can fail is when audio and video
+    // are in separate playlists, both are transport streams, and the metadata
+    // is actually contained in the audio stream.
+    ALOGV("[timed_id3] getMetadataSourceForUri streamMask %x newUri %s",
+            streamMask, newUri ? "true" : "false");
+
+    if ((sources[kVideoIndex] != NULL) // video fetcher; or ...
+            || (!(streamMask & STREAMTYPE_VIDEO) && sources[kAudioIndex] != NULL)) {
+            // ... audio fetcher for audio only variant
+        return getPacketSourceForStreamIndex(kMetaDataIndex, newUri);
+    }
+
+    return NULL;
+}
+
+bool LiveSession::resumeFetcher(
+        const AString &uri, uint32_t streamMask, int64_t timeUs, bool newUri) {
+    ssize_t index = mFetcherInfos.indexOfKey(uri);
+    if (index < 0) {
+        ALOGE("did not find fetcher for uri: %s", uri.c_str());
+        return false;
+    }
+
+    bool resume = false;
+    sp<AnotherPacketSource> sources[kNumSources];
+    for (size_t i = 0; i < kMaxStreams; ++i) {
+        if ((streamMask & indexToType(i)) && UriIsSameAsIndex(uri, i, newUri)) {
+            resume = true;
+            sources[i] = getPacketSourceForStreamIndex(i, newUri);
+        }
+    }
+
+    if (resume) {
+        sp<PlaylistFetcher> &fetcher = mFetcherInfos.editValueAt(index).mFetcher;
+        SeekMode seekMode = newUri ? kSeekModeNextSample : kSeekModeExactPosition;
+
+        ALOGV("resuming fetcher-%d, timeUs=%lld, seekMode=%d",
+                fetcher->getFetcherID(), (long long)timeUs, seekMode);
+
+        fetcher->startAsync(
+                sources[kAudioIndex],
+                sources[kVideoIndex],
+                sources[kSubtitleIndex],
+                getMetadataSource(sources, streamMask, newUri),
+                timeUs, -1, -1, seekMode);
+    }
+
+    return resume;
+}
+
+float LiveSession::getAbortThreshold(
+        ssize_t currentBWIndex, ssize_t targetBWIndex) const {
+    float abortThreshold = -1.0f;
+    if (currentBWIndex > 0 && targetBWIndex < currentBWIndex) {
+        /*
+           If we're switching down, we need to decide whether to
+
+           1) finish last segment of high-bandwidth variant, or
+           2) abort last segment of high-bandwidth variant, and fetch an
+              overlapping portion from low-bandwidth variant.
+
+           Here we try to maximize the amount of buffer left when the
+           switch point is met. Given the following parameters:
+
+           B: our current buffering level in seconds
+           T: target duration in seconds
+           X: sample duration in seconds remaining to fetch in the last segment
+           bw0: bandwidth of old variant (as specified in playlist)
+           bw1: bandwidth of new variant (as specified in playlist)
+           bw: measured bandwidth available
+
+           If we choose 1), when switch happens at the end of current
+           segment, our buffering will be
+                  B + X - X * bw0 / bw
+
+           If we choose 2), when switch happens where we aborted current
+           segment, our buffering will be
+                  B - (T - X) * bw1 / bw
+
+           We should only choose 1) if
+                  X/T < bw1 / (bw1 + bw0 - bw)
+        */
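+        // A rough worked example (hypothetical numbers, not taken from any
+        // playlist): with T = 10s, bw0 = 2 Mbps, bw1 = 1 Mbps and measured
+        // bw = 1.5 Mbps, the threshold is bw1 / (bw1 + bw0 - bw)
+        // = 1 / 1.5 ~= 0.67, so we only finish the current segment when
+        // less than ~6.7 seconds of it remain to be fetched.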
+
+        // abort old bandwidth immediately if bandwidth is fluctuating a lot.
+        // our estimate could be far off, and fetching old bandwidth could
+        // take too long.
+        if (!mLastBandwidthStable) {
+            return 0.0f;
+        }
+
+        // Take the measured current bandwidth at only 50% face value,
+        // as our bandwidth estimate is a lagging indicator. Being
+        // conservative here, we prefer switching to the lower bandwidth
+        // unless we're really confident that finishing up the last segment
+        // of the higher-bandwidth variant will be fast.
+        CHECK(mLastBandwidthBps >= 0);
+        abortThreshold =
+                (float)mBandwidthItems.itemAt(targetBWIndex).mBandwidth
+             / ((float)mBandwidthItems.itemAt(targetBWIndex).mBandwidth
+              + (float)mBandwidthItems.itemAt(currentBWIndex).mBandwidth
+              - (float)mLastBandwidthBps * 0.5f);
+        if (abortThreshold < 0.0f) {
+            abortThreshold = -1.0f; // do not abort
+        }
+        ALOGV("Switching Down: bps %ld => %ld, measured %d, abort ratio %.2f",
+                mBandwidthItems.itemAt(currentBWIndex).mBandwidth,
+                mBandwidthItems.itemAt(targetBWIndex).mBandwidth,
+                mLastBandwidthBps,
+                abortThreshold);
+    }
+    return abortThreshold;
+}
+
+void LiveSession::addBandwidthMeasurement(size_t numBytes, int64_t delayUs) {
+    mBandwidthEstimator->addBandwidthMeasurement(numBytes, delayUs);
+}
+
+ssize_t LiveSession::getLowestValidBandwidthIndex() const {
+    for (size_t index = 0; index < mBandwidthItems.size(); index++) {
+        if (isBandwidthValid(mBandwidthItems[index])) {
+            return index;
+        }
+    }
+    // if playlists are all blacklisted, return 0 and hope it's alive
+    return 0;
+}
+
+size_t LiveSession::getBandwidthIndex(int32_t bandwidthBps) {
+    if (mBandwidthItems.size() < 2) {
+        // we shouldn't be here if we only have one bandwidth; check the
+        // calling logic to get rid of redundant bandwidth polling
+        ALOGW("getBandwidthIndex() called for single bandwidth playlist!");
         return 0;
     }
 
@@ -1014,15 +1347,6 @@
     }
 
     if (index < 0) {
-        int32_t bandwidthBps;
-        if (mHTTPDataSource != NULL
-                && mHTTPDataSource->estimateBandwidth(&bandwidthBps)) {
-            ALOGV("bandwidth estimated at %.2f kbps", bandwidthBps / 1024.0f);
-        } else {
-            ALOGV("no bandwidth estimate.");
-            return 0;  // Pick the lowest bandwidth stream by default.
-        }
-
         char value[PROPERTY_VALUE_MAX];
         if (property_get("media.httplive.max-bw", value, NULL)) {
             char *end;
@@ -1035,20 +1359,18 @@
             }
         }
 
-        // Pick the highest bandwidth stream below or equal to estimated bandwidth.
+        // Pick the highest bandwidth stream that's not currently blacklisted
+        // below or equal to estimated bandwidth.
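+        // (Hypothetical example: with a 2000 kbps estimate the adjusted
+        // budget below is 2000 * 7 / 10 = 1400 kbps, so a 1200 kbps variant
+        // qualifies while an 1800 kbps one does not.)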
 
         index = mBandwidthItems.size() - 1;
-        while (index > 0) {
-            // consider only 80% of the available bandwidth, but if we are switching up,
-            // be even more conservative (70%) to avoid overestimating and immediately
-            // switching back.
-            size_t adjustedBandwidthBps = bandwidthBps;
-            if (index > mCurBandwidthIndex) {
-                adjustedBandwidthBps = adjustedBandwidthBps * 7 / 10;
-            } else {
-                adjustedBandwidthBps = adjustedBandwidthBps * 8 / 10;
-            }
-            if (mBandwidthItems.itemAt(index).mBandwidth <= adjustedBandwidthBps) {
+        ssize_t lowestBandwidth = getLowestValidBandwidthIndex();
+        while (index > lowestBandwidth) {
+            // be conservative (70%) to avoid overestimating and immediately
+            // switching down again.
+            size_t adjustedBandwidthBps = bandwidthBps * 7 / 10;
+            const BandwidthItem &item = mBandwidthItems[index];
+            if (item.mBandwidth <= adjustedBandwidthBps
+                    && isBandwidthValid(item)) {
                 break;
             }
             --index;
@@ -1107,34 +1429,20 @@
     return index;
 }
 
-int64_t LiveSession::latestMediaSegmentStartTimeUs() {
-    sp<AMessage> audioMeta = mPacketSources.valueFor(STREAMTYPE_AUDIO)->getLatestDequeuedMeta();
-    int64_t minSegmentStartTimeUs = -1, videoSegmentStartTimeUs = -1;
-    if (audioMeta != NULL) {
-        audioMeta->findInt64("segmentStartTimeUs", &minSegmentStartTimeUs);
-    }
+HLSTime LiveSession::latestMediaSegmentStartTime() const {
+    HLSTime audioTime(mPacketSources.valueFor(
+                    STREAMTYPE_AUDIO)->getLatestDequeuedMeta());
 
-    sp<AMessage> videoMeta = mPacketSources.valueFor(STREAMTYPE_VIDEO)->getLatestDequeuedMeta();
-    if (videoMeta != NULL
-            && videoMeta->findInt64("segmentStartTimeUs", &videoSegmentStartTimeUs)) {
-        if (minSegmentStartTimeUs < 0 || videoSegmentStartTimeUs < minSegmentStartTimeUs) {
-            minSegmentStartTimeUs = videoSegmentStartTimeUs;
-        }
+    HLSTime videoTime(mPacketSources.valueFor(
+                    STREAMTYPE_VIDEO)->getLatestDequeuedMeta());
 
-    }
-    return minSegmentStartTimeUs;
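+    // "latest" here means the later (max) of the two streams' last-dequeued
+    // segment start times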
+    return audioTime < videoTime ? videoTime : audioTime;
 }
 
-status_t LiveSession::onSeek(const sp<AMessage> &msg) {
+void LiveSession::onSeek(const sp<AMessage> &msg) {
     int64_t timeUs;
     CHECK(msg->findInt64("timeUs", &timeUs));
-
-    if (!mReconfigurationInProgress) {
-        changeConfiguration(timeUs, mCurBandwidthIndex);
-        return OK;
-    } else {
-        return -EWOULDBLOCK;
-    }
+    changeConfiguration(timeUs);
 }
 
 status_t LiveSession::getDuration(int64_t *durationUs) const {
@@ -1165,7 +1473,7 @@
     if (mPlaylist == NULL) {
         return 0;
     } else {
-        return mPlaylist->getTrackCount();
+        return mPlaylist->getTrackCount() + (mHasMetadata ? 1 : 0);
     }
 }
 
@@ -1173,6 +1481,13 @@
     if (mPlaylist == NULL) {
         return NULL;
     } else {
+        if (trackIndex == mPlaylist->getTrackCount() && mHasMetadata) {
+            sp<AMessage> format = new AMessage();
+            format->setInt32("type", MEDIA_TRACK_TYPE_METADATA);
+            format->setString("language", "und");
+            format->setString("mime", MEDIA_MIMETYPE_DATA_TIMED_ID3);
+            return format;
+        }
         return mPlaylist->getTrackInfo(trackIndex);
     }
 }
@@ -1182,11 +1497,13 @@
         return INVALID_OPERATION;
     }
 
+    ALOGV("selectTrack: index=%zu, select=%d, mSubtitleGen=%d++",
+            index, select, mSubtitleGeneration);
+
     ++mSubtitleGeneration;
     status_t err = mPlaylist->selectTrack(index, select);
     if (err == OK) {
-        sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, id());
-        msg->setInt32("bandwidthIndex", mCurBandwidthIndex);
+        sp<AMessage> msg = new AMessage(kWhatChangeConfiguration, this);
         msg->setInt32("pickTrack", select);
         msg->post();
     }
@@ -1201,35 +1518,25 @@
     }
 }
 
-bool LiveSession::canSwitchUp() {
-    // Allow upwards bandwidth switch when a stream has buffered at least 10 seconds.
-    status_t err = OK;
-    for (size_t i = 0; i < mPacketSources.size(); ++i) {
-        sp<AnotherPacketSource> source = mPacketSources.valueAt(i);
-        int64_t dur = source->getBufferedDurationUs(&err);
-        if (err == OK && dur > 10000000) {
-            return true;
-        }
-    }
-    return false;
-}
-
 void LiveSession::changeConfiguration(
-        int64_t timeUs, size_t bandwidthIndex, bool pickTrack) {
-    // Protect mPacketSources from a swapPacketSource race condition through reconfiguration.
-    // (changeConfiguration, onChangeConfiguration2, onChangeConfiguration3).
+        int64_t timeUs, ssize_t bandwidthIndex, bool pickTrack) {
+    ALOGV("changeConfiguration: timeUs=%lld us, bwIndex=%zd, pickTrack=%d",
+          (long long)timeUs, bandwidthIndex, pickTrack);
+
     cancelBandwidthSwitch();
 
     CHECK(!mReconfigurationInProgress);
     mReconfigurationInProgress = true;
-
-    mCurBandwidthIndex = bandwidthIndex;
-
-    ALOGV("changeConfiguration => timeUs:%" PRId64 " us, bwIndex:%zu, pickTrack:%d",
-          timeUs, bandwidthIndex, pickTrack);
-
-    CHECK_LT(bandwidthIndex, mBandwidthItems.size());
-    const BandwidthItem &item = mBandwidthItems.itemAt(bandwidthIndex);
+    if (bandwidthIndex >= 0) {
+        mOrigBandwidthIndex = mCurBandwidthIndex;
+        mCurBandwidthIndex = bandwidthIndex;
+        if (mOrigBandwidthIndex != mCurBandwidthIndex) {
+            ALOGI("#### Starting Bandwidth Switch: %zd => %zd",
+                    mOrigBandwidthIndex, mCurBandwidthIndex);
+        }
+    }
+    CHECK_LT(mCurBandwidthIndex, mBandwidthItems.size());
+    const BandwidthItem &item = mBandwidthItems.itemAt(mCurBandwidthIndex);
 
     uint32_t streamMask = 0; // streams that should be fetched by the new fetcher
     uint32_t resumeMask = 0; // streams that should be fetched by the original fetcher
@@ -1244,38 +1551,60 @@
     // Step 1, stop and discard fetchers that are no longer needed.
     // Pause those that we'll reuse.
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        // skip fetchers that are marked mToBeRemoved,
+        // these are done and can't be reused
+        if (mFetcherInfos[i].mToBeRemoved) {
+            continue;
+        }
+
         const AString &uri = mFetcherInfos.keyAt(i);
+        sp<PlaylistFetcher> &fetcher = mFetcherInfos.editValueAt(i).mFetcher;
 
-        bool discardFetcher = true;
-
-        // If we're seeking all current fetchers are discarded.
-        if (timeUs < 0ll) {
-            // delay fetcher removal if not picking tracks
-            discardFetcher = pickTrack;
-
-            for (size_t j = 0; j < kMaxStreams; ++j) {
-                StreamType type = indexToType(j);
-                if ((streamMask & type) && uri == URIs[j]) {
-                    resumeMask |= type;
-                    streamMask &= ~type;
-                    discardFetcher = false;
-                }
+        bool discardFetcher = true, delayRemoval = false;
+        for (size_t j = 0; j < kMaxStreams; ++j) {
+            StreamType type = indexToType(j);
+            if ((streamMask & type) && uri == URIs[j]) {
+                resumeMask |= type;
+                streamMask &= ~type;
+                discardFetcher = false;
             }
         }
+        // Delay fetcher removal if not picking tracks, AND the old fetcher
+        // has a stream mask that overlaps the new variant. (Okay to discard
+        // the old fetcher now if there is no overlap at all.)
+        if (discardFetcher && timeUs < 0ll && !pickTrack
+                && (fetcher->getStreamTypeMask() & streamMask)) {
+            discardFetcher = false;
+            delayRemoval = true;
+        }
 
         if (discardFetcher) {
-            mFetcherInfos.valueAt(i).mFetcher->stopAsync();
+            ALOGV("discarding fetcher-%d", fetcher->getFetcherID());
+            fetcher->stopAsync();
         } else {
-            mFetcherInfos.valueAt(i).mFetcher->pauseAsync();
+            float threshold = 0.0f; // default to pause after current block (47Kbytes)
+            bool disconnect = false;
+            if (timeUs >= 0ll) {
+                // seeking, no need to finish fetching
+                disconnect = true;
+            } else if (delayRemoval) {
+                // adapting, abort if remaining of current segment is over threshold
+                threshold = getAbortThreshold(
+                        mOrigBandwidthIndex, mCurBandwidthIndex);
+            }
+
+            ALOGV("pausing fetcher-%d, threshold=%.2f",
+                    fetcher->getFetcherID(), threshold);
+            fetcher->pauseAsync(threshold, disconnect);
         }
     }
 
     sp<AMessage> msg;
     if (timeUs < 0ll) {
         // skip onChangeConfiguration2 (decoder destruction) if not seeking.
-        msg = new AMessage(kWhatChangeConfiguration3, id());
+        msg = new AMessage(kWhatChangeConfiguration3, this);
     } else {
-        msg = new AMessage(kWhatChangeConfiguration2, id());
+        msg = new AMessage(kWhatChangeConfiguration2, this);
     }
     msg->setInt32("streamMask", streamMask);
     msg->setInt32("resumeMask", resumeMask);
@@ -1296,40 +1625,74 @@
 
     if (mContinuationCounter == 0) {
         msg->post();
-
-        if (mSeekReplyID != 0) {
-            CHECK(mSeekReply != NULL);
-            mSeekReply->setInt32("err", OK);
-            mSeekReply->postReply(mSeekReplyID);
-            mSeekReplyID = 0;
-            mSeekReply.clear();
-        }
     }
 }
 
 void LiveSession::onChangeConfiguration(const sp<AMessage> &msg) {
+    ALOGV("onChangeConfiguration");
+
     if (!mReconfigurationInProgress) {
-        int32_t pickTrack = 0, bandwidthIndex = mCurBandwidthIndex;
+        int32_t pickTrack = 0;
         msg->findInt32("pickTrack", &pickTrack);
-        msg->findInt32("bandwidthIndex", &bandwidthIndex);
-        changeConfiguration(-1ll /* timeUs */, bandwidthIndex, pickTrack);
+        changeConfiguration(-1ll /* timeUs */, -1, pickTrack);
     } else {
         msg->post(1000000ll); // retry in 1 sec
     }
 }
 
 void LiveSession::onChangeConfiguration2(const sp<AMessage> &msg) {
+    ALOGV("onChangeConfiguration2");
+
     mContinuation.clear();
 
     // All fetchers are either suspended or have been removed now.
 
+    // If we're seeking, clear all packet sources before we report
+    // seek complete, to prevent decoder from pulling stale data.
+    int64_t timeUs;
+    CHECK(msg->findInt64("timeUs", &timeUs));
+
+    if (timeUs >= 0) {
+        mLastSeekTimeUs = timeUs;
+        mLastDequeuedTimeUs = timeUs;
+
+        for (size_t i = 0; i < mPacketSources.size(); i++) {
+            sp<AnotherPacketSource> packetSource = mPacketSources.editValueAt(i);
+            sp<MetaData> format = packetSource->getFormat();
+            packetSource->clear();
+            // Set a tentative format here such that HTTPLiveSource will always have
+            // a format available when NuPlayer queries. Without an available video
+            // format when setting a surface NuPlayer might disable video decoding
+            // altogether. The tentative format will be overwritten by the
+            // authoritative (and possibly same) format once content from the new
+            // position is dequeued.
+            packetSource->setFormat(format);
+        }
+
+        for (size_t i = 0; i < kMaxStreams; ++i) {
+            mStreams[i].reset();
+        }
+
+        mDiscontinuityOffsetTimesUs.clear();
+        mDiscontinuityAbsStartTimesUs.clear();
+
+        if (mSeekReplyID != NULL) {
+            CHECK(mSeekReply != NULL);
+            mSeekReply->setInt32("err", OK);
+            mSeekReply->postReply(mSeekReplyID);
+            mSeekReplyID.clear();
+            mSeekReply.clear();
+        }
+
+        // restart buffer polling after a seek because the previous
+        // buffering position is no longer valid.
+        restartPollBuffering();
+    }
+
     uint32_t streamMask, resumeMask;
     CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
     CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
 
-    // currently onChangeConfiguration2 is only called for seeking;
-    // remove the following CHECK if using it else where.
-    CHECK_EQ(resumeMask, 0);
     streamMask |= resumeMask;
 
     AString URIs[kMaxStreams];
@@ -1341,17 +1704,27 @@
         }
     }
 
-    // Determine which decoders to shutdown on the player side,
-    // a decoder has to be shutdown if either
-    // 1) its streamtype was active before but now longer isn't.
-    // or
-    // 2) its streamtype was already active and still is but the URI
-    //    has changed.
     uint32_t changedMask = 0;
     for (size_t i = 0; i < kMaxStreams && i != kSubtitleIndex; ++i) {
-        if (((mStreamMask & streamMask & indexToType(i))
-                && !(URIs[i] == mStreams[i].mUri))
-                || (mStreamMask & ~streamMask & indexToType(i))) {
+        // The stream URI could change even though onChangeConfiguration2 is
+        // only used for seeking. A seek could happen during a bw switch; in
+        // that case the switch is cancelled, but the seek position will be
+        // fetched from the new URI.
+        if ((mStreamMask & streamMask & indexToType(i))
+                && !mStreams[i].mUri.empty()
+                && !(URIs[i] == mStreams[i].mUri)) {
+            ALOGV("stream %zu changed: oldURI %s, newURI %s", i,
+                    mStreams[i].mUri.c_str(), URIs[i].c_str());
+            sp<AnotherPacketSource> source = mPacketSources.valueFor(indexToType(i));
+            if (source->getLatestDequeuedMeta() != NULL) {
+                source->queueDiscontinuity(
+                        ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+            }
+        }
+        // Determine which decoders to shut down on the player side:
+        // a decoder has to be shut down if its streamtype was active
+        // before but no longer is.
+        if ((mStreamMask & ~streamMask & indexToType(i))) {
             changedMask |= indexToType(i);
         }
     }
@@ -1372,7 +1745,7 @@
     notify->setInt32("changedMask", changedMask);
 
     msg->setWhat(kWhatChangeConfiguration3);
-    msg->setTarget(id());
+    msg->setTarget(this);
 
     notify->setMessage("reply", msg);
     notify->post();
@@ -1387,6 +1760,8 @@
     CHECK(msg->findInt32("streamMask", (int32_t *)&streamMask));
     CHECK(msg->findInt32("resumeMask", (int32_t *)&resumeMask));
 
+    mNewStreamMask = streamMask | resumeMask;
+
     int64_t timeUs;
     int32_t pickTrack;
     bool switching = false;
@@ -1395,13 +1770,26 @@
 
     if (timeUs < 0ll) {
         if (!pickTrack) {
-            switching = true;
+            // mSwapMask contains streams that are in both old and new variant,
+            // (in mNewStreamMask & mStreamMask) but with different URIs
+            // (not in resumeMask).
+            // For example, old variant has video and audio in two separate
+            // URIs, and new variant has only audio with unchanged URI. mSwapMask
+            // should be 0 as there is nothing to swap. We only need to stop video,
+            // and resume audio.
+            mSwapMask =  mNewStreamMask & mStreamMask & ~resumeMask;
+            mSwapMask = mNewStreamMask & mStreamMask & ~resumeMask;
         }
         mRealTimeBaseUs = ALooper::GetNowUs() - mLastDequeuedTimeUs;
     } else {
         mRealTimeBaseUs = ALooper::GetNowUs() - timeUs;
     }
 
+    ALOGV("onChangeConfiguration3: timeUs=%lld, switching=%d, pickTrack=%d, "
+            "mStreamMask=0x%x, mNewStreamMask=0x%x, mSwapMask=0x%x",
+            (long long)timeUs, switching, pickTrack,
+            mStreamMask, mNewStreamMask, mSwapMask);
+
     for (size_t i = 0; i < kMaxStreams; ++i) {
         if (streamMask & indexToType(i)) {
             if (switching) {
@@ -1412,47 +1800,21 @@
         }
     }
 
-    mNewStreamMask = streamMask | resumeMask;
-    if (switching) {
-        mSwapMask = mStreamMask & ~resumeMask;
-    }
-
     // Of all existing fetchers:
     // * Resume fetchers that are still needed and assign them original packet sources.
     // * Mark otherwise unneeded fetchers for removal.
     ALOGV("resuming fetchers for mask 0x%08x", resumeMask);
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         const AString &uri = mFetcherInfos.keyAt(i);
+        if (!resumeFetcher(uri, resumeMask, timeUs)) {
+            ALOGV("marking fetcher-%d to be removed",
+                    mFetcherInfos[i].mFetcher->getFetcherID());
 
-        sp<AnotherPacketSource> sources[kMaxStreams];
-        for (size_t j = 0; j < kMaxStreams; ++j) {
-            if ((resumeMask & indexToType(j)) && uri == mStreams[j].mUri) {
-                sources[j] = mPacketSources.valueFor(indexToType(j));
-
-                if (j != kSubtitleIndex) {
-                    ALOGV("queueing dummy discontinuity for stream type %d", indexToType(j));
-                    sp<AnotherPacketSource> discontinuityQueue;
-                    discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
-                    discontinuityQueue->queueDiscontinuity(
-                            ATSParser::DISCONTINUITY_NONE,
-                            NULL,
-                            true);
-                }
-            }
-        }
-
-        FetcherInfo &info = mFetcherInfos.editValueAt(i);
-        if (sources[kAudioIndex] != NULL || sources[kVideoIndex] != NULL
-                || sources[kSubtitleIndex] != NULL) {
-            info.mFetcher->startAsync(
-                    sources[kAudioIndex], sources[kVideoIndex], sources[kSubtitleIndex]);
-        } else {
-            info.mToBeRemoved = true;
+            mFetcherInfos.editValueAt(i).mToBeRemoved = true;
         }
     }
 
     // streamMask now only contains the types that need a new fetcher created.
-
     if (streamMask != 0) {
         ALOGV("creating new fetchers for mask 0x%08x", streamMask);
     }
@@ -1470,13 +1832,12 @@
         sp<PlaylistFetcher> fetcher = addFetcher(uri.c_str());
         CHECK(fetcher != NULL);
 
-        int64_t startTimeUs = -1;
-        int64_t segmentStartTimeUs = -1ll;
-        int32_t discontinuitySeq = -1;
-        sp<AnotherPacketSource> sources[kMaxStreams];
+        HLSTime startTime;
+        SeekMode seekMode = kSeekModeExactPosition;
+        sp<AnotherPacketSource> sources[kNumSources];
 
-        if (i == kSubtitleIndex) {
-            segmentStartTimeUs = latestMediaSegmentStartTimeUs();
+        if (i == kSubtitleIndex || (!pickTrack && !switching)) {
+            startTime = latestMediaSegmentStartTime();
         }
 
         // TRICKY: looping from i as earlier streams are already removed from streamMask
@@ -1486,63 +1847,50 @@
                 sources[j] = mPacketSources.valueFor(indexToType(j));
 
                 if (timeUs >= 0) {
-                    sources[j]->clear();
-                    startTimeUs = timeUs;
-
-                    sp<AnotherPacketSource> discontinuityQueue;
-                    sp<AMessage> extra = new AMessage;
-                    extra->setInt64("timeUs", timeUs);
-                    discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
-                    discontinuityQueue->queueDiscontinuity(
-                            ATSParser::DISCONTINUITY_TIME, extra, true);
+                    startTime.mTimeUs = timeUs;
                 } else {
                     int32_t type;
                     sp<AMessage> meta;
-                    if (pickTrack) {
-                        // selecting
+                    if (!switching) {
+                        // selecting, or adapting but no swap required
                         meta = sources[j]->getLatestDequeuedMeta();
                     } else {
-                        // adapting
+                        // adapting and swap required
                         meta = sources[j]->getLatestEnqueuedMeta();
-                    }
-
-                    if (meta != NULL && !meta->findInt32("discontinuity", &type)) {
-                        int64_t tmpUs;
-                        int64_t tmpSegmentUs;
-
-                        CHECK(meta->findInt64("timeUs", &tmpUs));
-                        CHECK(meta->findInt64("segmentStartTimeUs", &tmpSegmentUs));
-                        if (startTimeUs < 0 || tmpSegmentUs < segmentStartTimeUs) {
-                            startTimeUs = tmpUs;
-                            segmentStartTimeUs = tmpSegmentUs;
-                        } else if (tmpSegmentUs == segmentStartTimeUs && tmpUs < startTimeUs) {
-                            startTimeUs = tmpUs;
-                        }
-
-                        int32_t seq;
-                        CHECK(meta->findInt32("discontinuitySeq", &seq));
-                        if (discontinuitySeq < 0 || seq < discontinuitySeq) {
-                            discontinuitySeq = seq;
+                        if (meta != NULL && mCurBandwidthIndex > mOrigBandwidthIndex) {
+                            // switching up
+                            meta = sources[j]->getMetaAfterLastDequeued(mUpSwitchMargin);
                         }
                     }
 
-                    if (pickTrack) {
-                        // selecting track, queue discontinuities before content
+                    if ((j == kAudioIndex || j == kVideoIndex)
+                            && meta != NULL && !meta->findInt32("discontinuity", &type)) {
+                        HLSTime tmpTime(meta);
+                        if (startTime < tmpTime) {
+                            startTime = tmpTime;
+                        }
+                    }
+
+                    if (!switching) {
+                        // selecting, or adapting but no swap required
                         sources[j]->clear();
                         if (j == kSubtitleIndex) {
                             break;
                         }
-                        sp<AnotherPacketSource> discontinuityQueue;
-                        discontinuityQueue = mDiscontinuities.valueFor(indexToType(j));
-                        discontinuityQueue->queueDiscontinuity(
-                                ATSParser::DISCONTINUITY_FORMATCHANGE, NULL, true);
+
+                        ALOGV("stream[%zu]: queue format change", j);
+                        sources[j]->queueDiscontinuity(
+                                ATSParser::DISCONTINUITY_FORMAT_ONLY, NULL, true);
                     } else {
-                        // adapting, queue discontinuities after resume
+                        // switching, queue discontinuities after resume
                         sources[j] = mPacketSources2.valueFor(indexToType(j));
                         sources[j]->clear();
-                        uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
-                        if (extraStreams & indexToType(j)) {
-                            sources[j]->queueAccessUnit(createFormatChangeBuffer(/*swap*/ false));
+                        // the new fetcher might be providing streams that used to be
+                        // provided by two different fetchers; if one of the fetchers
+                        // paused in the middle while the other somehow paused in the
+                        // next segment, we have to start from the next segment.
+                        if (seekMode < mStreams[j].mSeekMode) {
+                            seekMode = mStreams[j].mSeekMode;
                         }
                     }
                 }
@@ -1551,54 +1899,104 @@
             }
         }
 
+        ALOGV("[fetcher-%d] startAsync: startTimeUs %lld mLastSeekTimeUs %lld "
+                "segmentStartTimeUs %lld seekMode %d",
+                fetcher->getFetcherID(),
+                (long long)startTime.mTimeUs,
+                (long long)mLastSeekTimeUs,
+                (long long)startTime.getSegmentTimeUs(),
+                seekMode);
+
+        // Set the target segment start time to the middle point of the
+        // segment where the last sample was.
+        // This gives a better guess if segments of the two variants are not
+        // perfectly aligned. (If the corresponding segment in new variant
+        // starts slightly later than that in the old variant, we still want
+        // to pick that segment, not the one before)
         fetcher->startAsync(
                 sources[kAudioIndex],
                 sources[kVideoIndex],
                 sources[kSubtitleIndex],
-                startTimeUs < 0 ? mLastSeekTimeUs : startTimeUs,
-                segmentStartTimeUs,
-                discontinuitySeq,
-                switching);
+                getMetadataSource(sources, mNewStreamMask, switching),
+                startTime.mTimeUs < 0 ? mLastSeekTimeUs : startTime.mTimeUs,
+                startTime.getSegmentTimeUs(),
+                startTime.mSeq,
+                seekMode);
     }
 
     // All fetchers have now been started, the configuration change
     // has completed.
 
-    cancelCheckBandwidthEvent();
-    scheduleCheckBandwidthEvent();
-
-    ALOGV("XXX configuration change completed.");
     mReconfigurationInProgress = false;
     if (switching) {
         mSwitchInProgress = true;
     } else {
         mStreamMask = mNewStreamMask;
+        if (mOrigBandwidthIndex != mCurBandwidthIndex) {
+            ALOGV("#### Finished Bandwidth Switch Early: %zd => %zd",
+                    mOrigBandwidthIndex, mCurBandwidthIndex);
+            mOrigBandwidthIndex = mCurBandwidthIndex;
+        }
     }
 
-    if (mDisconnectReplyID != 0) {
+    ALOGV("onChangeConfiguration3: mSwitchInProgress %d, mStreamMask 0x%x",
+            mSwitchInProgress, mStreamMask);
+
+    if (mDisconnectReplyID != NULL) {
         finishDisconnect();
     }
 }
 
-void LiveSession::onSwapped(const sp<AMessage> &msg) {
-    int32_t switchGeneration;
-    CHECK(msg->findInt32("switchGeneration", &switchGeneration));
-    if (switchGeneration != mSwitchGeneration) {
+void LiveSession::swapPacketSource(StreamType stream) {
+    ALOGV("[%s] swapPacketSource", getNameForStream(stream));
+
+    // transfer packets from source2 to source
+    sp<AnotherPacketSource> &aps = mPacketSources.editValueFor(stream);
+    sp<AnotherPacketSource> &aps2 = mPacketSources2.editValueFor(stream);
+
+    // queue discontinuity in mPacketSource
+    aps->queueDiscontinuity(ATSParser::DISCONTINUITY_FORMAT_ONLY, NULL, false);
+
+    // queue packets in mPacketSource2 to mPacketSource
+    status_t finalResult = OK;
+    sp<ABuffer> accessUnit;
+    while (aps2->hasBufferAvailable(&finalResult) && finalResult == OK &&
+          OK == aps2->dequeueAccessUnit(&accessUnit)) {
+        aps->queueAccessUnit(accessUnit);
+    }
+    aps2->clear();
+}
+
+void LiveSession::tryToFinishBandwidthSwitch(const AString &oldUri) {
+    if (!mSwitchInProgress) {
         return;
     }
 
-    int32_t stream;
-    CHECK(msg->findInt32("stream", &stream));
-
-    ssize_t idx = typeToIndex(stream);
-    CHECK(idx >= 0);
-    if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
-        ALOGW("swapping stream type %d %s to empty stream", stream, mStreams[idx].mUri.c_str());
+    ssize_t index = mFetcherInfos.indexOfKey(oldUri);
+    if (index < 0 || !mFetcherInfos[index].mToBeRemoved) {
+        return;
     }
-    mStreams[idx].mUri = mStreams[idx].mNewUri;
-    mStreams[idx].mNewUri.clear();
 
-    mSwapMask &= ~stream;
+    // Swap packet source of streams provided by old variant
+    for (size_t idx = 0; idx < kMaxStreams; idx++) {
+        StreamType stream = indexToType(idx);
+        if ((mSwapMask & stream) && (oldUri == mStreams[idx].mUri)) {
+            swapPacketSource(stream);
+
+            if ((mNewStreamMask & stream) && mStreams[idx].mNewUri.empty()) {
+                ALOGW("swapping stream type %d %s to empty stream",
+                        stream, mStreams[idx].mUri.c_str());
+            }
+            mStreams[idx].mUri = mStreams[idx].mNewUri;
+            mStreams[idx].mNewUri.clear();
+
+            mSwapMask &= ~stream;
+        }
+    }
+
+    mFetcherInfos.editValueAt(index).mFetcher->stopAsync(false /* clear */);
+
+    ALOGV("tryToFinishBandwidthSwitch: mSwapMask=0x%x", mSwapMask);
     if (mSwapMask != 0) {
         return;
     }
@@ -1606,157 +2004,372 @@
     // Check if new variant contains extra streams.
     uint32_t extraStreams = mNewStreamMask & (~mStreamMask);
     while (extraStreams) {
-        StreamType extraStream = (StreamType) (extraStreams & ~(extraStreams - 1));
-        swapPacketSource(extraStream);
-        extraStreams &= ~extraStream;
+        StreamType stream = (StreamType) (extraStreams & ~(extraStreams - 1));
+        extraStreams &= ~stream;
 
-        idx = typeToIndex(extraStream);
+        swapPacketSource(stream);
+
+        ssize_t idx = typeToIndex(stream);
         CHECK(idx >= 0);
         if (mStreams[idx].mNewUri.empty()) {
             ALOGW("swapping extra stream type %d %s to empty stream",
-                    extraStream, mStreams[idx].mUri.c_str());
+                    stream, mStreams[idx].mUri.c_str());
         }
         mStreams[idx].mUri = mStreams[idx].mNewUri;
         mStreams[idx].mNewUri.clear();
     }
 
-    tryToFinishBandwidthSwitch();
-}
-
-void LiveSession::onCheckSwitchDown() {
-    if (mSwitchDownMonitor == NULL) {
-        return;
-    }
-
-    if (mSwitchInProgress || mReconfigurationInProgress) {
-        ALOGV("Switch/Reconfig in progress, defer switch down");
-        mSwitchDownMonitor->post(1000000ll);
-        return;
-    }
-
-    for (size_t i = 0; i < kMaxStreams; ++i) {
-        int32_t targetDuration;
-        sp<AnotherPacketSource> packetSource = mPacketSources.valueFor(indexToType(i));
-        sp<AMessage> meta = packetSource->getLatestDequeuedMeta();
-
-        if (meta != NULL && meta->findInt32("targetDuration", &targetDuration) ) {
-            int64_t bufferedDurationUs = packetSource->getEstimatedDurationUs();
-            int64_t targetDurationUs = targetDuration * 1000000ll;
-
-            if (bufferedDurationUs < targetDurationUs / 3) {
-                (new AMessage(kWhatSwitchDown, id()))->post();
-                break;
-            }
+    // Restart new fetcher (it was paused after the first 47k block)
+    // and let it fetch into mPacketSources (not mPacketSources2)
+    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
+        FetcherInfo &info = mFetcherInfos.editValueAt(i);
+        if (info.mToBeResumed) {
+            resumeFetcher(mFetcherInfos.keyAt(i), mNewStreamMask);
+            info.mToBeResumed = false;
         }
     }
 
-    mSwitchDownMonitor->post(1000000ll);
+    ALOGI("#### Finished Bandwidth Switch: %zd => %zd",
+            mOrigBandwidthIndex, mCurBandwidthIndex);
+
+    mStreamMask = mNewStreamMask;
+    mSwitchInProgress = false;
+    mOrigBandwidthIndex = mCurBandwidthIndex;
+
+    restartPollBuffering();
 }
 
-void LiveSession::onSwitchDown() {
-    if (mReconfigurationInProgress || mSwitchInProgress || mCurBandwidthIndex == 0) {
-        return;
-    }
-
-    ssize_t bandwidthIndex = getBandwidthIndex();
-    if (bandwidthIndex < mCurBandwidthIndex) {
-        changeConfiguration(-1, bandwidthIndex, false);
-        return;
-    }
-
+void LiveSession::schedulePollBuffering() {
+    sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+    msg->setInt32("generation", mPollBufferingGeneration);
+    msg->post(1000000ll);
 }
 
-// Mark switch done when:
-//   1. all old buffers are swapped out
-void LiveSession::tryToFinishBandwidthSwitch() {
+void LiveSession::cancelPollBuffering() {
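+    // Bump the generation so any pending kWhatPollBuffering messages become stale.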
+    ++mPollBufferingGeneration;
+    mPrevBufferPercentage = -1;
+}
+
+void LiveSession::restartPollBuffering() {
+    cancelPollBuffering();
+    onPollBuffering();
+}
+
+void LiveSession::onPollBuffering() {
+    ALOGV("onPollBuffering: mSwitchInProgress %d, mReconfigurationInProgress %d, "
+            "mInPreparationPhase %d, mCurBandwidthIndex %zd, mStreamMask 0x%x",
+        mSwitchInProgress, mReconfigurationInProgress,
+        mInPreparationPhase, mCurBandwidthIndex, mStreamMask);
+
+    bool underflow, ready, down, up;
+    if (checkBuffering(underflow, ready, down, up)) {
+        if (mInPreparationPhase) {
+            // Allow a down switch even if we're still preparing.
+            //
+            // Some streams default to a high bandwidth index; when the actual
+            // bandwidth is low, buffering up to the ready mark takes a long
+            // time, and playback pauses right after starting because we have
+            // to switch down. It's a better experience to restart from a
+            // lower index when we detect low bandwidth during preparation.
+            if (!switchBandwidthIfNeeded(false /* up */, down) && ready) {
+                postPrepared(OK);
+            }
+        }
+
+        if (!mInPreparationPhase) {
+            if (ready) {
+                stopBufferingIfNecessary();
+            } else if (underflow) {
+                startBufferingIfNecessary();
+            }
+            switchBandwidthIfNeeded(up, down);
+        }
+    }
+
+    schedulePollBuffering();
+}
+
+void LiveSession::cancelBandwidthSwitch(bool resume) {
+    ALOGV("cancelBandwidthSwitch: mSwitchGen(%d)++, orig %zd, cur %zd",
+            mSwitchGeneration, mOrigBandwidthIndex, mCurBandwidthIndex);
     if (!mSwitchInProgress) {
         return;
     }
 
-    bool needToRemoveFetchers = false;
-    for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
-        if (mFetcherInfos.valueAt(i).mToBeRemoved) {
-            needToRemoveFetchers = true;
-            break;
-        }
-    }
-
-    if (!needToRemoveFetchers && mSwapMask == 0) {
-        ALOGI("mSwitchInProgress = false");
-        mStreamMask = mNewStreamMask;
-        mSwitchInProgress = false;
-    }
-}
-
-void LiveSession::scheduleCheckBandwidthEvent() {
-    sp<AMessage> msg = new AMessage(kWhatCheckBandwidth, id());
-    msg->setInt32("generation", mCheckBandwidthGeneration);
-    msg->post(10000000ll);
-}
-
-void LiveSession::cancelCheckBandwidthEvent() {
-    ++mCheckBandwidthGeneration;
-}
-
-void LiveSession::cancelBandwidthSwitch() {
-    Mutex::Autolock lock(mSwapMutex);
-    mSwitchGeneration++;
-    mSwitchInProgress = false;
-    mSwapMask = 0;
-
     for (size_t i = 0; i < mFetcherInfos.size(); ++i) {
         FetcherInfo& info = mFetcherInfos.editValueAt(i);
         if (info.mToBeRemoved) {
             info.mToBeRemoved = false;
+            if (resume) {
+                resumeFetcher(mFetcherInfos.keyAt(i), mSwapMask);
+            }
         }
     }
 
     for (size_t i = 0; i < kMaxStreams; ++i) {
-        if (!mStreams[i].mNewUri.empty()) {
-            ssize_t j = mFetcherInfos.indexOfKey(mStreams[i].mNewUri);
-            if (j < 0) {
-                mStreams[i].mNewUri.clear();
+        AString newUri = mStreams[i].mNewUri;
+        if (!newUri.empty()) {
+            // clear all mNewUri matching this newUri
+            for (size_t j = i; j < kMaxStreams; ++j) {
+                if (mStreams[j].mNewUri == newUri) {
+                    mStreams[j].mNewUri.clear();
+                }
+            }
+            ALOGV("stopping newUri = %s", newUri.c_str());
+            ssize_t index = mFetcherInfos.indexOfKey(newUri);
+            if (index < 0) {
+                ALOGE("did not find fetcher for newUri: %s", newUri.c_str());
                 continue;
             }
-
-            const FetcherInfo &info = mFetcherInfos.valueAt(j);
+            FetcherInfo &info = mFetcherInfos.editValueAt(index);
+            info.mToBeRemoved = true;
             info.mFetcher->stopAsync();
-            mFetcherInfos.removeItemsAt(j);
-            mStreams[i].mNewUri.clear();
         }
     }
+
+    ALOGI("#### Canceled Bandwidth Switch: %zd => %zd",
+            mOrigBandwidthIndex, mCurBandwidthIndex);
+
+    mSwitchGeneration++;
+    mSwitchInProgress = false;
+    mCurBandwidthIndex = mOrigBandwidthIndex;
+    mSwapMask = 0;
 }
 
-bool LiveSession::canSwitchBandwidthTo(size_t bandwidthIndex) {
-    if (mReconfigurationInProgress || mSwitchInProgress) {
+bool LiveSession::checkBuffering(
+        bool &underflow, bool &ready, bool &down, bool &up) {
+    underflow = ready = down = up = false;
+
+    if (mReconfigurationInProgress) {
+        ALOGV("Switch/Reconfig in progress, defer buffer polling");
         return false;
     }
 
-    if (mCurBandwidthIndex < 0) {
+    size_t activeCount, underflowCount, readyCount, downCount, upCount;
+    activeCount = underflowCount = readyCount = downCount = upCount = 0;
+    int32_t minBufferPercent = -1;
+    int64_t durationUs;
+    if (getDuration(&durationUs) != OK) {
+        durationUs = -1;
+    }
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        // we don't check subtitles for buffering level
+        if (!(mStreamMask & mPacketSources.keyAt(i)
+                & (STREAMTYPE_AUDIO | STREAMTYPE_VIDEO))) {
+            continue;
+        }
+        // ignore streams that never had any packet queued.
+        // (it's possible that the variant only has audio or video)
+        sp<AMessage> meta = mPacketSources[i]->getLatestEnqueuedMeta();
+        if (meta == NULL) {
+            continue;
+        }
+
+        status_t finalResult;
+        int64_t bufferedDurationUs =
+                mPacketSources[i]->getBufferedDurationUs(&finalResult);
+        ALOGV("[%s] buffered %lld us",
+                getNameForStream(mPacketSources.keyAt(i)),
+                (long long)bufferedDurationUs);
+        if (durationUs >= 0) {
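+            // Report buffering progress as the position buffered up to (last
+            // dequeued time plus buffered duration) relative to total duration;
+            // a finished source counts as 100%.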
+            int32_t percent;
+            if (mPacketSources[i]->isFinished(0 /* duration */)) {
+                percent = 100;
+            } else {
+                percent = (int32_t)(100.0 *
+                        (mLastDequeuedTimeUs + bufferedDurationUs) / durationUs);
+            }
+            if (minBufferPercent < 0 || percent < minBufferPercent) {
+                minBufferPercent = percent;
+            }
+        }
+
+        ++activeCount;
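+        // A stream is "ready" once it has buffered past the ready mark (or the
+        // prepare mark while preparing), or has reached end of stream.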
+        int64_t readyMark = mInPreparationPhase ? kPrepareMarkUs : kReadyMarkUs;
+        if (bufferedDurationUs > readyMark
+                || mPacketSources[i]->isFinished(0)) {
+            ++readyCount;
+        }
+        if (!mPacketSources[i]->isFinished(0)) {
+            if (bufferedDurationUs < kUnderflowMarkUs) {
+                ++underflowCount;
+            }
+            if (bufferedDurationUs > mUpSwitchMark) {
+                ++upCount;
+            }
+            if (bufferedDurationUs < mDownSwitchMark) {
+                ++downCount;
+            }
+        }
+    }
+
+    if (minBufferPercent >= 0) {
+        notifyBufferingUpdate(minBufferPercent);
+    }
+
+    if (activeCount > 0) {
+        up        = (upCount == activeCount);
+        down      = (downCount > 0);
+        ready     = (readyCount == activeCount);
+        underflow = (underflowCount > 0);
         return true;
     }
 
-    if (bandwidthIndex == (size_t)mCurBandwidthIndex) {
-        return false;
-    } else if (bandwidthIndex > (size_t)mCurBandwidthIndex) {
-        return canSwitchUp();
-    } else {
-        return true;
+    return false;
+}
+
+void LiveSession::startBufferingIfNecessary() {
+    ALOGV("startBufferingIfNecessary: mInPreparationPhase=%d, mBuffering=%d",
+            mInPreparationPhase, mBuffering);
+    if (!mBuffering) {
+        mBuffering = true;
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatBufferingStart);
+        notify->post();
     }
 }
 
-void LiveSession::onCheckBandwidth(const sp<AMessage> &msg) {
-    size_t bandwidthIndex = getBandwidthIndex();
-    if (canSwitchBandwidthTo(bandwidthIndex)) {
-        changeConfiguration(-1ll /* timeUs */, bandwidthIndex);
-    } else {
-        // Come back and check again 10 seconds later in case there is nothing to do now.
-        // If we DO change configuration, once that completes it'll schedule a new
-        // check bandwidth event with an incremented mCheckBandwidthGeneration.
-        msg->post(10000000ll);
+void LiveSession::stopBufferingIfNecessary() {
+    ALOGV("stopBufferingIfNecessary: mInPreparationPhase=%d, mBuffering=%d",
+            mInPreparationPhase, mBuffering);
+
+    if (mBuffering) {
+        mBuffering = false;
+
+        sp<AMessage> notify = mNotify->dup();
+        notify->setInt32("what", kWhatBufferingEnd);
+        notify->post();
     }
 }
 
+void LiveSession::notifyBufferingUpdate(int32_t percentage) {
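+    // The percentage reported to the client never decreases and is capped at 100.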
+    if (percentage < mPrevBufferPercentage) {
+        percentage = mPrevBufferPercentage;
+    } else if (percentage > 100) {
+        percentage = 100;
+    }
+
+    mPrevBufferPercentage = percentage;
+
+    ALOGV("notifyBufferingUpdate: percentage=%d%%", percentage);
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatBufferingUpdate);
+    notify->setInt32("percentage", percentage);
+    notify->post();
+}
+
+bool LiveSession::tryBandwidthFallback() {
+    if (mInPreparationPhase || mReconfigurationInProgress) {
+        // Don't try fallback during prepare or reconfig.
+        // If error happens there, it's likely unrecoverable.
+        return false;
+    }
+    if (mCurBandwidthIndex > mOrigBandwidthIndex) {
+        // if we're switching up, simply cancel and resume old variant
+        cancelBandwidthSwitch(true /* resume */);
+        return true;
+    } else {
+        // If we're switching down, we're likely about to underflow (if not
+        // already underflowing). Try the lowest viable bandwidth if we're not
+        // on that variant already.
+        ssize_t lowestValid = getLowestValidBandwidthIndex();
+        if (mCurBandwidthIndex > lowestValid) {
+            cancelBandwidthSwitch();
+            changeConfiguration(-1ll, lowestValid);
+            return true;
+        }
+    }
+    // return false if we couldn't find any fallback
+    return false;
+}
+
+/*
+ * returns true if a bandwidth switch is actually needed (and started),
+ * returns false otherwise
+ */
+bool LiveSession::switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow) {
+    // no need to check bandwidth if we only have one bandwidth setting
+    if (mBandwidthItems.size() < 2) {
+        return false;
+    }
+
+    if (mSwitchInProgress) {
+        if (mBuffering) {
+            tryBandwidthFallback();
+        }
+        return false;
+    }
+
+    int32_t bandwidthBps, shortTermBps;
+    bool isStable;
+    if (mBandwidthEstimator->estimateBandwidth(
+            &bandwidthBps, &isStable, &shortTermBps)) {
+        ALOGV("bandwidth estimated at %.2f kbps, "
+                "stable %d, shortTermBps %.2f kbps",
+                bandwidthBps / 1024.0f, isStable, shortTermBps / 1024.0f);
+        mLastBandwidthBps = bandwidthBps;
+        mLastBandwidthStable = isStable;
+    } else {
+        ALOGV("no bandwidth estimate.");
+        return false;
+    }
+
+    int32_t curBandwidth = mBandwidthItems.itemAt(mCurBandwidthIndex).mBandwidth;
+    // canSwitchDown and canSwitchUp can't both be true.
+    // We only want to switch up when the measured bandwidth exceeds 120% of the
+    // current variant, and only switch down when it drops below the current variant.
+    bool canSwitchDown = bufferLow
+            && (bandwidthBps < (int32_t)curBandwidth);
+    bool canSwitchUp = bufferHigh
+            && (bandwidthBps > (int32_t)curBandwidth * 12 / 10);
+
+    if (canSwitchDown || canSwitchUp) {
+        // Bandwidth estimation has some delay; if we have to switch down before
+        // the estimate has stabilized, use the short-term estimate to guess the
+        // real bandwidth, since it may be dropping too fast.
+        // (Note this doesn't apply to up switches; always use the longer average there.)
+        if (!isStable && canSwitchDown) {
+            if (shortTermBps < bandwidthBps) {
+                bandwidthBps = shortTermBps;
+            }
+        }
+
+        ssize_t bandwidthIndex = getBandwidthIndex(bandwidthBps);
+
+        // It's possible that we're in the canSwitchUp case, but the returned
+        // bandwidthIndex is < mCurBandwidthIndex, as getBandwidthIndex() only uses
+        // 70% of the measured bandwidth. In that case we don't want to do anything,
+        // since we have both enough buffer and enough bandwidth.
+        if ((canSwitchUp && bandwidthIndex > mCurBandwidthIndex)
+         || (canSwitchDown && bandwidthIndex < mCurBandwidthIndex)) {
+            // if not yet prepared, just restart again with new bw index.
+            // this is faster and playback experience is cleaner.
+            changeConfiguration(
+                    mInPreparationPhase ? 0 : -1ll, bandwidthIndex);
+            return true;
+        }
+    }
+    return false;
+}
+
+void LiveSession::postError(status_t err) {
+    // if we reached EOS, notify buffering of 100%
+    if (err == ERROR_END_OF_STREAM) {
+        notifyBufferingUpdate(100);
+    }
+    // We'll stop buffer polling now; before that, send the buffering-end
+    // notification so the spinning icon goes away.
+    stopBufferingIfNecessary();
+    cancelPollBuffering();
+
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatError);
+    notify->setInt32("err", err);
+    notify->post();
+}
+
 void LiveSession::postPrepared(status_t err) {
     CHECK(mInPreparationPhase);
 
@@ -1764,6 +2377,8 @@
     if (err == OK || err == ERROR_END_OF_STREAM) {
         notify->setInt32("what", kWhatPrepared);
     } else {
+        cancelPollBuffering();
+
         notify->setInt32("what", kWhatPreparationFailed);
         notify->setInt32("err", err);
     }
@@ -1771,10 +2386,8 @@
     notify->post();
 
     mInPreparationPhase = false;
-
-    mSwitchDownMonitor = new AMessage(kWhatCheckSwitchDown, id());
-    mSwitchDownMonitor->post();
 }
 
+
 }  // namespace android
 
diff --git a/media/libstagefright/httplive/LiveSession.h b/media/libstagefright/httplive/LiveSession.h
index 2d3a25a..90d56d0 100644
--- a/media/libstagefright/httplive/LiveSession.h
+++ b/media/libstagefright/httplive/LiveSession.h
@@ -23,43 +23,62 @@
 
 #include <utils/String8.h>
 
+#include "mpeg2ts/ATSParser.h"
+
 namespace android {
 
 struct ABuffer;
+struct AReplyToken;
 struct AnotherPacketSource;
-struct DataSource;
+class DataSource;
 struct HTTPBase;
 struct IMediaHTTPService;
 struct LiveDataSource;
 struct M3UParser;
 struct PlaylistFetcher;
+struct HLSTime;
+struct HTTPDownloader;
 
 struct LiveSession : public AHandler {
     enum Flags {
         // Don't log any URLs.
         kFlagIncognito = 1,
     };
-    LiveSession(
-            const sp<AMessage> &notify,
-            uint32_t flags,
-            const sp<IMediaHTTPService> &httpService);
 
     enum StreamIndex {
         kAudioIndex    = 0,
         kVideoIndex    = 1,
         kSubtitleIndex = 2,
         kMaxStreams    = 3,
+        kMetaDataIndex = 3,
+        kNumSources    = 4,
     };
 
     enum StreamType {
         STREAMTYPE_AUDIO        = 1 << kAudioIndex,
         STREAMTYPE_VIDEO        = 1 << kVideoIndex,
         STREAMTYPE_SUBTITLES    = 1 << kSubtitleIndex,
+        STREAMTYPE_METADATA     = 1 << kMetaDataIndex,
     };
+
+    enum SeekMode {
+        kSeekModeExactPosition = 0, // used for seeking
+        kSeekModeNextSample    = 1, // used for seamless switching
+        kSeekModeNextSegment   = 2, // used for seamless switching
+    };
+
+    LiveSession(
+            const sp<AMessage> &notify,
+            uint32_t flags,
+            const sp<IMediaHTTPService> &httpService);
+
+    int64_t calculateMediaTimeUs(int64_t firstTimeUs, int64_t timeUs, int32_t discontinuitySeq);
     status_t dequeueAccessUnit(StreamType stream, sp<ABuffer> *accessUnit);
 
     status_t getStreamFormat(StreamType stream, sp<AMessage> *format);
 
+    sp<HTTPDownloader> getHTTPDownloader();
+
     void connectAsync(
             const char *url,
             const KeyedVector<String8, String8> *headers = NULL);
@@ -78,18 +97,21 @@
     bool isSeekable() const;
     bool hasDynamicDuration() const;
 
+    static const char *getKeyForStream(StreamType type);
+    static const char *getNameForStream(StreamType type);
+    static ATSParser::SourceType getSourceTypeForStream(StreamType type);
+
     enum {
         kWhatStreamsChanged,
         kWhatError,
         kWhatPrepared,
         kWhatPreparationFailed,
+        kWhatBufferingStart,
+        kWhatBufferingEnd,
+        kWhatBufferingUpdate,
+        kWhatMetadataDetected,
     };
 
-    // create a format-change discontinuity
-    //
-    // swap:
-    //   whether is format-change discontinuity should trigger a buffer swap
-    sp<ABuffer> createFormatChangeBuffer(bool swap = true);
 protected:
     virtual ~LiveSession();
 
@@ -103,46 +125,56 @@
         kWhatDisconnect                 = 'disc',
         kWhatSeek                       = 'seek',
         kWhatFetcherNotify              = 'notf',
-        kWhatCheckBandwidth             = 'bndw',
         kWhatChangeConfiguration        = 'chC0',
         kWhatChangeConfiguration2       = 'chC2',
         kWhatChangeConfiguration3       = 'chC3',
-        kWhatFinishDisconnect2          = 'fin2',
-        kWhatSwapped                    = 'swap',
-        kWhatCheckSwitchDown            = 'ckSD',
-        kWhatSwitchDown                 = 'sDwn',
+        kWhatPollBuffering              = 'poll',
     };
 
-    static const size_t kBandwidthHistoryBytes;
+    // Bandwidth Switch Mark Defaults
+    static const int64_t kUpSwitchMarkUs;
+    static const int64_t kDownSwitchMarkUs;
+    static const int64_t kUpSwitchMarginUs;
+    static const int64_t kResumeThresholdUs;
 
+    // Buffer Prepare/Ready/Underflow Marks
+    static const int64_t kReadyMarkUs;
+    static const int64_t kPrepareMarkUs;
+    static const int64_t kUnderflowMarkUs;
+
+    struct BandwidthEstimator;
     struct BandwidthItem {
         size_t mPlaylistIndex;
         unsigned long mBandwidth;
+        int64_t mLastFailureUs;
     };
 
     struct FetcherInfo {
         sp<PlaylistFetcher> mFetcher;
         int64_t mDurationUs;
-        bool mIsPrepared;
         bool mToBeRemoved;
+        bool mToBeResumed;
     };
 
     struct StreamItem {
         const char *mType;
         AString mUri, mNewUri;
+        SeekMode mSeekMode;
         size_t mCurDiscontinuitySeq;
         int64_t mLastDequeuedTimeUs;
         int64_t mLastSampleDurationUs;
         StreamItem()
-            : mType(""),
-              mCurDiscontinuitySeq(0),
-              mLastDequeuedTimeUs(0),
-              mLastSampleDurationUs(0) {}
+            : StreamItem("") {}
         StreamItem(const char *type)
             : mType(type),
-              mCurDiscontinuitySeq(0),
-              mLastDequeuedTimeUs(0),
-              mLastSampleDurationUs(0) {}
+              mSeekMode(kSeekModeExactPosition) {
+                  reset();
+              }
+        void reset() {
+            mCurDiscontinuitySeq = 0;
+            mLastDequeuedTimeUs = -1ll;
+            mLastSampleDurationUs = 0ll;
+        }
         AString uriKey() {
             AString key(mType);
             key.append("URI");
@@ -155,19 +187,27 @@
     uint32_t mFlags;
     sp<IMediaHTTPService> mHTTPService;
 
+    bool mBuffering;
     bool mInPreparationPhase;
-    bool mBuffering[kMaxStreams];
+    int32_t mPollBufferingGeneration;
+    int32_t mPrevBufferPercentage;
 
-    sp<HTTPBase> mHTTPDataSource;
     KeyedVector<String8, String8> mExtraHeaders;
 
     AString mMasterURL;
 
     Vector<BandwidthItem> mBandwidthItems;
     ssize_t mCurBandwidthIndex;
+    ssize_t mOrigBandwidthIndex;
+    int32_t mLastBandwidthBps;
+    bool mLastBandwidthStable;
+    sp<BandwidthEstimator> mBandwidthEstimator;
 
     sp<M3UParser> mPlaylist;
+    int32_t mMaxWidth;
+    int32_t mMaxHeight;
 
+    sp<ALooper> mFetcherLooper;
     KeyedVector<AString, FetcherInfo> mFetcherInfos;
     uint32_t mStreamMask;
 
@@ -180,17 +220,10 @@
     // we use this to track reconfiguration progress.
     uint32_t mSwapMask;
 
-    KeyedVector<StreamType, sp<AnotherPacketSource> > mDiscontinuities;
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources;
     // A second set of packet sources that buffer content for the variant we're switching to.
     KeyedVector<StreamType, sp<AnotherPacketSource> > mPacketSources2;
 
-    // A mutex used to serialize two sets of events:
-    // * the swapping of packet sources in dequeueAccessUnit on the player thread, AND
-    // * a forced bandwidth switch termination in cancelSwitch on the live looper.
-    Mutex mSwapMutex;
-
-    int32_t mCheckBandwidthGeneration;
     int32_t mSwitchGeneration;
     int32_t mSubtitleGeneration;
 
@@ -203,80 +236,76 @@
 
     bool mReconfigurationInProgress;
     bool mSwitchInProgress;
-    uint32_t mDisconnectReplyID;
-    uint32_t mSeekReplyID;
+    int64_t mUpSwitchMark;
+    int64_t mDownSwitchMark;
+    int64_t mUpSwitchMargin;
+
+    sp<AReplyToken> mDisconnectReplyID;
+    sp<AReplyToken> mSeekReplyID;
 
     bool mFirstTimeUsValid;
     int64_t mFirstTimeUs;
     int64_t mLastSeekTimeUs;
-    sp<AMessage> mSwitchDownMonitor;
+    bool mHasMetadata;
+
     KeyedVector<size_t, int64_t> mDiscontinuityAbsStartTimesUs;
     KeyedVector<size_t, int64_t> mDiscontinuityOffsetTimesUs;
 
     sp<PlaylistFetcher> addFetcher(const char *uri);
 
     void onConnect(const sp<AMessage> &msg);
-    status_t onSeek(const sp<AMessage> &msg);
-    void onFinishDisconnect2();
+    void onMasterPlaylistFetched(const sp<AMessage> &msg);
+    void onSeek(const sp<AMessage> &msg);
 
-    // If given a non-zero block_size (default 0), it is used to cap the number of
-    // bytes read in from the DataSource. If given a non-NULL buffer, new content
-    // is read into the end.
-    //
-    // The DataSource we read from is responsible for signaling error or EOF to help us
-    // break out of the read loop. The DataSource can be returned to the caller, so
-    // that the caller can reuse it for subsequent fetches (within the initially
-    // requested range).
-    //
-    // For reused HTTP sources, the caller must download a file sequentially without
-    // any overlaps or gaps to prevent reconnection.
-    ssize_t fetchFile(
-            const char *url, sp<ABuffer> *out,
-            /* request/open a file starting at range_offset for range_length bytes */
-            int64_t range_offset = 0, int64_t range_length = -1,
-            /* download block size */
-            uint32_t block_size = 0,
-            /* reuse DataSource if doing partial fetch */
-            sp<DataSource> *source = NULL,
-            String8 *actualUrl = NULL);
+    bool UriIsSameAsIndex( const AString &uri, int32_t index, bool newUri);
+    sp<AnotherPacketSource> getPacketSourceForStreamIndex(size_t trackIndex, bool newUri);
+    sp<AnotherPacketSource> getMetadataSource(
+            sp<AnotherPacketSource> sources[kNumSources], uint32_t streamMask, bool newUri);
 
-    sp<M3UParser> fetchPlaylist(
-            const char *url, uint8_t *curPlaylistHash, bool *unchanged);
+    bool resumeFetcher(
+            const AString &uri, uint32_t streamMask,
+            int64_t timeUs = -1ll, bool newUri = false);
 
-    size_t getBandwidthIndex();
-    int64_t latestMediaSegmentStartTimeUs();
+    float getAbortThreshold(
+            ssize_t currentBWIndex, ssize_t targetBWIndex) const;
+    void addBandwidthMeasurement(size_t numBytes, int64_t delayUs);
+    size_t getBandwidthIndex(int32_t bandwidthBps);
+    ssize_t getLowestValidBandwidthIndex() const;
+    HLSTime latestMediaSegmentStartTime() const;
 
+    static bool isBandwidthValid(const BandwidthItem &item);
     static int SortByBandwidth(const BandwidthItem *, const BandwidthItem *);
     static StreamType indexToType(int idx);
     static ssize_t typeToIndex(int32_t type);
 
     void changeConfiguration(
-            int64_t timeUs, size_t bandwidthIndex, bool pickTrack = false);
+            int64_t timeUs, ssize_t bwIndex = -1, bool pickTrack = false);
     void onChangeConfiguration(const sp<AMessage> &msg);
     void onChangeConfiguration2(const sp<AMessage> &msg);
     void onChangeConfiguration3(const sp<AMessage> &msg);
-    void onSwapped(const sp<AMessage> &msg);
-    void onCheckSwitchDown();
-    void onSwitchDown();
-    void tryToFinishBandwidthSwitch();
 
-    void scheduleCheckBandwidthEvent();
-    void cancelCheckBandwidthEvent();
+    void swapPacketSource(StreamType stream);
+    void tryToFinishBandwidthSwitch(const AString &oldUri);
+    void cancelBandwidthSwitch(bool resume = false);
+    bool checkSwitchProgress(
+            sp<AMessage> &msg, int64_t delayUs, bool *needResumeUntil);
 
-    // cancelBandwidthSwitch is atomic wrt swapPacketSource; call it to prevent packet sources
-    // from being swapped out on stale discontinuities while manipulating
-    // mPacketSources/mPacketSources2.
-    void cancelBandwidthSwitch();
+    bool switchBandwidthIfNeeded(bool bufferHigh, bool bufferLow);
+    bool tryBandwidthFallback();
 
-    bool canSwitchBandwidthTo(size_t bandwidthIndex);
-    void onCheckBandwidth(const sp<AMessage> &msg);
+    void schedulePollBuffering();
+    void cancelPollBuffering();
+    void restartPollBuffering();
+    void onPollBuffering();
+    bool checkBuffering(bool &underflow, bool &ready, bool &down, bool &up);
+    void startBufferingIfNecessary();
+    void stopBufferingIfNecessary();
+    void notifyBufferingUpdate(int32_t percentage);
 
     void finishDisconnect();
 
     void postPrepared(status_t err);
-
-    void swapPacketSource(StreamType stream);
-    bool canSwitchUp();
+    void postError(status_t err);
 
     DISALLOW_EVIL_CONSTRUCTORS(LiveSession);
 };
diff --git a/media/libstagefright/httplive/M3UParser.cpp b/media/libstagefright/httplive/M3UParser.cpp
index 997b694..ff2bb27 100644
--- a/media/libstagefright/httplive/M3UParser.cpp
+++ b/media/libstagefright/httplive/M3UParser.cpp
@@ -250,7 +250,11 @@
       mIsVariantPlaylist(false),
       mIsComplete(false),
       mIsEvent(false),
+      mFirstSeqNumber(-1),
+      mLastSeqNumber(-1),
+      mTargetDurationUs(-1ll),
       mDiscontinuitySeq(0),
+      mDiscontinuityCount(0),
       mSelectedIndex(-1) {
     mInitCheck = parse(data, size);
 }
@@ -282,6 +286,19 @@
     return mDiscontinuitySeq;
 }
 
+int64_t M3UParser::getTargetDuration() const {
+    return mTargetDurationUs;
+}
+
+int32_t M3UParser::getFirstSeqNumber() const {
+    return mFirstSeqNumber;
+}
+
+void M3UParser::getSeqNumberRange(int32_t *firstSeq, int32_t *lastSeq) const {
+    *firstSeq = mFirstSeqNumber;
+    *lastSeq = mLastSeqNumber;
+}
+
 sp<AMessage> M3UParser::meta() {
     return mMeta;
 }
@@ -394,7 +411,9 @@
 
 bool M3UParser::getTypeURI(size_t index, const char *key, AString *uri) const {
     if (!mIsVariantPlaylist) {
-        *uri = mBaseURI;
+        if (uri != NULL) {
+            *uri = mBaseURI;
+        }
 
         // Assume media without any more specific attribute contains
         // audio and video, but no subtitles.
@@ -407,7 +426,9 @@
 
     AString groupID;
     if (!meta->findString(key, &groupID)) {
-        *uri = mItems.itemAt(index).mURI;
+        if (uri != NULL) {
+            *uri = mItems.itemAt(index).mURI;
+        }
 
         AString codecs;
         if (!meta->findString("codecs", &codecs)) {
@@ -433,18 +454,26 @@
         }
     }
 
-    sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
-    if (!group->getActiveURI(uri)) {
-        return false;
-    }
+    // If uri == NULL, we're only checking whether the type is present;
+    // we don't care about the active URI (or whether there is an active one).
+    if (uri != NULL) {
+        sp<MediaGroup> group = mMediaGroups.valueFor(groupID);
+        if (!group->getActiveURI(uri)) {
+            return false;
+        }
 
-    if ((*uri).empty()) {
-        *uri = mItems.itemAt(index).mURI;
+        if ((*uri).empty()) {
+            *uri = mItems.itemAt(index).mURI;
+        }
     }
 
     return true;
 }
 
+bool M3UParser::hasType(size_t index, const char *key) const {
+    return getTypeURI(index, key, NULL /* uri */);
+}
+
 static bool MakeURL(const char *baseURL, const char *url, AString *out) {
     out->clear();
 
@@ -582,6 +611,7 @@
                     itemMeta = new AMessage;
                 }
                 itemMeta->setInt32("discontinuity", true);
+                ++mDiscontinuityCount;
             } else if (line.startsWith("#EXT-X-STREAM-INF")) {
                 if (mMeta != NULL) {
                     return ERROR_MALFORMED;
@@ -609,6 +639,9 @@
             } else if (line.startsWith("#EXT-X-MEDIA")) {
                 err = parseMedia(line);
             } else if (line.startsWith("#EXT-X-DISCONTINUITY-SEQUENCE")) {
+                if (mIsVariantPlaylist) {
+                    return ERROR_MALFORMED;
+                }
                 size_t seq;
                 err = parseDiscontinuitySequence(line, &seq);
                 if (err == OK) {
@@ -628,6 +661,8 @@
                         || !itemMeta->findInt64("durationUs", &durationUs)) {
                     return ERROR_MALFORMED;
                 }
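+                // Tag each segment with its absolute discontinuity sequence number
+                // (the playlist's base sequence plus discontinuities seen so far).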
+                itemMeta->setInt32("discontinuity-sequence",
+                        mDiscontinuitySeq + mDiscontinuityCount);
             }
 
             mItems.push();
@@ -644,6 +679,25 @@
         ++lineNo;
     }
 
+    // Error-check all fields that are required to appear exactly once
+    // (currently only "target-duration"), and initialize playlist
+    // properties (e.g. mTargetDurationUs).
+    if (!mIsVariantPlaylist) {
+        int32_t targetDurationSecs;
+        if (mMeta == NULL || !mMeta->findInt32(
+                "target-duration", &targetDurationSecs)) {
+            ALOGE("Media playlist missing #EXT-X-TARGETDURATION");
+            return ERROR_MALFORMED;
+        }
+        mTargetDurationUs = targetDurationSecs * 1000000ll;
+
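+        // EXT-X-MEDIA-SEQUENCE is optional; sequence numbers start at 0 when absent.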
+        mFirstSeqNumber = 0;
+        if (mMeta != NULL) {
+            mMeta->findInt32("media-sequence", &mFirstSeqNumber);
+        }
+        mLastSeqNumber = mFirstSeqNumber + mItems.size() - 1;
+    }
+
     return OK;
 }
 
@@ -781,6 +835,29 @@
                 *meta = new AMessage;
             }
             (*meta)->setString(key.c_str(), codecs.c_str());
+        } else if (!strcasecmp("resolution", key.c_str())) {
+            const char *s = val.c_str();
+            char *end;
+            unsigned long width = strtoul(s, &end, 10);
+
+            if (end == s || *end != 'x') {
+                // malformed
+                continue;
+            }
+
+            s = end + 1;
+            unsigned long height = strtoul(s, &end, 10);
+
+            if (end == s || *end != '\0') {
+                // malformed
+                continue;
+            }
+
+            if (meta->get() == NULL) {
+                *meta = new AMessage;
+            }
+            (*meta)->setInt32("width", width);
+            (*meta)->setInt32("height", height);
         } else if (!strcasecmp("audio", key.c_str())
                 || !strcasecmp("video", key.c_str())
                 || !strcasecmp("subtitles", key.c_str())) {
diff --git a/media/libstagefright/httplive/M3UParser.h b/media/libstagefright/httplive/M3UParser.h
index 1cad060..fa648ed 100644
--- a/media/libstagefright/httplive/M3UParser.h
+++ b/media/libstagefright/httplive/M3UParser.h
@@ -36,6 +36,9 @@
     bool isComplete() const;
     bool isEvent() const;
     size_t getDiscontinuitySeq() const;
+    int64_t getTargetDuration() const;
+    int32_t getFirstSeqNumber() const;
+    void getSeqNumberRange(int32_t *firstSeq, int32_t *lastSeq) const;
 
     sp<AMessage> meta();
 
@@ -50,6 +53,7 @@
     ssize_t getSelectedTrack(media_track_type /* type */) const;
 
     bool getTypeURI(size_t index, const char *key, AString *uri) const;
+    bool hasType(size_t index, const char *key) const;
 
 protected:
     virtual ~M3UParser();
@@ -69,7 +73,11 @@
     bool mIsVariantPlaylist;
     bool mIsComplete;
     bool mIsEvent;
+    int32_t mFirstSeqNumber;
+    int32_t mLastSeqNumber;
+    int64_t mTargetDurationUs;
     size_t mDiscontinuitySeq;
+    int32_t mDiscontinuityCount;
 
     sp<AMessage> mMeta;
     Vector<Item> mItems;
diff --git a/media/libstagefright/httplive/PlaylistFetcher.cpp b/media/libstagefright/httplive/PlaylistFetcher.cpp
index 1227600..72d832e 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.cpp
+++ b/media/libstagefright/httplive/PlaylistFetcher.cpp
@@ -17,24 +17,19 @@
 //#define LOG_NDEBUG 0
 #define LOG_TAG "PlaylistFetcher"
 #include <utils/Log.h>
+#include <utils/misc.h>
 
 #include "PlaylistFetcher.h"
-
-#include "LiveDataSource.h"
+#include "HTTPDownloader.h"
 #include "LiveSession.h"
 #include "M3UParser.h"
-
 #include "include/avc_utils.h"
-#include "include/HTTPBase.h"
 #include "include/ID3.h"
 #include "mpeg2ts/AnotherPacketSource.h"
 
-#include <media/IStreamSource.h>
 #include <media/stagefright/foundation/ABitReader.h>
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/hexdump.h>
-#include <media/stagefright/FileSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
 #include <media/stagefright/Utils.h>
@@ -42,63 +37,156 @@
 #include <ctype.h>
 #include <inttypes.h>
 #include <openssl/aes.h>
-#include <openssl/md5.h>
+
+#define FLOGV(fmt, ...) ALOGV("[fetcher-%d] " fmt, mFetcherID, ##__VA_ARGS__)
+#define FSLOGV(stream, fmt, ...) ALOGV("[fetcher-%d] [%s] " fmt, mFetcherID, \
+         LiveSession::getNameForStream(stream), ##__VA_ARGS__)
 
 namespace android {
 
 // static
-const int64_t PlaylistFetcher::kMinBufferedDurationUs = 10000000ll;
+const int64_t PlaylistFetcher::kMinBufferedDurationUs = 30000000ll;
 const int64_t PlaylistFetcher::kMaxMonitorDelayUs = 3000000ll;
 // LCM of 188 (size of a TS packet) & 1k works well
 const int32_t PlaylistFetcher::kDownloadBlockSize = 47 * 1024;
-const int32_t PlaylistFetcher::kNumSkipFrames = 5;
+
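+// Saved state of a partially downloaded segment, letting a paused fetcher
+// resume mid-segment instead of re-downloading the segment from the start.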
+struct PlaylistFetcher::DownloadState : public RefBase {
+    DownloadState();
+    void resetState();
+    bool hasSavedState() const;
+    void restoreState(
+            AString &uri,
+            sp<AMessage> &itemMeta,
+            sp<ABuffer> &buffer,
+            sp<ABuffer> &tsBuffer,
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
+    void saveState(
+            AString &uri,
+            sp<AMessage> &itemMeta,
+            sp<ABuffer> &buffer,
+            sp<ABuffer> &tsBuffer,
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
+
+private:
+    bool mHasSavedState;
+    AString mUri;
+    sp<AMessage> mItemMeta;
+    sp<ABuffer> mBuffer;
+    sp<ABuffer> mTsBuffer;
+    int32_t mFirstSeqNumberInPlaylist;
+    int32_t mLastSeqNumberInPlaylist;
+};
+
+PlaylistFetcher::DownloadState::DownloadState() {
+    resetState();
+}
+
+bool PlaylistFetcher::DownloadState::hasSavedState() const {
+    return mHasSavedState;
+}
+
+void PlaylistFetcher::DownloadState::resetState() {
+    mHasSavedState = false;
+
+    mUri.clear();
+    mItemMeta = NULL;
+    mBuffer = NULL;
+    mTsBuffer = NULL;
+    mFirstSeqNumberInPlaylist = 0;
+    mLastSeqNumberInPlaylist = 0;
+}
+
+void PlaylistFetcher::DownloadState::restoreState(
+        AString &uri,
+        sp<AMessage> &itemMeta,
+        sp<ABuffer> &buffer,
+        sp<ABuffer> &tsBuffer,
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
+    if (!mHasSavedState) {
+        return;
+    }
+
+    uri = mUri;
+    itemMeta = mItemMeta;
+    buffer = mBuffer;
+    tsBuffer = mTsBuffer;
+    firstSeqNumberInPlaylist = mFirstSeqNumberInPlaylist;
+    lastSeqNumberInPlaylist = mLastSeqNumberInPlaylist;
+
+    resetState();
+}
+
+void PlaylistFetcher::DownloadState::saveState(
+        AString &uri,
+        sp<AMessage> &itemMeta,
+        sp<ABuffer> &buffer,
+        sp<ABuffer> &tsBuffer,
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
+    mHasSavedState = true;
+
+    mUri = uri;
+    mItemMeta = itemMeta;
+    mBuffer = buffer;
+    mTsBuffer = tsBuffer;
+    mFirstSeqNumberInPlaylist = firstSeqNumberInPlaylist;
+    mLastSeqNumberInPlaylist = lastSeqNumberInPlaylist;
+}
 
 PlaylistFetcher::PlaylistFetcher(
         const sp<AMessage> &notify,
         const sp<LiveSession> &session,
         const char *uri,
+        int32_t id,
         int32_t subtitleGeneration)
     : mNotify(notify),
-      mStartTimeUsNotify(notify->dup()),
       mSession(session),
       mURI(uri),
+      mFetcherID(id),
       mStreamTypeMask(0),
       mStartTimeUs(-1ll),
       mSegmentStartTimeUs(-1ll),
       mDiscontinuitySeq(-1ll),
       mStartTimeUsRelative(false),
       mLastPlaylistFetchTimeUs(-1ll),
+      mPlaylistTimeUs(-1ll),
       mSeqNumber(-1),
       mNumRetries(0),
       mStartup(true),
-      mAdaptive(false),
-      mPrepared(false),
+      mIDRFound(false),
+      mSeekMode(LiveSession::kSeekModeExactPosition),
+      mTimeChangeSignaled(false),
       mNextPTSTimeUs(-1ll),
       mMonitorQueueGeneration(0),
       mSubtitleGeneration(subtitleGeneration),
+      mLastDiscontinuitySeq(-1ll),
       mRefreshState(INITIAL_MINIMUM_RELOAD_DELAY),
       mFirstPTSValid(false),
-      mAbsoluteTimeAnchorUs(0ll),
-      mVideoBuffer(new AnotherPacketSource(NULL)) {
+      mFirstTimeUs(-1ll),
+      mVideoBuffer(new AnotherPacketSource(NULL)),
+      mThresholdRatio(-1.0f),
+      mDownloadState(new DownloadState()),
+      mHasMetadata(false) {
     memset(mPlaylistHash, 0, sizeof(mPlaylistHash));
-    mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
-    mStartTimeUsNotify->setInt32("streamMask", 0);
+    mHTTPDownloader = mSession->getHTTPDownloader();
 }
 
 PlaylistFetcher::~PlaylistFetcher() {
 }
 
+int32_t PlaylistFetcher::getFetcherID() const {
+    return mFetcherID;
+}
+
 int64_t PlaylistFetcher::getSegmentStartTimeUs(int32_t seqNumber) const {
     CHECK(mPlaylist != NULL);
 
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
-    int32_t lastSeqNumberInPlaylist =
-        firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+    int32_t firstSeqNumberInPlaylist, lastSeqNumberInPlaylist;
+    mPlaylist->getSeqNumberRange(
+            &firstSeqNumberInPlaylist, &lastSeqNumberInPlaylist);
 
     CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
     CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
@@ -119,6 +207,27 @@
     return segmentStartUs;
 }
 
+int64_t PlaylistFetcher::getSegmentDurationUs(int32_t seqNumber) const {
+    CHECK(mPlaylist != NULL);
+
+    int32_t firstSeqNumberInPlaylist, lastSeqNumberInPlaylist;
+    mPlaylist->getSeqNumberRange(
+            &firstSeqNumberInPlaylist, &lastSeqNumberInPlaylist);
+
+    CHECK_GE(seqNumber, firstSeqNumberInPlaylist);
+    CHECK_LE(seqNumber, lastSeqNumberInPlaylist);
+
+    int32_t index = seqNumber - firstSeqNumberInPlaylist;
+    sp<AMessage> itemMeta;
+    CHECK(mPlaylist->itemAt(
+                index, NULL /* uri */, &itemMeta));
+
+    int64_t itemDurationUs;
+    CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+    return itemDurationUs;
+}
+
 int64_t PlaylistFetcher::delayUsToRefreshPlaylist() const {
     int64_t nowUs = ALooper::GetNowUs();
 
@@ -131,10 +240,7 @@
         return (~0llu >> 1);
     }
 
-    int32_t targetDurationSecs;
-    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-
-    int64_t targetDurationUs = targetDurationSecs * 1000000ll;
+    int64_t targetDurationUs = mPlaylist->getTargetDuration();
 
     int64_t minPlaylistAgeUs;
 
@@ -224,9 +330,11 @@
     if (index >= 0) {
         key = mAESKeyForURI.valueAt(index);
     } else {
-        ssize_t err = mSession->fetchFile(keyURI.c_str(), &key);
+        ssize_t err = mHTTPDownloader->fetchFile(keyURI.c_str(), &key);
 
-        if (err < 0) {
+        if (err == ERROR_NOT_CONNECTED) {
+            return ERROR_NOT_CONNECTED;
+        } else if (err < 0) {
             ALOGE("failed to fetch cipher key from '%s'.", keyURI.c_str());
             return ERROR_IO;
         } else if (key->size() != 16) {
@@ -322,10 +430,10 @@
         maxDelayUs = minDelayUs;
     }
     if (delayUs > maxDelayUs) {
-        ALOGV("Need to refresh playlist in %" PRId64 , maxDelayUs);
+        FLOGV("Need to refresh playlist in %lld", (long long)maxDelayUs);
         delayUs = maxDelayUs;
     }
-    sp<AMessage> msg = new AMessage(kWhatMonitorQueue, id());
+    sp<AMessage> msg = new AMessage(kWhatMonitorQueue, this);
     msg->setInt32("generation", mMonitorQueueGeneration);
     msg->post(delayUs);
 }
@@ -334,15 +442,44 @@
     ++mMonitorQueueGeneration;
 }
 
+void PlaylistFetcher::setStoppingThreshold(float thresholdRatio, bool disconnect) {
+    {
+        AutoMutex _l(mThresholdLock);
+        mThresholdRatio = thresholdRatio;
+    }
+    if (disconnect) {
+        mHTTPDownloader->disconnect();
+    }
+}
+
+void PlaylistFetcher::resetStoppingThreshold(bool disconnect) {
+    {
+        AutoMutex _l(mThresholdLock);
+        mThresholdRatio = -1.0f;
+    }
+    if (disconnect) {
+        mHTTPDownloader->disconnect();
+    } else {
+        // allow reconnect
+        mHTTPDownloader->reconnect();
+    }
+}
+
+float PlaylistFetcher::getStoppingThreshold() {
+    AutoMutex _l(mThresholdLock);
+    return mThresholdRatio;
+}
+
 void PlaylistFetcher::startAsync(
         const sp<AnotherPacketSource> &audioSource,
         const sp<AnotherPacketSource> &videoSource,
         const sp<AnotherPacketSource> &subtitleSource,
+        const sp<AnotherPacketSource> &metadataSource,
         int64_t startTimeUs,
         int64_t segmentStartTimeUs,
         int32_t startDiscontinuitySeq,
-        bool adaptive) {
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
+        LiveSession::SeekMode seekMode) {
+    sp<AMessage> msg = new AMessage(kWhatStart, this);
 
     uint32_t streamTypeMask = 0ul;
 
@@ -361,30 +498,53 @@
         streamTypeMask |= LiveSession::STREAMTYPE_SUBTITLES;
     }
 
+    if (metadataSource != NULL) {
+        msg->setPointer("metadataSource", metadataSource.get());
+        // metadataSource does not affect streamTypeMask.
+    }
+
     msg->setInt32("streamTypeMask", streamTypeMask);
     msg->setInt64("startTimeUs", startTimeUs);
     msg->setInt64("segmentStartTimeUs", segmentStartTimeUs);
     msg->setInt32("startDiscontinuitySeq", startDiscontinuitySeq);
-    msg->setInt32("adaptive", adaptive);
+    msg->setInt32("seekMode", seekMode);
     msg->post();
 }
 
-void PlaylistFetcher::pauseAsync() {
-    (new AMessage(kWhatPause, id()))->post();
+/*
+ * pauseAsync
+ *
+ * thresholdRatio:  0.0f - pause after the current fetch block (default 47KB)
+ *                 -1.0f - pause after finishing the current segment
+ *           (0.0, 1.0f) - pause if the remaining portion of the current
+ *                         segment exceeds this ratio
+ */
+void PlaylistFetcher::pauseAsync(
+        float thresholdRatio, bool disconnect) {
+    setStoppingThreshold(thresholdRatio, disconnect);
+
+    (new AMessage(kWhatPause, this))->post();
 }
 
 void PlaylistFetcher::stopAsync(bool clear) {
-    sp<AMessage> msg = new AMessage(kWhatStop, id());
+    setStoppingThreshold(0.0f, true /* disconnect */);
+
+    sp<AMessage> msg = new AMessage(kWhatStop, this);
     msg->setInt32("clear", clear);
     msg->post();
 }
 
 void PlaylistFetcher::resumeUntilAsync(const sp<AMessage> &params) {
-    AMessage* msg = new AMessage(kWhatResumeUntil, id());
+    FLOGV("resumeUntilAsync: params=%s", params->debugString().c_str());
+
+    AMessage* msg = new AMessage(kWhatResumeUntil, this);
     msg->setMessage("params", params);
     msg->post();
 }
 
+void PlaylistFetcher::fetchPlaylistAsync() {
+    (new AMessage(kWhatFetchPlaylist, this))->post();
+}
+
 void PlaylistFetcher::onMessageReceived(const sp<AMessage> &msg) {
     switch (msg->what()) {
         case kWhatStart:
@@ -404,6 +564,10 @@
 
             sp<AMessage> notify = mNotify->dup();
             notify->setInt32("what", kWhatPaused);
+            notify->setInt32("seekMode",
+                    mDownloadState->hasSavedState()
+                    ? LiveSession::kSeekModeNextSample
+                    : LiveSession::kSeekModeNextSegment);
             notify->post();
             break;
         }
@@ -418,6 +582,19 @@
             break;
         }
 
+        case kWhatFetchPlaylist:
+        {
+            bool unchanged;
+            sp<M3UParser> playlist = mHTTPDownloader->fetchPlaylist(
+                    mURI.c_str(), NULL /* curPlaylistHash */, &unchanged);
+
+            sp<AMessage> notify = mNotify->dup();
+            notify->setInt32("what", kWhatPlaylistFetched);
+            notify->setObject("playlist", playlist);
+            notify->post();
+            break;
+        }
+
         case kWhatMonitorQueue:
         case kWhatDownloadNext:
         {
@@ -450,6 +627,10 @@
 
 status_t PlaylistFetcher::onStart(const sp<AMessage> &msg) {
     mPacketSources.clear();
+    mStopParams.clear();
+    mStartTimeUsNotify = mNotify->dup();
+    mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
+    mStartTimeUsNotify->setString("uri", mURI);
 
     uint32_t streamTypeMask;
     CHECK(msg->findInt32("streamTypeMask", (int32_t *)&streamTypeMask));
@@ -457,11 +638,11 @@
     int64_t startTimeUs;
     int64_t segmentStartTimeUs;
     int32_t startDiscontinuitySeq;
-    int32_t adaptive;
+    int32_t seekMode;
     CHECK(msg->findInt64("startTimeUs", &startTimeUs));
     CHECK(msg->findInt64("segmentStartTimeUs", &segmentStartTimeUs));
     CHECK(msg->findInt32("startDiscontinuitySeq", &startDiscontinuitySeq));
-    CHECK(msg->findInt32("adaptive", &adaptive));
+    CHECK(msg->findInt32("seekMode", &seekMode));
 
     if (streamTypeMask & LiveSession::STREAMTYPE_AUDIO) {
         void *ptr;
@@ -490,17 +671,38 @@
                 static_cast<AnotherPacketSource *>(ptr));
     }
 
+    void *ptr;
+    // metadataSource is not part of streamTypeMask
+    if ((streamTypeMask & (LiveSession::STREAMTYPE_AUDIO | LiveSession::STREAMTYPE_VIDEO))
+            && msg->findPointer("metadataSource", &ptr)) {
+        mPacketSources.add(
+                LiveSession::STREAMTYPE_METADATA,
+                static_cast<AnotherPacketSource *>(ptr));
+    }
+
     mStreamTypeMask = streamTypeMask;
 
     mSegmentStartTimeUs = segmentStartTimeUs;
-    mDiscontinuitySeq = startDiscontinuitySeq;
+
+    if (startDiscontinuitySeq >= 0) {
+        mDiscontinuitySeq = startDiscontinuitySeq;
+    }
+
+    mRefreshState = INITIAL_MINIMUM_RELOAD_DELAY;
+    mSeekMode = (LiveSession::SeekMode) seekMode;
+
+    if (startTimeUs >= 0 || mSeekMode == LiveSession::kSeekModeNextSample) {
+        mStartup = true;
+        mIDRFound = false;
+        mVideoBuffer->clear();
+    }
 
     if (startTimeUs >= 0) {
         mStartTimeUs = startTimeUs;
+        mFirstPTSValid = false;
         mSeqNumber = -1;
-        mStartup = true;
-        mPrepared = false;
-        mAdaptive = adaptive;
+        mTimeChangeSignaled = false;
+        mDownloadState->resetState();
     }
 
     postMonitorQueue();
@@ -510,6 +712,9 @@
 
 void PlaylistFetcher::onPause() {
     cancelMonitorQueue();
+    mLastDiscontinuitySeq = mDiscontinuitySeq;
+
+    resetStoppingThreshold(false /* disconnect */);
 }
 
 void PlaylistFetcher::onStop(const sp<AMessage> &msg) {
@@ -524,8 +729,11 @@
         }
     }
 
+    mDownloadState->resetState();
     mPacketSources.clear();
     mStreamTypeMask = 0;
+
+    resetStoppingThreshold(true /* disconnect */);
 }
 
 // Resume until we have reached the boundary timestamps listed in `msg`; when
@@ -535,57 +743,18 @@
     sp<AMessage> params;
     CHECK(msg->findMessage("params", &params));
 
-    bool stop = false;
-    for (size_t i = 0; i < mPacketSources.size(); i++) {
-        sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
-
-        const char *stopKey;
-        int streamType = mPacketSources.keyAt(i);
-        switch (streamType) {
-        case LiveSession::STREAMTYPE_VIDEO:
-            stopKey = "timeUsVideo";
-            break;
-
-        case LiveSession::STREAMTYPE_AUDIO:
-            stopKey = "timeUsAudio";
-            break;
-
-        case LiveSession::STREAMTYPE_SUBTITLES:
-            stopKey = "timeUsSubtitle";
-            break;
-
-        default:
-            TRESPASS();
-        }
-
-        // Don't resume if we would stop within a resume threshold.
-        int32_t discontinuitySeq;
-        int64_t latestTimeUs = 0, stopTimeUs = 0;
-        sp<AMessage> latestMeta = packetSource->getLatestEnqueuedMeta();
-        if (latestMeta != NULL
-                && latestMeta->findInt32("discontinuitySeq", &discontinuitySeq)
-                && discontinuitySeq == mDiscontinuitySeq
-                && latestMeta->findInt64("timeUs", &latestTimeUs)
-                && params->findInt64(stopKey, &stopTimeUs)
-                && stopTimeUs - latestTimeUs < resumeThreshold(latestMeta)) {
-            stop = true;
-        }
-    }
-
-    if (stop) {
-        for (size_t i = 0; i < mPacketSources.size(); i++) {
-            mPacketSources.valueAt(i)->queueAccessUnit(mSession->createFormatChangeBuffer());
-        }
-        stopAsync(/* clear = */ false);
-        return OK;
-    }
-
     mStopParams = params;
-    postMonitorQueue();
+    onDownloadNext();
 
     return OK;
 }
 
+void PlaylistFetcher::notifyStopReached() {
+    sp<AMessage> notify = mNotify->dup();
+    notify->setInt32("what", kWhatStopReached);
+    notify->post();
+}
+
 void PlaylistFetcher::notifyError(status_t err) {
     sp<AMessage> notify = mNotify->dup();
     notify->setInt32("what", kWhatError);
@@ -604,96 +773,84 @@
 }
 
 void PlaylistFetcher::onMonitorQueue() {
-    bool downloadMore = false;
-    refreshPlaylist();
-
-    int32_t targetDurationSecs;
-    int64_t targetDurationUs = kMinBufferedDurationUs;
-    if (mPlaylist != NULL) {
-        if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "target-duration", &targetDurationSecs)) {
-            ALOGE("Playlist is missing required EXT-X-TARGETDURATION tag");
-            notifyError(ERROR_MALFORMED);
-            return;
-        }
-        targetDurationUs = targetDurationSecs * 1000000ll;
+    // in the middle of an unfinished download, delay
+    // playlist refresh as it'll change seq numbers
+    if (!mDownloadState->hasSavedState()) {
+        refreshPlaylist();
     }
 
-    // buffer at least 3 times the target duration, or up to 10 seconds
-    int64_t durationToBufferUs = targetDurationUs * 3;
-    if (durationToBufferUs > kMinBufferedDurationUs)  {
-        durationToBufferUs = kMinBufferedDurationUs;
+    int64_t targetDurationUs = kMinBufferedDurationUs;
+    if (mPlaylist != NULL) {
+        targetDurationUs = mPlaylist->getTargetDuration();
     }
 
     int64_t bufferedDurationUs = 0ll;
-    status_t finalResult = NOT_ENOUGH_DATA;
+    status_t finalResult = OK;
     if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
         sp<AnotherPacketSource> packetSource =
             mPacketSources.valueFor(LiveSession::STREAMTYPE_SUBTITLES);
 
         bufferedDurationUs =
                 packetSource->getBufferedDurationUs(&finalResult);
-        finalResult = OK;
     } else {
-        // Use max stream duration to prevent us from waiting on a non-existent stream;
-        // when we cannot make out from the manifest what streams are included in a playlist
-        // we might assume extra streams.
+        // Use the minimum stream duration, but ignore streams that never had any
+        // packet enqueued, to prevent us from waiting on a non-existent stream;
+        // when we cannot tell from the manifest which streams a playlist includes,
+        // we might assume extra streams.
+        bufferedDurationUs = -1ll;
         for (size_t i = 0; i < mPacketSources.size(); ++i) {
-            if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) {
+            if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0
+                    || mPacketSources[i]->getLatestEnqueuedMeta() == NULL) {
                 continue;
             }
 
             int64_t bufferedStreamDurationUs =
                 mPacketSources.valueAt(i)->getBufferedDurationUs(&finalResult);
-            ALOGV("buffered %" PRId64 " for stream %d",
-                    bufferedStreamDurationUs, mPacketSources.keyAt(i));
-            if (bufferedStreamDurationUs > bufferedDurationUs) {
+
+            FSLOGV(mPacketSources.keyAt(i), "buffered %lld", (long long)bufferedStreamDurationUs);
+
+            if (bufferedDurationUs == -1ll
+                 || bufferedStreamDurationUs < bufferedDurationUs) {
                 bufferedDurationUs = bufferedStreamDurationUs;
             }
         }
-    }
-    downloadMore = (bufferedDurationUs < durationToBufferUs);
-
-    // signal start if buffered up at least the target size
-    if (!mPrepared && bufferedDurationUs > targetDurationUs && downloadMore) {
-        mPrepared = true;
-
-        ALOGV("prepared, buffered=%" PRId64 " > %" PRId64 "",
-                bufferedDurationUs, targetDurationUs);
-        sp<AMessage> msg = mNotify->dup();
-        msg->setInt32("what", kWhatTemporarilyDoneFetching);
-        msg->post();
+        if (bufferedDurationUs == -1ll) {
+            bufferedDurationUs = 0ll;
+        }
     }
 
-    if (finalResult == OK && downloadMore) {
-        ALOGV("monitoring, buffered=%" PRId64 " < %" PRId64 "",
-                bufferedDurationUs, durationToBufferUs);
+    if (finalResult == OK && bufferedDurationUs < kMinBufferedDurationUs) {
+        FLOGV("monitoring, buffered=%lld < %lld",
+                (long long)bufferedDurationUs, (long long)kMinBufferedDurationUs);
+
         // delay the next download slightly; hopefully this gives other concurrent fetchers
         // a better chance to run.
         // onDownloadNext();
-        sp<AMessage> msg = new AMessage(kWhatDownloadNext, id());
+        sp<AMessage> msg = new AMessage(kWhatDownloadNext, this);
         msg->setInt32("generation", mMonitorQueueGeneration);
         msg->post(1000l);
     } else {
-        // Nothing to do yet, try again in a second.
+        // We'd like to maintain buffering above kMinBufferedDurationUs, so try
+        // again when the buffer is just about to drop below kMinBufferedDurationUs
+        // (or after targetDurationUs / 2, whichever is smaller).
+        int64_t delayUs = bufferedDurationUs - kMinBufferedDurationUs + 1000000ll;
+        if (delayUs > targetDurationUs / 2) {
+            delayUs = targetDurationUs / 2;
+        }
 
-        sp<AMessage> msg = mNotify->dup();
-        msg->setInt32("what", kWhatTemporarilyDoneFetching);
-        msg->post();
+        FLOGV("pausing for %lld, buffered=%lld > %lld",
+                (long long)delayUs,
+                (long long)bufferedDurationUs,
+                (long long)kMinBufferedDurationUs);
 
-        int64_t delayUs = mPrepared ? kMaxMonitorDelayUs : targetDurationUs / 2;
-        ALOGV("pausing for %" PRId64 ", buffered=%" PRId64 " > %" PRId64 "",
-                delayUs, bufferedDurationUs, durationToBufferUs);
-        // :TRICKY: need to enforce minimum delay because the delay to
-        // refresh the playlist will become 0
-        postMonitorQueue(delayUs, mPrepared ? targetDurationUs * 2 : 0);
+        postMonitorQueue(delayUs);
     }
 }
 
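As a rough illustration of the pause-delay logic above, here is a minimal standalone C++ sketch (not the actual PlaylistFetcher code; the function name and the sample values are made up): the next monitor wakeup is scheduled for roughly when the buffer drains to the minimum (plus one second of slack), capped at half a target duration.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Sketch of the delay computation: wait until the buffer would have drained
// to minBufferedUs (plus 1s of slack), but never longer than half a target duration.
static int64_t computePauseDelayUs(int64_t bufferedUs, int64_t minBufferedUs,
                                   int64_t targetDurationUs) {
    int64_t delayUs = bufferedUs - minBufferedUs + 1000000ll;
    return std::min(delayUs, targetDurationUs / 2);
}

int main() {
    // 25s buffered, 20s minimum, 10s target duration -> 6s, capped to 5s.
    printf("%lld\n", (long long)computePauseDelayUs(25000000ll, 20000000ll, 10000000ll));
    return 0;
}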
 status_t PlaylistFetcher::refreshPlaylist() {
     if (delayUsToRefreshPlaylist() <= 0) {
         bool unchanged;
-        sp<M3UParser> playlist = mSession->fetchPlaylist(
+        sp<M3UParser> playlist = mHTTPDownloader->fetchPlaylist(
                 mURI.c_str(), mPlaylistHash, &unchanged);
 
         if (playlist == NULL) {
@@ -715,6 +872,14 @@
             if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
                 updateDuration();
             }
+            // Notify LiveSession to use a target-duration-based buffering level
+            // for up/down switching. The default LiveSession::kUpSwitchMark may not
+            // be reachable for live streams, as our maximum buffering amount is
+            // limited to 3 segments.
+            if (!mPlaylist->isComplete()) {
+                updateTargetDuration();
+            }
+            mPlaylistTimeUs = ALooper::GetNowUs();
         }
 
         mLastPlaylistFetchTimeUs = ALooper::GetNowUs();
@@ -727,25 +892,83 @@
     return buffer->size() > 0 && buffer->data()[0] == 0x47;
 }
 
-void PlaylistFetcher::onDownloadNext() {
+bool PlaylistFetcher::shouldPauseDownload() {
+    if (mStreamTypeMask == LiveSession::STREAMTYPE_SUBTITLES) {
+        // doesn't apply to subtitles
+        return false;
+    }
+
+    // Calculate threshold to abort current download
+    float thresholdRatio = getStoppingThreshold();
+
+    if (thresholdRatio < 0.0f) {
+        // never abort
+        return false;
+    } else if (thresholdRatio == 0.0f) {
+        // immediately abort
+        return true;
+    }
+
+    // now we have a positive threshold ratio; abort if the remaining
+    // portion to download exceeds the corresponding threshold.
+    if (mSegmentFirstPTS < 0) {
+        // this means we haven't even found the first access unit;
+        // abort now, as we must be very far from the end.
+        return true;
+    }
+    int64_t lastEnqueueUs = mSegmentFirstPTS;
+    for (size_t i = 0; i < mPacketSources.size(); ++i) {
+        if ((mStreamTypeMask & mPacketSources.keyAt(i)) == 0) {
+            continue;
+        }
+        sp<AMessage> meta = mPacketSources[i]->getLatestEnqueuedMeta();
+        int32_t type;
+        if (meta == NULL || meta->findInt32("discontinuity", &type)) {
+            continue;
+        }
+        int64_t tmpUs;
+        CHECK(meta->findInt64("timeUs", &tmpUs));
+        if (tmpUs > lastEnqueueUs) {
+            lastEnqueueUs = tmpUs;
+        }
+    }
+    lastEnqueueUs -= mSegmentFirstPTS;
+
+    int64_t targetDurationUs = mPlaylist->getTargetDuration();
+    int64_t thresholdUs = thresholdRatio * targetDurationUs;
+
+    FLOGV("%spausing now, thresholdUs %lld, remaining %lld",
+            targetDurationUs - lastEnqueueUs > thresholdUs ? "" : "not ",
+            (long long)thresholdUs,
+            (long long)(targetDurationUs - lastEnqueueUs));
+
+    if (targetDurationUs - lastEnqueueUs > thresholdUs) {
+        return true;
+    }
+    return false;
+}
+
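A minimal standalone sketch of the stopping-threshold decision implemented by shouldPauseDownload() above (the helper name and the plain parameters are illustrative; the real code reads these values from the fetcher's members and packet sources):

#include <cstdint>

// A negative ratio means "never abort", zero means "abort immediately";
// otherwise abort when the not-yet-downloaded portion of the segment
// exceeds thresholdRatio * targetDurationUs.
static bool shouldAbortDownload(float thresholdRatio, int64_t segmentFirstPtsUs,
                                int64_t lastEnqueuedPtsUs, int64_t targetDurationUs) {
    if (thresholdRatio < 0.0f) {
        return false;
    }
    if (thresholdRatio == 0.0f) {
        return true;
    }
    if (segmentFirstPtsUs < 0) {
        // no access unit found yet; we must be far from the end
        return true;
    }
    int64_t downloadedUs = lastEnqueuedPtsUs - segmentFirstPtsUs;
    int64_t thresholdUs = (int64_t)(thresholdRatio * targetDurationUs);
    return targetDurationUs - downloadedUs > thresholdUs;
}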
+bool PlaylistFetcher::initDownloadState(
+        AString &uri,
+        sp<AMessage> &itemMeta,
+        int32_t &firstSeqNumberInPlaylist,
+        int32_t &lastSeqNumberInPlaylist) {
     status_t err = refreshPlaylist();
-    int32_t firstSeqNumberInPlaylist = 0;
-    int32_t lastSeqNumberInPlaylist = 0;
+    firstSeqNumberInPlaylist = 0;
+    lastSeqNumberInPlaylist = 0;
     bool discontinuity = false;
 
     if (mPlaylist != NULL) {
-        if (mPlaylist->meta() != NULL) {
-            mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist);
-        }
-
-        lastSeqNumberInPlaylist =
-                firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1;
+        mPlaylist->getSeqNumberRange(
+                &firstSeqNumberInPlaylist, &lastSeqNumberInPlaylist);
 
         if (mDiscontinuitySeq < 0) {
             mDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
         }
     }
 
+    mSegmentFirstPTS = -1ll;
+
     if (mPlaylist != NULL && mSeqNumber < 0) {
         CHECK_GE(mStartTimeUs, 0ll);
 
@@ -764,18 +987,26 @@
                 mStartTimeUs -= getSegmentStartTimeUs(mSeqNumber);
             }
             mStartTimeUsRelative = true;
-            ALOGV("Initial sequence number for time %" PRId64 " is %d from (%d .. %d)",
-                    mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
+            FLOGV("Initial sequence number for time %lld is %d from (%d .. %d)",
+                    (long long)mStartTimeUs, mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         } else {
             // When adapting or track switching, mSegmentStartTimeUs (relative
             // to media time 0) is used to determine the start segment; mStartTimeUs (absolute
             // timestamps coming from the media container) is used to determine the position
             // inside a segments.
-            mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs);
-            if (mAdaptive) {
+            if (mStreamTypeMask != LiveSession::STREAMTYPE_SUBTITLES
+                    && mSeekMode != LiveSession::kSeekModeNextSample) {
                 // avoid double fetch/decode
-                mSeqNumber += 1;
+                // Use (mSegmentStartTimeUs + 1/2 * targetDurationUs) to search
+                // for the starting segment in the new variant.
+                // If the two variants' segments are aligned, this gives the
+                // next segment. If they're not aligned, this gives the segment
+                // that overlaps no more than 1/2 * targetDurationUs.
+                mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs
+                        + mPlaylist->getTargetDuration() / 2);
+            } else {
+                mSeqNumber = getSeqNumberForTime(mSegmentStartTimeUs);
             }
             ssize_t minSeq = getSeqNumberForDiscontinuity(mDiscontinuitySeq);
             if (mSeqNumber < minSeq) {
@@ -789,7 +1020,7 @@
             if (mSeqNumber > lastSeqNumberInPlaylist) {
                 mSeqNumber = lastSeqNumberInPlaylist;
             }
-            ALOGV("Initial sequence number for live event %d from (%d .. %d)",
+            FLOGV("Initial sequence number is %d from (%d .. %d)",
                     mSeqNumber, firstSeqNumberInPlaylist,
                     lastSeqNumberInPlaylist);
         }
@@ -809,26 +1040,24 @@
                 // refresh in increasing fraction (1/2, 1/3, ...) of the
                 // playlist's target duration or 3 seconds, whichever is less
                 int64_t delayUs = kMaxMonitorDelayUs;
-                if (mPlaylist != NULL && mPlaylist->meta() != NULL) {
-                    int32_t targetDurationSecs;
-                    CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-                    delayUs = mPlaylist->size() * targetDurationSecs *
-                            1000000ll / (1 + mNumRetries);
+                if (mPlaylist != NULL) {
+                    delayUs = mPlaylist->size() * mPlaylist->getTargetDuration()
+                            / (1 + mNumRetries);
                 }
                 if (delayUs > kMaxMonitorDelayUs) {
                     delayUs = kMaxMonitorDelayUs;
                 }
-                ALOGV("sequence number high: %d from (%d .. %d), "
-                      "monitor in %" PRId64 " (retry=%d)",
+                FLOGV("sequence number high: %d from (%d .. %d), "
+                      "monitor in %lld (retry=%d)",
                         mSeqNumber, firstSeqNumberInPlaylist,
-                        lastSeqNumberInPlaylist, delayUs, mNumRetries);
+                        lastSeqNumberInPlaylist, (long long)delayUs, mNumRetries);
                 postMonitorQueue(delayUs);
-                return;
+                return false;
             }
 
             if (err != OK) {
                 notifyError(err);
-                return;
+                return false;
             }
 
             // we've missed the boat, let's start 3 segments prior to the latest sequence
@@ -843,12 +1072,8 @@
                 // but since the segments we are supposed to fetch have already rolled off
                 // the playlist, i.e. we have already missed the boat, we inevitably have to
                 // skip.
-                for (size_t i = 0; i < mPacketSources.size(); i++) {
-                    sp<ABuffer> formatChange = mSession->createFormatChangeBuffer();
-                    mPacketSources.valueAt(i)->queueAccessUnit(formatChange);
-                }
-                stopAsync(/* clear = */ false);
-                return;
+                notifyStopReached();
+                return false;
             }
             mSeqNumber = lastSeqNumberInPlaylist - 3;
             if (mSeqNumber < firstSeqNumberInPlaylist) {
@@ -858,29 +1083,156 @@
 
             // fall through
         } else {
-            ALOGE("Cannot find sequence number %d in playlist "
-                 "(contains %d - %d)",
-                 mSeqNumber, firstSeqNumberInPlaylist,
-                  firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1);
+            if (mPlaylist != NULL) {
+                ALOGE("Cannot find sequence number %d in playlist "
+                     "(contains %d - %d)",
+                     mSeqNumber, firstSeqNumberInPlaylist,
+                      firstSeqNumberInPlaylist + (int32_t)mPlaylist->size() - 1);
 
-            notifyError(ERROR_END_OF_STREAM);
-            return;
+                if (mTSParser != NULL) {
+                    mTSParser->signalEOS(ERROR_END_OF_STREAM);
+                    // Use an empty buffer; we don't have any new data, just want to extract
+                    // potential new access units after flush.  Reset mSeqNumber to
+                    // lastSeqNumberInPlaylist such that we set the correct access unit
+                    // properties in extractAndQueueAccessUnitsFromTs.
+                    sp<ABuffer> buffer = new ABuffer(0);
+                    mSeqNumber = lastSeqNumberInPlaylist;
+                    extractAndQueueAccessUnitsFromTs(buffer);
+                }
+                notifyError(ERROR_END_OF_STREAM);
+            } else {
+                // It's possible that we were never able to download the playlist.
+                // In this case we should notify error, instead of EOS, as EOS during
+                // prepare means we succeeded in downloading everything.
+                ALOGE("Failed to download playlist!");
+                notifyError(ERROR_IO);
+            }
+
+            return false;
         }
     }
 
     mNumRetries = 0;
 
-    AString uri;
-    sp<AMessage> itemMeta;
     CHECK(mPlaylist->itemAt(
                 mSeqNumber - firstSeqNumberInPlaylist,
                 &uri,
                 &itemMeta));
 
+    CHECK(itemMeta->findInt32("discontinuity-sequence", &mDiscontinuitySeq));
+
     int32_t val;
     if (itemMeta->findInt32("discontinuity", &val) && val != 0) {
-        mDiscontinuitySeq++;
         discontinuity = true;
+    } else if (mLastDiscontinuitySeq >= 0
+            && mDiscontinuitySeq != mLastDiscontinuitySeq) {
+        // Seek jumped to a new discontinuity sequence. We need to signal
+        // a format change to the decoder. The decoder needs to shut down and be
+        // re-created if seamless format change is unsupported.
+        FLOGV("saw discontinuity: mStartup %d, mLastDiscontinuitySeq %d, "
+                "mDiscontinuitySeq %d, mStartTimeUs %lld",
+                mStartup, mLastDiscontinuitySeq, mDiscontinuitySeq, (long long)mStartTimeUs);
+        discontinuity = true;
+    }
+    mLastDiscontinuitySeq = -1;
+
+    // decrypt a junk buffer to prefetch the key; since a session uses only one HTTP connection,
+    // this avoids interleaved connections to the key and segment file.
+    {
+        sp<ABuffer> junk = new ABuffer(16);
+        junk->setRange(0, 16);
+        status_t err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, junk,
+                true /* first */);
+        if (err == ERROR_NOT_CONNECTED) {
+            return false;
+        } else if (err != OK) {
+            notifyError(err);
+            return false;
+        }
+    }
+
+    if ((mStartup && !mTimeChangeSignaled) || discontinuity) {
+        // We need to signal a time discontinuity to ATSParser on the
+        // first segment after start, or on a discontinuity segment.
+        // Setting mNextPTSTimeUs informs extractAndQueueAccessUnitsXX()
+        // to send the time discontinuity.
+        if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
+            // If this was a live event, this would make no sense, since
+            // we don't have access to all the segments before the current
+            // one.
+            mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
+        }
+
+        // Set mTimeChangeSignaled to true so that if the start-time
+        // search goes into the 2nd segment (without a discontinuity),
+        // we don't reset the time again; doing so causes corruption when
+        // pending data in the ATSParser is cleared.
+        mTimeChangeSignaled = true;
+    }
+
+    if (discontinuity) {
+        ALOGI("queueing discontinuity (explicit=%d)", discontinuity);
+
+        // Signal a format discontinuity to ATSParser to clear partial data
+        // from previous streams. Not doing this causes bitstream corruption.
+        if (mTSParser != NULL) {
+            mTSParser->signalDiscontinuity(
+                    ATSParser::DISCONTINUITY_FORMATCHANGE, NULL /* extra */);
+        }
+
+        queueDiscontinuity(
+                ATSParser::DISCONTINUITY_FORMAT_ONLY,
+                NULL /* extra */);
+
+        if (mStartup && mStartTimeUsRelative && mFirstPTSValid) {
+            // This means we guessed mStartTimeUs to be in the previous
+            // segment (likely very close to the end), but either video or
+            // audio has not found its start point by the end of that segment.
+            //
+            // If this new segment is not a discontinuity, keep searching.
+            //
+            // If this new segment does have a discontinuity marker, just
+            // set mStartTimeUs=0, and take all samples from now on.
+            mStartTimeUs = 0;
+            mFirstPTSValid = false;
+            mIDRFound = false;
+            mVideoBuffer->clear();
+        }
+    }
+
+    FLOGV("fetching segment %d from (%d .. %d)",
+            mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
+    return true;
+}
+
+void PlaylistFetcher::onDownloadNext() {
+    AString uri;
+    sp<AMessage> itemMeta;
+    sp<ABuffer> buffer;
+    sp<ABuffer> tsBuffer;
+    int32_t firstSeqNumberInPlaylist = 0;
+    int32_t lastSeqNumberInPlaylist = 0;
+    bool connectHTTP = true;
+
+    if (mDownloadState->hasSavedState()) {
+        mDownloadState->restoreState(
+                uri,
+                itemMeta,
+                buffer,
+                tsBuffer,
+                firstSeqNumberInPlaylist,
+                lastSeqNumberInPlaylist);
+        connectHTTP = false;
+        FLOGV("resuming: '%s'", uri.c_str());
+    } else {
+        if (!initDownloadState(
+                uri,
+                itemMeta,
+                firstSeqNumberInPlaylist,
+                lastSeqNumberInPlaylist)) {
+            return;
+        }
+        FLOGV("fetching: '%s'", uri.c_str());
     }
 
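The restore/save calls in onDownloadNext() rely on the new DownloadState helper declared in PlaylistFetcher.h. Purely as an illustration of the pattern (this standalone struct is not the real DownloadState, which stores the smart pointers passed above), a state holder of this shape lets a paused fetcher resume a half-finished segment without reconnecting:

#include <cstdint>
#include <string>
#include <utility>

struct SegmentDownloadState {
    bool hasSavedState() const { return mSaved; }

    void saveState(std::string uri, int32_t firstSeq, int32_t lastSeq) {
        mUri = std::move(uri);
        mFirstSeq = firstSeq;
        mLastSeq = lastSeq;
        mSaved = true;
    }

    void restoreState(std::string *uri, int32_t *firstSeq, int32_t *lastSeq) {
        *uri = mUri;
        *firstSeq = mFirstSeq;
        *lastSeq = mLastSeq;
        mSaved = false;  // a restored state is consumed
    }

private:
    bool mSaved = false;
    std::string mUri;
    int32_t mFirstSeq = 0;
    int32_t mLastSeq = 0;
};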
     int64_t range_offset, range_length;
@@ -890,33 +1242,19 @@
         range_length = -1;
     }
 
-    ALOGV("fetching segment %d from (%d .. %d)",
-          mSeqNumber, firstSeqNumberInPlaylist, lastSeqNumberInPlaylist);
-
-    ALOGV("fetching '%s'", uri.c_str());
-
-    sp<DataSource> source;
-    sp<ABuffer> buffer, tsBuffer;
-    // decrypt a junk buffer to prefetch key; since a session uses only one http connection,
-    // this avoids interleaved connections to the key and segment file.
-    {
-        sp<ABuffer> junk = new ABuffer(16);
-        junk->setRange(0, 16);
-        status_t err = decryptBuffer(mSeqNumber - firstSeqNumberInPlaylist, junk,
-                true /* first */);
-        if (err != OK) {
-            notifyError(err);
-            return;
-        }
-    }
-
     // block-wise download
-    bool startup = mStartup;
+    bool shouldPause = false;
     ssize_t bytesRead;
     do {
-        bytesRead = mSession->fetchFile(
-                uri.c_str(), &buffer, range_offset, range_length, kDownloadBlockSize, &source);
+        int64_t startUs = ALooper::GetNowUs();
+        bytesRead = mHTTPDownloader->fetchBlock(
+                uri.c_str(), &buffer, range_offset, range_length, kDownloadBlockSize,
+                NULL /* actualURL */, connectHTTP);
+        int64_t delayUs = ALooper::GetNowUs() - startUs;
 
+        if (bytesRead == ERROR_NOT_CONNECTED) {
+            return;
+        }
         if (bytesRead < 0) {
             status_t err = bytesRead;
             ALOGE("failed to fetch .ts segment at url '%s'", uri.c_str());
@@ -924,6 +1262,22 @@
             return;
         }
 
+        // add a sample for bandwidth estimation, excluding samples from subtitles (as
+        // they're too small) or during startup/resumeUntil (when we could have more than
+        // one connection open, which affects bandwidth)
+        if (!mStartup && mStopParams == NULL && bytesRead > 0
+                && (mStreamTypeMask
+                        & (LiveSession::STREAMTYPE_AUDIO
+                        | LiveSession::STREAMTYPE_VIDEO))) {
+            mSession->addBandwidthMeasurement(bytesRead, delayUs);
+            if (delayUs > 2000000ll) {
+                FLOGV("bytesRead %zd took %.2f seconds - abnormal bandwidth dip",
+                        bytesRead, (double)delayUs / 1.0e6);
+            }
+        }
+
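To make the sampling condition above explicit, here is a minimal standalone sketch (the enum values and helper name are illustrative, not the LiveSession constants): a bandwidth sample is only recorded for audio/video fetches outside of startup and resumeUntil.

#include <cstdint>

enum StreamMask : uint32_t {
    kStreamAudio     = 1u << 0,
    kStreamVideo     = 1u << 1,
    kStreamSubtitles = 1u << 2,
};

// Skip subtitle-only fetches (too small to be meaningful) and samples taken
// while extra connections may be open (startup or resumeUntil).
static bool shouldAddBandwidthSample(bool startup, bool hasStopParams,
                                     int64_t bytesRead, uint32_t streamTypeMask) {
    return !startup && !hasStopParams && bytesRead > 0
            && (streamTypeMask & (kStreamAudio | kStreamVideo)) != 0;
}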
+        connectHTTP = false;
+
         CHECK(buffer != NULL);
 
         size_t size = buffer->size();
@@ -941,28 +1295,7 @@
             return;
         }
 
-        if (startup || discontinuity) {
-            // Signal discontinuity.
-
-            if (mPlaylist->isComplete() || mPlaylist->isEvent()) {
-                // If this was a live event this made no sense since
-                // we don't have access to all the segment before the current
-                // one.
-                mNextPTSTimeUs = getSegmentStartTimeUs(mSeqNumber);
-            }
-
-            if (discontinuity) {
-                ALOGI("queueing discontinuity (explicit=%d)", discontinuity);
-
-                queueDiscontinuity(
-                        ATSParser::DISCONTINUITY_FORMATCHANGE,
-                        NULL /* extra */);
-
-                discontinuity = false;
-            }
-
-            startup = false;
-        }
+        bool startUp = mStartup; // save current start up state
 
         err = OK;
         if (bufferStartsWithTsSyncByte(buffer)) {
@@ -976,7 +1309,6 @@
                 tsBuffer->setRange(tsOff, tsSize);
             }
             tsBuffer->setRange(tsBuffer->offset(), tsBuffer->size() + bytesRead);
-
             err = extractAndQueueAccessUnitsFromTs(tsBuffer);
         }
 
@@ -991,23 +1323,45 @@
             return;
         } else if (err == ERROR_OUT_OF_RANGE) {
             // reached stopping point
-            stopAsync(/* clear = */ false);
+            notifyStopReached();
             return;
         } else if (err != OK) {
             notifyError(err);
             return;
         }
-
+        // If we're switching, post the start notification;
+        // this should only be posted when the last chunk is fully processed by the TSParser
+        if (mSeekMode != LiveSession::kSeekModeExactPosition && startUp != mStartup) {
+            CHECK(mStartTimeUsNotify != NULL);
+            mStartTimeUsNotify->post();
+            mStartTimeUsNotify.clear();
+            shouldPause = true;
+        }
+        if (shouldPause || shouldPauseDownload()) {
+            // save state and return if this is not the last chunk,
+            // leaving the fetcher in a paused state.
+            if (bytesRead != 0) {
+                mDownloadState->saveState(
+                        uri,
+                        itemMeta,
+                        buffer,
+                        tsBuffer,
+                        firstSeqNumberInPlaylist,
+                        lastSeqNumberInPlaylist);
+                return;
+            }
+            shouldPause = true;
+        }
     } while (bytesRead != 0);
 
     if (bufferStartsWithTsSyncByte(buffer)) {
         // If we don't see a stream in the program table after fetching a full ts segment
         // mark it as nonexistent.
-        const size_t kNumTypes = ATSParser::NUM_SOURCE_TYPES;
-        ATSParser::SourceType srcTypes[kNumTypes] =
+        ATSParser::SourceType srcTypes[] =
                 { ATSParser::VIDEO, ATSParser::AUDIO };
-        LiveSession::StreamType streamTypes[kNumTypes] =
+        LiveSession::StreamType streamTypes[] =
                 { LiveSession::STREAMTYPE_VIDEO, LiveSession::STREAMTYPE_AUDIO };
+        const size_t kNumTypes = NELEM(srcTypes);
 
         for (size_t i = 0; i < kNumTypes; i++) {
             ATSParser::SourceType srcType = srcTypes[i];
@@ -1034,7 +1388,6 @@
         return;
     }
 
-    err = OK;
     if (tsBuffer != NULL) {
         AString method;
         CHECK(buffer->meta()->findString("cipher-method", &method));
@@ -1048,81 +1401,117 @@
     }
 
     // bulk extract non-ts files
+    bool startUp = mStartup;
     if (tsBuffer == NULL) {
-        err = extractAndQueueAccessUnits(buffer, itemMeta);
+        status_t err = extractAndQueueAccessUnits(buffer, itemMeta);
         if (err == -EAGAIN) {
             // starting sequence number too low/high
             postMonitorQueue();
             return;
         } else if (err == ERROR_OUT_OF_RANGE) {
             // reached stopping point
-            stopAsync(/* clear = */false);
+            notifyStopReached();
+            return;
+        } else if (err != OK) {
+            notifyError(err);
             return;
         }
     }
 
-    if (err != OK) {
-        notifyError(err);
-        return;
-    }
-
     ++mSeqNumber;
 
-    postMonitorQueue();
+    // if adapting, pause after finding the next starting point
+    if (mSeekMode != LiveSession::kSeekModeExactPosition && startUp != mStartup) {
+        CHECK(mStartTimeUsNotify != NULL);
+        mStartTimeUsNotify->post();
+        mStartTimeUsNotify.clear();
+        shouldPause = true;
+    }
+
+    if (!shouldPause) {
+        postMonitorQueue();
+    }
 }
 
-int32_t PlaylistFetcher::getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const {
-    int32_t firstSeqNumberInPlaylist, lastSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL
-            || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-    lastSeqNumberInPlaylist = firstSeqNumberInPlaylist + mPlaylist->size() - 1;
+/*
+ * Returns true if mSeqNumber was adjusted to match the anchor time.
+ */
+bool PlaylistFetcher::adjustSeqNumberWithAnchorTime(int64_t anchorTimeUs) {
+    int32_t firstSeqNumberInPlaylist = mPlaylist->getFirstSeqNumber();
 
-    int32_t index = mSeqNumber - firstSeqNumberInPlaylist - 1;
-    while (index >= 0 && anchorTimeUs > mStartTimeUs) {
-        sp<AMessage> itemMeta;
-        CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
-
-        int64_t itemDurationUs;
-        CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
-
-        anchorTimeUs -= itemDurationUs;
-        --index;
-    }
-
-    int32_t newSeqNumber = firstSeqNumberInPlaylist + index + 1;
-    if (newSeqNumber <= lastSeqNumberInPlaylist) {
-        return newSeqNumber;
+    int64_t minDiffUs, maxDiffUs;
+    if (mSeekMode == LiveSession::kSeekModeNextSample) {
+        // if the previous fetcher paused in the middle of a segment, we
+        // want to start at a segment that overlaps the last sample
+        minDiffUs = -mPlaylist->getTargetDuration();
+        maxDiffUs = 0ll;
     } else {
-        return lastSeqNumberInPlaylist;
+        // if the previous fetcher paused at the end of a segment, ideally
+        // we want to start at the segment that's roughly aligned with its
+        // next segment, but if the two variants are not well aligned we
+        // adjust the diff to within (-T/2, T/2)
+        minDiffUs = -mPlaylist->getTargetDuration() / 2;
+        maxDiffUs = mPlaylist->getTargetDuration() / 2;
     }
+
+    int32_t oldSeqNumber = mSeqNumber;
+    ssize_t index = mSeqNumber - firstSeqNumberInPlaylist;
+
+    // adjust the segment index so that (anchorTimeUs - mStartTimeUs) falls within [minDiffUs, maxDiffUs]
+    int64_t diffUs = anchorTimeUs - mStartTimeUs;
+    if (diffUs > maxDiffUs) {
+        while (index > 0 && diffUs > maxDiffUs) {
+            --index;
+
+            sp<AMessage> itemMeta;
+            CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
+
+            int64_t itemDurationUs;
+            CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+            diffUs -= itemDurationUs;
+        }
+    } else if (diffUs < minDiffUs) {
+        while (index + 1 < (ssize_t) mPlaylist->size()
+                && diffUs < minDiffUs) {
+            ++index;
+
+            sp<AMessage> itemMeta;
+            CHECK(mPlaylist->itemAt(index, NULL /* uri */, &itemMeta));
+
+            int64_t itemDurationUs;
+            CHECK(itemMeta->findInt64("durationUs", &itemDurationUs));
+
+            diffUs += itemDurationUs;
+        }
+    }
+
+    mSeqNumber = firstSeqNumberInPlaylist + index;
+
+    if (mSeqNumber != oldSeqNumber) {
+        FLOGV("guessed wrong seg number: diff %lld out of [%lld, %lld]",
+                (long long)(anchorTimeUs - mStartTimeUs),
+                (long long) minDiffUs,
+                (long long) maxDiffUs);
+        return true;
+    }
+    return false;
 }
 
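A standalone sketch of the adjustment above, operating on a plain vector of segment durations instead of an M3UParser playlist (the function and parameter names are illustrative):

#include <cstdint>
#include <vector>

// Walks the segment index down (or up) until anchorUs - startUs falls inside
// [minDiffUs, maxDiffUs], mirroring adjustSeqNumberWithAnchorTime().
static size_t adjustIndexWithAnchorTime(const std::vector<int64_t> &segDurationsUs,
                                        size_t index, int64_t anchorUs, int64_t startUs,
                                        int64_t minDiffUs, int64_t maxDiffUs) {
    int64_t diffUs = anchorUs - startUs;
    if (diffUs > maxDiffUs) {
        while (index > 0 && diffUs > maxDiffUs) {
            --index;
            diffUs -= segDurationsUs[index];
        }
    } else if (diffUs < minDiffUs) {
        while (index + 1 < segDurationsUs.size() && diffUs < minDiffUs) {
            ++index;
            diffUs += segDurationsUs[index];
        }
    }
    return index;
}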
 int32_t PlaylistFetcher::getSeqNumberForDiscontinuity(size_t discontinuitySeq) const {
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL
-            || !mPlaylist->meta()->findInt32("media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
-    size_t curDiscontinuitySeq = mPlaylist->getDiscontinuitySeq();
-    if (discontinuitySeq < curDiscontinuitySeq) {
-        return firstSeqNumberInPlaylist <= 0 ? 0 : (firstSeqNumberInPlaylist - 1);
-    }
+    int32_t firstSeqNumberInPlaylist = mPlaylist->getFirstSeqNumber();
 
     size_t index = 0;
     while (index < mPlaylist->size()) {
         sp<AMessage> itemMeta;
         CHECK(mPlaylist->itemAt( index, NULL /* uri */, &itemMeta));
-
-        int64_t discontinuity;
-        if (itemMeta->findInt64("discontinuity", &discontinuity)) {
-            curDiscontinuitySeq++;
-        }
-
+        int32_t curSeq;
+        CHECK(itemMeta->findInt32("discontinuity-sequence", &curSeq));
+        size_t curDiscontinuitySeq = (size_t)curSeq;
+        int32_t seqNumber = firstSeqNumberInPlaylist + index;
         if (curDiscontinuitySeq == discontinuitySeq) {
-            return firstSeqNumberInPlaylist + index;
+            return seqNumber;
+        } else if (curDiscontinuitySeq > discontinuitySeq) {
+            return seqNumber <= 0 ? 0 : seqNumber - 1;
         }
 
         ++index;
@@ -1132,12 +1521,6 @@
 }
 
 int32_t PlaylistFetcher::getSeqNumberForTime(int64_t timeUs) const {
-    int32_t firstSeqNumberInPlaylist;
-    if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                "media-sequence", &firstSeqNumberInPlaylist)) {
-        firstSeqNumberInPlaylist = 0;
-    }
-
     size_t index = 0;
     int64_t segmentStartUs = 0;
     while (index < mPlaylist->size()) {
@@ -1160,7 +1543,7 @@
         index = mPlaylist->size() - 1;
     }
 
-    return firstSeqNumberInPlaylist + index;
+    return mPlaylist->getFirstSeqNumber() + index;
 }
 
 const sp<ABuffer> &PlaylistFetcher::setAccessUnitProperties(
@@ -1175,16 +1558,37 @@
         accessUnit->meta()->setInt32("discard", discard);
     }
 
-    int32_t targetDurationSecs;
-    if (mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs)) {
-        accessUnit->meta()->setInt32("targetDuration", targetDurationSecs);
-    }
-
     accessUnit->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
     accessUnit->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
+    accessUnit->meta()->setInt64("segmentFirstTimeUs", mSegmentFirstPTS);
+    accessUnit->meta()->setInt64("segmentDurationUs", getSegmentDurationUs(mSeqNumber));
+    if (!mPlaylist->isComplete() && !mPlaylist->isEvent()) {
+        accessUnit->meta()->setInt64("playlistTimeUs", mPlaylistTimeUs);
+    }
     return accessUnit;
 }
 
+bool PlaylistFetcher::isStartTimeReached(int64_t timeUs) {
+    if (!mFirstPTSValid) {
+        mFirstTimeUs = timeUs;
+        mFirstPTSValid = true;
+    }
+    bool startTimeReached = true;
+    if (mStartTimeUsRelative) {
+        FLOGV("startTimeUsRelative, timeUs (%lld) - %lld = %lld",
+                (long long)timeUs,
+                (long long)mFirstTimeUs,
+                (long long)(timeUs - mFirstTimeUs));
+        timeUs -= mFirstTimeUs;
+        if (timeUs < 0) {
+            FLOGV("clamp negative timeUs to 0");
+            timeUs = 0;
+        }
+        startTimeReached = (timeUs >= mStartTimeUs);
+    }
+    return startTimeReached;
+}
+
 status_t PlaylistFetcher::extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer) {
     if (mTSParser == NULL) {
         // Use TS_TIMESTAMPS_ARE_ABSOLUTE so pts carry over between fetchers.
@@ -1197,12 +1601,16 @@
         // ATSParser from skewing the timestamps of access units.
         extra->setInt64(IStreamListener::kKeyMediaTimeUs, 0);
 
+        // When adapting, signal a recent media time to the parser,
+        // so that PTS wrap around is handled for the new variant.
+        if (mStartTimeUs >= 0 && !mStartTimeUsRelative) {
+            extra->setInt64(IStreamListener::kKeyRecentMediaTimeUs, mStartTimeUs);
+        }
+
         mTSParser->signalDiscontinuity(
                 ATSParser::DISCONTINUITY_TIME, extra);
 
-        mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
         mNextPTSTimeUs = -1ll;
-        mFirstPTSValid = false;
     }
 
     size_t offset = 0;
@@ -1218,35 +1626,75 @@
     // setRange to indicate consumed bytes.
     buffer->setRange(buffer->offset() + offset, buffer->size() - offset);
 
+    if (mSegmentFirstPTS < 0ll) {
+        // get the smallest first PTS from all streams present in this parser
+        for (size_t i = mPacketSources.size(); i-- > 0;) {
+            const LiveSession::StreamType stream = mPacketSources.keyAt(i);
+            if (stream == LiveSession::STREAMTYPE_SUBTITLES) {
+                ALOGE("MPEG2 Transport streams do not contain subtitles.");
+                return ERROR_MALFORMED;
+            }
+            if (stream == LiveSession::STREAMTYPE_METADATA) {
+                continue;
+            }
+            ATSParser::SourceType type = LiveSession::getSourceTypeForStream(stream);
+            sp<AnotherPacketSource> source =
+                static_cast<AnotherPacketSource *>(
+                        mTSParser->getSource(type).get());
+
+            if (source == NULL) {
+                continue;
+            }
+            sp<AMessage> meta = source->getMetaAfterLastDequeued(0);
+            if (meta != NULL) {
+                int64_t timeUs;
+                CHECK(meta->findInt64("timeUs", &timeUs));
+                if (mSegmentFirstPTS < 0ll || timeUs < mSegmentFirstPTS) {
+                    mSegmentFirstPTS = timeUs;
+                }
+            }
+        }
+        if (mSegmentFirstPTS < 0ll) {
+            // didn't find any TS packet, can return early
+            return OK;
+        }
+        if (!mStartTimeUsRelative) {
+            // mStartup
+            //   mStartup is true until we have queued a packet for all the streams
+            //   we are fetching. We queue packets whose timestamps are greater than
+            //   mStartTimeUs.
+            // mSegmentStartTimeUs >= 0
+            //   mSegmentStartTimeUs is non-negative when adapting or switching tracks
+            // adjustSeqNumberWithAnchorTime(timeUs) == true
+            //   we guessed a seq number that's either too large or too small.
+            // If all of these hold, we'll adjust mSeqNumber and restart fetching from the
+            // new location. Note that we only want to adjust once, so set mSegmentStartTimeUs
+            // to -1 so that we don't enter this block next time.
+            if (mStartup && mSegmentStartTimeUs >= 0
+                    && adjustSeqNumberWithAnchorTime(mSegmentFirstPTS)) {
+                mStartTimeUsNotify = mNotify->dup();
+                mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
+                mStartTimeUsNotify->setString("uri", mURI);
+                mIDRFound = false;
+                mSegmentStartTimeUs = -1;
+                return -EAGAIN;
+            }
+        }
+    }
+
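The block above scans every stream the parser produced and keeps the smallest first timestamp. A standalone sketch of that reduction, with plain int64_t values in place of AnotherPacketSource metadata (the helper name is illustrative):

#include <cstdint>
#include <vector>

// Each entry is the earliest timestamp (us) seen on one stream, or -1 if that
// stream has produced nothing yet. Returns -1 if no stream has data.
static int64_t smallestFirstPtsUs(const std::vector<int64_t> &firstPtsPerStreamUs) {
    int64_t segmentFirstPtsUs = -1;
    for (int64_t ptsUs : firstPtsPerStreamUs) {
        if (ptsUs < 0) {
            continue;  // stream not started yet
        }
        if (segmentFirstPtsUs < 0 || ptsUs < segmentFirstPtsUs) {
            segmentFirstPtsUs = ptsUs;
        }
    }
    return segmentFirstPtsUs;
}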
     status_t err = OK;
     for (size_t i = mPacketSources.size(); i-- > 0;) {
         sp<AnotherPacketSource> packetSource = mPacketSources.valueAt(i);
 
-        const char *key;
-        ATSParser::SourceType type;
         const LiveSession::StreamType stream = mPacketSources.keyAt(i);
-        switch (stream) {
-            case LiveSession::STREAMTYPE_VIDEO:
-                type = ATSParser::VIDEO;
-                key = "timeUsVideo";
-                break;
-
-            case LiveSession::STREAMTYPE_AUDIO:
-                type = ATSParser::AUDIO;
-                key = "timeUsAudio";
-                break;
-
-            case LiveSession::STREAMTYPE_SUBTITLES:
-            {
-                ALOGE("MPEG2 Transport streams do not contain subtitles.");
-                return ERROR_MALFORMED;
-                break;
-            }
-
-            default:
-                TRESPASS();
+        if (stream == LiveSession::STREAMTYPE_SUBTITLES) {
+            ALOGE("MPEG2 Transport streams do not contain subtitles.");
+            return ERROR_MALFORMED;
         }
 
+        const char *key = LiveSession::getKeyForStream(stream);
+        ATSParser::SourceType type = LiveSession::getSourceTypeForStream(stream);
+
         sp<AnotherPacketSource> source =
             static_cast<AnotherPacketSource *>(
                     mTSParser->getSource(type).get());
@@ -1255,116 +1703,65 @@
             continue;
         }
 
-        int64_t timeUs;
+        const char *mime;
+        sp<MetaData> format = source->getFormat();
+        bool isAvc = format != NULL && format->findCString(kKeyMIMEType, &mime)
+                && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+
         sp<ABuffer> accessUnit;
         status_t finalResult;
         while (source->hasBufferAvailable(&finalResult)
                 && source->dequeueAccessUnit(&accessUnit) == OK) {
 
+            int64_t timeUs;
             CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
 
             if (mStartup) {
-                if (!mFirstPTSValid) {
-                    mFirstTimeUs = timeUs;
-                    mFirstPTSValid = true;
-                }
-                if (mStartTimeUsRelative) {
-                    timeUs -= mFirstTimeUs;
-                    if (timeUs < 0) {
-                        timeUs = 0;
-                    }
-                }
+                bool startTimeReached = isStartTimeReached(timeUs);
 
-                if (timeUs < mStartTimeUs) {
-                    // buffer up to the closest preceding IDR frame
-                    ALOGV("timeUs %" PRId64 " us < mStartTimeUs %" PRId64 " us",
-                            timeUs, mStartTimeUs);
-                    const char *mime;
-                    sp<MetaData> format  = source->getFormat();
-                    bool isAvc = false;
-                    if (format != NULL && format->findCString(kKeyMIMEType, &mime)
-                            && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
-                        isAvc = true;
-                    }
-                    if (isAvc && IsIDR(accessUnit)) {
-                        mVideoBuffer->clear();
-                    }
+                if (!startTimeReached || (isAvc && !mIDRFound)) {
+                    // buffer up to the closest preceding IDR frame in the next segment,
+                    // or the closest succeeding IDR frame after the exact position
+                    FSLOGV(stream, "timeUs(%lld)-mStartTimeUs(%lld)=%lld, mIDRFound=%d",
+                            (long long)timeUs,
+                            (long long)mStartTimeUs,
+                            (long long)timeUs - mStartTimeUs,
+                            mIDRFound);
                     if (isAvc) {
-                        mVideoBuffer->queueAccessUnit(accessUnit);
+                        if (IsIDR(accessUnit)) {
+                            mVideoBuffer->clear();
+                            FSLOGV(stream, "found IDR, clear mVideoBuffer");
+                            mIDRFound = true;
+                        }
+                        if (mIDRFound && mStartTimeUsRelative && !startTimeReached) {
+                            mVideoBuffer->queueAccessUnit(accessUnit);
+                            FSLOGV(stream, "saving AVC video AccessUnit");
+                        }
                     }
-
-                    continue;
+                    if (!startTimeReached || (isAvc && !mIDRFound)) {
+                        continue;
+                    }
                 }
             }
 
-            CHECK(accessUnit->meta()->findInt64("timeUs", &timeUs));
-            if (mStartTimeUsNotify != NULL && timeUs > mStartTimeUs) {
-                int32_t firstSeqNumberInPlaylist;
-                if (mPlaylist->meta() == NULL || !mPlaylist->meta()->findInt32(
-                            "media-sequence", &firstSeqNumberInPlaylist)) {
-                    firstSeqNumberInPlaylist = 0;
-                }
-
-                int32_t targetDurationSecs;
-                CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-                int64_t targetDurationUs = targetDurationSecs * 1000000ll;
-                // mStartup
-                //   mStartup is true until we have queued a packet for all the streams
-                //   we are fetching. We queue packets whose timestamps are greater than
-                //   mStartTimeUs.
-                // mSegmentStartTimeUs >= 0
-                //   mSegmentStartTimeUs is non-negative when adapting or switching tracks
-                // mSeqNumber > firstSeqNumberInPlaylist
-                //   don't decrement mSeqNumber if it already points to the 1st segment
-                // timeUs - mStartTimeUs > targetDurationUs:
-                //   This and the 2 above conditions should only happen when adapting in a live
-                //   stream; the old fetcher has already fetched to mStartTimeUs; the new fetcher
-                //   would start fetching after timeUs, which should be greater than mStartTimeUs;
-                //   the old fetcher would then continue fetching data until timeUs. We don't want
-                //   timeUs to be too far ahead of mStartTimeUs because we want the old fetcher to
-                //   stop as early as possible. The definition of being "too far ahead" is
-                //   arbitrary; here we use targetDurationUs as threshold.
-                if (mStartup && mSegmentStartTimeUs >= 0
-                        && mSeqNumber > firstSeqNumberInPlaylist
-                        && timeUs - mStartTimeUs > targetDurationUs) {
-                    // we just guessed a starting timestamp that is too high when adapting in a
-                    // live stream; re-adjust based on the actual timestamp extracted from the
-                    // media segment; if we didn't move backward after the re-adjustment
-                    // (newSeqNumber), start at least 1 segment prior.
-                    int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
-                    if (newSeqNumber >= mSeqNumber) {
-                        --mSeqNumber;
-                    } else {
-                        mSeqNumber = newSeqNumber;
-                    }
-                    mStartTimeUsNotify = mNotify->dup();
-                    mStartTimeUsNotify->setInt32("what", kWhatStartedAt);
-                    return -EAGAIN;
-                }
-
-                int32_t seq;
-                if (!mStartTimeUsNotify->findInt32("discontinuitySeq", &seq)) {
-                    mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
-                }
-                int64_t startTimeUs;
-                if (!mStartTimeUsNotify->findInt64(key, &startTimeUs)) {
-                    mStartTimeUsNotify->setInt64(key, timeUs);
-
-                    uint32_t streamMask = 0;
-                    mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
+            if (mStartTimeUsNotify != NULL) {
+                uint32_t streamMask = 0;
+                mStartTimeUsNotify->findInt32("streamMask", (int32_t *) &streamMask);
+                if ((mStreamTypeMask & mPacketSources.keyAt(i))
+                        && !(streamMask & mPacketSources.keyAt(i))) {
                     streamMask |= mPacketSources.keyAt(i);
                     mStartTimeUsNotify->setInt32("streamMask", streamMask);
+                    FSLOGV(stream, "found start point, timeUs=%lld, streamMask becomes %x",
+                            (long long)timeUs, streamMask);
 
                     if (streamMask == mStreamTypeMask) {
+                        FLOGV("found start point for all streams");
                         mStartup = false;
-                        mStartTimeUsNotify->post();
-                        mStartTimeUsNotify.clear();
                     }
                 }
             }
 
             if (mStopParams != NULL) {
-                // Queue discontinuity in original stream.
                 int32_t discontinuitySeq;
                 int64_t stopTimeUs;
                 if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
@@ -1372,14 +1769,13 @@
                         || !mStopParams->findInt64(key, &stopTimeUs)
                         || (discontinuitySeq == mDiscontinuitySeq
                                 && timeUs >= stopTimeUs)) {
-                    packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
+                    FSLOGV(stream, "reached stop point, timeUs=%lld", (long long)timeUs);
                     mStreamTypeMask &= ~stream;
                     mPacketSources.removeItemsAt(i);
                     break;
                 }
             }
 
-            // Note that we do NOT dequeue any discontinuities except for format change.
             if (stream == LiveSession::STREAMTYPE_VIDEO) {
                 const bool discard = true;
                 status_t status;
@@ -1388,11 +1784,21 @@
                     mVideoBuffer->dequeueAccessUnit(&videoBuffer);
                     setAccessUnitProperties(videoBuffer, source, discard);
                     packetSource->queueAccessUnit(videoBuffer);
+                    int64_t bufferTimeUs;
+                    CHECK(videoBuffer->meta()->findInt64("timeUs", &bufferTimeUs));
+                    FSLOGV(stream, "queueAccessUnit (saved), timeUs=%lld",
+                            (long long)bufferTimeUs);
                 }
+            } else if (stream == LiveSession::STREAMTYPE_METADATA && !mHasMetadata) {
+                mHasMetadata = true;
+                sp<AMessage> notify = mNotify->dup();
+                notify->setInt32("what", kWhatMetadataDetected);
+                notify->post();
             }
 
             setAccessUnitProperties(accessUnit, source);
             packetSource->queueAccessUnit(accessUnit);
+            FSLOGV(stream, "queueAccessUnit, timeUs=%lld", (long long)timeUs);
         }
 
         if (err != OK) {
@@ -1410,7 +1816,7 @@
 
     if (!mStreamTypeMask) {
         // Signal gap is filled between original and new stream.
-        ALOGV("ERROR OUT OF RANGE");
+        FLOGV("reached stop point for all streams");
         return ERROR_OUT_OF_RANGE;
     }
 
@@ -1461,14 +1867,11 @@
         buffer->meta()->setInt64("segmentStartTimeUs", getSegmentStartTimeUs(mSeqNumber));
         buffer->meta()->setInt32("discontinuitySeq", mDiscontinuitySeq);
         buffer->meta()->setInt32("subtitleGeneration", mSubtitleGeneration);
-
         packetSource->queueAccessUnit(buffer);
         return OK;
     }
 
     if (mNextPTSTimeUs >= 0ll) {
-        mFirstPTSValid = false;
-        mAbsoluteTimeAnchorUs = mNextPTSTimeUs;
         mNextPTSTimeUs = -1ll;
     }
 
@@ -1569,11 +1972,23 @@
     CHECK(packetSource->getFormat()->findInt32(kKeySampleRate, &sampleRate));
 
     int64_t timeUs = (PTS * 100ll) / 9ll;
-    if (!mFirstPTSValid) {
+    if (mStartup && !mFirstPTSValid) {
         mFirstPTSValid = true;
         mFirstTimeUs = timeUs;
     }
 
+    if (mSegmentFirstPTS < 0ll) {
+        mSegmentFirstPTS = timeUs;
+        if (!mStartTimeUsRelative) {
+            // Duplicated logic from how we handle .ts playlists.
+            if (mStartup && mSegmentStartTimeUs >= 0
+                    && adjustSeqNumberWithAnchorTime(timeUs)) {
+                mSegmentStartTimeUs = -1;
+                return -EAGAIN;
+            }
+        }
+    }
+
     size_t offset = 0;
     while (offset < buffer->size()) {
         const uint8_t *adtsHeader = buffer->data() + offset;
@@ -1617,40 +2032,18 @@
             }
 
             if (mStartTimeUsNotify != NULL) {
-                int32_t targetDurationSecs;
-                CHECK(mPlaylist->meta()->findInt32("target-duration", &targetDurationSecs));
-                int64_t targetDurationUs = targetDurationSecs * 1000000ll;
-
-                // Duplicated logic from how we handle .ts playlists.
-                if (mStartup && mSegmentStartTimeUs >= 0
-                        && timeUs - mStartTimeUs > targetDurationUs) {
-                    int32_t newSeqNumber = getSeqNumberWithAnchorTime(timeUs);
-                    if (newSeqNumber >= mSeqNumber) {
-                        --mSeqNumber;
-                    } else {
-                        mSeqNumber = newSeqNumber;
-                    }
-                    return -EAGAIN;
-                }
-
-                mStartTimeUsNotify->setInt64("timeUsAudio", timeUs);
-                mStartTimeUsNotify->setInt32("discontinuitySeq", mDiscontinuitySeq);
                 mStartTimeUsNotify->setInt32("streamMask", LiveSession::STREAMTYPE_AUDIO);
-                mStartTimeUsNotify->post();
-                mStartTimeUsNotify.clear();
                 mStartup = false;
             }
         }
 
         if (mStopParams != NULL) {
-            // Queue discontinuity in original stream.
             int32_t discontinuitySeq;
             int64_t stopTimeUs;
             if (!mStopParams->findInt32("discontinuitySeq", &discontinuitySeq)
                     || discontinuitySeq > mDiscontinuitySeq
                     || !mStopParams->findInt64("timeUsAudio", &stopTimeUs)
                     || (discontinuitySeq == mDiscontinuitySeq && unitTimeUs >= stopTimeUs)) {
-                packetSource->queueAccessUnit(mSession->createFormatChangeBuffer());
                 mStreamTypeMask = 0;
                 mPacketSources.clear();
                 return ERROR_OUT_OF_RANGE;
@@ -1687,33 +2080,11 @@
     msg->post();
 }
 
-int64_t PlaylistFetcher::resumeThreshold(const sp<AMessage> &msg) {
-    int64_t durationUs;
-    if (msg->findInt64("durationUs", &durationUs) && durationUs > 0) {
-        return kNumSkipFrames * durationUs;
-    }
-
-    sp<RefBase> obj;
-    msg->findObject("format", &obj);
-    MetaData *format = static_cast<MetaData *>(obj.get());
-
-    const char *mime;
-    CHECK(format->findCString(kKeyMIMEType, &mime));
-    bool audio = !strncasecmp(mime, "audio/", 6);
-    if (audio) {
-        // Assumes 1000 samples per frame.
-        int32_t sampleRate;
-        CHECK(format->findInt32(kKeySampleRate, &sampleRate));
-        return kNumSkipFrames  /* frames */ * 1000 /* samples */
-                * (1000000 / sampleRate) /* sample duration (us) */;
-    } else {
-        int32_t frameRate;
-        if (format->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
-            return kNumSkipFrames * (1000000 / frameRate);
-        }
-    }
-
-    return 500000ll;
+void PlaylistFetcher::updateTargetDuration() {
+    sp<AMessage> msg = mNotify->dup();
+    msg->setInt32("what", kWhatTargetDurationUpdate);
+    msg->setInt64("targetDurationUs", mPlaylist->getTargetDuration());
+    msg->post();
 }
 
 }  // namespace android
diff --git a/media/libstagefright/httplive/PlaylistFetcher.h b/media/libstagefright/httplive/PlaylistFetcher.h
index 4e15f85..c8ca457 100644
--- a/media/libstagefright/httplive/PlaylistFetcher.h
+++ b/media/libstagefright/httplive/PlaylistFetcher.h
@@ -27,7 +27,7 @@
 
 struct ABuffer;
 struct AnotherPacketSource;
-struct DataSource;
+class DataSource;
 struct HTTPBase;
 struct LiveDataSource;
 struct M3UParser;
@@ -36,6 +36,7 @@
 struct PlaylistFetcher : public AHandler {
     static const int64_t kMinBufferedDurationUs;
     static const int32_t kDownloadBlockSize;
+    static const int64_t kFetcherResumeThreshold;
 
     enum {
         kWhatStarted,
@@ -43,36 +44,43 @@
         kWhatStopped,
         kWhatError,
         kWhatDurationUpdate,
-        kWhatTemporarilyDoneFetching,
+        kWhatTargetDurationUpdate,
         kWhatPrepared,
         kWhatPreparationFailed,
         kWhatStartedAt,
+        kWhatStopReached,
+        kWhatPlaylistFetched,
+        kWhatMetadataDetected,
     };
 
     PlaylistFetcher(
             const sp<AMessage> &notify,
             const sp<LiveSession> &session,
             const char *uri,
+            int32_t id,
             int32_t subtitleGeneration);
 
-    sp<DataSource> getDataSource();
+    int32_t getFetcherID() const;
 
     void startAsync(
             const sp<AnotherPacketSource> &audioSource,
             const sp<AnotherPacketSource> &videoSource,
             const sp<AnotherPacketSource> &subtitleSource,
+            const sp<AnotherPacketSource> &metadataSource,
             int64_t startTimeUs = -1ll,         // starting timestamps
             int64_t segmentStartTimeUs = -1ll, // starting position within playlist
             // startTimeUs!=segmentStartTimeUs only when playlist is live
-            int32_t startDiscontinuitySeq = 0,
-            bool adaptive = false);
+            int32_t startDiscontinuitySeq = -1,
+            LiveSession::SeekMode seekMode = LiveSession::kSeekModeExactPosition);
 
-    void pauseAsync();
+    void pauseAsync(float thresholdRatio, bool disconnect);
 
     void stopAsync(bool clear = true);
 
     void resumeUntilAsync(const sp<AMessage> &params);
 
+    void fetchPlaylistAsync();
+
     uint32_t getStreamTypeMask() const {
         return mStreamTypeMask;
     }
@@ -93,8 +101,11 @@
         kWhatMonitorQueue   = 'moni',
         kWhatResumeUntil    = 'rsme',
         kWhatDownloadNext   = 'dlnx',
+        kWhatFetchPlaylist  = 'flst'
     };
 
+    struct DownloadState;
+
     static const int64_t kMaxMonitorDelayUs;
     static const int32_t kNumSkipFrames;
 
@@ -105,9 +116,12 @@
     sp<AMessage> mNotify;
     sp<AMessage> mStartTimeUsNotify;
 
+    sp<HTTPDownloader> mHTTPDownloader;
     sp<LiveSession> mSession;
     AString mURI;
 
+    int32_t mFetcherID;
+
     uint32_t mStreamTypeMask;
     int64_t mStartTimeUs;
 
@@ -116,7 +130,7 @@
     // adapting or switching tracks.
     int64_t mSegmentStartTimeUs;
 
-    ssize_t mDiscontinuitySeq;
+    int32_t mDiscontinuitySeq;
     bool mStartTimeUsRelative;
     sp<AMessage> mStopParams; // message containing the latest timestamps we should fetch.
 
@@ -126,17 +140,21 @@
     KeyedVector<AString, sp<ABuffer> > mAESKeyForURI;
 
     int64_t mLastPlaylistFetchTimeUs;
+    int64_t mPlaylistTimeUs;
     sp<M3UParser> mPlaylist;
     int32_t mSeqNumber;
     int32_t mNumRetries;
     bool mStartup;
-    bool mAdaptive;
-    bool mPrepared;
+    bool mIDRFound;
+    int32_t mSeekMode;
+    bool mTimeChangeSignaled;
     int64_t mNextPTSTimeUs;
 
     int32_t mMonitorQueueGeneration;
     const int32_t mSubtitleGeneration;
 
+    int32_t mLastDiscontinuitySeq;
+
     enum RefreshState {
         INITIAL_MINIMUM_RELOAD_DELAY,
         FIRST_UNCHANGED_RELOAD_ATTEMPT,
@@ -150,9 +168,8 @@
     sp<ATSParser> mTSParser;
 
     bool mFirstPTSValid;
-    uint64_t mFirstPTS;
     int64_t mFirstTimeUs;
-    int64_t mAbsoluteTimeAnchorUs;
+    int64_t mSegmentFirstPTS;
     sp<AnotherPacketSource> mVideoBuffer;
 
     // Stores the initialization vector to decrypt the next block of cipher text, which can
@@ -160,6 +177,13 @@
     // the last block of cipher text (cipher-block chaining).
     unsigned char mAESInitVec[16];
 
+    Mutex mThresholdLock;
+    float mThresholdRatio;
+
+    sp<DownloadState> mDownloadState;
+
+    bool mHasMetadata;
+
     // Set first to true if decrypting the first segment of a playlist segment. When
     // first is true, reset the initialization vector based on the available
     // information in the manifest; otherwise, use the initialization vector as
@@ -175,6 +199,10 @@
 
     void postMonitorQueue(int64_t delayUs = 0, int64_t minDelayUs = 0);
     void cancelMonitorQueue();
+    void setStoppingThreshold(float thresholdRatio, bool disconnect);
+    void resetStoppingThreshold(bool disconnect);
+    float getStoppingThreshold();
+    bool shouldPauseDownload();
 
     int64_t delayUsToRefreshPlaylist() const;
     status_t refreshPlaylist();
@@ -182,12 +210,19 @@
     // Returns the media time in us of the segment specified by seqNumber.
     // This is computed by summing the durations of all segments before it.
     int64_t getSegmentStartTimeUs(int32_t seqNumber) const;
+    // Returns the duration time in us of the segment specified.
+    int64_t getSegmentDurationUs(int32_t seqNumber) const;
 
     status_t onStart(const sp<AMessage> &msg);
     void onPause();
     void onStop(const sp<AMessage> &msg);
     void onMonitorQueue();
     void onDownloadNext();
+    bool initDownloadState(
+            AString &uri,
+            sp<AMessage> &itemMeta,
+            int32_t &firstSeqNumberInPlaylist,
+            int32_t &lastSeqNumberInPlaylist);
 
     // Resume a fetcher to continue until the stopping point stored in msg.
     status_t onResumeUntil(const sp<AMessage> &msg);
@@ -196,25 +231,24 @@
             const sp<ABuffer> &accessUnit,
             const sp<AnotherPacketSource> &source,
             bool discard = false);
+    bool isStartTimeReached(int64_t timeUs);
     status_t extractAndQueueAccessUnitsFromTs(const sp<ABuffer> &buffer);
 
     status_t extractAndQueueAccessUnits(
             const sp<ABuffer> &buffer, const sp<AMessage> &itemMeta);
 
+    void notifyStopReached();
     void notifyError(status_t err);
 
     void queueDiscontinuity(
             ATSParser::DiscontinuityType type, const sp<AMessage> &extra);
 
-    int32_t getSeqNumberWithAnchorTime(int64_t anchorTimeUs) const;
+    bool adjustSeqNumberWithAnchorTime(int64_t anchorTimeUs);
     int32_t getSeqNumberForDiscontinuity(size_t discontinuitySeq) const;
     int32_t getSeqNumberForTime(int64_t timeUs) const;
 
     void updateDuration();
-
-    // Before resuming a fetcher in onResume, check the remaining duration is longer than that
-    // returned by resumeThreshold.
-    int64_t resumeThreshold(const sp<AMessage> &msg);
+    void updateTargetDuration();
 
     DISALLOW_EVIL_CONSTRUCTORS(PlaylistFetcher);
 };
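The header above documents that getSegmentStartTimeUs() returns the media time of a segment by summing the durations of all segments before it, and the patch adds getSegmentDurationUs() alongside it. As a minimal standalone sketch of that accumulation (SegmentInfo and the free function below are hypothetical illustrations, not the M3UParser-backed implementation in PlaylistFetcher.cpp):

#include <cstdint>
#include <vector>

// Hypothetical per-segment record; the real fetcher reads each duration from
// the parsed m3u8 playlist entry metadata.
struct SegmentInfo {
    int64_t durationUs;
};

// Media time of the segment at |index| is the sum of all earlier durations.
static int64_t getSegmentStartTimeUs(
        const std::vector<SegmentInfo> &segments, size_t index) {
    int64_t startTimeUs = 0;
    for (size_t i = 0; i < index && i < segments.size(); ++i) {
        startTimeUs += segments[i].durationUs;
    }
    return startTimeUs;
}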
diff --git a/media/libstagefright/id3/Android.mk b/media/libstagefright/id3/Android.mk
index 2194c38..68bd017 100644
--- a/media/libstagefright/id3/Android.mk
+++ b/media/libstagefright/id3/Android.mk
@@ -4,7 +4,8 @@
 LOCAL_SRC_FILES := \
 	ID3.cpp
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE := libstagefright_id3
 
@@ -17,7 +18,8 @@
 LOCAL_SRC_FILES := \
 	testid3.cpp
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_SHARED_LIBRARIES := \
 	libstagefright libutils liblog libbinder libstagefright_foundation
diff --git a/media/libstagefright/include/AwesomePlayer.h b/media/libstagefright/include/AwesomePlayer.h
index 77d65e0..758b2c9 100644
--- a/media/libstagefright/include/AwesomePlayer.h
+++ b/media/libstagefright/include/AwesomePlayer.h
@@ -21,6 +21,7 @@
 #include "HTTPBase.h"
 #include "TimedEventQueue.h"
 
+#include <media/AudioResamplerPublic.h>
 #include <media/MediaPlayerInterface.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/OMXClient.h>
@@ -31,20 +32,20 @@
 
 namespace android {
 
-struct AudioPlayer;
+class AudioPlayer;
 struct ClockEstimator;
-struct DataSource;
-struct MediaBuffer;
+class IDataSource;
+class MediaBuffer;
 struct MediaExtractor;
 struct MediaSource;
 struct NuCachedSource2;
-struct IGraphicBufferProducer;
+class IGraphicBufferProducer;
 
 class DrmManagerClinet;
 class DecryptHandle;
 
 class TimedTextDriver;
-struct WVMExtractor;
+class WVMExtractor;
 
 struct AwesomeRenderer : public RefBase {
     AwesomeRenderer() {}
@@ -93,6 +94,8 @@
 
     status_t setParameter(int key, const Parcel &request);
     status_t getParameter(int key, Parcel *reply);
+    status_t setPlaybackSettings(const AudioPlaybackRate &rate);
+    status_t getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */);
     status_t invoke(const Parcel &request, Parcel *reply);
     status_t setCacheStatCollectFreq(const Parcel &request);
 
@@ -180,6 +183,7 @@
     sp<MediaSource> mOmxSource;
     sp<MediaSource> mAudioSource;
     AudioPlayer *mAudioPlayer;
+    AudioPlaybackRate mPlaybackSettings;
     int64_t mDurationUs;
 
     int32_t mDisplayWidth;
diff --git a/media/libstagefright/include/CallbackDataSource.h b/media/libstagefright/include/CallbackDataSource.h
new file mode 100644
index 0000000..1a21dd3
--- /dev/null
+++ b/media/libstagefright/include/CallbackDataSource.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_CALLBACKDATASOURCE_H
+#define ANDROID_CALLBACKDATASOURCE_H
+
+#include <media/stagefright/DataSource.h>
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+class IDataSource;
+class IMemory;
+
+// A stagefright DataSource that wraps a binder IDataSource. It's a "Callback"
+// DataSource because it calls back to the IDataSource for data.
+class CallbackDataSource : public DataSource {
+public:
+    CallbackDataSource(const sp<IDataSource>& iDataSource);
+    virtual ~CallbackDataSource();
+
+    // DataSource implementation.
+    virtual status_t initCheck() const;
+    virtual ssize_t readAt(off64_t offset, void *data, size_t size);
+    virtual status_t getSize(off64_t *size);
+
+private:
+    sp<IDataSource> mIDataSource;
+    sp<IMemory> mMemory;
+
+    DISALLOW_EVIL_CONSTRUCTORS(CallbackDataSource);
+};
+
+
+// A caching DataSource that wraps a CallbackDataSource. For reads smaller
+// than kCacheSize it will read up to kCacheSize ahead and cache it.
+// This reduces the number of binder round trips to the IDataSource and has a significant
+// impact on time taken for filetype sniffing and metadata extraction.
+class TinyCacheSource : public DataSource {
+public:
+    TinyCacheSource(const sp<DataSource>& source);
+
+    virtual status_t initCheck() const;
+    virtual ssize_t readAt(off64_t offset, void* data, size_t size);
+    virtual status_t getSize(off64_t* size);
+    virtual uint32_t flags();
+
+private:
+    // 2kb comes from experimenting with the time-to-first-frame from a MediaPlayer
+    // with an in-memory MediaDataSource source on a Nexus 5. Beyond 2kb there was
+    // no improvement.
+    enum {
+        kCacheSize = 2048,
+    };
+
+    sp<DataSource> mSource;
+    uint8_t mCache[kCacheSize];
+    off64_t mCachedOffset;
+    size_t mCachedSize;
+
+    DISALLOW_EVIL_CONSTRUCTORS(TinyCacheSource);
+};
+
+}; // namespace android
+
+#endif // ANDROID_CALLBACKDATASOURCE_H
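The TinyCacheSource declaration above documents its read-ahead behaviour: reads smaller than kCacheSize are served from a 2 KiB cache that is refilled from the wrapped source, which cuts down on binder round trips during sniffing and metadata extraction. A minimal sketch of that readAt() strategy, relying only on the members declared above plus <string.h> for memcpy (this is an illustration of the caching idea, not the body of the actual CallbackDataSource.cpp implementation):

#include <string.h>

ssize_t TinyCacheSource::readAt(off64_t offset, void *data, size_t size) {
    if (size >= kCacheSize) {
        // Large reads bypass the cache entirely.
        return mSource->readAt(offset, data, size);
    }

    // Serve from the cache when the requested range is fully cached.
    if (offset >= mCachedOffset
            && offset + (off64_t)size <= mCachedOffset + (off64_t)mCachedSize) {
        memcpy(data, mCache + (offset - mCachedOffset), size);
        return size;
    }

    // Otherwise refill the cache with up to kCacheSize bytes of read-ahead.
    ssize_t numRead = mSource->readAt(offset, mCache, kCacheSize);
    if (numRead <= 0) {
        return numRead;
    }
    mCachedOffset = offset;
    mCachedSize = (size_t)numRead;

    // Copy out what was requested (possibly a short read).
    size_t copied = (size < mCachedSize) ? size : mCachedSize;
    memcpy(data, mCache, copied);
    return copied;
}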
diff --git a/media/libstagefright/include/MPEG2PSExtractor.h b/media/libstagefright/include/MPEG2PSExtractor.h
index fb76564..22cb02d 100644
--- a/media/libstagefright/include/MPEG2PSExtractor.h
+++ b/media/libstagefright/include/MPEG2PSExtractor.h
@@ -28,7 +28,7 @@
 struct ABuffer;
 struct AMessage;
 struct Track;
-struct String8;
+class String8;
 
 struct MPEG2PSExtractor : public MediaExtractor {
     MPEG2PSExtractor(const sp<DataSource> &source);
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index db1187d..8eb8f6c 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -20,7 +20,9 @@
 
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/MediaExtractor.h>
+#include <media/stagefright/MediaSource.h>
 #include <utils/threads.h>
+#include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 
 namespace android {
@@ -30,7 +32,7 @@
 struct ATSParser;
 class DataSource;
 struct MPEG2TSSource;
-struct String8;
+class String8;
 
 struct MPEG2TSExtractor : public MediaExtractor {
     MPEG2TSExtractor(const sp<DataSource> &source);
@@ -54,10 +56,21 @@
 
     Vector<sp<AnotherPacketSource> > mSourceImpls;
 
+    Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
+    // Sync points used for seeking --- normally the one for the video track is used.
+    // If no video track is present, the audio track will be used instead.

+    KeyedVector<int64_t, off64_t> *mSeekSyncPoints;
+
     off64_t mOffset;
 
     void init();
     status_t feedMore();
+    status_t seek(int64_t seekTimeUs,
+            const MediaSource::ReadOptions::SeekMode& seekMode);
+    status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
+    status_t seekBeyond(int64_t seekTimeUs);
+
+    status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
 
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
 };
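The new mSyncPoints/mSeekSyncPoints members above form a time-to-offset table of sync frames collected while parsing; seeking picks the sync point at or before the target time and resumes feeding the parser from that byte offset. A small illustration of that lookup, using std::map instead of the KeyedVector used here purely for brevity (SyncPointMap and seekToOffset() are hypothetical names, not MPEG2TSExtractor methods):

#include <cstdint>
#include <map>

// presentation time in us -> byte offset of a sync frame.
using SyncPointMap = std::map<int64_t, int64_t>;

// Returns the offset of the latest sync point at or before |seekTimeUs|,
// or the earliest one if the target precedes all recorded sync points.
static int64_t seekToOffset(const SyncPointMap &syncPoints, int64_t seekTimeUs) {
    if (syncPoints.empty()) {
        return 0;  // nothing recorded yet; start from the beginning
    }
    auto it = syncPoints.upper_bound(seekTimeUs);
    if (it != syncPoints.begin()) {
        --it;  // step back to the entry at or before the target time
    }
    return it->second;
}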
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index 1fe6fcf..3067c3d 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -83,6 +83,8 @@
 
     Vector<SidxEntry> mSidxEntries;
     off64_t mMoofOffset;
+    bool mMoofFound;
+    bool mMdatFound;
 
     Vector<PsshInfo> mPssh;
 
@@ -102,11 +104,15 @@
     String8 mLastCommentName;
     String8 mLastCommentData;
 
+    KeyedVector<uint32_t, AString> mMetaKeyMap;
+
     status_t readMetaData();
     status_t parseChunk(off64_t *offset, int depth);
     status_t parseITunesMetaData(off64_t offset, size_t size);
     status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
     void parseID3v2MetaData(off64_t offset);
+    status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
+    status_t parseQTMetaVal(int32_t keyId, off64_t data_offset, size_t data_size);
 
     status_t updateAudioTrackInfoFromESDS_MPEG4Audio(
             const void *esds_data, size_t esds_size);
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index e8c4970..d468dfc 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -24,7 +24,7 @@
 namespace android {
 
 struct OMXMaster;
-class OMXNodeInstance;
+struct OMXNodeInstance;
 
 class OMX : public BnOMX,
             public IBinder::DeathRecipient {
@@ -69,7 +69,7 @@
             node_id node, OMX_U32 port_index, OMX_U32* usage);
 
     virtual status_t storeMetaDataInBuffers(
-            node_id node, OMX_U32 port_index, OMX_BOOL enable);
+            node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type);
 
     virtual status_t prepareForAdaptivePlayback(
             node_id node, OMX_U32 portIndex, OMX_BOOL enable,
@@ -81,7 +81,7 @@
 
     virtual status_t useBuffer(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer);
+            buffer_id *buffer, OMX_U32 allottedSize);
 
     virtual status_t useGraphicBuffer(
             node_id node, OMX_U32 port_index,
@@ -93,7 +93,17 @@
 
     virtual status_t createInputSurface(
             node_id node, OMX_U32 port_index,
-            sp<IGraphicBufferProducer> *bufferProducer);
+            sp<IGraphicBufferProducer> *bufferProducer,
+            MetadataBufferType *type);
+
+    virtual status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer);
+
+    virtual status_t setInputSurface(
+            node_id node, OMX_U32 port_index,
+            const sp<IGraphicBufferConsumer> &bufferConsumer,
+            MetadataBufferType *type);
 
     virtual status_t signalEndOfInputStream(node_id node);
 
@@ -103,18 +113,18 @@
 
     virtual status_t allocateBufferWithBackup(
             node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-            buffer_id *buffer);
+            buffer_id *buffer, OMX_U32 allottedSize);
 
     virtual status_t freeBuffer(
             node_id node, OMX_U32 port_index, buffer_id buffer);
 
-    virtual status_t fillBuffer(node_id node, buffer_id buffer);
+    virtual status_t fillBuffer(node_id node, buffer_id buffer, int fenceFd);
 
     virtual status_t emptyBuffer(
             node_id node,
             buffer_id buffer,
             OMX_U32 range_offset, OMX_U32 range_length,
-            OMX_U32 flags, OMX_TICKS timestamp);
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
     virtual status_t getExtensionIndex(
             node_id node,
@@ -138,10 +148,10 @@
             OMX_IN OMX_PTR pEventData);
 
     OMX_ERRORTYPE OnEmptyBufferDone(
-            node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
+            node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd);
 
     OMX_ERRORTYPE OnFillBufferDone(
-            node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
+            node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd);
 
     void invalidateNodeID(node_id node);
 
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index b26e940..f68e0a9 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -27,7 +27,9 @@
 
 class IOMXObserver;
 struct OMXMaster;
-struct GraphicBufferSource;
+class GraphicBufferSource;
+
+status_t StatusFromOMXError(OMX_ERRORTYPE err);
 
 struct OMXNodeInstance {
     OMXNodeInstance(
@@ -56,7 +58,8 @@
 
     status_t getGraphicBufferUsage(OMX_U32 portIndex, OMX_U32* usage);
 
-    status_t storeMetaDataInBuffers(OMX_U32 portIndex, OMX_BOOL enable);
+    status_t storeMetaDataInBuffers(
+            OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
 
     status_t prepareForAdaptivePlayback(
             OMX_U32 portIndex, OMX_BOOL enable,
@@ -68,7 +71,7 @@
 
     status_t useBuffer(
             OMX_U32 portIndex, const sp<IMemory> &params,
-            OMX::buffer_id *buffer);
+            OMX::buffer_id *buffer, OMX_U32 allottedSize);
 
     status_t useGraphicBuffer(
             OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
@@ -79,7 +82,16 @@
             OMX::buffer_id buffer);
 
     status_t createInputSurface(
-            OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer);
+            OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer,
+            MetadataBufferType *type);
+
+    static status_t createPersistentInputSurface(
+            sp<IGraphicBufferProducer> *bufferProducer,
+            sp<IGraphicBufferConsumer> *bufferConsumer);
+
+    status_t setInputSurface(
+            OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer,
+            MetadataBufferType *type);
 
     status_t signalEndOfInputStream();
 
@@ -89,21 +101,20 @@
 
     status_t allocateBufferWithBackup(
             OMX_U32 portIndex, const sp<IMemory> &params,
-            OMX::buffer_id *buffer);
+            OMX::buffer_id *buffer, OMX_U32 allottedSize);
 
     status_t freeBuffer(OMX_U32 portIndex, OMX::buffer_id buffer);
 
-    status_t fillBuffer(OMX::buffer_id buffer);
+    status_t fillBuffer(OMX::buffer_id buffer, int fenceFd);
 
     status_t emptyBuffer(
             OMX::buffer_id buffer,
             OMX_U32 rangeOffset, OMX_U32 rangeLength,
-            OMX_U32 flags, OMX_TICKS timestamp);
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
-    status_t emptyDirectBuffer(
-            OMX_BUFFERHEADERTYPE *header,
-            OMX_U32 rangeOffset, OMX_U32 rangeLength,
-            OMX_U32 flags, OMX_TICKS timestamp);
+    status_t emptyGraphicBuffer(
+            OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &buffer,
+            OMX_U32 flags, OMX_TICKS timestamp, int fenceFd);
 
     status_t getExtensionIndex(
             const char *parameterName, OMX_INDEXTYPE *index);
@@ -114,6 +125,8 @@
             const void *data,
             size_t size);
 
+    // handles messages and removes them from the list
+    void onMessages(std::list<omx_message> &messages);
     void onMessage(const omx_message &msg);
     void onObserverDied(OMXMaster *master);
     void onGetHandleFailed();
@@ -147,6 +160,7 @@
     uint32_t mBufferIDCount;
     KeyedVector<OMX::buffer_id, OMX_BUFFERHEADERTYPE *> mBufferIDToBufferHeader;
     KeyedVector<OMX_BUFFERHEADERTYPE *, OMX::buffer_id> mBufferHeaderToBufferID;
+    MetadataBufferType mMetadataType[2];
 
     // For debug support
     char *mName;
@@ -194,16 +208,35 @@
             OMX_IN OMX_BUFFERHEADERTYPE *pBuffer);
 
     status_t storeMetaDataInBuffers_l(
-            OMX_U32 portIndex, OMX_BOOL enable,
-            OMX_BOOL useGraphicBuffer, OMX_BOOL *usingGraphicBufferInMeta);
+            OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type);
+
+    // Stores the fence into the buffer if it is ANWBuffer type and has enough space;
+    // otherwise, waits for the fence to signal.  Takes ownership of |fenceFd|.
+    status_t storeFenceInMeta_l(
+            OMX_BUFFERHEADERTYPE *header, int fenceFd, OMX_U32 portIndex);
+
+    // Retrieves the fence from the buffer if it is ANWBuffer type and has enough space. Otherwise, returns -1.
+    int retrieveFenceFromMeta_l(
+            OMX_BUFFERHEADERTYPE *header, OMX_U32 portIndex);
 
     status_t emptyBuffer_l(
             OMX_BUFFERHEADERTYPE *header,
-            OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr);
+            OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr, int fenceFd);
 
+    status_t updateGraphicBufferInMeta_l(
+            OMX_U32 portIndex, const sp<GraphicBuffer> &graphicBuffer,
+            OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header);
+
+    status_t createGraphicBufferSource(
+            OMX_U32 portIndex, sp<IGraphicBufferConsumer> consumer /* nullable */,
+            MetadataBufferType *type);
     sp<GraphicBufferSource> getGraphicBufferSource();
     void setGraphicBufferSource(const sp<GraphicBufferSource>& bufferSource);
 
+    // Handles |msg|, and may modify it. Returns true iff completely handled it and
+    // |msg| does not need to be sent to the event listener.
+    bool handleMessage(omx_message &msg);
+
     OMXNodeInstance(const OMXNodeInstance &);
     OMXNodeInstance &operator=(const OMXNodeInstance &);
 };
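The two declarations added above split message handling in two: onMessages() walks the list and removes the messages that handleMessage() fully consumes, while anything left over is forwarded to the event listener. A self-contained sketch of that dispatch contract (Message, Handler, and dispatchMessages() are stand-ins for illustration, not the omx_message/OMXNodeInstance code in this patch):

#include <list>

// Stand-in for the real omx_message from IOMX.h.
struct Message { int type; };

struct Handler {
    // Returns true when the message was completely handled here and must not
    // be forwarded to the event listener.
    bool handleMessage(Message &msg) { return msg.type == 0; }
};

// Handled messages are erased from the list; unhandled ones remain for the
// observer to receive afterwards.
void dispatchMessages(Handler &handler, std::list<Message> &messages) {
    for (auto it = messages.begin(); it != messages.end(); ) {
        if (handler.handleMessage(*it)) {
            it = messages.erase(it);
        } else {
            ++it;
        }
    }
}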
diff --git a/media/libstagefright/include/OggExtractor.h b/media/libstagefright/include/OggExtractor.h
index e97c8cd..c647cbb 100644
--- a/media/libstagefright/include/OggExtractor.h
+++ b/media/libstagefright/include/OggExtractor.h
@@ -27,7 +27,7 @@
 class DataSource;
 class String8;
 
-struct MyVorbisExtractor;
+struct MyOggExtractor;
 struct OggSource;
 
 struct OggExtractor : public MediaExtractor {
@@ -48,7 +48,7 @@
     sp<DataSource> mDataSource;
     status_t mInitCheck;
 
-    MyVorbisExtractor *mImpl;
+    MyOggExtractor *mImpl;
 
     OggExtractor(const OggExtractor &);
     OggExtractor &operator=(const OggExtractor &);
diff --git a/media/libstagefright/include/SampleIterator.h b/media/libstagefright/include/SampleIterator.h
index 60c9e7e..7053247 100644
--- a/media/libstagefright/include/SampleIterator.h
+++ b/media/libstagefright/include/SampleIterator.h
@@ -18,7 +18,7 @@
 
 namespace android {
 
-struct SampleTable;
+class SampleTable;
 
 struct SampleIterator {
     SampleIterator(SampleTable *table);
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index fa3ea89..757b308 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -19,22 +19,27 @@
 #define SOFTWARE_RENDERER_H_
 
 #include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/FrameRenderTracker.h>
 #include <utils/RefBase.h>
 #include <system/window.h>
 
+#include <list>
+
 namespace android {
 
 struct AMessage;
 
 class SoftwareRenderer {
 public:
-    explicit SoftwareRenderer(const sp<ANativeWindow> &nativeWindow);
+    explicit SoftwareRenderer(
+            const sp<ANativeWindow> &nativeWindow, int32_t rotation = 0);
 
     ~SoftwareRenderer();
 
-    void render(
-            const void *data, size_t size, int64_t timestampNs,
+    std::list<FrameRenderTracker::Info> render(
+            const void *data, size_t size, int64_t mediaTimeUs, nsecs_t renderTimeNs,
             void *platformPrivate, const sp<AMessage> &format);
+    void clearTracker();
 
 private:
     enum YUVMode {
@@ -48,6 +53,8 @@
     int32_t mWidth, mHeight;
     int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
     int32_t mCropWidth, mCropHeight;
+    int32_t mRotationDegrees;
+    FrameRenderTracker mRenderTracker;
 
     SoftwareRenderer(const SoftwareRenderer &);
     SoftwareRenderer &operator=(const SoftwareRenderer &);
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 6632c27..fd739d0 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -25,7 +25,7 @@
 
 namespace android {
 
-struct DataSource;
+class DataSource;
 class MediaExtractor;
 
 struct StagefrightMetadataRetriever : public MediaMetadataRetrieverInterface {
@@ -38,6 +38,7 @@
             const KeyedVector<String8, String8> *headers);
 
     virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
+    virtual status_t setDataSource(const sp<DataSource>& source);
 
     virtual VideoFrame *getFrameAtTime(int64_t timeUs, int option);
     virtual MediaAlbumArt *extractAlbumArt();
@@ -53,6 +54,8 @@
     MediaAlbumArt *mAlbumArt;
 
     void parseMetaData();
+    // Delete album art and clear metadata.
+    void clearMetadata();
 
     StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
 
diff --git a/media/libstagefright/include/TimedEventQueue.h b/media/libstagefright/include/TimedEventQueue.h
index 2963150..890f7e8 100644
--- a/media/libstagefright/include/TimedEventQueue.h
+++ b/media/libstagefright/include/TimedEventQueue.h
@@ -46,7 +46,7 @@
         virtual void fire(TimedEventQueue *queue, int64_t now_us) = 0;
 
     private:
-        friend class TimedEventQueue;
+        friend struct TimedEventQueue;
 
         event_id mEventID;
 
diff --git a/media/libstagefright/include/VBRISeeker.h b/media/libstagefright/include/VBRISeeker.h
index 1a2bf9f..c57d571 100644
--- a/media/libstagefright/include/VBRISeeker.h
+++ b/media/libstagefright/include/VBRISeeker.h
@@ -24,7 +24,7 @@
 
 namespace android {
 
-struct DataSource;
+class DataSource;
 
 struct VBRISeeker : public MP3Seeker {
     static sp<VBRISeeker> CreateFromSource(
diff --git a/media/libstagefright/include/XINGSeeker.h b/media/libstagefright/include/XINGSeeker.h
index c408576..cce04f0 100644
--- a/media/libstagefright/include/XINGSeeker.h
+++ b/media/libstagefright/include/XINGSeeker.h
@@ -22,7 +22,7 @@
 
 namespace android {
 
-struct DataSource;
+class DataSource;
 
 struct XINGSeeker : public MP3Seeker {
     static sp<XINGSeeker> CreateFromSource(
diff --git a/media/libstagefright/include/avc_utils.h b/media/libstagefright/include/avc_utils.h
index c270bc1..dafa07e 100644
--- a/media/libstagefright/include/avc_utils.h
+++ b/media/libstagefright/include/avc_utils.h
@@ -36,6 +36,11 @@
     kAVCProfileCAVLC444Intra = 0x2c
 };
 
+struct NALPosition {
+    size_t nalOffset;
+    size_t nalSize;
+};
+
 // Optionally returns sample aspect ratio as well.
 void FindAVCDimensions(
         const sp<ABuffer> &seqParamSet,
diff --git a/media/libstagefright/matroska/Android.mk b/media/libstagefright/matroska/Android.mk
index 446ff8c..1e8c2b2 100644
--- a/media/libstagefright/matroska/Android.mk
+++ b/media/libstagefright/matroska/Android.mk
@@ -8,7 +8,8 @@
         $(TOP)/external/libvpx/libwebm \
         $(TOP)/frameworks/native/include/media/openmax \
 
-LOCAL_CFLAGS += -Wno-multichar -Werror
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE:= libstagefright_matroska
 
diff --git a/media/libstagefright/matroska/MatroskaExtractor.cpp b/media/libstagefright/matroska/MatroskaExtractor.cpp
index c30e807..ecc2573 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.cpp
+++ b/media/libstagefright/matroska/MatroskaExtractor.cpp
@@ -944,6 +944,11 @@
         ALOGV("codec id = %s", codecID);
         ALOGV("codec name = %s", track->GetCodecNameAsUTF8());
 
+        if (codecID == NULL) {
+            ALOGW("unknown codecID is not supported.");
+            continue;
+        }
+
         size_t codecPrivateSize;
         const unsigned char *codecPrivate =
             track->GetCodecPrivate(codecPrivateSize);
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 1eae6cf..e3c3e80 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -35,6 +35,7 @@
 #include <media/stagefright/Utils.h>
 #include <media/IStreamSource.h>
 #include <utils/KeyedVector.h>
+#include <utils/Vector.h>
 
 #include <inttypes.h>
 
@@ -47,15 +48,19 @@
 static const size_t kTSPacketSize = 188;
 
 struct ATSParser::Program : public RefBase {
-    Program(ATSParser *parser, unsigned programNumber, unsigned programMapPID);
+    Program(ATSParser *parser, unsigned programNumber, unsigned programMapPID,
+            int64_t lastRecoveredPTS);
 
     bool parsePSISection(
             unsigned pid, ABitReader *br, status_t *err);
 
+    // Pass to appropriate stream according to pid, and set event if it's a PES
+    // with a sync frame.
+    // Note that the method itself does not touch event.
     bool parsePID(
             unsigned pid, unsigned continuity_counter,
             unsigned payload_unit_start_indicator,
-            ABitReader *br, status_t *err);
+            ABitReader *br, status_t *err, SyncEvent *event);
 
     void signalDiscontinuity(
             DiscontinuityType type, const sp<AMessage> &extra);
@@ -86,14 +91,22 @@
     }
 
 private:
+    struct StreamInfo {
+        unsigned mType;
+        unsigned mPID;
+    };
+
     ATSParser *mParser;
     unsigned mProgramNumber;
     unsigned mProgramMapPID;
     KeyedVector<unsigned, sp<Stream> > mStreams;
     bool mFirstPTSValid;
     uint64_t mFirstPTS;
+    int64_t mLastRecoveredPTS;
 
     status_t parseProgramMap(ABitReader *br);
+    int64_t recoverPTS(uint64_t PTS_33bit);
+    bool switchPIDs(const Vector<StreamInfo> &infos);
 
     DISALLOW_EVIL_CONSTRUCTORS(Program);
 };
@@ -108,10 +121,14 @@
     unsigned pid() const { return mElementaryPID; }
     void setPID(unsigned pid) { mElementaryPID = pid; }
 
+    // Parse the payload and set event when a PES with a sync frame is detected.
+    // This method knows when a PES starts, so it records mPesStartOffset in
+    // that case.
     status_t parse(
             unsigned continuity_counter,
             unsigned payload_unit_start_indicator,
-            ABitReader *br);
+            ABitReader *br,
+            SyncEvent *event);
 
     void signalDiscontinuity(
             DiscontinuityType type, const sp<AMessage> &extra);
@@ -122,6 +139,7 @@
 
     bool isAudio() const;
     bool isVideo() const;
+    bool isMeta() const;
 
 protected:
     virtual ~Stream();
@@ -139,17 +157,24 @@
     bool mEOSReached;
 
     uint64_t mPrevPTS;
+    off64_t mPesStartOffset;
 
     ElementaryStreamQueue *mQueue;
 
-    status_t flush();
-    status_t parsePES(ABitReader *br);
+    // Flush accumulated payload if necessary --- i.e. at EOS or at the start of
+    // another payload. event is set if the flushed payload is PES with a sync
+    // frame.
+    status_t flush(SyncEvent *event);
+    // Strip and parse PES headers and pass the remaining payload into onPayloadData
+    // with the parsed metadata. event is set if the PES contains a sync frame.
+    status_t parsePES(ABitReader *br, SyncEvent *event);
 
+    // Feed the payload into mQueue and, if a packet is identified, queue it
+    // into mSource. If the packet is a sync frame, set event with the start
+    // offset and timestamp of the packet.
     void onPayloadData(
             unsigned PTS_DTS_flags, uint64_t PTS, uint64_t DTS,
-            const uint8_t *data, size_t size);
-
-    void extractAACFrames(const sp<ABuffer> &buffer);
+            const uint8_t *data, size_t size, SyncEvent *event);
 
     DISALLOW_EVIL_CONSTRUCTORS(Stream);
 };
@@ -158,10 +183,12 @@
     PSISection();
 
     status_t append(const void *data, size_t size);
+    void setSkipBytes(uint8_t skip);
     void clear();
 
     bool isComplete() const;
     bool isEmpty() const;
+    bool isCRCOkay() const;
 
     const uint8_t *data() const;
     size_t size() const;
@@ -171,19 +198,34 @@
 
 private:
     sp<ABuffer> mBuffer;
+    uint8_t mSkipBytes;
+    static uint32_t CRC_TABLE[];
 
     DISALLOW_EVIL_CONSTRUCTORS(PSISection);
 };
 
+ATSParser::SyncEvent::SyncEvent(off64_t offset)
+    : mInit(false), mOffset(offset), mTimeUs(0) {}
+
+void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
+        int64_t timeUs) {
+    mInit = true;
+    mOffset = offset;
+    mMediaSource = source;
+    mTimeUs = timeUs;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 
 ATSParser::Program::Program(
-        ATSParser *parser, unsigned programNumber, unsigned programMapPID)
+        ATSParser *parser, unsigned programNumber, unsigned programMapPID,
+        int64_t lastRecoveredPTS)
     : mParser(parser),
       mProgramNumber(programNumber),
       mProgramMapPID(programMapPID),
       mFirstPTSValid(false),
-      mFirstPTS(0) {
+      mFirstPTS(0),
+      mLastRecoveredPTS(lastRecoveredPTS) {
     ALOGV("new program number %u", programNumber);
 }
 
@@ -203,7 +245,7 @@
 bool ATSParser::Program::parsePID(
         unsigned pid, unsigned continuity_counter,
         unsigned payload_unit_start_indicator,
-        ABitReader *br, status_t *err) {
+        ABitReader *br, status_t *err, SyncEvent *event) {
     *err = OK;
 
     ssize_t index = mStreams.indexOfKey(pid);
@@ -212,7 +254,7 @@
     }
 
     *err = mStreams.editValueAt(index)->parse(
-            continuity_counter, payload_unit_start_indicator, br);
+            continuity_counter, payload_unit_start_indicator, br, event);
 
     return true;
 }
@@ -238,10 +280,75 @@
     }
 }
 
-struct StreamInfo {
-    unsigned mType;
-    unsigned mPID;
-};
+bool ATSParser::Program::switchPIDs(const Vector<StreamInfo> &infos) {
+    bool success = false;
+
+    if (mStreams.size() == infos.size()) {
+        // build type->PIDs map for old and new mapping
+        size_t i;
+        KeyedVector<int32_t, Vector<int32_t> > oldType2PIDs, newType2PIDs;
+        for (i = 0; i < mStreams.size(); ++i) {
+            ssize_t index = oldType2PIDs.indexOfKey(mStreams[i]->type());
+            if (index < 0) {
+                oldType2PIDs.add(mStreams[i]->type(), Vector<int32_t>());
+            }
+            oldType2PIDs.editValueFor(mStreams[i]->type()).push_back(mStreams[i]->pid());
+        }
+        for (i = 0; i < infos.size(); ++i) {
+            ssize_t index = newType2PIDs.indexOfKey(infos[i].mType);
+            if (index < 0) {
+                newType2PIDs.add(infos[i].mType, Vector<int32_t>());
+            }
+            newType2PIDs.editValueFor(infos[i].mType).push_back(infos[i].mPID);
+        }
+
+        // we can recover if the number of streams for each type hasn't changed
+        if (oldType2PIDs.size() == newType2PIDs.size()) {
+            success = true;
+            for (i = 0; i < oldType2PIDs.size(); ++i) {
+                // KeyedVector is sorted, so we just compare the key and size at each index
+                if (oldType2PIDs.keyAt(i) != newType2PIDs.keyAt(i)
+                        || oldType2PIDs[i].size() != newType2PIDs[i].size()) {
+                     success = false;
+                     break;
+                }
+            }
+        }
+
+        if (success) {
+            // save current streams to temp
+            KeyedVector<int32_t, sp<Stream> > temp;
+            for (i = 0; i < mStreams.size(); ++i) {
+                 temp.add(mStreams.keyAt(i), mStreams.editValueAt(i));
+            }
+
+            mStreams.clear();
+            for (i = 0; i < temp.size(); ++i) {
+                // The two failure cases below shouldn't happen; we already
+                // checked above that the stream counts match
+                ssize_t index = newType2PIDs.indexOfKey(temp[i]->type());
+                if (index < 0) {
+                    return false;
+                }
+                Vector<int32_t> &newPIDs = newType2PIDs.editValueAt(index);
+                if (newPIDs.isEmpty()) {
+                    return false;
+                }
+
+                // get the next PID for temp[i]->type() in the new PID map
+                Vector<int32_t>::iterator it = newPIDs.begin();
+
+                // change the PID of the stream, and add it back
+                temp.editValueAt(i)->setPID(*it);
+                mStreams.add(temp[i]->pid(), temp.editValueAt(i));
+
+                // remove the used PID
+                newPIDs.erase(it);
+            }
+        }
+    }
+    return success;
+}
 
 status_t ATSParser::Program::parseProgramMap(ABitReader *br) {
     unsigned table_id = br->getBits(8);
@@ -257,13 +364,11 @@
         return ERROR_MALFORMED;
     }
 
-    CHECK_EQ(br->getBits(1), 0u);
+    br->skipBits(1);  // '0'
     MY_LOGV("  reserved = %u", br->getBits(2));
 
     unsigned section_length = br->getBits(12);
     ALOGV("  section_length = %u", section_length);
-    CHECK_EQ(section_length & 0xc00, 0u);
-    CHECK_LE(section_length, 1021u);
 
     MY_LOGV("  program_number = %u", br->getBits(16));
     MY_LOGV("  reserved = %u", br->getBits(2));
@@ -280,7 +385,6 @@
 
     unsigned program_info_length = br->getBits(12);
     ALOGV("  program_info_length = %u", program_info_length);
-    CHECK_EQ(program_info_length & 0xc00, 0u);
 
     br->skipBits(program_info_length * 8);  // skip descriptors
 
@@ -291,8 +395,7 @@
     // final CRC.
     size_t infoBytesRemaining = section_length - 9 - program_info_length - 4;
 
-    while (infoBytesRemaining > 0) {
-        CHECK_GE(infoBytesRemaining, 5u);
+    while (infoBytesRemaining >= 5) {
 
         unsigned streamType = br->getBits(8);
         ALOGV("    stream_type = 0x%02x", streamType);
@@ -306,9 +409,6 @@
 
         unsigned ES_info_length = br->getBits(12);
         ALOGV("    ES_info_length = %u", ES_info_length);
-        CHECK_EQ(ES_info_length & 0xc00, 0u);
-
-        CHECK_GE(infoBytesRemaining - 5, ES_info_length);
 
 #if 0
         br->skipBits(ES_info_length * 8);  // skip descriptors
@@ -320,13 +420,13 @@
             unsigned descLength = br->getBits(8);
             ALOGV("      len = %u", descLength);
 
-            CHECK_GE(info_bytes_remaining, 2 + descLength);
-
+            if (info_bytes_remaining < descLength) {
+                return ERROR_MALFORMED;
+            }
             br->skipBits(descLength * 8);
 
             info_bytes_remaining -= descLength + 2;
         }
-        CHECK_EQ(info_bytes_remaining, 0u);
 #endif
 
         StreamInfo info;
@@ -337,7 +437,9 @@
         infoBytesRemaining -= 5 + ES_info_length;
     }
 
-    CHECK_EQ(infoBytesRemaining, 0u);
+    if (infoBytesRemaining != 0) {
+        ALOGW("Section data remains unconsumed");
+    }
     MY_LOGV("  CRC = 0x%08x", br->getBits(32));
 
     bool PIDsChanged = false;
@@ -370,39 +472,8 @@
         }
 #endif
 
-        // The only case we can recover from is if we have two streams
-        // and they switched PIDs.
-
-        bool success = false;
-
-        if (mStreams.size() == 2 && infos.size() == 2) {
-            const StreamInfo &info1 = infos.itemAt(0);
-            const StreamInfo &info2 = infos.itemAt(1);
-
-            sp<Stream> s1 = mStreams.editValueAt(0);
-            sp<Stream> s2 = mStreams.editValueAt(1);
-
-            bool caseA =
-                info1.mPID == s1->pid() && info1.mType == s2->type()
-                    && info2.mPID == s2->pid() && info2.mType == s1->type();
-
-            bool caseB =
-                info1.mPID == s2->pid() && info1.mType == s1->type()
-                    && info2.mPID == s1->pid() && info2.mType == s2->type();
-
-            if (caseA || caseB) {
-                unsigned pid1 = s1->pid();
-                unsigned pid2 = s2->pid();
-                s1->setPID(pid2);
-                s2->setPID(pid1);
-
-                mStreams.clear();
-                mStreams.add(s1->pid(), s1);
-                mStreams.add(s2->pid(), s2);
-
-                success = true;
-            }
-        }
+        // we can recover if number of streams for each type remain the same
+        bool success = switchPIDs(infos);
 
         if (!success) {
             ALOGI("Stream PIDs changed and we cannot recover.");
@@ -426,6 +497,32 @@
     return OK;
 }
 
+int64_t ATSParser::Program::recoverPTS(uint64_t PTS_33bit) {
+    // We only have the lower 33-bit of the PTS. It could overflow within a
+    // reasonable amount of time. To handle the wrap-around, use fancy math
+    // to get an extended PTS that is within [-0xffffffff, 0xffffffff]
+    // of the latest recovered PTS.
+    if (mLastRecoveredPTS < 0ll) {
+        // Use the original 33bit number for 1st frame, the reason is that
+        // if 1st frame wraps to negative that's far away from 0, we could
+        // never start. Only start wrapping around from 2nd frame.
+        mLastRecoveredPTS = static_cast<int64_t>(PTS_33bit);
+    } else {
+        mLastRecoveredPTS = static_cast<int64_t>(
+                ((mLastRecoveredPTS - PTS_33bit + 0x100000000ll)
+                & 0xfffffffe00000000ull) | PTS_33bit);
+        // We start from 0, but recovered PTS could be slightly below 0.
+        // Clamp it to 0 as the rest of the pipeline doesn't take negative pts.
+        // (eg. video is read first and starts at 0, but audio starts at 0xfffffff0)
+        if (mLastRecoveredPTS < 0ll) {
+            ALOGI("Clamping negative recovered PTS (%" PRId64 ") to 0", mLastRecoveredPTS);
+            mLastRecoveredPTS = 0ll;
+        }
+    }
+
+    return mLastRecoveredPTS;
+}
+
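recoverPTS() above extends the raw 33-bit transport-stream PTS so that each recovered value lands within [-0xffffffff, 0xffffffff] of the previous one, which is what makes wrap-around at 2^33 harmless. A small standalone check of the same arithmetic with hypothetical sample values (extendPTS() is an illustration of the formula, not part of the patch):

#include <cstdint>
#include <cstdio>

// Mirrors the extension arithmetic in ATSParser::Program::recoverPTS().
static int64_t extendPTS(int64_t lastRecoveredPTS, uint64_t pts33) {
    return static_cast<int64_t>(
            ((lastRecoveredPTS - pts33 + 0x100000000ll) & 0xfffffffe00000000ull)
            | pts33);
}

int main() {
    // Just before the 2^33 wrap-around...
    int64_t last = 0x1fffffff0ll;
    // ...the next 33-bit PTS wraps to a small value.
    uint64_t wrapped = 0x10;
    int64_t recovered = extendPTS(last, wrapped);
    // Prints 0x200000010: the wrapped value is placed one full 2^33 period
    // later, only 32 ticks after the previous recovered PTS.
    printf("recovered PTS = 0x%llx\n", (unsigned long long)recovered);
    return 0;
}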
 sp<MediaSource> ATSParser::Program::getSource(SourceType type) {
     size_t index = (type == AUDIO) ? 0 : 0;
 
@@ -456,6 +553,8 @@
 }
 
 int64_t ATSParser::Program::convertPTSToTimestamp(uint64_t PTS) {
+    PTS = recoverPTS(PTS);
+
     if (!(mParser->mFlags & TS_TIMESTAMPS_ARE_ABSOLUTE)) {
         if (!mFirstPTSValid) {
             mFirstPTSValid = true;
@@ -530,6 +629,11 @@
                     ElementaryStreamQueue::AC3);
             break;
 
+        case STREAMTYPE_METADATA:
+            mQueue = new ElementaryStreamQueue(
+                    ElementaryStreamQueue::METADATA);
+            break;
+
         default:
             break;
     }
@@ -549,7 +653,8 @@
 
 status_t ATSParser::Stream::parse(
         unsigned continuity_counter,
-        unsigned payload_unit_start_indicator, ABitReader *br) {
+        unsigned payload_unit_start_indicator, ABitReader *br,
+        SyncEvent *event) {
     if (mQueue == NULL) {
         return OK;
     }
@@ -580,19 +685,22 @@
     mExpectedContinuityCounter = (continuity_counter + 1) & 0x0f;
 
     if (payload_unit_start_indicator) {
+        off64_t offset = (event != NULL) ? event->getOffset() : 0;
         if (mPayloadStarted) {
             // Otherwise we run the danger of receiving the trailing bytes
             // of a PES packet that we never saw the start of and assuming
             // we have a complete PES packet.
 
-            status_t err = flush();
+            status_t err = flush(event);
 
             if (err != OK) {
-                return err;
+                ALOGW("Error (%08x) happened while flushing; we simply discard "
+                      "the PES packet and continue.", err);
             }
         }
 
         mPayloadStarted = true;
+        mPesStartOffset = offset;
     }
 
     if (!mPayloadStarted) {
@@ -600,7 +708,10 @@
     }
 
     size_t payloadSizeBits = br->numBitsLeft();
-    CHECK_EQ(payloadSizeBits % 8, 0u);
+    if (payloadSizeBits % 8 != 0u) {
+        ALOGE("Wrong value");
+        return BAD_VALUE;
+    }
 
     size_t neededSize = mBuffer->size() + payloadSizeBits / 8;
     if (mBuffer->capacity() < neededSize) {
@@ -648,6 +759,13 @@
     }
 }
 
+bool ATSParser::Stream::isMeta() const {
+    if (mStreamType == STREAMTYPE_METADATA) {
+        return true;
+    }
+    return false;
+}
+
 void ATSParser::Stream::signalDiscontinuity(
         DiscontinuityType type, const sp<AMessage> &extra) {
     mExpectedContinuityCounter = -1;
@@ -657,6 +775,7 @@
     }
 
     mPayloadStarted = false;
+    mEOSReached = false;
     mBuffer->setRange(0, 0);
 
     bool clearFormat = false;
@@ -695,10 +814,10 @@
         mSource->signalEOS(finalResult);
     }
     mEOSReached = true;
-    flush();
+    flush(NULL);
 }
 
-status_t ATSParser::Stream::parsePES(ABitReader *br) {
+status_t ATSParser::Stream::parsePES(ABitReader *br, SyncEvent *event) {
     unsigned packet_startcode_prefix = br->getBits(24);
 
     ALOGV("packet_startcode_prefix = 0x%08x", packet_startcode_prefix);
@@ -710,8 +829,6 @@
         return ERROR_MALFORMED;
     }
 
-    CHECK_EQ(packet_startcode_prefix, 0x000001u);
-
     unsigned stream_id = br->getBits(8);
     ALOGV("stream_id = 0x%02x", stream_id);
 
@@ -726,7 +843,9 @@
             && stream_id != 0xff  // program_stream_directory
             && stream_id != 0xf2  // DSMCC
             && stream_id != 0xf8) {  // H.222.1 type E
-        CHECK_EQ(br->getBits(2), 2u);
+        if (br->getBits(2) != 2u) {
+            return ERROR_MALFORMED;
+        }
 
         MY_LOGV("PES_scrambling_control = %u", br->getBits(2));
         MY_LOGV("PES_priority = %u", br->getBits(1));
@@ -760,34 +879,51 @@
         uint64_t PTS = 0, DTS = 0;
 
         if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
-            CHECK_GE(optional_bytes_remaining, 5u);
+            if (optional_bytes_remaining < 5u) {
+                return ERROR_MALFORMED;
+            }
 
             if (br->getBits(4) != PTS_DTS_flags) {
-                ALOGE("PES data Error!");
                 return ERROR_MALFORMED;
             }
             PTS = ((uint64_t)br->getBits(3)) << 30;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= ((uint64_t)br->getBits(15)) << 15;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= br->getBits(15);
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("PTS = 0x%016" PRIx64 " (%.2f)", PTS, PTS / 90000.0);
 
             optional_bytes_remaining -= 5;
 
             if (PTS_DTS_flags == 3) {
-                CHECK_GE(optional_bytes_remaining, 5u);
+                if (optional_bytes_remaining < 5u) {
+                    return ERROR_MALFORMED;
+                }
 
-                CHECK_EQ(br->getBits(4), 1u);
+                if (br->getBits(4) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 DTS = ((uint64_t)br->getBits(3)) << 30;
-                CHECK_EQ(br->getBits(1), 1u);
+                if (br->getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= ((uint64_t)br->getBits(15)) << 15;
-                CHECK_EQ(br->getBits(1), 1u);
+                if (br->getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= br->getBits(15);
-                CHECK_EQ(br->getBits(1), 1u);
+                if (br->getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 ALOGV("DTS = %" PRIu64, DTS);
 
@@ -796,31 +932,47 @@
         }
 
         if (ESCR_flag) {
-            CHECK_GE(optional_bytes_remaining, 6u);
+            if (optional_bytes_remaining < 6u) {
+                return ERROR_MALFORMED;
+            }
 
             br->getBits(2);
 
             uint64_t ESCR = ((uint64_t)br->getBits(3)) << 30;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= ((uint64_t)br->getBits(15)) << 15;
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= br->getBits(15);
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("ESCR = %" PRIu64, ESCR);
             MY_LOGV("ESCR_extension = %u", br->getBits(9));
 
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 6;
         }
 
         if (ES_rate_flag) {
-            CHECK_GE(optional_bytes_remaining, 3u);
+            if (optional_bytes_remaining < 3u) {
+                return ERROR_MALFORMED;
+            }
 
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             MY_LOGV("ES_rate = %u", br->getBits(22));
-            CHECK_EQ(br->getBits(1), 1u);
+            if (br->getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 3;
         }
@@ -830,7 +982,9 @@
         // ES data follows.
 
         if (PES_packet_length != 0) {
-            CHECK_GE(PES_packet_length, PES_header_data_length + 3);
+            if (PES_packet_length < PES_header_data_length + 3) {
+                return ERROR_MALFORMED;
+            }
 
             unsigned dataLength =
                 PES_packet_length - 3 - PES_header_data_length;
@@ -843,35 +997,39 @@
                 return ERROR_MALFORMED;
             }
 
-            CHECK_GE(br->numBitsLeft(), dataLength * 8);
-
             onPayloadData(
-                    PTS_DTS_flags, PTS, DTS, br->data(), dataLength);
+                    PTS_DTS_flags, PTS, DTS, br->data(), dataLength, event);
 
             br->skipBits(dataLength * 8);
         } else {
             onPayloadData(
                     PTS_DTS_flags, PTS, DTS,
-                    br->data(), br->numBitsLeft() / 8);
+                    br->data(), br->numBitsLeft() / 8, event);
 
             size_t payloadSizeBits = br->numBitsLeft();
-            CHECK_EQ(payloadSizeBits % 8, 0u);
+            if (payloadSizeBits % 8 != 0u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("There's %zu bytes of payload.", payloadSizeBits / 8);
         }
     } else if (stream_id == 0xbe) {  // padding_stream
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br->skipBits(PES_packet_length * 8);
     } else {
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br->skipBits(PES_packet_length * 8);
     }
 
     return OK;
 }
 
-status_t ATSParser::Stream::flush() {
-    if (mBuffer->size() == 0) {
+status_t ATSParser::Stream::flush(SyncEvent *event) {
+    if (mBuffer == NULL || mBuffer->size() == 0) {
         return OK;
     }
 
@@ -879,7 +1037,7 @@
 
     ABitReader br(mBuffer->data(), mBuffer->size());
 
-    status_t err = parsePES(&br);
+    status_t err = parsePES(&br, event);
 
     mBuffer->setRange(0, 0);
 
@@ -888,7 +1046,7 @@
 
 void ATSParser::Stream::onPayloadData(
         unsigned PTS_DTS_flags, uint64_t PTS, uint64_t /* DTS */,
-        const uint8_t *data, size_t size) {
+        const uint8_t *data, size_t size, SyncEvent *event) {
 #if 0
     ALOGI("payload streamType 0x%02x, PTS = 0x%016llx, dPTS = %lld",
           mStreamType,
@@ -915,6 +1073,7 @@
     }
 
     sp<ABuffer> accessUnit;
+    bool found = false;
     while ((accessUnit = mQueue->dequeueAccessUnit()) != NULL) {
         if (mSource == NULL) {
             sp<MetaData> meta = mQueue->getFormat();
@@ -942,6 +1101,17 @@
             }
             mSource->queueAccessUnit(accessUnit);
         }
+
+        if ((event != NULL) && !found && mQueue->getFormat() != NULL) {
+            int32_t sync = 0;
+            if (accessUnit->meta()->findInt32("isSync", &sync) && sync) {
+                int64_t timeUs;
+                if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
+                    found = true;
+                    event->init(mPesStartOffset, mSource, timeUs);
+                }
+            }
+        }
     }
 }
 
@@ -963,6 +1133,14 @@
             break;
         }
 
+        case META:
+        {
+            if (isMeta()) {
+                return mSource;
+            }
+            break;
+        }
+
         default:
             break;
     }
@@ -977,6 +1155,7 @@
       mAbsoluteTimeAnchorUs(-1ll),
       mTimeOffsetValid(false),
       mTimeOffsetUs(0ll),
+      mLastRecoveredPTS(-1ll),
       mNumTSPacketsParsed(0),
       mNumPCRs(0) {
     mPSISections.add(0 /* PID */, new PSISection);
@@ -985,31 +1164,54 @@
 ATSParser::~ATSParser() {
 }
 
-status_t ATSParser::feedTSPacket(const void *data, size_t size) {
-    CHECK_EQ(size, kTSPacketSize);
+status_t ATSParser::feedTSPacket(const void *data, size_t size,
+        SyncEvent *event) {
+    if (size != kTSPacketSize) {
+        ALOGE("Wrong TS packet size");
+        return BAD_VALUE;
+    }
 
     ABitReader br((const uint8_t *)data, kTSPacketSize);
-    return parseTS(&br);
+    return parseTS(&br, event);
 }
 
 void ATSParser::signalDiscontinuity(
         DiscontinuityType type, const sp<AMessage> &extra) {
     int64_t mediaTimeUs;
-    if ((type & DISCONTINUITY_TIME)
-            && extra != NULL
-            && extra->findInt64(
-                IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
-        mAbsoluteTimeAnchorUs = mediaTimeUs;
+    if ((type & DISCONTINUITY_TIME) && extra != NULL) {
+        if (extra->findInt64(IStreamListener::kKeyMediaTimeUs, &mediaTimeUs)) {
+            mAbsoluteTimeAnchorUs = mediaTimeUs;
+        }
+        if ((mFlags & TS_TIMESTAMPS_ARE_ABSOLUTE)
+                && extra->findInt64(
+                    IStreamListener::kKeyRecentMediaTimeUs, &mediaTimeUs)) {
+            if (mAbsoluteTimeAnchorUs >= 0ll) {
+                mediaTimeUs -= mAbsoluteTimeAnchorUs;
+            }
+            if (mTimeOffsetValid) {
+                mediaTimeUs -= mTimeOffsetUs;
+            }
+            mLastRecoveredPTS = (mediaTimeUs * 9) / 100;
+        }
     } else if (type == DISCONTINUITY_ABSOLUTE_TIME) {
         int64_t timeUs;
-        CHECK(extra->findInt64("timeUs", &timeUs));
+        if (!extra->findInt64("timeUs", &timeUs)) {
+            ALOGE("timeUs not found");
+            return;
+        }
 
-        CHECK(mPrograms.empty());
+        if (!mPrograms.empty()) {
+            ALOGE("mPrograms is not empty");
+            return;
+        }
         mAbsoluteTimeAnchorUs = timeUs;
         return;
     } else if (type == DISCONTINUITY_TIME_OFFSET) {
         int64_t offset;
-        CHECK(extra->findInt64("offset", &offset));
+        if (!extra->findInt64("offset", &offset)) {
+            ALOGE("offset not found");
+            return;
+        }
 
         mTimeOffsetValid = true;
         mTimeOffsetUs = offset;
@@ -1022,7 +1224,10 @@
 }
 
 void ATSParser::signalEOS(status_t finalResult) {
-    CHECK_NE(finalResult, (status_t)OK);
+    if (finalResult == (status_t) OK) {
+        ALOGE("finalResult not OK");
+        return;
+    }
 
     for (size_t i = 0; i < mPrograms.size(); ++i) {
         mPrograms.editItemAt(i)->signalEOS(finalResult);
@@ -1038,14 +1243,12 @@
     }
     unsigned section_syntax_indictor = br->getBits(1);
     ALOGV("  section_syntax_indictor = %u", section_syntax_indictor);
-    CHECK_EQ(section_syntax_indictor, 1u);
 
-    CHECK_EQ(br->getBits(1), 0u);
+    br->skipBits(1);  // '0'
     MY_LOGV("  reserved = %u", br->getBits(2));
 
     unsigned section_length = br->getBits(12);
     ALOGV("  section_length = %u", section_length);
-    CHECK_EQ(section_length & 0xc00, 0u);
 
     MY_LOGV("  transport_stream_id = %u", br->getBits(16));
     MY_LOGV("  reserved = %u", br->getBits(2));
@@ -1055,7 +1258,6 @@
     MY_LOGV("  last_section_number = %u", br->getBits(8));
 
     size_t numProgramBytes = (section_length - 5 /* header */ - 4 /* crc */);
-    CHECK_EQ((numProgramBytes % 4), 0u);
 
     for (size_t i = 0; i < numProgramBytes / 4; ++i) {
         unsigned program_number = br->getBits(16);
@@ -1083,7 +1285,7 @@
 
             if (!found) {
                 mPrograms.push(
-                        new Program(this, program_number, programMapPID));
+                        new Program(this, program_number, programMapPID, mLastRecoveredPTS));
             }
 
             if (mPSISections.indexOfKey(programMapPID) < 0) {
@@ -1098,7 +1300,8 @@
 status_t ATSParser::parsePID(
         ABitReader *br, unsigned PID,
         unsigned continuity_counter,
-        unsigned payload_unit_start_indicator) {
+        unsigned payload_unit_start_indicator,
+        SyncEvent *event) {
     ssize_t sectionIndex = mPSISections.indexOfKey(PID);
 
     if (sectionIndex >= 0) {
@@ -1106,14 +1309,18 @@
 
         if (payload_unit_start_indicator) {
             if (!section->isEmpty()) {
-                return ERROR_UNSUPPORTED;
+                ALOGW("parsePID encounters payload_unit_start_indicator when section is not empty");
+                section->clear();
             }
 
             unsigned skip = br->getBits(8);
+            section->setSkipBytes(skip + 1);  // skip filler bytes + pointer field itself
             br->skipBits(skip * 8);
         }
 
-        CHECK((br->numBitsLeft() % 8) == 0);
+        if (br->numBitsLeft() % 8 != 0) {
+            return ERROR_MALFORMED;
+        }
         status_t err = section->append(br->data(), br->numBitsLeft() / 8);
 
         if (err != OK) {
@@ -1124,6 +1331,9 @@
             return OK;
         }
 
+        if (!section->isCRCOkay()) {
+            return BAD_VALUE;
+        }
         ABitReader sectionBits(section->data(), section->size());
 
         if (PID == 0) {
@@ -1163,7 +1373,7 @@
         status_t err;
         if (mPrograms.editItemAt(i)->parsePID(
                     PID, continuity_counter, payload_unit_start_indicator,
-                    br, &err)) {
+                    br, &err, event)) {
             if (err != OK) {
                 return err;
             }
@@ -1180,10 +1390,15 @@
     return OK;
 }
 
-void ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
+status_t ATSParser::parseAdaptationField(ABitReader *br, unsigned PID) {
     unsigned adaptation_field_length = br->getBits(8);
 
     if (adaptation_field_length > 0) {
+        if (adaptation_field_length * 8 > br->numBitsLeft()) {
+            ALOGV("Adaptation field should be included in a single TS packet.");
+            return ERROR_MALFORMED;
+        }
+
         unsigned discontinuity_indicator = br->getBits(1);
 
         if (discontinuity_indicator) {
@@ -1196,6 +1411,9 @@
         size_t numBitsRead = 4;
 
         if (PCR_flag) {
+            if (adaptation_field_length * 8 < 52) {
+                return ERROR_MALFORMED;
+            }
             br->skipBits(4);
             uint64_t PCR_base = br->getBits(32);
             PCR_base = (PCR_base << 1) | br->getBits(1);
@@ -1226,13 +1444,12 @@
             numBitsRead += 52;
         }
 
-        CHECK_GE(adaptation_field_length * 8, numBitsRead);
-
         br->skipBits(adaptation_field_length * 8 - numBitsRead);
     }
+    return OK;
 }
 
-status_t ATSParser::parseTS(ABitReader *br) {
+status_t ATSParser::parseTS(ABitReader *br, SyncEvent *event) {
     ALOGV("---");
 
     unsigned sync_byte = br->getBits(8);
@@ -1264,15 +1481,16 @@
 
     // ALOGI("PID = 0x%04x, continuity_counter = %u", PID, continuity_counter);
 
-    if (adaptation_field_control == 2 || adaptation_field_control == 3) {
-        parseAdaptationField(br, PID);
-    }
-
     status_t err = OK;
 
-    if (adaptation_field_control == 1 || adaptation_field_control == 3) {
-        err = parsePID(
-                br, PID, continuity_counter, payload_unit_start_indicator);
+    if (adaptation_field_control == 2 || adaptation_field_control == 3) {
+        err = parseAdaptationField(br, PID);
+    }
+    if (err == OK) {
+        if (adaptation_field_control == 1 || adaptation_field_control == 3) {
+            err = parsePID(br, PID, continuity_counter,
+                    payload_unit_start_indicator, event);
+        }
     }
 
     ++mNumTSPacketsParsed;
@@ -1346,7 +1564,79 @@
 
 ////////////////////////////////////////////////////////////////////////////////
 
-ATSParser::PSISection::PSISection() {
+
+// CRC32 used for PSI section. The table was generated by following command:
+// $ python pycrc.py --model crc-32-mpeg --algorithm table-driven --generate c
+// Visit http://www.tty1.net/pycrc/index_en.html for more details.
+uint32_t ATSParser::PSISection::CRC_TABLE[] = {
+    0x00000000, 0x04c11db7, 0x09823b6e, 0x0d4326d9,
+    0x130476dc, 0x17c56b6b, 0x1a864db2, 0x1e475005,
+    0x2608edb8, 0x22c9f00f, 0x2f8ad6d6, 0x2b4bcb61,
+    0x350c9b64, 0x31cd86d3, 0x3c8ea00a, 0x384fbdbd,
+    0x4c11db70, 0x48d0c6c7, 0x4593e01e, 0x4152fda9,
+    0x5f15adac, 0x5bd4b01b, 0x569796c2, 0x52568b75,
+    0x6a1936c8, 0x6ed82b7f, 0x639b0da6, 0x675a1011,
+    0x791d4014, 0x7ddc5da3, 0x709f7b7a, 0x745e66cd,
+    0x9823b6e0, 0x9ce2ab57, 0x91a18d8e, 0x95609039,
+    0x8b27c03c, 0x8fe6dd8b, 0x82a5fb52, 0x8664e6e5,
+    0xbe2b5b58, 0xbaea46ef, 0xb7a96036, 0xb3687d81,
+    0xad2f2d84, 0xa9ee3033, 0xa4ad16ea, 0xa06c0b5d,
+    0xd4326d90, 0xd0f37027, 0xddb056fe, 0xd9714b49,
+    0xc7361b4c, 0xc3f706fb, 0xceb42022, 0xca753d95,
+    0xf23a8028, 0xf6fb9d9f, 0xfbb8bb46, 0xff79a6f1,
+    0xe13ef6f4, 0xe5ffeb43, 0xe8bccd9a, 0xec7dd02d,
+    0x34867077, 0x30476dc0, 0x3d044b19, 0x39c556ae,
+    0x278206ab, 0x23431b1c, 0x2e003dc5, 0x2ac12072,
+    0x128e9dcf, 0x164f8078, 0x1b0ca6a1, 0x1fcdbb16,
+    0x018aeb13, 0x054bf6a4, 0x0808d07d, 0x0cc9cdca,
+    0x7897ab07, 0x7c56b6b0, 0x71159069, 0x75d48dde,
+    0x6b93dddb, 0x6f52c06c, 0x6211e6b5, 0x66d0fb02,
+    0x5e9f46bf, 0x5a5e5b08, 0x571d7dd1, 0x53dc6066,
+    0x4d9b3063, 0x495a2dd4, 0x44190b0d, 0x40d816ba,
+    0xaca5c697, 0xa864db20, 0xa527fdf9, 0xa1e6e04e,
+    0xbfa1b04b, 0xbb60adfc, 0xb6238b25, 0xb2e29692,
+    0x8aad2b2f, 0x8e6c3698, 0x832f1041, 0x87ee0df6,
+    0x99a95df3, 0x9d684044, 0x902b669d, 0x94ea7b2a,
+    0xe0b41de7, 0xe4750050, 0xe9362689, 0xedf73b3e,
+    0xf3b06b3b, 0xf771768c, 0xfa325055, 0xfef34de2,
+    0xc6bcf05f, 0xc27dede8, 0xcf3ecb31, 0xcbffd686,
+    0xd5b88683, 0xd1799b34, 0xdc3abded, 0xd8fba05a,
+    0x690ce0ee, 0x6dcdfd59, 0x608edb80, 0x644fc637,
+    0x7a089632, 0x7ec98b85, 0x738aad5c, 0x774bb0eb,
+    0x4f040d56, 0x4bc510e1, 0x46863638, 0x42472b8f,
+    0x5c007b8a, 0x58c1663d, 0x558240e4, 0x51435d53,
+    0x251d3b9e, 0x21dc2629, 0x2c9f00f0, 0x285e1d47,
+    0x36194d42, 0x32d850f5, 0x3f9b762c, 0x3b5a6b9b,
+    0x0315d626, 0x07d4cb91, 0x0a97ed48, 0x0e56f0ff,
+    0x1011a0fa, 0x14d0bd4d, 0x19939b94, 0x1d528623,
+    0xf12f560e, 0xf5ee4bb9, 0xf8ad6d60, 0xfc6c70d7,
+    0xe22b20d2, 0xe6ea3d65, 0xeba91bbc, 0xef68060b,
+    0xd727bbb6, 0xd3e6a601, 0xdea580d8, 0xda649d6f,
+    0xc423cd6a, 0xc0e2d0dd, 0xcda1f604, 0xc960ebb3,
+    0xbd3e8d7e, 0xb9ff90c9, 0xb4bcb610, 0xb07daba7,
+    0xae3afba2, 0xaafbe615, 0xa7b8c0cc, 0xa379dd7b,
+    0x9b3660c6, 0x9ff77d71, 0x92b45ba8, 0x9675461f,
+    0x8832161a, 0x8cf30bad, 0x81b02d74, 0x857130c3,
+    0x5d8a9099, 0x594b8d2e, 0x5408abf7, 0x50c9b640,
+    0x4e8ee645, 0x4a4ffbf2, 0x470cdd2b, 0x43cdc09c,
+    0x7b827d21, 0x7f436096, 0x7200464f, 0x76c15bf8,
+    0x68860bfd, 0x6c47164a, 0x61043093, 0x65c52d24,
+    0x119b4be9, 0x155a565e, 0x18197087, 0x1cd86d30,
+    0x029f3d35, 0x065e2082, 0x0b1d065b, 0x0fdc1bec,
+    0x3793a651, 0x3352bbe6, 0x3e119d3f, 0x3ad08088,
+    0x2497d08d, 0x2056cd3a, 0x2d15ebe3, 0x29d4f654,
+    0xc5a92679, 0xc1683bce, 0xcc2b1d17, 0xc8ea00a0,
+    0xd6ad50a5, 0xd26c4d12, 0xdf2f6bcb, 0xdbee767c,
+    0xe3a1cbc1, 0xe760d676, 0xea23f0af, 0xeee2ed18,
+    0xf0a5bd1d, 0xf464a0aa, 0xf9278673, 0xfde69bc4,
+    0x89b8fd09, 0x8d79e0be, 0x803ac667, 0x84fbdbd0,
+    0x9abc8bd5, 0x9e7d9662, 0x933eb0bb, 0x97ffad0c,
+    0xafb010b1, 0xab710d06, 0xa6322bdf, 0xa2f33668,
+    0xbcb4666d, 0xb8757bda, 0xb5365d03, 0xb1f740b4
+    };
+
+ATSParser::PSISection::PSISection() :
+    mSkipBytes(0) {
 }
 
 ATSParser::PSISection::~PSISection() {
@@ -1377,10 +1667,15 @@
     return OK;
 }
 
+void ATSParser::PSISection::setSkipBytes(uint8_t skip) {
+    mSkipBytes = skip;
+}
+
 void ATSParser::PSISection::clear() {
     if (mBuffer != NULL) {
         mBuffer->setRange(0, 0);
     }
+    mSkipBytes = 0;
 }
 
 bool ATSParser::PSISection::isComplete() const {
@@ -1404,4 +1699,30 @@
     return mBuffer == NULL ? 0 : mBuffer->size();
 }
 
+bool ATSParser::PSISection::isCRCOkay() const {
+    if (!isComplete()) {
+        return false;
+    }
+    uint8_t* data = mBuffer->data();
+
+    // Return true if section_syntax_indicator says no section follows the section_length field (nothing to CRC-check).
+    if ((data[1] & 0x80) == 0) {
+        return true;
+    }
+
+    unsigned sectionLength = U16_AT(data + 1) & 0xfff;
+    ALOGV("sectionLength %u, skip %u", sectionLength, mSkipBytes);
+
+    // Skip the preceding field that is present when the payload start indicator is on.
+    sectionLength -= mSkipBytes;
+
+    uint32_t crc = 0xffffffff;
+    for (unsigned i = 0; i < sectionLength + 4 /* crc */; i++) {
+        uint8_t b = data[i];
+        int index = ((crc >> 24) ^ (b & 0xff)) & 0xff;
+        crc = CRC_TABLE[index] ^ (crc << 8);
+    }
+    ALOGV("crc: %08x\n", crc);
+    return (crc == 0);
+}
 }  // namespace android
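
The PSISection::isCRCOkay() check above drives a table-driven CRC-32/MPEG-2 loop. As a point of reference only (not part of the patch), a minimal bitwise sketch of the same checksum (polynomial 0x04C11DB7, initial value 0xFFFFFFFF, no reflection, no final XOR) is shown below; a section passes when the CRC computed over its body plus the trailing CRC_32 field evaluates to 0.

    #include <stddef.h>
    #include <stdint.h>

    // Bitwise CRC-32/MPEG-2, equivalent to the table-driven loop in
    // PSISection::isCRCOkay(). Illustrative sketch only.
    static uint32_t crc32Mpeg2(const uint8_t *data, size_t len) {
        uint32_t crc = 0xffffffff;
        for (size_t i = 0; i < len; ++i) {
            crc ^= (uint32_t)data[i] << 24;
            for (int bit = 0; bit < 8; ++bit) {
                crc = (crc & 0x80000000) ? (crc << 1) ^ 0x04c11db7
                                         : (crc << 1);
            }
        }
        return crc;  // 0 when the 4 trailing CRC bytes are included and valid
    }
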
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 75d76dc..430a8d5 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -22,6 +22,7 @@
 
 #include <media/stagefright/foundation/ABase.h>
 #include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaSource.h>
 #include <utils/KeyedVector.h>
 #include <utils/Vector.h>
 #include <utils/RefBase.h>
@@ -30,7 +31,6 @@
 
 class ABitReader;
 struct ABuffer;
-struct MediaSource;
 
 struct ATSParser : public RefBase {
     enum DiscontinuityType {
@@ -46,6 +46,9 @@
             DISCONTINUITY_AUDIO_FORMAT
                 | DISCONTINUITY_VIDEO_FORMAT
                 | DISCONTINUITY_TIME,
+        DISCONTINUITY_FORMAT_ONLY       =
+            DISCONTINUITY_AUDIO_FORMAT
+                | DISCONTINUITY_VIDEO_FORMAT,
     };
 
     enum Flags {
@@ -59,9 +62,43 @@
         ALIGNED_VIDEO_DATA         = 2,
     };
 
+    // SyncEvent is used to signal a sync point event at feedTSPacket().
+    struct SyncEvent {
+        SyncEvent(off64_t offset);
+
+        void init(off64_t offset, const sp<MediaSource> &source,
+                int64_t timeUs);
+
+        bool isInit() { return mInit; }
+        off64_t getOffset() { return mOffset; }
+        const sp<MediaSource> &getMediaSource() { return mMediaSource; }
+        int64_t getTimeUs() { return mTimeUs; }
+
+    private:
+        bool mInit;
+        /*
+         * mInit == false: the current offset
+         * mInit == true: the start offset of sync payload
+         */
+        off64_t mOffset;
+        /* The media source object for this event. */
+        sp<MediaSource> mMediaSource;
+        /* The timestamp of the sync frame. */
+        int64_t mTimeUs;
+    };
+
     ATSParser(uint32_t flags = 0);
 
-    status_t feedTSPacket(const void *data, size_t size);
+    // Feed a TS packet into the parser. An uninitialized event with the start
+    // offset of this TS packet goes in, and if the parser detects a PES with
+    // a sync frame, the event will be initialized with the start offset of that
+    // PES. Note that the offset of the event can differ from what was fed, as
+    // a PES may consist of multiple TS packets.
+    //
+    // Even if feedTSPacket() returns a non-OK value, the event may still be
+    // initialized if the parsing failed after the detection.
+    status_t feedTSPacket(
+            const void *data, size_t size, SyncEvent *event = NULL);
 
     void signalDiscontinuity(
             DiscontinuityType type, const sp<AMessage> &extra);
@@ -71,7 +108,8 @@
     enum SourceType {
         VIDEO = 0,
         AUDIO = 1,
-        NUM_SOURCE_TYPES = 2
+        META  = 2,
+        NUM_SOURCE_TYPES = 3
     };
     sp<MediaSource> getSource(SourceType type);
     bool hasSource(SourceType type) const;
@@ -87,6 +125,7 @@
         STREAMTYPE_MPEG2_AUDIO          = 0x04,
         STREAMTYPE_MPEG2_AUDIO_ADTS     = 0x0f,
         STREAMTYPE_MPEG4_VIDEO          = 0x10,
+        STREAMTYPE_METADATA             = 0x15,
         STREAMTYPE_H264                 = 0x1b,
 
         // From ATSC A/53 Part 3:2009, 6.7.1
@@ -115,20 +154,31 @@
 
     bool mTimeOffsetValid;
     int64_t mTimeOffsetUs;
+    int64_t mLastRecoveredPTS;
 
     size_t mNumTSPacketsParsed;
 
     void parseProgramAssociationTable(ABitReader *br);
     void parseProgramMap(ABitReader *br);
-    void parsePES(ABitReader *br);
+    // Parse the PES packet that br is pointing to. If the PES contains a sync
+    // frame, set event with the time and the start offset of this PES.
+    // Note that the method itself does not touch event.
+    void parsePES(ABitReader *br, SyncEvent *event);
 
+    // Strip the remaining packet headers and pass the payload to the
+    // appropriate program/stream for parsing. If the payload turns out to be
+    // a PES and contains a sync frame, event shall be set with the time and
+    // start offset of that PES.
+    // Note that the method itself does not touch event.
     status_t parsePID(
         ABitReader *br, unsigned PID,
         unsigned continuity_counter,
-        unsigned payload_unit_start_indicator);
+        unsigned payload_unit_start_indicator,
+        SyncEvent *event);
 
-    void parseAdaptationField(ABitReader *br, unsigned PID);
-    status_t parseTS(ABitReader *br);
+    status_t parseAdaptationField(ABitReader *br, unsigned PID);
+    // see feedTSPacket().
+    status_t parseTS(ABitReader *br, SyncEvent *event);
 
     void updatePCR(unsigned PID, uint64_t PCR, size_t byteOffsetFromStart);
 
diff --git a/media/libstagefright/mpeg2ts/Android.mk b/media/libstagefright/mpeg2ts/Android.mk
index c17a0b7..16b0160 100644
--- a/media/libstagefright/mpeg2ts/Android.mk
+++ b/media/libstagefright/mpeg2ts/Android.mk
@@ -13,7 +13,8 @@
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE:= libstagefright_mpeg2ts
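
For the SyncEvent and feedTSPacket() API added to ATSParser.h above, a hypothetical caller sketch follows (the function name, LOG_TAG and surrounding buffering are assumptions, not part of the patch):

    #define LOG_TAG "TSFeedSketch"
    #include <utils/Log.h>

    #include "ATSParser.h"

    using namespace android;

    // Feed one 188-byte TS packet and log any sync point it completes.
    static void feedOnePacket(const sp<ATSParser> &parser,
            const uint8_t *packet, off64_t fileOffset) {
        ATSParser::SyncEvent event(fileOffset);
        status_t err = parser->feedTSPacket(packet, 188, &event);
        // The event may be initialized even when err != OK, if parsing failed
        // only after the sync frame had been detected.
        if (event.isInit()) {
            // getOffset() is the start of the PES carrying the sync frame; it
            // can precede fileOffset because a PES may span several packets.
            ALOGV("sync at offset %lld, time %lld us",
                    (long long)event.getOffset(), (long long)event.getTimeUs());
        }
        if (err != OK) {
            // the caller decides whether to drop the packet or resynchronize
        }
    }
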
 
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
index f266fe7..cabde32 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.cpp
@@ -19,6 +19,8 @@
 
 #include "AnotherPacketSource.h"
 
+#include "include/avc_utils.h"
+
 #include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
@@ -27,6 +29,7 @@
 #include <media/stagefright/MediaBuffer.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MetaData.h>
+#include <media/stagefright/Utils.h>
 #include <utils/Vector.h>
 
 #include <inttypes.h>
@@ -38,17 +41,22 @@
 AnotherPacketSource::AnotherPacketSource(const sp<MetaData> &meta)
     : mIsAudio(false),
       mIsVideo(false),
+      mEnabled(true),
       mFormat(NULL),
       mLastQueuedTimeUs(0),
       mEOSResult(OK),
       mLatestEnqueuedMeta(NULL),
-      mLatestDequeuedMeta(NULL),
-      mQueuedDiscontinuityCount(0) {
+      mLatestDequeuedMeta(NULL) {
     setFormat(meta);
+
+    mDiscontinuitySegments.push_back(DiscontinuitySegment());
 }
 
 void AnotherPacketSource::setFormat(const sp<MetaData> &meta) {
-    CHECK(mFormat == NULL);
+    if (mFormat != NULL) {
+        // Only allowed to be set once. Requires explicit clear to reset.
+        return;
+    }
 
     mIsAudio = false;
     mIsVideo = false;
@@ -66,7 +74,7 @@
     } else  if (!strncasecmp("video/", mime, 6)) {
         mIsVideo = true;
     } else {
-        CHECK(!strncasecmp("text/", mime, 5));
+        CHECK(!strncasecmp("text/", mime, 5) || !strncasecmp("application/", mime, 12));
     }
 }
 
@@ -91,13 +99,12 @@
     while (it != mBuffers.end()) {
         sp<ABuffer> buffer = *it;
         int32_t discontinuity;
-        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
-            break;
-        }
-
-        sp<RefBase> object;
-        if (buffer->meta()->findObject("format", &object)) {
-            return mFormat = static_cast<MetaData*>(object.get());
+        if (!buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            sp<RefBase> object;
+            if (buffer->meta()->findObject("format", &object)) {
+                setFormat(static_cast<MetaData*>(object.get()));
+                return mFormat;
+            }
         }
 
         ++it;
@@ -123,15 +130,24 @@
                 mFormat.clear();
             }
 
-            --mQueuedDiscontinuityCount;
+            mDiscontinuitySegments.erase(mDiscontinuitySegments.begin());
+            // CHECK(!mDiscontinuitySegments.empty());
             return INFO_DISCONTINUITY;
         }
 
+        // CHECK(!mDiscontinuitySegments.empty());
+        DiscontinuitySegment &seg = *mDiscontinuitySegments.begin();
+
+        int64_t timeUs;
         mLatestDequeuedMeta = (*buffer)->meta()->dup();
+        CHECK(mLatestDequeuedMeta->findInt64("timeUs", &timeUs));
+        if (timeUs > seg.mMaxDequeTimeUs) {
+            seg.mMaxDequeTimeUs = timeUs;
+        }
 
         sp<RefBase> object;
         if ((*buffer)->meta()->findObject("format", &object)) {
-            mFormat = static_cast<MetaData*>(object.get());
+            setFormat(static_cast<MetaData*>(object.get()));
         }
 
         return OK;
@@ -140,6 +156,12 @@
     return mEOSResult;
 }
 
+void AnotherPacketSource::requeueAccessUnit(const sp<ABuffer> &buffer) {
+    // TODO: update the corresponding bookkeeping info.
+    Mutex::Autolock autoLock(mLock);
+    mBuffers.push_front(buffer);
+}
+
 status_t AnotherPacketSource::read(
         MediaBuffer **out, const ReadOptions *) {
     *out = NULL;
@@ -153,7 +175,6 @@
 
         const sp<ABuffer> buffer = *mBuffers.begin();
         mBuffers.erase(mBuffers.begin());
-        mLatestDequeuedMeta = buffer->meta()->dup();
 
         int32_t discontinuity;
         if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
@@ -161,21 +182,40 @@
                 mFormat.clear();
             }
 
+            mDiscontinuitySegments.erase(mDiscontinuitySegments.begin());
+            // CHECK(!mDiscontinuitySegments.empty());
             return INFO_DISCONTINUITY;
         }
 
+        mLatestDequeuedMeta = buffer->meta()->dup();
+
         sp<RefBase> object;
         if (buffer->meta()->findObject("format", &object)) {
-            mFormat = static_cast<MetaData*>(object.get());
+            setFormat(static_cast<MetaData*>(object.get()));
         }
 
         int64_t timeUs;
         CHECK(buffer->meta()->findInt64("timeUs", &timeUs));
+        // CHECK(!mDiscontinuitySegments.empty());
+        DiscontinuitySegment &seg = *mDiscontinuitySegments.begin();
+        if (timeUs > seg.mMaxDequeTimeUs) {
+            seg.mMaxDequeTimeUs = timeUs;
+        }
 
         MediaBuffer *mediaBuffer = new MediaBuffer(buffer);
 
         mediaBuffer->meta_data()->setInt64(kKeyTime, timeUs);
 
+        int32_t isSync;
+        if (buffer->meta()->findInt32("isSync", &isSync)) {
+            mediaBuffer->meta_data()->setInt32(kKeyIsSyncFrame, isSync);
+        }
+
+        sp<ABuffer> sei;
+        if (buffer->meta()->findBuffer("sei", &sei) && sei != NULL) {
+            mediaBuffer->meta_data()->setData(kKeySEI, 0, sei->data(), sei->size());
+        }
+
         *out = mediaBuffer;
         return OK;
     }
@@ -203,18 +243,35 @@
         return;
     }
 
-    int64_t lastQueuedTimeUs;
-    CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
-    mLastQueuedTimeUs = lastQueuedTimeUs;
-    ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)", mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
-
     Mutex::Autolock autoLock(mLock);
     mBuffers.push_back(buffer);
     mCondition.signal();
 
     int32_t discontinuity;
-    if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
-        ++mQueuedDiscontinuityCount;
+    if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+        ALOGV("queueing a discontinuity with queueAccessUnit");
+
+        mLastQueuedTimeUs = 0ll;
+        mEOSResult = OK;
+        mLatestEnqueuedMeta = NULL;
+
+        mDiscontinuitySegments.push_back(DiscontinuitySegment());
+        return;
+    }
+
+    int64_t lastQueuedTimeUs;
+    CHECK(buffer->meta()->findInt64("timeUs", &lastQueuedTimeUs));
+    mLastQueuedTimeUs = lastQueuedTimeUs;
+    ALOGV("queueAccessUnit timeUs=%" PRIi64 " us (%.2f secs)",
+            mLastQueuedTimeUs, mLastQueuedTimeUs / 1E6);
+
+    // CHECK(!mDiscontinuitySegments.empty());
+    DiscontinuitySegment &tailSeg = *(--mDiscontinuitySegments.end());
+    if (lastQueuedTimeUs > tailSeg.mMaxEnqueTimeUs) {
+        tailSeg.mMaxEnqueTimeUs = lastQueuedTimeUs;
+    }
+    if (tailSeg.mMaxDequeTimeUs == -1) {
+        tailSeg.mMaxDequeTimeUs = lastQueuedTimeUs;
     }
 
     if (mLatestEnqueuedMeta == NULL) {
@@ -240,7 +297,9 @@
 
     mBuffers.clear();
     mEOSResult = OK;
-    mQueuedDiscontinuityCount = 0;
+
+    mDiscontinuitySegments.clear();
+    mDiscontinuitySegments.push_back(DiscontinuitySegment());
 
     mFormat = NULL;
     mLatestEnqueuedMeta = NULL;
@@ -267,6 +326,14 @@
 
             ++it;
         }
+
+        for (List<DiscontinuitySegment>::iterator it2 = mDiscontinuitySegments.begin();
+                it2 != mDiscontinuitySegments.end();
+                ++it2) {
+            DiscontinuitySegment &seg = *it2;
+            seg.clear();
+        }
+
     }
 
     mEOSResult = OK;
@@ -277,7 +344,8 @@
         return;
     }
 
-    ++mQueuedDiscontinuityCount;
+    mDiscontinuitySegments.push_back(DiscontinuitySegment());
+
     sp<ABuffer> buffer = new ABuffer(0);
     buffer->meta()->setInt32("discontinuity", static_cast<int32_t>(type));
     buffer->meta()->setMessage("extra", extra);
@@ -296,6 +364,10 @@
 
 bool AnotherPacketSource::hasBufferAvailable(status_t *finalResult) {
     Mutex::Autolock autoLock(mLock);
+    *finalResult = OK;
+    if (!mEnabled) {
+        return false;
+    }
     if (!mBuffers.empty()) {
         return true;
     }
@@ -304,86 +376,53 @@
     return false;
 }
 
+bool AnotherPacketSource::hasDataBufferAvailable(status_t *finalResult) {
+    Mutex::Autolock autoLock(mLock);
+    *finalResult = OK;
+    if (!mEnabled) {
+        return false;
+    }
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); it++) {
+        int32_t discontinuity;
+        if (!(*it)->meta()->findInt32("discontinuity", &discontinuity)) {
+            return true;
+        }
+    }
+
+    *finalResult = mEOSResult;
+    return false;
+}
+
+size_t AnotherPacketSource::getAvailableBufferCount(status_t *finalResult) {
+    Mutex::Autolock autoLock(mLock);
+
+    *finalResult = OK;
+    if (!mEnabled) {
+        return 0;
+    }
+    if (!mBuffers.empty()) {
+        return mBuffers.size();
+    }
+    *finalResult = mEOSResult;
+    return 0;
+}
+
 int64_t AnotherPacketSource::getBufferedDurationUs(status_t *finalResult) {
     Mutex::Autolock autoLock(mLock);
-    return getBufferedDurationUs_l(finalResult);
-}
-
-int64_t AnotherPacketSource::getBufferedDurationUs_l(status_t *finalResult) {
     *finalResult = mEOSResult;
 
-    if (mBuffers.empty()) {
-        return 0;
-    }
-
-    int64_t time1 = -1;
-    int64_t time2 = -1;
     int64_t durationUs = 0;
-
-    List<sp<ABuffer> >::iterator it = mBuffers.begin();
-    while (it != mBuffers.end()) {
-        const sp<ABuffer> &buffer = *it;
-
-        int64_t timeUs;
-        if (buffer->meta()->findInt64("timeUs", &timeUs)) {
-            if (time1 < 0 || timeUs < time1) {
-                time1 = timeUs;
-            }
-
-            if (time2 < 0 || timeUs > time2) {
-                time2 = timeUs;
-            }
-        } else {
-            // This is a discontinuity, reset everything.
-            durationUs += time2 - time1;
-            time1 = time2 = -1;
-        }
-
-        ++it;
+    for (List<DiscontinuitySegment>::iterator it = mDiscontinuitySegments.begin();
+            it != mDiscontinuitySegments.end();
+            ++it) {
+        const DiscontinuitySegment &seg = *it;
+        // dequeued access units should be a subset of enqueued access units
+        // CHECK(seg.mMaxEnqueTimeUs >= seg.mMaxDequeTimeUs);
+        durationUs += (seg.mMaxEnqueTimeUs - seg.mMaxDequeTimeUs);
     }
 
-    return durationUs + (time2 - time1);
-}
-
-// A cheaper but less precise version of getBufferedDurationUs that we would like to use in
-// LiveSession::dequeueAccessUnit to trigger downwards adaptation.
-int64_t AnotherPacketSource::getEstimatedDurationUs() {
-    Mutex::Autolock autoLock(mLock);
-    if (mBuffers.empty()) {
-        return 0;
-    }
-
-    if (mQueuedDiscontinuityCount > 0) {
-        status_t finalResult;
-        return getBufferedDurationUs_l(&finalResult);
-    }
-
-    List<sp<ABuffer> >::iterator it = mBuffers.begin();
-    sp<ABuffer> buffer = *it;
-
-    int64_t startTimeUs;
-    buffer->meta()->findInt64("timeUs", &startTimeUs);
-    if (startTimeUs < 0) {
-        return 0;
-    }
-
-    it = mBuffers.end();
-    --it;
-    buffer = *it;
-
-    int64_t endTimeUs;
-    buffer->meta()->findInt64("timeUs", &endTimeUs);
-    if (endTimeUs < 0) {
-        return 0;
-    }
-
-    int64_t diffUs;
-    if (endTimeUs > startTimeUs) {
-        diffUs = endTimeUs - startTimeUs;
-    } else {
-        diffUs = startTimeUs - endTimeUs;
-    }
-    return diffUs;
+    return durationUs;
 }
 
 status_t AnotherPacketSource::nextBufferTime(int64_t *timeUs) {
@@ -422,4 +461,171 @@
     return mLatestDequeuedMeta;
 }
 
+void AnotherPacketSource::enable(bool enable) {
+    Mutex::Autolock autoLock(mLock);
+    mEnabled = enable;
+}
+
+/*
+ * returns the sample meta that's delayUs after queue head
+ * (NULL if such sample is unavailable)
+ */
+sp<AMessage> AnotherPacketSource::getMetaAfterLastDequeued(int64_t delayUs) {
+    Mutex::Autolock autoLock(mLock);
+    int64_t firstUs = -1;
+    int64_t lastUs = -1;
+    int64_t durationUs = 0;
+
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+        const sp<ABuffer> &buffer = *it;
+        int32_t discontinuity;
+        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            durationUs += lastUs - firstUs;
+            firstUs = -1;
+            lastUs = -1;
+            continue;
+        }
+        int64_t timeUs;
+        if (buffer->meta()->findInt64("timeUs", &timeUs)) {
+            if (firstUs < 0) {
+                firstUs = timeUs;
+            }
+            if (lastUs < 0 || timeUs > lastUs) {
+                lastUs = timeUs;
+            }
+            if (durationUs + (lastUs - firstUs) >= delayUs) {
+                return buffer->meta();
+            }
+        }
+    }
+    return NULL;
+}
+
+/*
+ * removes samples with time equal to or after meta
+ */
+void AnotherPacketSource::trimBuffersAfterMeta(
+        const sp<AMessage> &meta) {
+    if (meta == NULL) {
+        ALOGW("trimming with NULL meta, ignoring");
+        return;
+    }
+
+    Mutex::Autolock autoLock(mLock);
+    if (mBuffers.empty()) {
+        return;
+    }
+
+    HLSTime stopTime(meta);
+    ALOGV("trimBuffersAfterMeta: discontinuitySeq %d, timeUs %lld",
+            stopTime.mSeq, (long long)stopTime.mTimeUs);
+
+    List<sp<ABuffer> >::iterator it;
+    List<DiscontinuitySegment >::iterator it2;
+    sp<AMessage> newLatestEnqueuedMeta = NULL;
+    int64_t newLastQueuedTimeUs = 0;
+    for (it = mBuffers.begin(), it2 = mDiscontinuitySegments.begin(); it != mBuffers.end(); ++it) {
+        const sp<ABuffer> &buffer = *it;
+        int32_t discontinuity;
+        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            // CHECK(it2 != mDiscontinuitySegments.end());
+            ++it2;
+            continue;
+        }
+
+        HLSTime curTime(buffer->meta());
+        if (!(curTime < stopTime)) {
+            ALOGV("trimming from %lld (inclusive) to end",
+                    (long long)curTime.mTimeUs);
+            break;
+        }
+        newLatestEnqueuedMeta = buffer->meta();
+        newLastQueuedTimeUs = curTime.mTimeUs;
+    }
+
+    mBuffers.erase(it, mBuffers.end());
+    mLatestEnqueuedMeta = newLatestEnqueuedMeta;
+    mLastQueuedTimeUs = newLastQueuedTimeUs;
+
+    DiscontinuitySegment &seg = *it2;
+    if (newLatestEnqueuedMeta != NULL) {
+        seg.mMaxEnqueTimeUs = newLastQueuedTimeUs;
+    } else {
+        seg.clear();
+    }
+    mDiscontinuitySegments.erase(++it2, mDiscontinuitySegments.end());
+}
+
+/*
+ * removes samples with time equal to or before meta;
+ * returns first sample left in the queue.
+ *
+ * (for AVC, if trim happens, the samples left will always start
+ * at next IDR.)
+ */
+sp<AMessage> AnotherPacketSource::trimBuffersBeforeMeta(
+        const sp<AMessage> &meta) {
+    HLSTime startTime(meta);
+    ALOGV("trimBuffersBeforeMeta: discontinuitySeq %d, timeUs %lld",
+            startTime.mSeq, (long long)startTime.mTimeUs);
+
+    sp<AMessage> firstMeta;
+    int64_t firstTimeUs = -1;
+    Mutex::Autolock autoLock(mLock);
+    if (mBuffers.empty()) {
+        return NULL;
+    }
+
+    sp<MetaData> format;
+    bool isAvc = false;
+
+    List<sp<ABuffer> >::iterator it;
+    for (it = mBuffers.begin(); it != mBuffers.end(); ++it) {
+        const sp<ABuffer> &buffer = *it;
+        int32_t discontinuity;
+        if (buffer->meta()->findInt32("discontinuity", &discontinuity)) {
+            mDiscontinuitySegments.erase(mDiscontinuitySegments.begin());
+            // CHECK(!mDiscontinuitySegments.empty());
+            format = NULL;
+            isAvc = false;
+            continue;
+        }
+        if (format == NULL) {
+            sp<RefBase> object;
+            if (buffer->meta()->findObject("format", &object)) {
+                const char* mime;
+                format = static_cast<MetaData*>(object.get());
+                isAvc = format != NULL
+                        && format->findCString(kKeyMIMEType, &mime)
+                        && !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
+            }
+        }
+        if (isAvc && !IsIDR(buffer)) {
+            continue;
+        }
+
+        HLSTime curTime(buffer->meta());
+        if (startTime < curTime) {
+            ALOGV("trimming from beginning to %lld (not inclusive)",
+                    (long long)curTime.mTimeUs);
+            firstMeta = buffer->meta();
+            firstTimeUs = curTime.mTimeUs;
+            break;
+        }
+    }
+    mBuffers.erase(mBuffers.begin(), it);
+    mLatestDequeuedMeta = NULL;
+
+    // CHECK(!mDiscontinuitySegments.empty());
+    DiscontinuitySegment &seg = *mDiscontinuitySegments.begin();
+    if (firstTimeUs >= 0) {
+        seg.mMaxDequeTimeUs = firstTimeUs;
+    } else {
+        seg.clear();
+    }
+
+    return firstMeta;
+}
+
 }  // namespace android
diff --git a/media/libstagefright/mpeg2ts/AnotherPacketSource.h b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
index 809a858..28a0e89 100644
--- a/media/libstagefright/mpeg2ts/AnotherPacketSource.h
+++ b/media/libstagefright/mpeg2ts/AnotherPacketSource.h
@@ -43,14 +43,20 @@
 
     void clear();
 
+    // Returns true if we have any packets including discontinuities
     bool hasBufferAvailable(status_t *finalResult);
 
+    // Returns true if we have packets that are not discontinuities
+    bool hasDataBufferAvailable(status_t *finalResult);
+
+    // Returns the number of available buffers. finalResult is always OK
+    // if this method returns non-0, or the final result if it returns 0.
+    size_t getAvailableBufferCount(status_t *finalResult);
+
     // Returns the difference between the last and the first queued
     // presentation timestamps since the last discontinuity (if any).
     int64_t getBufferedDurationUs(status_t *finalResult);
 
-    int64_t getEstimatedDurationUs();
-
     status_t nextBufferTime(int64_t *timeUs);
 
     void queueAccessUnit(const sp<ABuffer> &buffer);
@@ -63,21 +69,48 @@
     void signalEOS(status_t result);
 
     status_t dequeueAccessUnit(sp<ABuffer> *buffer);
+    void requeueAccessUnit(const sp<ABuffer> &buffer);
 
     bool isFinished(int64_t duration) const;
 
+    void enable(bool enable);
+
     sp<AMessage> getLatestEnqueuedMeta();
     sp<AMessage> getLatestDequeuedMeta();
+    sp<AMessage> getMetaAfterLastDequeued(int64_t delayUs);
+
+    void trimBuffersAfterMeta(const sp<AMessage> &meta);
+    sp<AMessage> trimBuffersBeforeMeta(const sp<AMessage> &meta);
 
 protected:
     virtual ~AnotherPacketSource();
 
 private:
+
+    struct DiscontinuitySegment {
+        int64_t mMaxDequeTimeUs, mMaxEnqueTimeUs;
+        DiscontinuitySegment()
+            : mMaxDequeTimeUs(-1),
+              mMaxEnqueTimeUs(-1) {
+        };
+
+        void clear() {
+            mMaxDequeTimeUs = mMaxEnqueTimeUs = -1;
+        }
+    };
+
+    // Discontinuity segments are consecutive access units between
+    // discontinuity markers. There should always be at least _ONE_
+    // discontinuity segment, hence the various CHECKs in
+    // AnotherPacketSource.cpp for non-empty()-ness.
+    List<DiscontinuitySegment> mDiscontinuitySegments;
+
     Mutex mLock;
     Condition mCondition;
 
     bool mIsAudio;
     bool mIsVideo;
+    bool mEnabled;
     sp<MetaData> mFormat;
     int64_t mLastQueuedTimeUs;
     List<sp<ABuffer> > mBuffers;
@@ -85,10 +118,7 @@
     sp<AMessage> mLatestEnqueuedMeta;
     sp<AMessage> mLatestDequeuedMeta;
 
-    size_t  mQueuedDiscontinuityCount;
-
     bool wasFormatChange(int32_t discontinuityType) const;
-    int64_t getBufferedDurationUs_l(status_t *finalResult);
 
     DISALLOW_EVIL_CONSTRUCTORS(AnotherPacketSource);
 };
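
The DiscontinuitySegment bookkeeping above replaces the old per-buffer scan in getBufferedDurationUs(): each segment tracks the largest enqueued and dequeued timestamps since the previous discontinuity, and the buffered duration is the sum of the per-segment gaps. A standalone sketch of the same arithmetic (plain hypothetical structs, not the class itself):

    #include <stdint.h>
    #include <list>

    // Hypothetical stand-in for AnotherPacketSource::DiscontinuitySegment.
    struct Segment {
        int64_t maxDequeTimeUs = -1;
        int64_t maxEnqueTimeUs = -1;
    };

    static int64_t bufferedDurationUs(const std::list<Segment> &segments) {
        int64_t durationUs = 0;
        for (const Segment &seg : segments) {
            // A freshly created segment contributes 0 (both fields are -1).
            durationUs += seg.maxEnqueTimeUs - seg.maxDequeTimeUs;
        }
        return durationUs;
    }
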
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index 5527df0..36ec367 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -56,6 +56,8 @@
     if (clearFormat) {
         mFormat.clear();
     }
+
+    mEOSReached = false;
 }
 
 // Parse AC3 header assuming the current ptr is start position of syncframe,
@@ -415,13 +417,14 @@
             }
 
             case PCM_AUDIO:
+            case METADATA:
             {
                 break;
             }
 
             default:
-                TRESPASS();
-                break;
+                ALOGE("Unknown mode: %d", mMode);
+                return ERROR_MALFORMED;
         }
     }
 
@@ -499,8 +502,13 @@
             return dequeueAccessUnitMPEG4Video();
         case PCM_AUDIO:
             return dequeueAccessUnitPCMAudio();
+        case METADATA:
+            return dequeueAccessUnitMetadata();
         default:
-            CHECK_EQ((unsigned)mMode, (unsigned)MPEG_AUDIO);
+            if (mMode != MPEG_AUDIO) {
+                ALOGE("Unknown mode");
+                return NULL;
+            }
             return dequeueAccessUnitMPEGAudio();
     }
 }
@@ -537,8 +545,12 @@
     memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
 
     int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
-    CHECK_GE(timeUs, 0ll);
+    if (timeUs < 0ll) {
+        ALOGE("negative timeUs");
+        return NULL;
+    }
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     memmove(
             mBuffer->data(),
@@ -556,15 +568,24 @@
     }
 
     ABitReader bits(mBuffer->data(), 4);
-    CHECK_EQ(bits.getBits(8), 0xa0);
+    if (bits.getBits(8) != 0xa0) {
+        ALOGE("Unexpected bit values");
+        return NULL;
+    }
     unsigned numAUs = bits.getBits(8);
     bits.skipBits(8);
     unsigned quantization_word_length __unused = bits.getBits(2);
     unsigned audio_sampling_frequency = bits.getBits(3);
     unsigned num_channels = bits.getBits(3);
 
-    CHECK_EQ(audio_sampling_frequency, 2);  // 48kHz
-    CHECK_EQ(num_channels, 1u);  // stereo!
+    if (audio_sampling_frequency != 2) {
+        ALOGE("Wrong sampling freq");
+        return NULL;
+    }
+    if (num_channels != 1u) {
+        ALOGE("Wrong channel #");
+        return NULL;
+    }
 
     if (mFormat == NULL) {
         mFormat = new MetaData;
@@ -586,8 +607,12 @@
     memcpy(accessUnit->data(), mBuffer->data() + 4, payloadSize);
 
     int64_t timeUs = fetchTimestamp(payloadSize + 4);
-    CHECK_GE(timeUs, 0ll);
+    if (timeUs < 0ll) {
+        ALOGE("Negative timeUs");
+        return NULL;
+    }
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     int16_t *ptr = (int16_t *)accessUnit->data();
     for (size_t i = 0; i < payloadSize / sizeof(int16_t); ++i) {
@@ -609,22 +634,25 @@
         return NULL;
     }
 
-    CHECK(!mRangeInfos.empty());
+    if (mRangeInfos.empty()) {
+        return NULL;
+    }
 
     const RangeInfo &info = *mRangeInfos.begin();
     if (mBuffer->size() < info.mLength) {
         return NULL;
     }
 
-    CHECK_GE(info.mTimestampUs, 0ll);
+    if (info.mTimestampUs < 0ll) {
+        ALOGE("Negative info.mTimestampUs");
+        return NULL;
+    }
 
     // The idea here is consume all AAC frames starting at offsets before
     // info.mLength so we can assign a meaningful timestamp without
     // having to interpolate.
     // The final AAC frame may well extend into the next RangeInfo but
     // that's ok.
-    // TODO: the logic commented above is skipped because codec cannot take
-    // arbitrary sized input buffers;
     size_t offset = 0;
     while (offset < info.mLength) {
         if (offset + 7 > mBuffer->size()) {
@@ -635,17 +663,26 @@
 
         // adts_fixed_header
 
-        CHECK_EQ(bits.getBits(12), 0xfffu);
+        if (bits.getBits(12) != 0xfffu) {
+            ALOGE("Wrong adts_fixed_header");
+            return NULL;
+        }
         bits.skipBits(3);  // ID, layer
         bool protection_absent __unused = bits.getBits(1) != 0;
 
         if (mFormat == NULL) {
             unsigned profile = bits.getBits(2);
-            CHECK_NE(profile, 3u);
+            if (profile == 3u) {
+                ALOGE("profile should not be 3");
+                return NULL;
+            }
             unsigned sampling_freq_index = bits.getBits(4);
             bits.getBits(1);  // private_bit
             unsigned channel_configuration = bits.getBits(3);
-            CHECK_NE(channel_configuration, 0u);
+            if (channel_configuration == 0u) {
+                ALOGE("channel_config should not be 0");
+                return NULL;
+            }
             bits.skipBits(2);  // original_copy, home
 
             mFormat = MakeAACCodecSpecificData(
@@ -655,8 +692,14 @@
 
             int32_t sampleRate;
             int32_t numChannels;
-            CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
-            CHECK(mFormat->findInt32(kKeyChannelCount, &numChannels));
+            if (!mFormat->findInt32(kKeySampleRate, &sampleRate)) {
+                ALOGE("SampleRate not found");
+                return NULL;
+            }
+            if (!mFormat->findInt32(kKeyChannelCount, &numChannels)) {
+                ALOGE("ChannelCount not found");
+                return NULL;
+            }
 
             ALOGI("found AAC codec config (%d Hz, %d channels)",
                  sampleRate, numChannels);
@@ -679,7 +722,8 @@
 
         if (number_of_raw_data_blocks_in_frame != 0) {
             // To be implemented.
-            TRESPASS();
+            ALOGE("Should not reach here.");
+            return NULL;
         }
 
         if (offset + aac_frame_length > mBuffer->size()) {
@@ -689,12 +733,9 @@
         size_t headerSize __unused = protection_absent ? 7 : 9;
 
         offset += aac_frame_length;
-        // TODO: move back to concatenation when codec can support arbitrary input buffers.
-        // For now only queue a single buffer
-        break;
     }
 
-    int64_t timeUs = fetchTimestampAAC(offset);
+    int64_t timeUs = fetchTimestamp(offset);
 
     sp<ABuffer> accessUnit = new ABuffer(offset);
     memcpy(accessUnit->data(), mBuffer->data(), offset);
@@ -704,6 +745,7 @@
     mBuffer->setRange(0, mBuffer->size() - offset);
 
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     return accessUnit;
 }
@@ -713,7 +755,9 @@
     bool first = true;
 
     while (size > 0) {
-        CHECK(!mRangeInfos.empty());
+        if (mRangeInfos.empty()) {
+            return timeUs;
+        }
 
         RangeInfo *info = &*mRangeInfos.begin();
 
@@ -741,50 +785,6 @@
     return timeUs;
 }
 
-// TODO: avoid interpolating timestamps once codec supports arbitrary sized input buffers
-int64_t ElementaryStreamQueue::fetchTimestampAAC(size_t size) {
-    int64_t timeUs = -1;
-    bool first = true;
-
-    size_t samplesize = size;
-    while (size > 0) {
-        CHECK(!mRangeInfos.empty());
-
-        RangeInfo *info = &*mRangeInfos.begin();
-
-        if (first) {
-            timeUs = info->mTimestampUs;
-            first = false;
-        }
-
-        if (info->mLength > size) {
-            int32_t sampleRate;
-            CHECK(mFormat->findInt32(kKeySampleRate, &sampleRate));
-            info->mLength -= size;
-            size_t numSamples = 1024 * size / samplesize;
-            info->mTimestampUs += numSamples * 1000000ll / sampleRate;
-            size = 0;
-        } else {
-            size -= info->mLength;
-
-            mRangeInfos.erase(mRangeInfos.begin());
-            info = NULL;
-        }
-
-    }
-
-    if (timeUs == 0ll) {
-        ALOGV("Returning 0 timestamp");
-    }
-
-    return timeUs;
-}
-
-struct NALPosition {
-    size_t nalOffset;
-    size_t nalSize;
-};
-
 sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitH264() {
     const uint8_t *data = mBuffer->data();
 
@@ -792,11 +792,13 @@
     Vector<NALPosition> nals;
 
     size_t totalSize = 0;
+    size_t seiCount = 0;
 
     status_t err;
     const uint8_t *nalStart;
     size_t nalSize;
     bool foundSlice = false;
+    bool foundIDR = false;
     while ((err = getNextNALUnit(&data, &size, &nalStart, &nalSize)) == OK) {
         if (nalSize == 0) continue;
 
@@ -804,6 +806,9 @@
         bool flush = false;
 
         if (nalType == 1 || nalType == 5) {
+            if (nalType == 5) {
+                foundIDR = true;
+            }
             if (foundSlice) {
                 ABitReader br(nalStart + 1, nalSize);
                 unsigned first_mb_in_slice = parseUE(&br);
@@ -821,6 +826,9 @@
             // next frame.
 
             flush = true;
+        } else if (nalType == 6 && nalSize > 0) {
+            // found non-zero sized SEI
+            ++seiCount;
         }
 
         if (flush) {
@@ -829,21 +837,32 @@
 
             size_t auSize = 4 * nals.size() + totalSize;
             sp<ABuffer> accessUnit = new ABuffer(auSize);
+            sp<ABuffer> sei;
+
+            if (seiCount > 0) {
+                sei = new ABuffer(seiCount * sizeof(NALPosition));
+                accessUnit->meta()->setBuffer("sei", sei);
+            }
 
 #if !LOG_NDEBUG
             AString out;
 #endif
 
             size_t dstOffset = 0;
+            size_t seiIndex = 0;
             for (size_t i = 0; i < nals.size(); ++i) {
                 const NALPosition &pos = nals.itemAt(i);
 
                 unsigned nalType = mBuffer->data()[pos.nalOffset] & 0x1f;
 
-                if (nalType == 6) {
-                    sp<ABuffer> sei = new ABuffer(pos.nalSize);
-                    memcpy(sei->data(), mBuffer->data() + pos.nalOffset, pos.nalSize);
-                    accessUnit->meta()->setBuffer("sei", sei);
+                if (nalType == 6 && pos.nalSize > 0) {
+                    if (seiIndex >= sei->size() / sizeof(NALPosition)) {
+                        ALOGE("Wrong seiIndex");
+                        return NULL;
+                    }
+                    NALPosition &seiPos = ((NALPosition *)sei->data())[seiIndex++];
+                    seiPos.nalOffset = dstOffset + 4;
+                    seiPos.nalSize = pos.nalSize;
                 }
 
 #if !LOG_NDEBUG
@@ -878,9 +897,15 @@
             mBuffer->setRange(0, mBuffer->size() - nextScan);
 
             int64_t timeUs = fetchTimestamp(nextScan);
-            CHECK_GE(timeUs, 0ll);
+            if (timeUs < 0ll) {
+                ALOGE("Negative timeUs");
+                return NULL;
+            }
 
             accessUnit->meta()->setInt64("timeUs", timeUs);
+            if (foundIDR) {
+                accessUnit->meta()->setInt32("isSync", 1);
+            }
 
             if (mFormat == NULL) {
                 mFormat = MakeAVCCodecSpecificData(accessUnit);
@@ -897,7 +922,10 @@
 
         totalSize += nalSize;
     }
-    CHECK_EQ(err, (status_t)-EAGAIN);
+    if (err != (status_t)-EAGAIN) {
+        ALOGE("Unexpected err");
+        return NULL;
+    }
 
     return NULL;
 }
@@ -914,9 +942,12 @@
 
     size_t frameSize;
     int samplingRate, numChannels, bitrate, numSamples;
-    CHECK(GetMPEGAudioFrameSize(
+    if (!GetMPEGAudioFrameSize(
                 header, &frameSize, &samplingRate, &numChannels,
-                &bitrate, &numSamples));
+                &bitrate, &numSamples)) {
+        ALOGE("Failed to get audio frame size");
+        return NULL;
+    }
 
     if (size < frameSize) {
         return NULL;
@@ -934,9 +965,13 @@
     mBuffer->setRange(0, mBuffer->size() - frameSize);
 
     int64_t timeUs = fetchTimestamp(frameSize);
-    CHECK_GE(timeUs, 0ll);
+    if (timeUs < 0ll) {
+        ALOGE("Negative timeUs");
+        return NULL;
+    }
 
     accessUnit->meta()->setInt64("timeUs", timeUs);
+    accessUnit->meta()->setInt32("isSync", 1);
 
     if (mFormat == NULL) {
         mFormat = new MetaData;
@@ -955,7 +990,7 @@
                         kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_MPEG);
                 break;
             default:
-                TRESPASS();
+                return NULL;
         }
 
         mFormat->setInt32(kKeySampleRate, samplingRate);
@@ -966,7 +1001,10 @@
 }
 
 static void EncodeSize14(uint8_t **_ptr, size_t size) {
-    CHECK_LE(size, 0x3fff);
+    if (size > 0x3fff) {
+        ALOGE("Wrong size");
+        return;
+    }
 
     uint8_t *ptr = *_ptr;
 
@@ -1013,6 +1051,9 @@
     int pprevStartCode = -1;
     int prevStartCode = -1;
     int currentStartCode = -1;
+    bool gopFound = false;
+    bool isClosedGop = false;
+    bool brokenLink = false;
 
     size_t offset = 0;
     while (offset + 3 < size) {
@@ -1038,7 +1079,10 @@
             // seqHeader without/with extension
 
             if (mFormat == NULL) {
-                CHECK_GE(size, 7u);
+                if (size < 7u) {
+                    ALOGE("Size too small");
+                    return NULL;
+                }
 
                 unsigned width =
                     (data[4] << 4) | data[5] >> 4;
@@ -1075,6 +1119,13 @@
             }
         }
 
+        if (mFormat != NULL && currentStartCode == 0xb8) {
+            // GOP layer
+            gopFound = true;
+            isClosedGop = (data[offset + 7] & 0x40) != 0;
+            brokenLink = (data[offset + 7] & 0x20) != 0;
+        }
+
         if (mFormat != NULL && currentStartCode == 0x00) {
             // Picture start
 
@@ -1091,11 +1142,17 @@
                 mBuffer->setRange(0, mBuffer->size() - offset);
 
                 int64_t timeUs = fetchTimestamp(offset);
-                CHECK_GE(timeUs, 0ll);
+                if (timeUs < 0ll) {
+                    ALOGE("Negative timeUs");
+                    return NULL;
+                }
 
                 offset = 0;
 
                 accessUnit->meta()->setInt64("timeUs", timeUs);
+                if (gopFound && (!brokenLink || isClosedGop)) {
+                    accessUnit->meta()->setInt32("isSync", 1);
+                }
 
                 ALOGV("returning MPEG video access unit at time %" PRId64 " us",
                       timeUs);
@@ -1121,7 +1178,7 @@
     }
 
     if (memcmp(kStartCode, data, 3)) {
-        TRESPASS();
+        return -EAGAIN;
     }
 
     size_t offset = 3;
@@ -1181,25 +1238,37 @@
 
             case EXPECT_VISUAL_OBJECT_START:
             {
-                CHECK_EQ(chunkType, 0xb5);
+                if (chunkType != 0xb5) {
+                    ALOGE("Unexpected chunkType");
+                    return NULL;
+                }
                 state = EXPECT_VO_START;
                 break;
             }
 
             case EXPECT_VO_START:
             {
-                CHECK_LE(chunkType, 0x1f);
+                if (chunkType > 0x1f) {
+                    ALOGE("Unexpected chunkType");
+                    return NULL;
+                }
                 state = EXPECT_VOL_START;
                 break;
             }
 
             case EXPECT_VOL_START:
             {
-                CHECK((chunkType & 0xf0) == 0x20);
+                if ((chunkType & 0xf0) != 0x20) {
+                    ALOGE("Wrong chunkType");
+                    return NULL;
+                }
 
-                CHECK(ExtractDimensionsFromVOLHeader(
+                if (!ExtractDimensionsFromVOLHeader(
                             &data[offset], chunkSize,
-                            &width, &height));
+                            &width, &height)) {
+                    ALOGE("Failed to get dimension");
+                    return NULL;
+                }
 
                 state = WAIT_FOR_VOP_START;
                 break;
@@ -1240,6 +1309,8 @@
             case SKIP_TO_VOP_START:
             {
                 if (chunkType == 0xb6) {
+                    int vopCodingType = (data[offset + 4] & 0xc0) >> 6;
+
                     offset += chunkSize;
 
                     sp<ABuffer> accessUnit = new ABuffer(offset);
@@ -1250,11 +1321,17 @@
                     mBuffer->setRange(0, size);
 
                     int64_t timeUs = fetchTimestamp(offset);
-                    CHECK_GE(timeUs, 0ll);
+                    if (timeUs < 0ll) {
+                        ALOGE("Negative timeUs");
+                        return NULL;
+                    }
 
                     offset = 0;
 
                     accessUnit->meta()->setInt64("timeUs", timeUs);
+                    if (vopCodingType == 0) {  // intra-coded VOP
+                        accessUnit->meta()->setInt32("isSync", 1);
+                    }
 
                     ALOGV("returning MPEG4 video access unit at time %" PRId64 " us",
                          timeUs);
@@ -1271,7 +1348,8 @@
             }
 
             default:
-                TRESPASS();
+                ALOGE("Unknown state: %d", state);
+                return NULL;
         }
 
         if (discard) {
@@ -1300,5 +1378,25 @@
     }
 }
 
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitMetadata() {
+    size_t size = mBuffer->size();
+    if (!size) {
+        return NULL;
+    }
+
+    sp<ABuffer> accessUnit = new ABuffer(size);
+    int64_t timeUs = fetchTimestamp(size);
+    accessUnit->meta()->setInt64("timeUs", timeUs);
+
+    memcpy(accessUnit->data(), mBuffer->data(), size);
+    mBuffer->setRange(0, 0);
+
+    if (mFormat == NULL) {
+        mFormat = new MetaData;
+        mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_DATA_TIMED_ID3);
+    }
+
+    return accessUnit;
+}
 
 }  // namespace android
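
The AAC path in ESQueue.cpp above advances through the buffer one ADTS frame at a time using the header fields it parses. For reference, a standalone sketch of the fixed/variable header layout it consumes (hypothetical helper, not part of the patch):

    #include <stddef.h>
    #include <stdint.h>

    // Returns aac_frame_length for an ADTS frame starting at h, or 0 when the
    // 0xFFF syncword is missing or fewer than 7 header bytes are available.
    static size_t adtsFrameLength(const uint8_t *h, size_t size) {
        if (size < 7 || h[0] != 0xff || (h[1] & 0xf0) != 0xf0) {
            return 0;
        }
        // protection_absent   = h[1] & 0x01 (9-byte header when a CRC follows)
        // profile             = (h[2] >> 6) & 0x03   (3 is rejected above)
        // sampling_freq_index = (h[2] >> 2) & 0x0f
        // channel_config      = ((h[2] & 0x01) << 2) | (h[3] >> 6)
        return ((size_t)(h[3] & 0x03) << 11)
                | ((size_t)h[4] << 3)
                | ((size_t)h[5] >> 5);
    }
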
diff --git a/media/libstagefright/mpeg2ts/ESQueue.h b/media/libstagefright/mpeg2ts/ESQueue.h
index a6d812f..e9f96b7 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.h
+++ b/media/libstagefright/mpeg2ts/ESQueue.h
@@ -37,6 +37,7 @@
         MPEG_VIDEO,
         MPEG4_VIDEO,
         PCM_AUDIO,
+        METADATA,
     };
 
     enum Flags {
@@ -75,11 +76,11 @@
     sp<ABuffer> dequeueAccessUnitMPEGVideo();
     sp<ABuffer> dequeueAccessUnitMPEG4Video();
     sp<ABuffer> dequeueAccessUnitPCMAudio();
+    sp<ABuffer> dequeueAccessUnitMetadata();
 
     // consume a logical (compressed) access unit of size "size",
     // returns its timestamp in us (or -1 if no time information).
     int64_t fetchTimestamp(size_t size);
-    int64_t fetchTimestampAAC(size_t size);
 
     DISALLOW_EVIL_CONSTRUCTORS(ElementaryStreamQueue);
 };
diff --git a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
index 85859f7..6d9fe9d 100644
--- a/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2PSExtractor.cpp
@@ -265,7 +265,10 @@
     }
 
     unsigned PES_packet_length = U16_AT(mBuffer->data() + 4);
-    CHECK_NE(PES_packet_length, 0u);
+    if (PES_packet_length == 0u) {
+        ALOGE("PES_packet_length is 0");
+        return -EAGAIN;
+    }
 
     size_t n = PES_packet_length + 6;
 
@@ -286,7 +289,10 @@
         return ERROR_MALFORMED;
     }
 
-    CHECK_EQ(packet_startcode_prefix, 0x000001u);
+    if (packet_startcode_prefix != 0x000001u) {
+        ALOGE("Wrong PES prefix");
+        return ERROR_MALFORMED;
+    }
 
     unsigned stream_id = br.getBits(8);
     ALOGV("stream_id = 0x%02x", stream_id);
@@ -366,8 +372,7 @@
             && stream_id != 0xff  // program_stream_directory
             && stream_id != 0xf2  // DSMCC
             && stream_id != 0xf8) {  // H.222.1 type E
-        CHECK_EQ(br.getBits(2), 2u);
-
+        /* unsigned PES_marker_bits = */br.getBits(2);  // should be 0x2
         /* unsigned PES_scrambling_control = */br.getBits(2);
         /* unsigned PES_priority = */br.getBits(1);
         /* unsigned data_alignment_indicator = */br.getBits(1);
@@ -400,16 +405,26 @@
         uint64_t PTS = 0, DTS = 0;
 
         if (PTS_DTS_flags == 2 || PTS_DTS_flags == 3) {
-            CHECK_GE(optional_bytes_remaining, 5u);
+            if (optional_bytes_remaining < 5u) {
+                return ERROR_MALFORMED;
+            }
 
-            CHECK_EQ(br.getBits(4), PTS_DTS_flags);
+            if (br.getBits(4) != PTS_DTS_flags) {
+                return ERROR_MALFORMED;
+            }
 
             PTS = ((uint64_t)br.getBits(3)) << 30;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= ((uint64_t)br.getBits(15)) << 15;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             PTS |= br.getBits(15);
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("PTS = %" PRIu64, PTS);
             // ALOGI("PTS = %.2f secs", PTS / 90000.0f);
@@ -417,16 +432,26 @@
             optional_bytes_remaining -= 5;
 
             if (PTS_DTS_flags == 3) {
-                CHECK_GE(optional_bytes_remaining, 5u);
+                if (optional_bytes_remaining < 5u) {
+                    return ERROR_MALFORMED;
+                }
 
-                CHECK_EQ(br.getBits(4), 1u);
+                if (br.getBits(4) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 DTS = ((uint64_t)br.getBits(3)) << 30;
-                CHECK_EQ(br.getBits(1), 1u);
+                if (br.getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= ((uint64_t)br.getBits(15)) << 15;
-                CHECK_EQ(br.getBits(1), 1u);
+                if (br.getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
                 DTS |= br.getBits(15);
-                CHECK_EQ(br.getBits(1), 1u);
+                if (br.getBits(1) != 1u) {
+                    return ERROR_MALFORMED;
+                }
 
                 ALOGV("DTS = %" PRIu64, DTS);
 
@@ -435,40 +460,62 @@
         }
 
         if (ESCR_flag) {
-            CHECK_GE(optional_bytes_remaining, 6u);
+            if (optional_bytes_remaining < 6u) {
+                return ERROR_MALFORMED;
+            }
 
             br.getBits(2);
 
             uint64_t ESCR = ((uint64_t)br.getBits(3)) << 30;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= ((uint64_t)br.getBits(15)) << 15;
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             ESCR |= br.getBits(15);
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             ALOGV("ESCR = %" PRIu64, ESCR);
             /* unsigned ESCR_extension = */br.getBits(9);
 
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 6;
         }
 
         if (ES_rate_flag) {
-            CHECK_GE(optional_bytes_remaining, 3u);
+            if (optional_bytes_remaining < 3u) {
+                return ERROR_MALFORMED;
+            }
 
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
             /* unsigned ES_rate = */br.getBits(22);
-            CHECK_EQ(br.getBits(1), 1u);
+            if (br.getBits(1) != 1u) {
+                return ERROR_MALFORMED;
+            }
 
             optional_bytes_remaining -= 3;
         }
 
+        if (br.numBitsLeft() < optional_bytes_remaining * 8) {
+            return ERROR_MALFORMED;
+        }
+
         br.skipBits(optional_bytes_remaining * 8);
 
         // ES data follows.
 
-        CHECK_GE(PES_packet_length, PES_header_data_length + 3);
+        if (PES_packet_length < PES_header_data_length + 3) {
+            return ERROR_MALFORMED;
+        }
 
         unsigned dataLength =
             PES_packet_length - 3 - PES_header_data_length;
@@ -481,7 +528,9 @@
             return ERROR_MALFORMED;
         }
 
-        CHECK_GE(br.numBitsLeft(), dataLength * 8);
+        if (br.numBitsLeft() < dataLength * 8) {
+            return ERROR_MALFORMED;
+        }
 
         ssize_t index = mTracks.indexOfKey(stream_id);
         if (index < 0 && mScanning) {
@@ -521,10 +570,14 @@
             return err;
         }
     } else if (stream_id == 0xbe) {  // padding_stream
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br.skipBits(PES_packet_length * 8);
     } else {
-        CHECK_NE(PES_packet_length, 0u);
+        if (PES_packet_length == 0u) {
+            return ERROR_MALFORMED;
+        }
         br.skipBits(PES_packet_length * 8);
     }
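
The PTS/DTS handling above now returns ERROR_MALFORMED instead of CHECK-aborting when a marker bit is missing. A standalone sketch of the same 33-bit PTS assembly from the 5-byte PES header field (hypothetical helper; values are 90 kHz ticks):

    #include <stdint.h>

    // Layout: '001x' + PTS[32..30] + marker + PTS[29..15] + marker
    //         + PTS[14..0] + marker, packed into 5 bytes.
    // Returns false when any marker bit is 0 (the malformed case above).
    static bool parsePts(const uint8_t p[5], uint64_t *ptsOut) {
        if ((p[0] & 0x01) == 0 || (p[2] & 0x01) == 0 || (p[4] & 0x01) == 0) {
            return false;
        }
        *ptsOut = ((uint64_t)((p[0] >> 1) & 0x07) << 30)   // PTS[32..30]
                | ((uint64_t)p[1] << 22)                   // PTS[29..22]
                | ((uint64_t)(p[2] >> 1) << 15)            // PTS[21..15]
                | ((uint64_t)p[3] << 7)                    // PTS[14..7]
                | ((uint64_t)(p[4] >> 1));                 // PTS[6..0]
        // timeUs would then be (PTS * 100) / 9.
        return true;
    }
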
 
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 74cb5d8..cbe9673 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -16,17 +16,22 @@
 
 //#define LOG_NDEBUG 0
 #define LOG_TAG "MPEG2TSExtractor"
+
+#include <inttypes.h>
 #include <utils/Log.h>
 
 #include "include/MPEG2TSExtractor.h"
 #include "include/NuCachedSource2.h"
 
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALooper.h>
 #include <media/stagefright/DataSource.h>
 #include <media/stagefright/MediaDefs.h>
 #include <media/stagefright/MediaErrors.h>
 #include <media/stagefright/MediaSource.h>
 #include <media/stagefright/MetaData.h>
+#include <media/IStreamSource.h>
 #include <utils/String8.h>
 
 #include "AnotherPacketSource.h"
@@ -40,7 +45,7 @@
     MPEG2TSSource(
             const sp<MPEG2TSExtractor> &extractor,
             const sp<AnotherPacketSource> &impl,
-            bool seekable);
+            bool doesSeek);
 
     virtual status_t start(MetaData *params = NULL);
     virtual status_t stop();
@@ -54,8 +59,8 @@
     sp<AnotherPacketSource> mImpl;
 
     // If there are both audio and video streams, only the video stream
-    // will be seekable, otherwise the single stream will be seekable.
-    bool mSeekable;
+    // will signal seek on the extractor; otherwise the single stream will seek.
+    bool mDoesSeek;
 
     DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSSource);
 };
@@ -63,10 +68,10 @@
 MPEG2TSSource::MPEG2TSSource(
         const sp<MPEG2TSExtractor> &extractor,
         const sp<AnotherPacketSource> &impl,
-        bool seekable)
+        bool doesSeek)
     : mExtractor(extractor),
       mImpl(impl),
-      mSeekable(seekable) {
+      mDoesSeek(doesSeek) {
 }
 
 status_t MPEG2TSSource::start(MetaData *params) {
@@ -85,27 +90,18 @@
         MediaBuffer **out, const ReadOptions *options) {
     *out = NULL;
 
-    status_t finalResult;
-    while (!mImpl->hasBufferAvailable(&finalResult)) {
-        if (finalResult != OK) {
-            return ERROR_END_OF_STREAM;
-        }
-
-        status_t err = mExtractor->feedMore();
+    int64_t seekTimeUs;
+    ReadOptions::SeekMode seekMode;
+    if (mDoesSeek && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
+        // seek is needed
+        status_t err = mExtractor->seek(seekTimeUs, seekMode);
         if (err != OK) {
-            mImpl->signalEOS(err);
+            return err;
         }
     }
 
-    int64_t seekTimeUs;
-    ReadOptions::SeekMode seekMode;
-    if (mSeekable && options && options->getSeekTo(&seekTimeUs, &seekMode)) {
-        // A seek was requested, but we don't actually support seeking and so can only "seek" to
-        // the current position
-        int64_t nextBufTimeUs;
-        if (mImpl->nextBufferTime(&nextBufTimeUs) != OK || seekTimeUs != nextBufTimeUs) {
-            return ERROR_UNSUPPORTED;
-        }
+    if (mExtractor->feedUntilBufferAvailable(mImpl) != OK) {
+        return ERROR_END_OF_STREAM;
     }
 
     return mImpl->read(out, options);
@@ -129,20 +125,10 @@
         return NULL;
     }
 
-    bool seekable = true;
-    if (mSourceImpls.size() > 1) {
-        CHECK_EQ(mSourceImpls.size(), 2u);
-
-        sp<MetaData> meta = mSourceImpls.editItemAt(index)->getFormat();
-        const char *mime;
-        CHECK(meta->findCString(kKeyMIMEType, &mime));
-
-        if (!strncasecmp("audio/", mime, 6)) {
-            seekable = false;
-        }
-    }
-
-    return new MPEG2TSSource(this, mSourceImpls.editItemAt(index), seekable);
+    // The seek reference track (video if present; audio otherwise) performs
+    // seek requests, while the other tracks ignore them.
+    return new MPEG2TSSource(this, mSourceImpls.editItemAt(index),
+            (mSeekSyncPoints == &mSyncPoints.editItemAt(index)));
 }
 
 sp<MetaData> MPEG2TSExtractor::getTrackMetaData(
@@ -161,7 +147,7 @@
 void MPEG2TSExtractor::init() {
     bool haveAudio = false;
     bool haveVideo = false;
-    int numPacketsParsed = 0;
+    int64_t startTime = ALooper::GetNowUs();
 
     while (feedMore() == OK) {
         if (haveAudio && haveVideo) {
@@ -175,6 +161,8 @@
             if (impl != NULL) {
                 haveVideo = true;
                 mSourceImpls.push(impl);
+                mSyncPoints.push();
+                mSeekSyncPoints = &mSyncPoints.editTop();
             }
         }
 
@@ -186,15 +174,75 @@
             if (impl != NULL) {
                 haveAudio = true;
                 mSourceImpls.push(impl);
+                mSyncPoints.push();
+                if (!haveVideo) {
+                    mSeekSyncPoints = &mSyncPoints.editTop();
+                }
             }
         }
 
-        if (++numPacketsParsed > 10000) {
+        // Wait only for 2 seconds to detect audio/video streams.
+        if (ALooper::GetNowUs() - startTime > 2000000ll) {
             break;
         }
     }
 
-    ALOGI("haveAudio=%d, haveVideo=%d", haveAudio, haveVideo);
+    off64_t size;
+    if (mDataSource->getSize(&size) == OK && (haveAudio || haveVideo)) {
+        sp<AnotherPacketSource> impl = haveVideo
+                ? (AnotherPacketSource *)mParser->getSource(
+                        ATSParser::VIDEO).get()
+                : (AnotherPacketSource *)mParser->getSource(
+                        ATSParser::AUDIO).get();
+        size_t prevSyncSize = 1;
+        int64_t durationUs = -1;
+        List<int64_t> durations;
+        // Estimate the duration; keep feeding until recent estimates agree to within 500ms.
+        while (feedMore() == OK
+                && ALooper::GetNowUs() - startTime <= 2000000ll) {
+            if (mSeekSyncPoints->size() > prevSyncSize) {
+                prevSyncSize = mSeekSyncPoints->size();
+                int64_t diffUs = mSeekSyncPoints->keyAt(prevSyncSize - 1)
+                        - mSeekSyncPoints->keyAt(0);
+                off64_t diffOffset = mSeekSyncPoints->valueAt(prevSyncSize - 1)
+                        - mSeekSyncPoints->valueAt(0);
+                durationUs = size * diffUs / diffOffset;
+                durations.push_back(durationUs);
+                if (durations.size() > 5) {
+                    durations.erase(durations.begin());
+                    int64_t min = *durations.begin();
+                    int64_t max = *durations.begin();
+                    for (List<int64_t>::iterator i = durations.begin();
+                            i != durations.end();
+                            ++i) {
+                        if (min > *i) {
+                            min = *i;
+                        }
+                        if (max < *i) {
+                            max = *i;
+                        }
+                    }
+                    if (max - min < 500 * 1000) {
+                        break;
+                    }
+                }
+            }
+        }
+        status_t err;
+        int64_t bufferedDurationUs;
+        bufferedDurationUs = impl->getBufferedDurationUs(&err);
+        if (err == ERROR_END_OF_STREAM) {
+            durationUs = bufferedDurationUs;
+        }
+        if (durationUs > 0) {
+            const sp<MetaData> meta = impl->getFormat();
+            meta->setInt64(kKeyDuration, durationUs);
+            impl->setFormat(meta);
+        }
+    }
+
+    ALOGI("haveAudio=%d, haveVideo=%d, elapsedTime=%" PRId64,
+            haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
 }
 
 status_t MPEG2TSExtractor::feedMore() {
@@ -210,12 +258,206 @@
         return (n < 0) ? (status_t)n : ERROR_END_OF_STREAM;
     }
 
+    ATSParser::SyncEvent event(mOffset);
     mOffset += n;
-    return mParser->feedTSPacket(packet, kTSPacketSize);
+    status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
+    if (event.isInit()) {
+        for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+            if (mSourceImpls[i].get() == event.getMediaSource().get()) {
+                KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
+                syncPoints->add(event.getTimeUs(), event.getOffset());
+                // Keep the sync point table to at most ~5MB per track.
+                size_t size = syncPoints->size();
+                if (size >= 327680) {
+                    int64_t firstTimeUs = syncPoints->keyAt(0);
+                    int64_t lastTimeUs = syncPoints->keyAt(size - 1);
+                    if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
+                        syncPoints->removeItemsAt(0, 4096);
+                    } else {
+                        syncPoints->removeItemsAt(size - 4096, 4096);
+                    }
+                }
+                break;
+            }
+        }
+    }
+    return err;
 }
 
 uint32_t MPEG2TSExtractor::flags() const {
-    return CAN_PAUSE;
+    return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
+}
+
+status_t MPEG2TSExtractor::seek(int64_t seekTimeUs,
+        const MediaSource::ReadOptions::SeekMode &seekMode) {
+    if (mSeekSyncPoints == NULL || mSeekSyncPoints->isEmpty()) {
+        ALOGW("No sync point to seek to.");
+        // ... and therefore we have nothing useful to do here.
+        return OK;
+    }
+
+    // Determine whether we're seeking beyond the known area.
+    bool shouldSeekBeyond =
+            (seekTimeUs > mSeekSyncPoints->keyAt(mSeekSyncPoints->size() - 1));
+
+    // Determine the sync point to seek.
+    size_t index = 0;
+    for (; index < mSeekSyncPoints->size(); ++index) {
+        int64_t timeUs = mSeekSyncPoints->keyAt(index);
+        if (timeUs > seekTimeUs) {
+            break;
+        }
+    }
+
+    switch (seekMode) {
+        case MediaSource::ReadOptions::SEEK_NEXT_SYNC:
+            if (index == mSeekSyncPoints->size()) {
+                ALOGW("Next sync not found; starting from the latest sync.");
+                --index;
+            }
+            break;
+        case MediaSource::ReadOptions::SEEK_CLOSEST_SYNC:
+        case MediaSource::ReadOptions::SEEK_CLOSEST:
+            ALOGW("seekMode not supported: %d; falling back to PREVIOUS_SYNC",
+                    seekMode);
+            // fall-through
+        case MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC:
+            if (index == 0) {
+                ALOGW("Previous sync not found; starting from the earliest "
+                        "sync.");
+            } else {
+                --index;
+            }
+            break;
+    }
+    if (!shouldSeekBeyond || mOffset <= mSeekSyncPoints->valueAt(index)) {
+        int64_t actualSeekTimeUs = mSeekSyncPoints->keyAt(index);
+        mOffset = mSeekSyncPoints->valueAt(index);
+        status_t err = queueDiscontinuityForSeek(actualSeekTimeUs);
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    if (shouldSeekBeyond) {
+        status_t err = seekBeyond(seekTimeUs);
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    // Fast-forward to sync frame.
+    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+        const sp<AnotherPacketSource> &impl = mSourceImpls[i];
+        status_t err;
+        feedUntilBufferAvailable(impl);
+        while (impl->hasBufferAvailable(&err)) {
+            sp<AMessage> meta = impl->getMetaAfterLastDequeued(0);
+            sp<ABuffer> buffer;
+            if (meta == NULL) {
+                return UNKNOWN_ERROR;
+            }
+            int32_t sync;
+            if (meta->findInt32("isSync", &sync) && sync) {
+                break;
+            }
+            err = impl->dequeueAccessUnit(&buffer);
+            if (err != OK) {
+                return err;
+            }
+            feedUntilBufferAvailable(impl);
+        }
+    }
+
+    return OK;
+}
+
+status_t MPEG2TSExtractor::queueDiscontinuityForSeek(int64_t actualSeekTimeUs) {
+    // Signal discontinuity
+    sp<AMessage> extra(new AMessage);
+    extra->setInt64(IStreamListener::kKeyMediaTimeUs, actualSeekTimeUs);
+    mParser->signalDiscontinuity(ATSParser::DISCONTINUITY_TIME, extra);
+
+    // After the discontinuity, each impl should contain only discontinuity
+    // buffers, the last being the one we just queued. Dequeue them all here.
+    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+        const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
+        sp<ABuffer> buffer;
+        status_t err;
+        while (impl->hasBufferAvailable(&err)) {
+            if (err != OK) {
+                return err;
+            }
+            err = impl->dequeueAccessUnit(&buffer);
+            // If the source contains anything but a discontinuity, that's
+            // a programming mistake.
+            CHECK(err == INFO_DISCONTINUITY);
+        }
+    }
+
+    // Feed until we have a buffer for each source.
+    for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+        const sp<AnotherPacketSource> &impl = mSourceImpls.itemAt(i);
+        sp<ABuffer> buffer;
+        status_t err = feedUntilBufferAvailable(impl);
+        if (err != OK) {
+            return err;
+        }
+    }
+
+    return OK;
+}
+
+status_t MPEG2TSExtractor::seekBeyond(int64_t seekTimeUs) {
+    // If we're seeking beyond the region we know about, keep reading until we reach it.
+    size_t syncPointsSize = mSeekSyncPoints->size();
+
+    while (seekTimeUs > mSeekSyncPoints->keyAt(
+            mSeekSyncPoints->size() - 1)) {
+        status_t err;
+        if (syncPointsSize < mSeekSyncPoints->size()) {
+            syncPointsSize = mSeekSyncPoints->size();
+            int64_t syncTimeUs = mSeekSyncPoints->keyAt(syncPointsSize - 1);
+            // Dequeue buffers that precede the sync point so that the cache
+            // doesn't build up too much.
+            sp<ABuffer> buffer;
+            for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+                const sp<AnotherPacketSource> &impl = mSourceImpls[i];
+                int64_t timeUs;
+                while ((err = impl->nextBufferTime(&timeUs)) == OK) {
+                    if (timeUs < syncTimeUs) {
+                        impl->dequeueAccessUnit(&buffer);
+                    } else {
+                        break;
+                    }
+                }
+                if (err != OK && err != -EWOULDBLOCK) {
+                    return err;
+                }
+            }
+        }
+        if (feedMore() != OK) {
+            return ERROR_END_OF_STREAM;
+        }
+    }
+
+    return OK;
+}
+
+status_t MPEG2TSExtractor::feedUntilBufferAvailable(
+        const sp<AnotherPacketSource> &impl) {
+    status_t finalResult;
+    while (!impl->hasBufferAvailable(&finalResult)) {
+        if (finalResult != OK) {
+            return finalResult;
+        }
+
+        status_t err = feedMore();
+        if (err != OK) {
+            impl->signalEOS(err);
+        }
+    }
+    return OK;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
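The new seek() walks mSeekSyncPoints, a presentation-time to byte-offset table that feedMore() populates from ATSParser sync events, and rewinds mOffset to the chosen entry. A small sketch of that lookup for SEEK_PREVIOUS_SYNC semantics, with a hypothetical free-function name:

    #include <sys/types.h>
    #include <utils/KeyedVector.h>

    using android::KeyedVector;

    // Hypothetical: index of the last sync point at or before seekTimeUs
    // (falls back to the earliest entry). Assumes syncPoints is non-empty and
    // sorted by key, like the KeyedVector in the diff.
    static size_t findPreviousSyncIndex(
            const KeyedVector<int64_t, off64_t> &syncPoints, int64_t seekTimeUs) {
        size_t index = 0;
        while (index < syncPoints.size() && syncPoints.keyAt(index) <= seekTimeUs) {
            ++index;
        }
        return (index == 0) ? 0 : index - 1;
    }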
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index aaa8334..5f0f567 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -1,11 +1,8 @@
 LOCAL_PATH:= $(call my-dir)
 include $(CLEAR_VARS)
 
-ifeq ($(TARGET_DEVICE), manta)
-    LOCAL_CFLAGS += -DSURFACE_IS_BGR32
-endif
-
 LOCAL_SRC_FILES:=                     \
+        FrameDropper.cpp              \
         GraphicBufferSource.cpp       \
         OMX.cpp                       \
         OMXMaster.cpp                 \
@@ -34,6 +31,8 @@
         libdl
 
 LOCAL_MODULE:= libstagefright_omx
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 include $(BUILD_SHARED_LIBRARY)
 
diff --git a/media/libstagefright/omx/FrameDropper.cpp b/media/libstagefright/omx/FrameDropper.cpp
new file mode 100644
index 0000000..9a4952e
--- /dev/null
+++ b/media/libstagefright/omx/FrameDropper.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameDropper"
+#include <utils/Log.h>
+
+#include "FrameDropper.h"
+
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+static const int64_t kMaxJitterUs = 2000;
+
+FrameDropper::FrameDropper()
+    : mDesiredMinTimeUs(-1),
+      mMinIntervalUs(0) {
+}
+
+FrameDropper::~FrameDropper() {
+}
+
+status_t FrameDropper::setMaxFrameRate(float maxFrameRate) {
+    if (maxFrameRate <= 0) {
+        ALOGE("framerate should be positive but got %f.", maxFrameRate);
+        return BAD_VALUE;
+    }
+    mMinIntervalUs = (int64_t) (1000000.0f / maxFrameRate);
+    return OK;
+}
+
+bool FrameDropper::shouldDrop(int64_t timeUs) {
+    if (mMinIntervalUs <= 0) {
+        return false;
+    }
+
+    if (mDesiredMinTimeUs < 0) {
+        mDesiredMinTimeUs = timeUs + mMinIntervalUs;
+        ALOGV("first frame %lld, next desired frame %lld",
+                (long long)timeUs, (long long)mDesiredMinTimeUs);
+        return false;
+    }
+
+    if (timeUs < (mDesiredMinTimeUs - kMaxJitterUs)) {
+        ALOGV("drop frame %lld, desired frame %lld, diff %lld",
+                (long long)timeUs, (long long)mDesiredMinTimeUs,
+                (long long)(mDesiredMinTimeUs - timeUs));
+        return true;
+    }
+
+    int64_t n = (timeUs - mDesiredMinTimeUs + kMaxJitterUs) / mMinIntervalUs;
+    mDesiredMinTimeUs += (n + 1) * mMinIntervalUs;
+    ALOGV("keep frame %lld, next desired frame %lld, diff %lld",
+            (long long)timeUs, (long long)mDesiredMinTimeUs,
+            (long long)(mDesiredMinTimeUs - timeUs));
+    return false;
+}
+
+}  // namespace android
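FrameDropper spaces accepted frames at least 1s/maxFrameRate apart, with up to kMaxJitterUs (2 ms) of slack. A small usage sketch against the class as added above:

    #include <stdint.h>
    #include "FrameDropper.h"

    using android::FrameDropper;
    using android::sp;

    // Feed ~60 fps timestamps into a 30 fps cap; roughly every other frame
    // should be reported as droppable.
    void exampleFrameDropperUsage() {
        sp<FrameDropper> dropper = new FrameDropper();
        if (dropper->setMaxFrameRate(30.0f) != android::OK) {
            return;  // invalid cap
        }
        for (int i = 0; i < 10; ++i) {
            int64_t timeUs = i * 16666ll;  // ~60 fps input
            bool drop = dropper->shouldDrop(timeUs);
            (void)drop;  // a real caller would skip submitting dropped frames
        }
    }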
diff --git a/media/libstagefright/omx/FrameDropper.h b/media/libstagefright/omx/FrameDropper.h
new file mode 100644
index 0000000..c5a6d4b
--- /dev/null
+++ b/media/libstagefright/omx/FrameDropper.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_DROPPER_H_
+
+#define FRAME_DROPPER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <media/stagefright/foundation/ABase.h>
+
+namespace android {
+
+struct FrameDropper : public RefBase {
+    // No frames will be dropped until a valid max frame rate is set.
+    FrameDropper();
+
+    // maxFrameRate is required to be positive.
+    status_t setMaxFrameRate(float maxFrameRate);
+
+    // Returns false if max frame rate has not been set via setMaxFrameRate.
+    bool shouldDrop(int64_t timeUs);
+
+protected:
+    virtual ~FrameDropper();
+
+private:
+    int64_t mDesiredMinTimeUs;
+    int64_t mMinIntervalUs;
+
+    DISALLOW_EVIL_CONSTRUCTORS(FrameDropper);
+};
+
+}  // namespace android
+
+#endif  // FRAME_DROPPER_H_
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index 2945644..1a7dc9d 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -23,28 +23,104 @@
 #include "GraphicBufferSource.h"
 
 #include <OMX_Core.h>
+#include <OMX_IndexExt.h>
 #include <media/stagefright/foundation/ADebug.h>
 #include <media/stagefright/foundation/AMessage.h>
 
 #include <media/hardware/MetadataBufferType.h>
 #include <ui/GraphicBuffer.h>
 #include <gui/BufferItem.h>
+#include <HardwareAPI.h>
 
 #include <inttypes.h>
+#include "FrameDropper.h"
 
 namespace android {
 
 static const bool EXTRA_CHECK = true;
 
+GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
+        const wp<IGraphicBufferConsumer> &consumer,
+        const wp<ConsumerListener>& consumerListener) :
+    mConsumerListener(consumerListener),
+    mConsumer(consumer) {}
 
-GraphicBufferSource::GraphicBufferSource(OMXNodeInstance* nodeInstance,
-        uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount,
-        bool useGraphicBufferInMeta) :
+GraphicBufferSource::PersistentProxyListener::~PersistentProxyListener() {}
+
+void GraphicBufferSource::PersistentProxyListener::onFrameAvailable(
+        const BufferItem& item) {
+    sp<ConsumerListener> listener(mConsumerListener.promote());
+    if (listener != NULL) {
+        listener->onFrameAvailable(item);
+    } else {
+        sp<IGraphicBufferConsumer> consumer(mConsumer.promote());
+        if (consumer == NULL) {
+            return;
+        }
+        BufferItem bi;
+        status_t err = consumer->acquireBuffer(&bi, 0);
+        if (err != OK) {
+            ALOGE("PersistentProxyListener: acquireBuffer failed (%d)", err);
+            return;
+        }
+
+        err = consumer->detachBuffer(bi.mBuf);
+        if (err != OK) {
+            ALOGE("PersistentProxyListener: detachBuffer failed (%d)", err);
+            return;
+        }
+
+        err = consumer->attachBuffer(&bi.mBuf, bi.mGraphicBuffer);
+        if (err != OK) {
+            ALOGE("PersistentProxyListener: attachBuffer failed (%d)", err);
+            return;
+        }
+
+        err = consumer->releaseBuffer(bi.mBuf, 0,
+                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, bi.mFence);
+        if (err != OK) {
+            ALOGE("PersistentProxyListener: releaseBuffer failed (%d)", err);
+        }
+    }
+}
+
+void GraphicBufferSource::PersistentProxyListener::onFrameReplaced(
+        const BufferItem& item) {
+    sp<ConsumerListener> listener(mConsumerListener.promote());
+    if (listener != NULL) {
+        listener->onFrameReplaced(item);
+    }
+}
+
+void GraphicBufferSource::PersistentProxyListener::onBuffersReleased() {
+    sp<ConsumerListener> listener(mConsumerListener.promote());
+    if (listener != NULL) {
+        listener->onBuffersReleased();
+    }
+}
+
+void GraphicBufferSource::PersistentProxyListener::onSidebandStreamChanged() {
+    sp<ConsumerListener> listener(mConsumerListener.promote());
+    if (listener != NULL) {
+        listener->onSidebandStreamChanged();
+    }
+}
+
+GraphicBufferSource::GraphicBufferSource(
+        OMXNodeInstance* nodeInstance,
+        uint32_t bufferWidth,
+        uint32_t bufferHeight,
+        uint32_t bufferCount,
+        uint32_t consumerUsage,
+        const sp<IGraphicBufferConsumer> &consumer) :
     mInitCheck(UNKNOWN_ERROR),
     mNodeInstance(nodeInstance),
     mExecuting(false),
     mSuspended(false),
+    mIsPersistent(false),
+    mConsumer(consumer),
     mNumFramesAvailable(0),
+    mNumBufferAcquired(0),
     mEndOfStream(false),
     mEndOfStreamSent(false),
     mMaxTimestampGapUs(-1ll),
@@ -54,15 +130,15 @@
     mRepeatAfterUs(-1ll),
     mRepeatLastFrameGeneration(0),
     mRepeatLastFrameTimestamp(-1ll),
-    mLatestSubmittedBufferId(-1),
-    mLatestSubmittedBufferFrameNum(0),
-    mLatestSubmittedBufferUseCount(0),
+    mLatestBufferId(-1),
+    mLatestBufferFrameNum(0),
+    mLatestBufferUseCount(0),
+    mLatestBufferFence(Fence::NO_FENCE),
     mRepeatBufferDeferred(false),
     mTimePerCaptureUs(-1ll),
     mTimePerFrameUs(-1ll),
     mPrevCaptureUs(-1ll),
-    mPrevFrameUs(-1ll),
-    mUseGraphicBufferInMeta(useGraphicBufferInMeta) {
+    mPrevFrameUs(-1ll) {
 
     ALOGV("GraphicBufferSource w=%u h=%u c=%u",
             bufferWidth, bufferHeight, bufferCount);
@@ -73,26 +149,38 @@
         return;
     }
 
-    String8 name("GraphicBufferSource");
+    if (mConsumer == NULL) {
+        String8 name("GraphicBufferSource");
 
-    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
-    mConsumer->setConsumerName(name);
-    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
-    mConsumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
+        BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+        mConsumer->setConsumerName(name);
 
-    mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
-    if (mInitCheck != NO_ERROR) {
-        ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
-                bufferCount, mInitCheck);
-        return;
+        // Use the consumer usage bits queried from the encoder, but always
+        // add HW_VIDEO_ENCODER for backward compatibility.
+        consumerUsage |= GRALLOC_USAGE_HW_VIDEO_ENCODER;
+        mConsumer->setConsumerUsageBits(consumerUsage);
+
+        mInitCheck = mConsumer->setMaxAcquiredBufferCount(bufferCount);
+        if (mInitCheck != NO_ERROR) {
+            ALOGE("Unable to set BQ max acquired buffer count to %u: %d",
+                    bufferCount, mInitCheck);
+            return;
+        }
+    } else {
+        mIsPersistent = true;
     }
-
+    mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
     // Note that we can't create an sp<...>(this) in a ctor that will not keep a
     // reference once the ctor ends, as that would cause the refcount of 'this'
     // dropping to 0 at the end of the ctor.  Since all we need is a wp<...>
     // that's what we create.
     wp<BufferQueue::ConsumerListener> listener = static_cast<BufferQueue::ConsumerListener*>(this);
-    sp<BufferQueue::ProxyConsumerListener> proxy = new BufferQueue::ProxyConsumerListener(listener);
+    sp<IConsumerListener> proxy;
+    if (!mIsPersistent) {
+        proxy = new BufferQueue::ProxyConsumerListener(listener);
+    } else {
+        proxy = new PersistentProxyListener(mConsumer, listener);
+    }
 
     mInitCheck = mConsumer->consumerConnect(proxy, false);
     if (mInitCheck != NO_ERROR) {
@@ -105,8 +193,15 @@
 }
 
 GraphicBufferSource::~GraphicBufferSource() {
-    ALOGV("~GraphicBufferSource");
-    if (mConsumer != NULL) {
+    if (mLatestBufferId >= 0) {
+        releaseBuffer(
+                mLatestBufferId, mLatestBufferFrameNum,
+                mBufferSlot[mLatestBufferId], mLatestBufferFence);
+    }
+    if (mNumBufferAcquired != 0) {
+        ALOGW("potential buffer leak (acquired %d)", mNumBufferAcquired);
+    }
+    if (mConsumer != NULL && !mIsPersistent) {
         status_t err = mConsumer->consumerDisconnect();
         if (err != NO_ERROR) {
             ALOGW("consumerDisconnect failed: %d", err);
@@ -153,9 +248,9 @@
         mLooper->registerHandler(mReflector);
         mLooper->start();
 
-        if (mLatestSubmittedBufferId >= 0) {
+        if (mLatestBufferId >= 0) {
             sp<AMessage> msg =
-                new AMessage(kWhatRepeatLastFrame, mReflector->id());
+                new AMessage(kWhatRepeatLastFrame, mReflector);
 
             msg->setInt32("generation", ++mRepeatLastFrameGeneration);
             msg->post(mRepeatAfterUs);
@@ -218,9 +313,8 @@
     mCodecBuffers.add(codecBuffer);
 }
 
-void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header) {
+void GraphicBufferSource::codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd) {
     Mutex::Autolock autoLock(mMutex);
-
     if (!mExecuting) {
         return;
     }
@@ -229,6 +323,9 @@
     if (cbi < 0) {
         // This should never happen.
         ALOGE("codecBufferEmptied: buffer not recognized (h=%p)", header);
+        if (fenceFd >= 0) {
+            ::close(fenceFd);
+        }
         return;
     }
 
@@ -250,30 +347,33 @@
         }
         // No GraphicBuffer to deal with, no additional input or output is
         // expected, so just return.
+        if (fenceFd >= 0) {
+            ::close(fenceFd);
+        }
         return;
     }
 
-    if (EXTRA_CHECK) {
+    if (EXTRA_CHECK && header->nAllocLen >= sizeof(MetadataBufferType)) {
         // Pull the graphic buffer handle back out of the buffer, and confirm
         // that it matches expectations.
         OMX_U8* data = header->pBuffer;
         MetadataBufferType type = *(MetadataBufferType *)data;
-        if (type == kMetadataBufferTypeGrallocSource) {
-            buffer_handle_t bufferHandle;
-            memcpy(&bufferHandle, data + 4, sizeof(buffer_handle_t));
-            if (bufferHandle != codecBuffer.mGraphicBuffer->handle) {
+        if (type == kMetadataBufferTypeGrallocSource
+                && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
+            VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)data;
+            if (grallocMeta.pHandle != codecBuffer.mGraphicBuffer->handle) {
                 // should never happen
                 ALOGE("codecBufferEmptied: buffer's handle is %p, expected %p",
-                        bufferHandle, codecBuffer.mGraphicBuffer->handle);
+                        grallocMeta.pHandle, codecBuffer.mGraphicBuffer->handle);
                 CHECK(!"codecBufferEmptied: mismatched buffer");
             }
-        } else if (type == kMetadataBufferTypeGraphicBuffer) {
-            GraphicBuffer *buffer;
-            memcpy(&buffer, data + 4, sizeof(buffer));
-            if (buffer != codecBuffer.mGraphicBuffer.get()) {
+        } else if (type == kMetadataBufferTypeANWBuffer
+                && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
+            VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)data;
+            if (nativeMeta.pBuffer != codecBuffer.mGraphicBuffer->getNativeBuffer()) {
                 // should never happen
                 ALOGE("codecBufferEmptied: buffer is %p, expected %p",
-                        buffer, codecBuffer.mGraphicBuffer.get());
+                        nativeMeta.pBuffer, codecBuffer.mGraphicBuffer->getNativeBuffer());
                 CHECK(!"codecBufferEmptied: mismatched buffer");
             }
         }
@@ -283,20 +383,21 @@
     // If we find a match, release that slot.  If we don't, the BufferQueue
     // has dropped that GraphicBuffer, and there's nothing for us to release.
     int id = codecBuffer.mBuf;
+    sp<Fence> fence = new Fence(fenceFd);
     if (mBufferSlot[id] != NULL &&
         mBufferSlot[id]->handle == codecBuffer.mGraphicBuffer->handle) {
         ALOGV("cbi %d matches bq slot %d, handle=%p",
                 cbi, id, mBufferSlot[id]->handle);
 
-        if (id == mLatestSubmittedBufferId) {
-            CHECK_GT(mLatestSubmittedBufferUseCount--, 0);
+        if (id == mLatestBufferId) {
+            CHECK_GT(mLatestBufferUseCount--, 0);
         } else {
-            mConsumer->releaseBuffer(id, codecBuffer.mFrameNumber,
-                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+            releaseBuffer(id, codecBuffer.mFrameNumber, mBufferSlot[id], fence);
         }
     } else {
         ALOGV("codecBufferEmptied: no match for emptied buffer in cbi %d",
                 cbi);
+        // We will not reuse this codec buffer, so there is no need to wait for the fence.
     }
 
     // Mark the codec buffer as available by clearing the GraphicBuffer ref.
@@ -314,11 +415,11 @@
         ALOGV("buffer freed, EOS pending");
         submitEndOfInputStream_l();
     } else if (mRepeatBufferDeferred) {
-        bool success = repeatLatestSubmittedBuffer_l();
+        bool success = repeatLatestBuffer_l();
         if (success) {
-            ALOGV("deferred repeatLatestSubmittedBuffer_l SUCCESS");
+            ALOGV("deferred repeatLatestBuffer_l SUCCESS");
         } else {
-            ALOGV("deferred repeatLatestSubmittedBuffer_l FAILURE");
+            ALOGV("deferred repeatLatestBuffer_l FAILURE");
         }
         mRepeatBufferDeferred = false;
     }
@@ -372,10 +473,11 @@
                 break;
             }
 
+            ++mNumBufferAcquired;
             --mNumFramesAvailable;
 
-            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
-                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+            releaseBuffer(item.mBuf, item.mFrameNumber,
+                    item.mGraphicBuffer, item.mFence);
         }
         return;
     }
@@ -383,12 +485,12 @@
     mSuspended = false;
 
     if (mExecuting && mNumFramesAvailable == 0 && mRepeatBufferDeferred) {
-        if (repeatLatestSubmittedBuffer_l()) {
-            ALOGV("suspend/deferred repeatLatestSubmittedBuffer_l SUCCESS");
+        if (repeatLatestBuffer_l()) {
+            ALOGV("suspend/deferred repeatLatestBuffer_l SUCCESS");
 
             mRepeatBufferDeferred = false;
         } else {
-            ALOGV("suspend/deferred repeatLatestSubmittedBuffer_l FAILURE");
+            ALOGV("suspend/deferred repeatLatestBuffer_l FAILURE");
         }
     }
 }
@@ -422,15 +524,9 @@
         return false;
     }
 
+    mNumBufferAcquired++;
     mNumFramesAvailable--;
 
-    // Wait for it to become available.
-    err = item.mFence->waitForever("GraphicBufferSource::fillCodecBuffer_l");
-    if (err != OK) {
-        ALOGW("failed to wait for buffer fence: %d", err);
-        // keep going
-    }
-
     // If this is the first time we're seeing this buffer, add it to our
     // slot table.
     if (item.mGraphicBuffer != NULL) {
@@ -442,61 +538,72 @@
 
     // only submit sample if start time is unspecified, or sample
     // is queued after the specified start time
+    bool dropped = false;
     if (mSkipFramesBeforeNs < 0ll || item.mTimestamp >= mSkipFramesBeforeNs) {
         // if start time is set, offset time stamp by start time
         if (mSkipFramesBeforeNs > 0) {
             item.mTimestamp -= mSkipFramesBeforeNs;
         }
-        err = submitBuffer_l(item, cbi);
+
+        int64_t timeUs = item.mTimestamp / 1000;
+        if (mFrameDropper != NULL && mFrameDropper->shouldDrop(timeUs)) {
+            ALOGV("skipping frame (%lld) to meet max framerate", static_cast<long long>(timeUs));
+            // Set err to OK so that the skipped frame can still be saved as the latest frame.
+            err = OK;
+            dropped = true;
+        } else {
+            err = submitBuffer_l(item, cbi);
+        }
     }
 
     if (err != OK) {
         ALOGV("submitBuffer_l failed, releasing bq buf %d", item.mBuf);
-        mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
-                EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
+        releaseBuffer(item.mBuf, item.mFrameNumber, item.mGraphicBuffer, item.mFence);
     } else {
         ALOGV("buffer submitted (bq %d, cbi %d)", item.mBuf, cbi);
-        setLatestSubmittedBuffer_l(item);
+        setLatestBuffer_l(item, dropped);
     }
 
     return true;
 }
 
-bool GraphicBufferSource::repeatLatestSubmittedBuffer_l() {
+bool GraphicBufferSource::repeatLatestBuffer_l() {
     CHECK(mExecuting && mNumFramesAvailable == 0);
 
-    if (mLatestSubmittedBufferId < 0 || mSuspended) {
+    if (mLatestBufferId < 0 || mSuspended) {
         return false;
     }
-    if (mBufferSlot[mLatestSubmittedBufferId] == NULL) {
+    if (mBufferSlot[mLatestBufferId] == NULL) {
         // This can happen if the remote side disconnects, causing
         // onBuffersReleased() to NULL out our copy of the slots.  The
         // buffer is gone, so we have nothing to show.
         //
         // To be on the safe side we try to release the buffer.
-        ALOGD("repeatLatestSubmittedBuffer_l: slot was NULL");
+        ALOGD("repeatLatestBuffer_l: slot was NULL");
         mConsumer->releaseBuffer(
-                mLatestSubmittedBufferId,
-                mLatestSubmittedBufferFrameNum,
+                mLatestBufferId,
+                mLatestBufferFrameNum,
                 EGL_NO_DISPLAY,
                 EGL_NO_SYNC_KHR,
-                Fence::NO_FENCE);
-        mLatestSubmittedBufferId = -1;
-        mLatestSubmittedBufferFrameNum = 0;
+                mLatestBufferFence);
+        mLatestBufferId = -1;
+        mLatestBufferFrameNum = 0;
+        mLatestBufferFence = Fence::NO_FENCE;
         return false;
     }
 
     int cbi = findAvailableCodecBuffer_l();
     if (cbi < 0) {
         // No buffers available, bail.
-        ALOGV("repeatLatestSubmittedBuffer_l: no codec buffers.");
+        ALOGV("repeatLatestBuffer_l: no codec buffers.");
         return false;
     }
 
     BufferItem item;
-    item.mBuf = mLatestSubmittedBufferId;
-    item.mFrameNumber = mLatestSubmittedBufferFrameNum;
+    item.mBuf = mLatestBufferId;
+    item.mFrameNumber = mLatestBufferFrameNum;
     item.mTimestamp = mRepeatLastFrameTimestamp;
+    item.mFence = mLatestBufferFence;
 
     status_t err = submitBuffer_l(item, cbi);
 
@@ -504,7 +611,7 @@
         return false;
     }
 
-    ++mLatestSubmittedBufferUseCount;
+    ++mLatestBufferUseCount;
 
     /* repeat last frame up to kRepeatLastFrameCount times.
      * in case of static scene, a single repeat might not get rid of encoder
@@ -514,7 +621,7 @@
         mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
 
         if (mReflector != NULL) {
-            sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector->id());
+            sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
             msg->setInt32("generation", ++mRepeatLastFrameGeneration);
             msg->post(mRepeatAfterUs);
         }
@@ -523,31 +630,29 @@
     return true;
 }
 
-void GraphicBufferSource::setLatestSubmittedBuffer_l(
-        const BufferItem &item) {
-    ALOGV("setLatestSubmittedBuffer_l");
+void GraphicBufferSource::setLatestBuffer_l(
+        const BufferItem &item, bool dropped) {
+    ALOGV("setLatestBuffer_l");
 
-    if (mLatestSubmittedBufferId >= 0) {
-        if (mLatestSubmittedBufferUseCount == 0) {
-            mConsumer->releaseBuffer(
-                    mLatestSubmittedBufferId,
-                    mLatestSubmittedBufferFrameNum,
-                    EGL_NO_DISPLAY,
-                    EGL_NO_SYNC_KHR,
-                    Fence::NO_FENCE);
+    if (mLatestBufferId >= 0) {
+        if (mLatestBufferUseCount == 0) {
+            releaseBuffer(mLatestBufferId, mLatestBufferFrameNum,
+                    mBufferSlot[mLatestBufferId], mLatestBufferFence);
+            // mLatestBufferFence will be set to the new fence just below
         }
     }
 
-    mLatestSubmittedBufferId = item.mBuf;
-    mLatestSubmittedBufferFrameNum = item.mFrameNumber;
+    mLatestBufferId = item.mBuf;
+    mLatestBufferFrameNum = item.mFrameNumber;
     mRepeatLastFrameTimestamp = item.mTimestamp + mRepeatAfterUs * 1000;
 
-    mLatestSubmittedBufferUseCount = 1;
+    mLatestBufferUseCount = dropped ? 0 : 1;
     mRepeatBufferDeferred = false;
     mRepeatLastFrameCount = kRepeatLastFrameCount;
+    mLatestBufferFence = item.mFence;
 
     if (mReflector != NULL) {
-        sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector->id());
+        sp<AMessage> msg = new AMessage(kWhatRepeatLastFrame, mReflector);
         msg->setInt32("generation", ++mRepeatLastFrameGeneration);
         msg->post(mRepeatAfterUs);
     }
@@ -640,8 +745,7 @@
     return timeUs;
 }
 
-status_t GraphicBufferSource::submitBuffer_l(
-        const BufferItem &item, int cbi) {
+status_t GraphicBufferSource::submitBuffer_l(const BufferItem &item, int cbi) {
     ALOGV("submitBuffer_l cbi=%d", cbi);
 
     int64_t timeUs = getTimestamp(item);
@@ -655,36 +759,18 @@
     codecBuffer.mFrameNumber = item.mFrameNumber;
 
     OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
-    CHECK(header->nAllocLen >= 4 + sizeof(buffer_handle_t));
-    OMX_U8* data = header->pBuffer;
-    buffer_handle_t handle;
-    if (!mUseGraphicBufferInMeta) {
-        const OMX_U32 type = kMetadataBufferTypeGrallocSource;
-        handle = codecBuffer.mGraphicBuffer->handle;
-        memcpy(data, &type, 4);
-        memcpy(data + 4, &handle, sizeof(buffer_handle_t));
-    } else {
-        // codecBuffer holds a reference to the GraphicBuffer, so
-        // it is valid while it is with the OMX component
-        const OMX_U32 type = kMetadataBufferTypeGraphicBuffer;
-        memcpy(data, &type, 4);
-        // passing a non-reference-counted graphicBuffer
-        GraphicBuffer *buffer = codecBuffer.mGraphicBuffer.get();
-        handle = buffer->handle;
-        memcpy(data + 4, &buffer, sizeof(buffer));
-    }
-
-    status_t err = mNodeInstance->emptyDirectBuffer(header, 0,
-            4 + sizeof(buffer_handle_t), OMX_BUFFERFLAG_ENDOFFRAME,
-            timeUs);
+    sp<GraphicBuffer> buffer = codecBuffer.mGraphicBuffer;
+    status_t err = mNodeInstance->emptyGraphicBuffer(
+            header, buffer, OMX_BUFFERFLAG_ENDOFFRAME, timeUs,
+            item.mFence->isValid() ? item.mFence->dup() : -1);
     if (err != OK) {
-        ALOGW("WARNING: emptyDirectBuffer failed: 0x%x", err);
+        ALOGW("WARNING: emptyNativeWindowBuffer failed: 0x%x", err);
         codecBuffer.mGraphicBuffer = NULL;
         return err;
     }
 
-    ALOGV("emptyDirectBuffer succeeded, h=%p p=%p bufhandle=%p",
-            header, header->pBuffer, handle);
+    ALOGV("emptyNativeWindowBuffer succeeded, h=%p p=%p buf=%p bufhandle=%p",
+            header, header->pBuffer, buffer->getNativeBuffer(), buffer->handle);
     return OK;
 }
 
@@ -707,19 +793,9 @@
     CodecBuffer& codecBuffer(mCodecBuffers.editItemAt(cbi));
 
     OMX_BUFFERHEADERTYPE* header = codecBuffer.mHeader;
-    if (EXTRA_CHECK) {
-        // Guard against implementations that don't check nFilledLen.
-        size_t fillLen = 4 + sizeof(buffer_handle_t);
-        CHECK(header->nAllocLen >= fillLen);
-        OMX_U8* data = header->pBuffer;
-        memset(data, 0xcd, fillLen);
-    }
-
-    uint64_t timestamp = 0; // does this matter?
-
-    status_t err = mNodeInstance->emptyDirectBuffer(header, /*offset*/ 0,
-            /*length*/ 0, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
-            timestamp);
+    status_t err = mNodeInstance->emptyGraphicBuffer(
+            header, NULL /* buffer */, OMX_BUFFERFLAG_ENDOFFRAME | OMX_BUFFERFLAG_EOS,
+            0 /* timestamp */, -1 /* fenceFd */);
     if (err != OK) {
         ALOGW("emptyDirectBuffer EOS failed: 0x%x", err);
     } else {
@@ -750,6 +826,35 @@
     return -1;
 }
 
+/*
+ * Releases an acquired buffer back to the consumer for either persistent
+ * or non-persistent surfaces.
+ *
+ * id: buffer slot to release (in the persistent case the id may change)
+ * frameNum: frame number of the frame being released
+ * buffer: GraphicBuffer to release (passed by value rather than by reference,
+ *         since we clear the original mBufferSlot entry in the persistent case)
+ * fence: fence of the frame being released
+ */
+void GraphicBufferSource::releaseBuffer(
+        int &id, uint64_t frameNum,
+        const sp<GraphicBuffer> buffer, const sp<Fence> &fence) {
+    if (mIsPersistent) {
+        mConsumer->detachBuffer(id);
+        mBufferSlot[id] = NULL;
+
+        if (mConsumer->attachBuffer(&id, buffer) == OK) {
+            mConsumer->releaseBuffer(
+                    id, 0, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
+        }
+    } else {
+        mConsumer->releaseBuffer(
+                id, frameNum, EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, fence);
+    }
+    id = -1; // invalidate id
+    mNumBufferAcquired--;
+}
+
 // BufferQueue::ConsumerListener callback
 void GraphicBufferSource::onFrameAvailable(const BufferItem& /*item*/) {
     Mutex::Autolock autoLock(mMutex);
@@ -770,14 +875,17 @@
         BufferItem item;
         status_t err = mConsumer->acquireBuffer(&item, 0);
         if (err == OK) {
+            mNumBufferAcquired++;
+
             // If this is the first time we're seeing this buffer, add it to our
             // slot table.
             if (item.mGraphicBuffer != NULL) {
                 ALOGV("onFrameAvailable: setting mBufferSlot %d", item.mBuf);
                 mBufferSlot[item.mBuf] = item.mGraphicBuffer;
             }
-            mConsumer->releaseBuffer(item.mBuf, item.mFrameNumber,
-                    EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, item.mFence);
+
+            releaseBuffer(item.mBuf, item.mFrameNumber,
+                    item.mGraphicBuffer, item.mFence);
         }
         return;
     }
@@ -842,6 +950,23 @@
     return OK;
 }
 
+status_t GraphicBufferSource::setMaxFps(float maxFps) {
+    Mutex::Autolock autoLock(mMutex);
+
+    if (mExecuting) {
+        return INVALID_OPERATION;
+    }
+
+    mFrameDropper = new FrameDropper();
+    status_t err = mFrameDropper->setMaxFrameRate(maxFps);
+    if (err != OK) {
+        mFrameDropper.clear();
+        return err;
+    }
+
+    return OK;
+}
+
 void GraphicBufferSource::setSkipFramesBeforeUs(int64_t skipFramesBeforeUs) {
     Mutex::Autolock autoLock(mMutex);
 
@@ -880,12 +1005,12 @@
                 break;
             }
 
-            bool success = repeatLatestSubmittedBuffer_l();
+            bool success = repeatLatestBuffer_l();
 
             if (success) {
-                ALOGV("repeatLatestSubmittedBuffer_l SUCCESS");
+                ALOGV("repeatLatestBuffer_l SUCCESS");
             } else {
-                ALOGV("repeatLatestSubmittedBuffer_l FAILURE");
+                ALOGV("repeatLatestBuffer_l FAILURE");
                 mRepeatBufferDeferred = true;
             }
             break;
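These GraphicBufferSource changes stop blocking on the acquire fence (the waitForever call is removed); submitBuffer_l now dups the fence fd and passes it along with the buffer to emptyGraphicBuffer, leaving the wait to whoever consumes the buffer. A minimal sketch of that fence-forwarding idiom, with a hypothetical helper name:

    #include <ui/Fence.h>

    using android::Fence;
    using android::sp;

    // Hypothetical helper: duplicate a fence fd for handoff, or return -1 when
    // there is nothing to wait on. The receiver owns (and must close) the dup.
    static int dupFenceFdForHandoff(const sp<Fence> &fence) {
        return (fence != NULL && fence->isValid()) ? fence->dup() : -1;
    }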
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 401bbc3..2f929d9 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -30,6 +30,8 @@
 
 namespace android {
 
+struct FrameDropper;
+
 /*
  * This class is used to feed OMX codecs from a Surface via BufferQueue.
  *
@@ -48,9 +50,15 @@
  */
 class GraphicBufferSource : public BufferQueue::ConsumerListener {
 public:
-    GraphicBufferSource(OMXNodeInstance* nodeInstance,
-            uint32_t bufferWidth, uint32_t bufferHeight, uint32_t bufferCount,
-            bool useGraphicBufferInMeta = false);
+    GraphicBufferSource(
+            OMXNodeInstance* nodeInstance,
+            uint32_t bufferWidth,
+            uint32_t bufferHeight,
+            uint32_t bufferCount,
+            uint32_t consumerUsage,
+            const sp<IGraphicBufferConsumer> &consumer = NULL
+    );
+
     virtual ~GraphicBufferSource();
 
     // We can't throw an exception if the constructor fails, so we just set
@@ -86,7 +94,7 @@
 
     // Called from OnEmptyBufferDone.  If we have a BQ buffer available,
     // fill it with a new frame of data; otherwise, just mark it as available.
-    void codecBufferEmptied(OMX_BUFFERHEADERTYPE* header);
+    void codecBufferEmptied(OMX_BUFFERHEADERTYPE* header, int fenceFd);
 
     // Called when omx_message::FILL_BUFFER_DONE is received. (Currently the
     // buffer source will fix timestamp in the header if needed.)
@@ -119,6 +127,9 @@
     // of suspension on input.
     status_t setMaxTimestampGapUs(int64_t maxGapUs);
 
+    // When set, the max frame rate fed to the encoder will be capped at maxFps.
+    status_t setMaxFps(float maxFps);
+
     // Sets the time lapse (or slow motion) parameters.
     // data[0] is the time (us) between two frames for playback
     // data[1] is the time (us) between two frames for capture
@@ -150,6 +161,31 @@
     virtual void onSidebandStreamChanged();
 
 private:
+    // PersistentProxyListener is similar to BufferQueue::ProxyConsumerListener,
+    // except that it returns buffers (acquire/detach/re-attach/release) in
+    // onFrameAvailable() if the actual consumer object is no longer valid.
+    //
+    // This class is used in the persistent input surface case to prevent buffer
+    // loss when onFrameAvailable() is received while we don't have a valid
+    // consumer around.
+    class PersistentProxyListener : public BnConsumerListener {
+        public:
+            PersistentProxyListener(
+                    const wp<IGraphicBufferConsumer> &consumer,
+                    const wp<ConsumerListener>& consumerListener);
+            virtual ~PersistentProxyListener();
+            virtual void onFrameAvailable(const BufferItem& item) override;
+            virtual void onFrameReplaced(const BufferItem& item) override;
+            virtual void onBuffersReleased() override;
+            virtual void onSidebandStreamChanged() override;
+        private:
+            // mConsumerListener is a weak reference to the IConsumerListener.
+            wp<ConsumerListener> mConsumerListener;
+            // mConsumer is a weak reference to the IGraphicBufferConsumer; a weak
+            // ref avoids a circular reference between mConsumer and this class.
+            wp<IGraphicBufferConsumer> mConsumer;
+    };
+
     // Keep track of codec input buffers.  They may either be available
     // (mGraphicBuffer == NULL) or in use by the codec.
     struct CodecBuffer {
@@ -193,8 +229,13 @@
     // doing anything if we don't have a codec buffer available.
     void submitEndOfInputStream_l();
 
-    void setLatestSubmittedBuffer_l(const BufferItem &item);
-    bool repeatLatestSubmittedBuffer_l();
+    // Release buffer to the consumer
+    void releaseBuffer(
+            int &id, uint64_t frameNum,
+            const sp<GraphicBuffer> buffer, const sp<Fence> &fence);
+
+    void setLatestBuffer_l(const BufferItem &item, bool dropped);
+    bool repeatLatestBuffer_l();
     int64_t getTimestamp(const BufferItem &item);
 
     // Lock, covers all member variables.
@@ -214,6 +255,7 @@
     // Our BufferQueue interfaces. mProducer is passed to the producer through
     // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
     // the buffers queued by the producer.
+    bool mIsPersistent;
     sp<IGraphicBufferProducer> mProducer;
     sp<IGraphicBufferConsumer> mConsumer;
 
@@ -221,6 +263,9 @@
     // forwarded to the codec.
     size_t mNumFramesAvailable;
 
+    // Number of frames acquired from consumer (debug only)
+    int32_t mNumBufferAcquired;
+
     // Set to true if we want to send end-of-stream after we run out of
     // frames in BufferQueue.
     bool mEndOfStream;
@@ -235,7 +280,7 @@
     Vector<CodecBuffer> mCodecBuffers;
 
     ////
-    friend class AHandlerReflector<GraphicBufferSource>;
+    friend struct AHandlerReflector<GraphicBufferSource>;
 
     enum {
         kWhatRepeatLastFrame,
@@ -250,6 +295,8 @@
     int64_t mPrevModifiedTimeUs;
     int64_t mSkipFramesBeforeNs;
 
+    sp<FrameDropper> mFrameDropper;
+
     sp<ALooper> mLooper;
     sp<AHandlerReflector<GraphicBufferSource> > mReflector;
 
@@ -258,11 +305,12 @@
     int64_t mRepeatLastFrameTimestamp;
     int32_t mRepeatLastFrameCount;
 
-    int mLatestSubmittedBufferId;
-    uint64_t mLatestSubmittedBufferFrameNum;
-    int32_t mLatestSubmittedBufferUseCount;
+    int mLatestBufferId;
+    uint64_t mLatestBufferFrameNum;
+    int32_t mLatestBufferUseCount;
+    sp<Fence> mLatestBufferFence;
 
-    // The previously submitted buffer should've been repeated but
+    // The previous buffer should've been repeated but
     // no codec buffer was available at the time.
     bool mRepeatBufferDeferred;
 
@@ -272,7 +320,7 @@
     int64_t mPrevCaptureUs;
     int64_t mPrevFrameUs;
 
-    bool mUseGraphicBufferInMeta;
+    MetadataBufferType mMetadataBufferType;
 
     void onMessageReceived(const sp<AMessage> &msg);
 
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index f8d38ff..cb7ab5e 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -32,7 +32,9 @@
 
 #include "OMXMaster.h"
 
+#include <OMX_AsString.h>
 #include <OMX_Component.h>
+#include <OMX_VideoExt.h>
 
 namespace android {
 
@@ -60,7 +62,11 @@
 struct OMX::CallbackDispatcher : public RefBase {
     CallbackDispatcher(OMXNodeInstance *owner);
 
-    void post(const omx_message &msg);
+    // Posts |msg| to the listener's queue. If |realTime| is true, the listener thread is notified
+    // that a new message is available on the queue. Otherwise, the message stays on the queue, but
+    // the listener is not notified of it. It will process this message when a subsequent message
+    // is posted with |realTime| set to true.
+    void post(const omx_message &msg, bool realTime = true);
 
     bool loop();
 
@@ -73,11 +79,11 @@
     OMXNodeInstance *mOwner;
     bool mDone;
     Condition mQueueChanged;
-    List<omx_message> mQueue;
+    std::list<omx_message> mQueue;
 
     sp<CallbackDispatcherThread> mThread;
 
-    void dispatch(const omx_message &msg);
+    void dispatch(std::list<omx_message> &messages);
 
     CallbackDispatcher(const CallbackDispatcher &);
     CallbackDispatcher &operator=(const CallbackDispatcher &);
@@ -108,24 +114,26 @@
     }
 }
 
-void OMX::CallbackDispatcher::post(const omx_message &msg) {
+void OMX::CallbackDispatcher::post(const omx_message &msg, bool realTime) {
     Mutex::Autolock autoLock(mLock);
 
     mQueue.push_back(msg);
-    mQueueChanged.signal();
+    if (realTime) {
+        mQueueChanged.signal();
+    }
 }
 
-void OMX::CallbackDispatcher::dispatch(const omx_message &msg) {
+void OMX::CallbackDispatcher::dispatch(std::list<omx_message> &messages) {
     if (mOwner == NULL) {
         ALOGV("Would have dispatched a message to a node that's already gone.");
         return;
     }
-    mOwner->onMessage(msg);
+    mOwner->onMessages(messages);
 }
 
 bool OMX::CallbackDispatcher::loop() {
     for (;;) {
-        omx_message msg;
+        std::list<omx_message> messages;
 
         {
             Mutex::Autolock autoLock(mLock);
@@ -137,11 +145,10 @@
                 break;
             }
 
-            msg = *mQueue.begin();
-            mQueue.erase(mQueue.begin());
+            messages.swap(mQueue);
         }
 
-        dispatch(msg);
+        dispatch(messages);
     }
 
     return false;
@@ -233,11 +240,11 @@
             instance, &handle);
 
     if (err != OMX_ErrorNone) {
-        ALOGE("FAILED to allocate omx component '%s'", name);
+        ALOGE("FAILED to allocate omx component '%s' err=%s(%#x)", name, asString(err), err);
 
         instance->onGetHandleFailed();
 
-        return UNKNOWN_ERROR;
+        return StatusFromOMXError(err);
     }
 
     *node = makeNodeID(instance);
@@ -331,8 +338,8 @@
 }
 
 status_t OMX::storeMetaDataInBuffers(
-        node_id node, OMX_U32 port_index, OMX_BOOL enable) {
-    return findInstance(node)->storeMetaDataInBuffers(port_index, enable);
+        node_id node, OMX_U32 port_index, OMX_BOOL enable, MetadataBufferType *type) {
+    return findInstance(node)->storeMetaDataInBuffers(port_index, enable, type);
 }
 
 status_t OMX::prepareForAdaptivePlayback(
@@ -351,9 +358,9 @@
 
 status_t OMX::useBuffer(
         node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer) {
+        buffer_id *buffer, OMX_U32 allottedSize) {
     return findInstance(node)->useBuffer(
-            port_index, params, buffer);
+            port_index, params, buffer, allottedSize);
 }
 
 status_t OMX::useGraphicBuffer(
@@ -372,11 +379,25 @@
 
 status_t OMX::createInputSurface(
         node_id node, OMX_U32 port_index,
-        sp<IGraphicBufferProducer> *bufferProducer) {
+        sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
     return findInstance(node)->createInputSurface(
-            port_index, bufferProducer);
+            port_index, bufferProducer, type);
 }
 
+status_t OMX::createPersistentInputSurface(
+        sp<IGraphicBufferProducer> *bufferProducer,
+        sp<IGraphicBufferConsumer> *bufferConsumer) {
+    return OMXNodeInstance::createPersistentInputSurface(
+            bufferProducer, bufferConsumer);
+}
+
+status_t OMX::setInputSurface(
+        node_id node, OMX_U32 port_index,
+        const sp<IGraphicBufferConsumer> &bufferConsumer, MetadataBufferType *type) {
+    return findInstance(node)->setInputSurface(port_index, bufferConsumer, type);
+}
+
+
 status_t OMX::signalEndOfInputStream(node_id node) {
     return findInstance(node)->signalEndOfInputStream();
 }
@@ -390,9 +411,9 @@
 
 status_t OMX::allocateBufferWithBackup(
         node_id node, OMX_U32 port_index, const sp<IMemory> &params,
-        buffer_id *buffer) {
+        buffer_id *buffer, OMX_U32 allottedSize) {
     return findInstance(node)->allocateBufferWithBackup(
-            port_index, params, buffer);
+            port_index, params, buffer, allottedSize);
 }
 
 status_t OMX::freeBuffer(node_id node, OMX_U32 port_index, buffer_id buffer) {
@@ -400,17 +421,17 @@
             port_index, buffer);
 }
 
-status_t OMX::fillBuffer(node_id node, buffer_id buffer) {
-    return findInstance(node)->fillBuffer(buffer);
+status_t OMX::fillBuffer(node_id node, buffer_id buffer, int fenceFd) {
+    return findInstance(node)->fillBuffer(buffer, fenceFd);
 }
 
 status_t OMX::emptyBuffer(
         node_id node,
         buffer_id buffer,
         OMX_U32 range_offset, OMX_U32 range_length,
-        OMX_U32 flags, OMX_TICKS timestamp) {
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
     return findInstance(node)->emptyBuffer(
-            buffer, range_offset, range_length, flags, timestamp);
+            buffer, range_offset, range_length, flags, timestamp, fenceFd);
 }
 
 status_t OMX::getExtensionIndex(
@@ -435,31 +456,56 @@
         OMX_IN OMX_EVENTTYPE eEvent,
         OMX_IN OMX_U32 nData1,
         OMX_IN OMX_U32 nData2,
-        OMX_IN OMX_PTR /* pEventData */) {
+        OMX_IN OMX_PTR pEventData) {
     ALOGV("OnEvent(%d, %" PRIu32", %" PRIu32 ")", eEvent, nData1, nData2);
 
     // Forward to OMXNodeInstance.
     findInstance(node)->onEvent(eEvent, nData1, nData2);
 
+    sp<OMX::CallbackDispatcher> dispatcher = findDispatcher(node);
+
+    // output rendered events are not processed as regular events until they hit the observer
+    if (eEvent == OMX_EventOutputRendered) {
+        if (pEventData == NULL) {
+            return OMX_ErrorBadParameter;
+        }
+
+        // process data from array
+        OMX_VIDEO_RENDEREVENTTYPE *renderData = (OMX_VIDEO_RENDEREVENTTYPE *)pEventData;
+        for (size_t i = 0; i < nData1; ++i) {
+            omx_message msg;
+            msg.type = omx_message::FRAME_RENDERED;
+            msg.node = node;
+            msg.fenceFd = -1;
+            msg.u.render_data.timestamp = renderData[i].nMediaTimeUs;
+            msg.u.render_data.nanoTime = renderData[i].nSystemTimeNs;
+
+            dispatcher->post(msg, false /* realTime */);
+        }
+        return OMX_ErrorNone;
+    }
+
     omx_message msg;
     msg.type = omx_message::EVENT;
     msg.node = node;
+    msg.fenceFd = -1;
     msg.u.event_data.event = eEvent;
     msg.u.event_data.data1 = nData1;
     msg.u.event_data.data2 = nData2;
 
-    findDispatcher(node)->post(msg);
+    dispatcher->post(msg, true /* realTime */);
 
     return OMX_ErrorNone;
 }
 
 OMX_ERRORTYPE OMX::OnEmptyBufferDone(
-        node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
+        node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd) {
     ALOGV("OnEmptyBufferDone buffer=%p", pBuffer);
 
     omx_message msg;
     msg.type = omx_message::EMPTY_BUFFER_DONE;
     msg.node = node;
+    msg.fenceFd = fenceFd;
     msg.u.buffer_data.buffer = buffer;
 
     findDispatcher(node)->post(msg);
@@ -468,12 +514,13 @@
 }
 
 OMX_ERRORTYPE OMX::OnFillBufferDone(
-        node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer) {
+        node_id node, buffer_id buffer, OMX_IN OMX_BUFFERHEADERTYPE *pBuffer, int fenceFd) {
     ALOGV("OnFillBufferDone buffer=%p", pBuffer);
 
     omx_message msg;
     msg.type = omx_message::FILL_BUFFER_DONE;
     msg.node = node;
+    msg.fenceFd = fenceFd;
     msg.u.extended_buffer_data.buffer = buffer;
     msg.u.extended_buffer_data.range_offset = pBuffer->nOffset;
     msg.u.extended_buffer_data.range_length = pBuffer->nFilledLen;
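
Illustrative sketch, not from the patch: the OnEvent change above batches OMX_EventOutputRendered data into FRAME_RENDERED messages instead of regular events, so an observer can pull render timestamps out of the new list-based callback. The observer class below is hypothetical; omx_message and its render_data fields are the ones defined by this patch.

    struct RenderLoggingObserver : public BnOMXObserver {
        virtual void onMessages(const std::list<omx_message> &messages) {
            for (std::list<omx_message>::const_iterator it = messages.cbegin();
                    it != messages.cend(); ++it) {
                if (it->type == omx_message::FRAME_RENDERED) {
                    // media timestamp (us) and the system time (ns) the frame was rendered
                    ALOGV("rendered ts=%lld at %lld ns",
                            (long long)it->u.render_data.timestamp,
                            (long long)it->u.render_data.nanoTime);
                }
            }
        }
    };
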
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 63c8f54..9f1c5d8 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -32,6 +32,7 @@
 #include <gui/BufferQueue.h>
 #include <HardwareAPI.h>
 #include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ABuffer.h>
 #include <media/stagefright/MediaErrors.h>
 
 #include <utils/misc.h>
@@ -75,11 +76,11 @@
 #define SIMPLE_NEW_BUFFER(buffer_id, port, size, data) \
     NEW_BUFFER_FMT(buffer_id, port, "%zu@%p", (size), (data))
 
-#define EMPTY_BUFFER(addr, header) "%#x [%u@%p]", \
-    (addr), (header)->nAllocLen, (header)->pBuffer
-#define FULL_BUFFER(addr, header) "%#" PRIxPTR " [%u@%p (%u..+%u) f=%x ts=%lld]", \
+#define EMPTY_BUFFER(addr, header, fenceFd) "%#x [%u@%p fc=%d]", \
+    (addr), (header)->nAllocLen, (header)->pBuffer, (fenceFd)
+#define FULL_BUFFER(addr, header, fenceFd) "%#" PRIxPTR " [%u@%p (%u..+%u) f=%x ts=%lld fc=%d]", \
     (intptr_t)(addr), (header)->nAllocLen, (header)->pBuffer, \
-    (header)->nOffset, (header)->nFilledLen, (header)->nFlags, (header)->nTimeStamp
+    (header)->nOffset, (header)->nFilledLen, (header)->nFlags, (header)->nTimeStamp, (fenceFd)
 
 #define WITH_STATS_WRAPPER(fmt, ...) fmt " { IN=%zu/%zu OUT=%zu/%zu }", ##__VA_ARGS__, \
     mInputBuffersWithCodec.size(), mNumPortBuffers[kPortIndexInput], \
@@ -120,9 +121,10 @@
             return;
         }
 
-        memcpy((OMX_U8 *)mMem->pointer() + header->nOffset,
-                header->pBuffer + header->nOffset,
-                header->nFilledLen);
+        // check that the component returned a proper range
+        sp<ABuffer> codec = getBuffer(header, false /* backup */, true /* limit */);
+
+        memcpy((OMX_U8 *)mMem->pointer() + header->nOffset, codec->data(), codec->size());
     }
 
     void CopyToOMX(const OMX_BUFFERHEADERTYPE *header) {
@@ -135,6 +137,25 @@
                 header->nFilledLen);
     }
 
+    // return either the codec or the backup buffer
+    sp<ABuffer> getBuffer(const OMX_BUFFERHEADERTYPE *header, bool backup, bool limit) {
+        sp<ABuffer> buf;
+        if (backup && mMem != NULL) {
+            buf = new ABuffer(mMem->pointer(), mMem->size());
+        } else {
+            buf = new ABuffer(header->pBuffer, header->nAllocLen);
+        }
+        if (limit) {
+            if (header->nOffset + header->nFilledLen > header->nOffset
+                    && header->nOffset + header->nFilledLen <= header->nAllocLen) {
+                buf->setRange(header->nOffset, header->nFilledLen);
+            } else {
+                buf->setRange(0, 0);
+            }
+        }
+        return buf;
+    }
+
     void setGraphicBuffer(const sp<GraphicBuffer> &graphicBuffer) {
         mGraphicBuffer = graphicBuffer;
     }
@@ -180,6 +201,8 @@
     mNumPortBuffers[1] = 0;
     mDebugLevelBumpPendingBuffers[0] = 0;
     mDebugLevelBumpPendingBuffers[1] = 0;
+    mMetadataType[0] = kMetadataBufferTypeInvalid;
+    mMetadataType[1] = kMetadataBufferTypeInvalid;
 }
 
 OMXNodeInstance::~OMXNodeInstance() {
@@ -218,13 +241,15 @@
     return mNodeID;
 }
 
-static status_t StatusFromOMXError(OMX_ERRORTYPE err) {
+status_t StatusFromOMXError(OMX_ERRORTYPE err) {
     switch (err) {
         case OMX_ErrorNone:
             return OK;
         case OMX_ErrorUnsupportedSetting:
         case OMX_ErrorUnsupportedIndex:
             return ERROR_UNSUPPORTED;
+        case OMX_ErrorInsufficientResources:
+            return NO_MEMORY;
         default:
             return UNKNOWN_ERROR;
     }
@@ -484,63 +509,73 @@
 }
 
 status_t OMXNodeInstance::storeMetaDataInBuffers(
-        OMX_U32 portIndex,
-        OMX_BOOL enable) {
+        OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
     Mutex::Autolock autolock(mLock);
     CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u en:%d", portString(portIndex), portIndex, enable);
-    return storeMetaDataInBuffers_l(
-            portIndex, enable,
-            OMX_FALSE /* useGraphicBuffer */, NULL /* usingGraphicBufferInMetadata */);
+    return storeMetaDataInBuffers_l(portIndex, enable, type);
 }
 
 status_t OMXNodeInstance::storeMetaDataInBuffers_l(
-        OMX_U32 portIndex,
-        OMX_BOOL enable,
-        OMX_BOOL useGraphicBuffer,
-        OMX_BOOL *usingGraphicBufferInMetadata) {
+        OMX_U32 portIndex, OMX_BOOL enable, MetadataBufferType *type) {
+    if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
+        return BAD_VALUE;
+    }
+
     OMX_INDEXTYPE index;
     OMX_STRING name = const_cast<OMX_STRING>(
             "OMX.google.android.index.storeMetaDataInBuffers");
 
-    OMX_STRING graphicBufferName = const_cast<OMX_STRING>(
-            "OMX.google.android.index.storeGraphicBufferInMetaData");
-    if (usingGraphicBufferInMetadata == NULL) {
-        usingGraphicBufferInMetadata = &useGraphicBuffer;
-    }
+    OMX_STRING nativeBufferName = const_cast<OMX_STRING>(
+            "OMX.google.android.index.storeANWBufferInMetadata");
+    MetadataBufferType negotiatedType;
 
-    OMX_ERRORTYPE err =
-        (useGraphicBuffer && portIndex == kPortIndexInput)
-                ? OMX_GetExtensionIndex(mHandle, graphicBufferName, &index)
-                : OMX_ErrorBadParameter;
-    if (err == OMX_ErrorNone) {
-        *usingGraphicBufferInMetadata = OMX_TRUE;
-        name = graphicBufferName;
-    } else {
-        err = OMX_GetExtensionIndex(mHandle, name, &index);
-    }
+    StoreMetaDataInBuffersParams params;
+    InitOMXParams(&params);
+    params.nPortIndex = portIndex;
+    params.bStoreMetaData = enable;
 
+    OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, nativeBufferName, &index);
     OMX_ERRORTYPE xerr = err;
     if (err == OMX_ErrorNone) {
-        StoreMetaDataInBuffersParams params;
-        InitOMXParams(&params);
-        params.nPortIndex = portIndex;
-        params.bStoreMetaData = enable;
-
         err = OMX_SetParameter(mHandle, index, &params);
+        if (err == OMX_ErrorNone) {
+            name = nativeBufferName; // set name for debugging
+            negotiatedType = kMetadataBufferTypeANWBuffer;
+        }
+    }
+    if (err != OMX_ErrorNone) {
+        err = OMX_GetExtensionIndex(mHandle, name, &index);
+        xerr = err;
+        if (err == OMX_ErrorNone) {
+            negotiatedType = kMetadataBufferTypeGrallocSource;
+            err = OMX_SetParameter(mHandle, index, &params);
+        }
     }
 
     // don't log loud error if component does not support metadata mode on the output
     if (err != OMX_ErrorNone) {
-        *usingGraphicBufferInMetadata = OMX_FALSE;
         if (err == OMX_ErrorUnsupportedIndex && portIndex == kPortIndexOutput) {
             CLOGW("component does not support metadata mode; using fallback");
         } else if (xerr != OMX_ErrorNone) {
             CLOG_ERROR(getExtensionIndex, xerr, "%s", name);
         } else {
-            CLOG_ERROR(setParameter, err, "%s(%#x): %s:%u en=%d GB=%d", name, index,
-                    portString(portIndex), portIndex, enable, useGraphicBuffer);
+            CLOG_ERROR(setParameter, err, "%s(%#x): %s:%u en=%d type=%d", name, index,
+                    portString(portIndex), portIndex, enable, negotiatedType);
         }
+        negotiatedType = mMetadataType[portIndex];
+    } else {
+        if (!enable) {
+            negotiatedType = kMetadataBufferTypeInvalid;
+        }
+        mMetadataType[portIndex] = negotiatedType;
     }
+    CLOG_CONFIG(storeMetaDataInBuffers, "%s:%u negotiated %s:%d",
+            portString(portIndex), portIndex, asString(negotiatedType), negotiatedType);
+
+    if (type != NULL) {
+        *type = negotiatedType;
+    }
+
     return StatusFromOMXError(err);
 }
 
@@ -618,8 +653,11 @@
 
 status_t OMXNodeInstance::useBuffer(
         OMX_U32 portIndex, const sp<IMemory> &params,
-        OMX::buffer_id *buffer) {
+        OMX::buffer_id *buffer, OMX_U32 allottedSize) {
     Mutex::Autolock autoLock(mLock);
+    if (allottedSize > params->size()) {
+        return BAD_VALUE;
+    }
 
     BufferMeta *buffer_meta = new BufferMeta(params);
 
@@ -627,10 +665,11 @@
 
     OMX_ERRORTYPE err = OMX_UseBuffer(
             mHandle, &header, portIndex, buffer_meta,
-            params->size(), static_cast<OMX_U8 *>(params->pointer()));
+            allottedSize, static_cast<OMX_U8 *>(params->pointer()));
 
     if (err != OMX_ErrorNone) {
-        CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(portIndex, params->size(), params->pointer()));
+        CLOG_ERROR(useBuffer, err, SIMPLE_BUFFER(
+                portIndex, (size_t)allottedSize, params->pointer()));
 
         delete buffer_meta;
         buffer_meta = NULL;
@@ -652,7 +691,7 @@
     }
 
     CLOG_BUFFER(useBuffer, NEW_BUFFER_FMT(
-            *buffer, portIndex, "%zu@%p", params->size(), params->pointer()));
+            *buffer, portIndex, "%u(%zu)@%p", allottedSize, params->size(), params->pointer()));
     return OK;
 }
 
@@ -768,38 +807,60 @@
     return OK;
 }
 
+status_t OMXNodeInstance::updateGraphicBufferInMeta_l(
+        OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
+        OMX::buffer_id buffer, OMX_BUFFERHEADERTYPE *header) {
+    if (portIndex != kPortIndexInput && portIndex != kPortIndexOutput) {
+        return BAD_VALUE;
+    }
+
+    BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
+    bufferMeta->setGraphicBuffer(graphicBuffer);
+    if (mMetadataType[portIndex] == kMetadataBufferTypeGrallocSource
+            && header->nAllocLen >= sizeof(VideoGrallocMetadata)) {
+        VideoGrallocMetadata &metadata = *(VideoGrallocMetadata *)(header->pBuffer);
+        metadata.eType = kMetadataBufferTypeGrallocSource;
+        metadata.pHandle = graphicBuffer == NULL ? NULL : graphicBuffer->handle;
+    } else if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer
+            && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
+        VideoNativeMetadata &metadata = *(VideoNativeMetadata *)(header->pBuffer);
+        metadata.eType = kMetadataBufferTypeANWBuffer;
+        metadata.pBuffer = graphicBuffer == NULL ? NULL : graphicBuffer->getNativeBuffer();
+        metadata.nFenceFd = -1;
+    } else {
+        CLOG_BUFFER(updateGraphicBufferInMeta, "%s:%u, %#x bad type (%d) or size (%u)",
+            portString(portIndex), portIndex, buffer, mMetadataType[portIndex], header->nAllocLen);
+        return BAD_VALUE;
+    }
+
+    CLOG_BUFFER(updateGraphicBufferInMeta, "%s:%u, %#x := %p",
+            portString(portIndex), portIndex, buffer,
+            graphicBuffer == NULL ? NULL : graphicBuffer->handle);
+    return OK;
+}
+
 status_t OMXNodeInstance::updateGraphicBufferInMeta(
         OMX_U32 portIndex, const sp<GraphicBuffer>& graphicBuffer,
         OMX::buffer_id buffer) {
     Mutex::Autolock autoLock(mLock);
-
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
-    VideoDecoderOutputMetaData *metadata =
-        (VideoDecoderOutputMetaData *)(header->pBuffer);
-    BufferMeta *bufferMeta = (BufferMeta *)(header->pAppPrivate);
-    bufferMeta->setGraphicBuffer(graphicBuffer);
-    metadata->eType = kMetadataBufferTypeGrallocSource;
-    metadata->pHandle = graphicBuffer->handle;
-    CLOG_BUFFER(updateGraphicBufferInMeta, "%s:%u, %#x := %p",
-            portString(portIndex), portIndex, buffer, graphicBuffer->handle);
-    return OK;
+    return updateGraphicBufferInMeta_l(portIndex, graphicBuffer, buffer, header);
 }
 
-status_t OMXNodeInstance::createInputSurface(
-        OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer) {
-    Mutex::Autolock autolock(mLock);
+status_t OMXNodeInstance::createGraphicBufferSource(
+        OMX_U32 portIndex, sp<IGraphicBufferConsumer> bufferConsumer, MetadataBufferType *type) {
     status_t err;
 
     const sp<GraphicBufferSource>& surfaceCheck = getGraphicBufferSource();
     if (surfaceCheck != NULL) {
+        if (portIndex < NELEM(mMetadataType) && type != NULL) {
+            *type = mMetadataType[portIndex];
+        }
         return ALREADY_EXISTS;
     }
 
-    // Input buffers will hold meta-data (gralloc references).
-    OMX_BOOL usingGraphicBuffer = OMX_FALSE;
-    err = storeMetaDataInBuffers_l(
-            portIndex, OMX_TRUE,
-            OMX_TRUE /* useGraphicBuffer */, &usingGraphicBuffer);
+    // Input buffers will hold meta-data (ANativeWindowBuffer references).
+    err = storeMetaDataInBuffers_l(portIndex, OMX_TRUE, type);
     if (err != OK) {
         return err;
     }
@@ -825,19 +886,75 @@
         return INVALID_OPERATION;
     }
 
-    GraphicBufferSource* bufferSource = new GraphicBufferSource(
-            this, def.format.video.nFrameWidth, def.format.video.nFrameHeight,
-            def.nBufferCountActual, usingGraphicBuffer);
+    uint32_t usageBits;
+    oerr = OMX_GetParameter(
+            mHandle, (OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits, &usageBits);
+    if (oerr != OMX_ErrorNone) {
+        usageBits = 0;
+    }
+
+    sp<GraphicBufferSource> bufferSource = new GraphicBufferSource(this,
+            def.format.video.nFrameWidth,
+            def.format.video.nFrameHeight,
+            def.nBufferCountActual,
+            usageBits,
+            bufferConsumer);
+
     if ((err = bufferSource->initCheck()) != OK) {
-        delete bufferSource;
         return err;
     }
     setGraphicBufferSource(bufferSource);
 
-    *bufferProducer = bufferSource->getIGraphicBufferProducer();
     return OK;
 }
 
+status_t OMXNodeInstance::createInputSurface(
+        OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
+    Mutex::Autolock autolock(mLock);
+    status_t err = createGraphicBufferSource(portIndex, NULL /* bufferConsumer */, type);
+
+    if (err != OK) {
+        return err;
+    }
+
+    *bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
+    return OK;
+}
+
+//static
+status_t OMXNodeInstance::createPersistentInputSurface(
+        sp<IGraphicBufferProducer> *bufferProducer,
+        sp<IGraphicBufferConsumer> *bufferConsumer) {
+    String8 name("GraphicBufferSource");
+
+    sp<IGraphicBufferProducer> producer;
+    sp<IGraphicBufferConsumer> consumer;
+    BufferQueue::createBufferQueue(&producer, &consumer);
+    consumer->setConsumerName(name);
+    consumer->setConsumerUsageBits(GRALLOC_USAGE_HW_VIDEO_ENCODER);
+
+    sp<BufferQueue::ProxyConsumerListener> proxy =
+        new BufferQueue::ProxyConsumerListener(NULL);
+    status_t err = consumer->consumerConnect(proxy, false);
+    if (err != NO_ERROR) {
+        ALOGE("Error connecting to BufferQueue: %s (%d)",
+                strerror(-err), err);
+        return err;
+    }
+
+    *bufferProducer = producer;
+    *bufferConsumer = consumer;
+
+    return OK;
+}
+
+status_t OMXNodeInstance::setInputSurface(
+        OMX_U32 portIndex, const sp<IGraphicBufferConsumer> &bufferConsumer,
+        MetadataBufferType *type) {
+    Mutex::Autolock autolock(mLock);
+    return createGraphicBufferSource(portIndex, bufferConsumer, type);
+}
+
 status_t OMXNodeInstance::signalEndOfInputStream() {
     // For non-Surface input, the MediaCodec should convert the call to a
     // pair of requests (dequeue input buffer, queue input buffer with EOS
@@ -890,19 +1007,21 @@
 
 status_t OMXNodeInstance::allocateBufferWithBackup(
         OMX_U32 portIndex, const sp<IMemory> &params,
-        OMX::buffer_id *buffer) {
+        OMX::buffer_id *buffer, OMX_U32 allottedSize) {
     Mutex::Autolock autoLock(mLock);
+    if (allottedSize > params->size()) {
+        return BAD_VALUE;
+    }
 
     BufferMeta *buffer_meta = new BufferMeta(params, true);
 
     OMX_BUFFERHEADERTYPE *header;
 
     OMX_ERRORTYPE err = OMX_AllocateBuffer(
-            mHandle, &header, portIndex, buffer_meta, params->size());
-
+            mHandle, &header, portIndex, buffer_meta, allottedSize);
     if (err != OMX_ErrorNone) {
         CLOG_ERROR(allocateBufferWithBackup, err,
-                SIMPLE_BUFFER(portIndex, params->size(), params->pointer()));
+                SIMPLE_BUFFER(portIndex, (size_t)allottedSize, params->pointer()));
         delete buffer_meta;
         buffer_meta = NULL;
 
@@ -922,8 +1041,8 @@
         bufferSource->addCodecBuffer(header);
     }
 
-    CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %p",
-            params->size(), params->pointer(), header->pBuffer));
+    CLOG_BUFFER(allocateBufferWithBackup, NEW_BUFFER_FMT(*buffer, portIndex, "%zu@%p :> %u@%p",
+            params->size(), params->pointer(), allottedSize, header->pBuffer));
 
     return OK;
 }
@@ -948,7 +1067,7 @@
     return StatusFromOMXError(err);
 }
 
-status_t OMXNodeInstance::fillBuffer(OMX::buffer_id buffer) {
+status_t OMXNodeInstance::fillBuffer(OMX::buffer_id buffer, int fenceFd) {
     Mutex::Autolock autoLock(mLock);
 
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
@@ -956,15 +1075,22 @@
     header->nOffset = 0;
     header->nFlags = 0;
 
+    // meta now owns fenceFd
+    status_t res = storeFenceInMeta_l(header, fenceFd, kPortIndexOutput);
+    if (res != OK) {
+        CLOG_ERROR(fillBuffer::storeFenceInMeta, res, EMPTY_BUFFER(buffer, header, fenceFd));
+        return res;
+    }
+
     {
         Mutex::Autolock _l(mDebugLock);
         mOutputBuffersWithCodec.add(header);
-        CLOG_BUMPED_BUFFER(fillBuffer, WITH_STATS(EMPTY_BUFFER(buffer, header)));
+        CLOG_BUMPED_BUFFER(fillBuffer, WITH_STATS(EMPTY_BUFFER(buffer, header, fenceFd)));
     }
 
     OMX_ERRORTYPE err = OMX_FillThisBuffer(mHandle, header);
     if (err != OMX_ErrorNone) {
-        CLOG_ERROR(fillBuffer, err, EMPTY_BUFFER(buffer, header));
+        CLOG_ERROR(fillBuffer, err, EMPTY_BUFFER(buffer, header, fenceFd));
         Mutex::Autolock _l(mDebugLock);
         mOutputBuffersWithCodec.remove(header);
     }
@@ -974,24 +1100,48 @@
 status_t OMXNodeInstance::emptyBuffer(
         OMX::buffer_id buffer,
         OMX_U32 rangeOffset, OMX_U32 rangeLength,
-        OMX_U32 flags, OMX_TICKS timestamp) {
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
     Mutex::Autolock autoLock(mLock);
 
     OMX_BUFFERHEADERTYPE *header = findBufferHeader(buffer);
-    // rangeLength and rangeOffset must be a subset of the allocated data in the buffer.
-    // corner case: we permit rangeOffset == end-of-buffer with rangeLength == 0.
-    if (rangeOffset > header->nAllocLen
-            || rangeLength > header->nAllocLen - rangeOffset) {
-        return BAD_VALUE;
-    }
-    header->nFilledLen = rangeLength;
-    header->nOffset = rangeOffset;
-
     BufferMeta *buffer_meta =
         static_cast<BufferMeta *>(header->pAppPrivate);
-    buffer_meta->CopyToOMX(header);
+    sp<ABuffer> backup = buffer_meta->getBuffer(header, true /* backup */, false /* limit */);
+    sp<ABuffer> codec = buffer_meta->getBuffer(header, false /* backup */, false /* limit */);
 
-    return emptyBuffer_l(header, flags, timestamp, (intptr_t)buffer);
+    // convert incoming ANW meta buffers if component is configured for gralloc metadata mode
+    // ignore rangeOffset in this case
+    if (mMetadataType[kPortIndexInput] == kMetadataBufferTypeGrallocSource
+            && backup->capacity() >= sizeof(VideoNativeMetadata)
+            && codec->capacity() >= sizeof(VideoGrallocMetadata)
+            && ((VideoNativeMetadata *)backup->base())->eType
+                    == kMetadataBufferTypeANWBuffer) {
+        VideoNativeMetadata &backupMeta = *(VideoNativeMetadata *)backup->base();
+        VideoGrallocMetadata &codecMeta = *(VideoGrallocMetadata *)codec->base();
+        CLOG_BUFFER(emptyBuffer, "converting ANWB %p to handle %p",
+                backupMeta.pBuffer, backupMeta.pBuffer->handle);
+        codecMeta.pHandle = backupMeta.pBuffer != NULL ? backupMeta.pBuffer->handle : NULL;
+        codecMeta.eType = kMetadataBufferTypeGrallocSource;
+        header->nFilledLen = rangeLength ? sizeof(codecMeta) : 0;
+        header->nOffset = 0;
+    } else {
+        // rangeLength and rangeOffset must be a subset of the allocated data in the buffer.
+        // corner case: we permit rangeOffset == end-of-buffer with rangeLength == 0.
+        if (rangeOffset > header->nAllocLen
+                || rangeLength > header->nAllocLen - rangeOffset) {
+            CLOG_ERROR(emptyBuffer, OMX_ErrorBadParameter, FULL_BUFFER(NULL, header, fenceFd));
+            if (fenceFd >= 0) {
+                ::close(fenceFd);
+            }
+            return BAD_VALUE;
+        }
+        header->nFilledLen = rangeLength;
+        header->nOffset = rangeOffset;
+
+        buffer_meta->CopyToOMX(header);
+    }
+
+    return emptyBuffer_l(header, flags, timestamp, (intptr_t)buffer, fenceFd);
 }
 
 // log queued buffer activity for the next few input and/or output frames
@@ -1018,11 +1168,62 @@
     }
 }
 
+status_t OMXNodeInstance::storeFenceInMeta_l(
+        OMX_BUFFERHEADERTYPE *header, int fenceFd, OMX_U32 portIndex) {
+    // propagate fence if component supports it; wait for it otherwise
+    OMX_U32 metaSize = portIndex == kPortIndexInput ? header->nFilledLen : header->nAllocLen;
+    if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer
+            && metaSize >= sizeof(VideoNativeMetadata)) {
+        VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)(header->pBuffer);
+        if (nativeMeta.nFenceFd >= 0) {
+            ALOGE("fence (%d) already exists in meta", nativeMeta.nFenceFd);
+            if (fenceFd >= 0) {
+                ::close(fenceFd);
+            }
+            return ALREADY_EXISTS;
+        }
+        nativeMeta.nFenceFd = fenceFd;
+    } else if (fenceFd >= 0) {
+        CLOG_BUFFER(storeFenceInMeta, "waiting for fence %d", fenceFd);
+        sp<Fence> fence = new Fence(fenceFd);
+        return fence->wait(IOMX::kFenceTimeoutMs);
+    }
+    return OK;
+}
+
+int OMXNodeInstance::retrieveFenceFromMeta_l(
+        OMX_BUFFERHEADERTYPE *header, OMX_U32 portIndex) {
+    OMX_U32 metaSize = portIndex == kPortIndexInput ? header->nAllocLen : header->nFilledLen;
+    int fenceFd = -1;
+    if (mMetadataType[portIndex] == kMetadataBufferTypeANWBuffer
+            && header->nAllocLen >= sizeof(VideoNativeMetadata)) {
+        VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)(header->pBuffer);
+        if (nativeMeta.eType == kMetadataBufferTypeANWBuffer) {
+            fenceFd = nativeMeta.nFenceFd;
+            nativeMeta.nFenceFd = -1;
+        }
+        if (metaSize < sizeof(nativeMeta) && fenceFd >= 0) {
+            CLOG_ERROR(foundFenceInEmptyMeta, BAD_VALUE, FULL_BUFFER(
+                    NULL, header, nativeMeta.nFenceFd));
+            fenceFd = -1;
+        }
+    }
+    return fenceFd;
+}
+
 status_t OMXNodeInstance::emptyBuffer_l(
-        OMX_BUFFERHEADERTYPE *header, OMX_U32 flags, OMX_TICKS timestamp, intptr_t debugAddr) {
+        OMX_BUFFERHEADERTYPE *header, OMX_U32 flags, OMX_TICKS timestamp,
+        intptr_t debugAddr, int fenceFd) {
     header->nFlags = flags;
     header->nTimeStamp = timestamp;
 
+    status_t res = storeFenceInMeta_l(header, fenceFd, kPortIndexInput);
+    if (res != OK) {
+        CLOG_ERROR(emptyBuffer::storeFenceInMeta, res, WITH_STATS(
+                FULL_BUFFER(debugAddr, header, fenceFd)));
+        return res;
+    }
+
     {
         Mutex::Autolock _l(mDebugLock);
         mInputBuffersWithCodec.add(header);
@@ -1032,11 +1233,11 @@
             bumpDebugLevel_l(2 /* numInputBuffers */, 0 /* numOutputBuffers */);
         }
 
-        CLOG_BUMPED_BUFFER(emptyBuffer, WITH_STATS(FULL_BUFFER(debugAddr, header)));
+        CLOG_BUMPED_BUFFER(emptyBuffer, WITH_STATS(FULL_BUFFER(debugAddr, header, fenceFd)));
     }
 
     OMX_ERRORTYPE err = OMX_EmptyThisBuffer(mHandle, header);
-    CLOG_IF_ERROR(emptyBuffer, err, FULL_BUFFER(debugAddr, header));
+    CLOG_IF_ERROR(emptyBuffer, err, FULL_BUFFER(debugAddr, header, fenceFd));
 
     {
         Mutex::Autolock _l(mDebugLock);
@@ -1051,16 +1252,21 @@
 }
 
 // like emptyBuffer, but the data is already in header->pBuffer
-status_t OMXNodeInstance::emptyDirectBuffer(
-        OMX_BUFFERHEADERTYPE *header,
-        OMX_U32 rangeOffset, OMX_U32 rangeLength,
-        OMX_U32 flags, OMX_TICKS timestamp) {
+status_t OMXNodeInstance::emptyGraphicBuffer(
+        OMX_BUFFERHEADERTYPE *header, const sp<GraphicBuffer> &graphicBuffer,
+        OMX_U32 flags, OMX_TICKS timestamp, int fenceFd) {
     Mutex::Autolock autoLock(mLock);
+    OMX::buffer_id buffer = findBufferID(header);
+    status_t err = updateGraphicBufferInMeta_l(kPortIndexInput, graphicBuffer, buffer, header);
+    if (err != OK) {
+        CLOG_ERROR(emptyGraphicBuffer, err, FULL_BUFFER(
+                (intptr_t)header->pBuffer, header, fenceFd));
+        return err;
+    }
 
-    header->nFilledLen = rangeLength;
-    header->nOffset = rangeOffset;
-
-    return emptyBuffer_l(header, flags, timestamp, (intptr_t)header->pBuffer);
+    header->nOffset = 0;
+    header->nFilledLen = graphicBuffer == NULL ? 0 : header->nAllocLen;
+    return emptyBuffer_l(header, flags, timestamp, (intptr_t)header->pBuffer, fenceFd);
 }
 
 status_t OMXNodeInstance::getExtensionIndex(
@@ -1079,6 +1285,7 @@
         case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
             return "REPEAT_PREVIOUS_FRAME_DELAY";
         case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP: return "MAX_TIMESTAMP_GAP";
+        case IOMX::INTERNAL_OPTION_MAX_FPS:           return "MAX_FPS";
         case IOMX::INTERNAL_OPTION_START_TIME:        return "START_TIME";
         case IOMX::INTERNAL_OPTION_TIME_LAPSE:        return "TIME_LAPSE";
         default:                                      return def;
@@ -1096,6 +1303,7 @@
         case IOMX::INTERNAL_OPTION_SUSPEND:
         case IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY:
         case IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP:
+        case IOMX::INTERNAL_OPTION_MAX_FPS:
         case IOMX::INTERNAL_OPTION_START_TIME:
         case IOMX::INTERNAL_OPTION_TIME_LAPSE:
         {
@@ -1133,6 +1341,14 @@
                 int64_t maxGapUs = *(int64_t *)data;
                 CLOG_CONFIG(setInternalOption, "gapUs=%lld", (long long)maxGapUs);
                 return bufferSource->setMaxTimestampGapUs(maxGapUs);
+            } else if (type == IOMX::INTERNAL_OPTION_MAX_FPS) {
+                if (size != sizeof(float)) {
+                    return INVALID_OPERATION;
+                }
+
+                float maxFps = *(float *)data;
+                CLOG_CONFIG(setInternalOption, "maxFps=%f", maxFps);
+                return bufferSource->setMaxFps(maxFps);
             } else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
                 if (size != sizeof(int64_t)) {
                     return INVALID_OPERATION;
@@ -1162,7 +1378,7 @@
     }
 }
 
-void OMXNodeInstance::onMessage(const omx_message &msg) {
+bool OMXNodeInstance::handleMessage(omx_message &msg) {
     const sp<GraphicBufferSource>& bufferSource(getGraphicBufferSource());
 
     if (msg.type == omx_message::FILL_BUFFER_DONE) {
@@ -1174,7 +1390,8 @@
             mOutputBuffersWithCodec.remove(buffer);
 
             CLOG_BUMPED_BUFFER(
-                    FBD, WITH_STATS(FULL_BUFFER(msg.u.extended_buffer_data.buffer, buffer)));
+                    FBD, WITH_STATS(FULL_BUFFER(
+                            msg.u.extended_buffer_data.buffer, buffer, msg.fenceFd)));
 
             unbumpDebugLevel_l(kPortIndexOutput);
         }
@@ -1182,16 +1399,18 @@
         BufferMeta *buffer_meta =
             static_cast<BufferMeta *>(buffer->pAppPrivate);
 
+        if (buffer->nOffset + buffer->nFilledLen < buffer->nOffset
+                || buffer->nOffset + buffer->nFilledLen > buffer->nAllocLen) {
+            CLOG_ERROR(onFillBufferDone, OMX_ErrorBadParameter,
+                    FULL_BUFFER(NULL, buffer, msg.fenceFd));
+        }
         buffer_meta->CopyFromOMX(buffer);
 
         if (bufferSource != NULL) {
             // fix up the buffer info (especially timestamp) if needed
             bufferSource->codecBufferFilled(buffer);
 
-            omx_message newMsg = msg;
-            newMsg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
-            mObserver->onMessage(newMsg);
-            return;
+            msg.u.extended_buffer_data.timestamp = buffer->nTimeStamp;
         }
     } else if (msg.type == omx_message::EMPTY_BUFFER_DONE) {
         OMX_BUFFERHEADERTYPE *buffer =
@@ -1202,7 +1421,7 @@
             mInputBuffersWithCodec.remove(buffer);
 
             CLOG_BUMPED_BUFFER(
-                    EBD, WITH_STATS(EMPTY_BUFFER(msg.u.buffer_data.buffer, buffer)));
+                    EBD, WITH_STATS(EMPTY_BUFFER(msg.u.buffer_data.buffer, buffer, msg.fenceFd)));
         }
 
         if (bufferSource != NULL) {
@@ -1211,12 +1430,26 @@
             // Don't dispatch a message back to ACodec, since it doesn't
             // know that anyone asked to have the buffer emptied and will
             // be very confused.
-            bufferSource->codecBufferEmptied(buffer);
-            return;
+            bufferSource->codecBufferEmptied(buffer, msg.fenceFd);
+            return true;
         }
     }
 
-    mObserver->onMessage(msg);
+    return false;
+}
+
+void OMXNodeInstance::onMessages(std::list<omx_message> &messages) {
+    for (std::list<omx_message>::iterator it = messages.begin(); it != messages.end(); ) {
+        if (handleMessage(*it)) {
+            messages.erase(it++);
+        } else {
+            ++it;
+        }
+    }
+
+    if (!messages.empty()) {
+        mObserver->onMessages(messages);
+    }
 }
 
 void OMXNodeInstance::onObserverDied(OMXMaster *master) {
@@ -1306,8 +1539,9 @@
     if (instance->mDying) {
         return OMX_ErrorNone;
     }
+    int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
     return instance->owner()->OnEmptyBufferDone(instance->nodeID(),
-            instance->findBufferID(pBuffer), pBuffer);
+            instance->findBufferID(pBuffer), pBuffer, fenceFd);
 }
 
 // static
@@ -1319,8 +1553,9 @@
     if (instance->mDying) {
         return OMX_ErrorNone;
     }
+    int fenceFd = instance->retrieveFenceFromMeta_l(pBuffer, kPortIndexOutput);
     return instance->owner()->OnFillBufferDone(instance->nodeID(),
-            instance->findBufferID(pBuffer), pBuffer);
+            instance->findBufferID(pBuffer), pBuffer, fenceFd);
 }
 
 void OMXNodeInstance::addActiveBuffer(OMX_U32 portIndex, OMX::buffer_id id) {
@@ -1381,7 +1616,12 @@
         return NULL;
     }
     Mutex::Autolock autoLock(mBufferIDLock);
-    return mBufferIDToBufferHeader.valueFor(buffer);
+    ssize_t index = mBufferIDToBufferHeader.indexOfKey(buffer);
+    if (index < 0) {
+        CLOGW("findBufferHeader: buffer %u not found", buffer);
+        return NULL;
+    }
+    return mBufferIDToBufferHeader.valueAt(index);
 }
 
 OMX::buffer_id OMXNodeInstance::findBufferID(OMX_BUFFERHEADERTYPE *bufferHeader) {
@@ -1389,7 +1629,12 @@
         return 0;
     }
     Mutex::Autolock autoLock(mBufferIDLock);
-    return mBufferHeaderToBufferID.valueFor(bufferHeader);
+    ssize_t index = mBufferHeaderToBufferID.indexOfKey(bufferHeader);
+    if (index < 0) {
+        CLOGW("findBufferID: bufferHeader %p not found", bufferHeader);
+        return 0;
+    }
+    return mBufferHeaderToBufferID.valueAt(index);
 }
 
 void OMXNodeInstance::invalidateBufferID(OMX::buffer_id buffer) {
@@ -1397,8 +1642,13 @@
         return;
     }
     Mutex::Autolock autoLock(mBufferIDLock);
-    mBufferHeaderToBufferID.removeItem(mBufferIDToBufferHeader.valueFor(buffer));
-    mBufferIDToBufferHeader.removeItem(buffer);
+    ssize_t index = mBufferIDToBufferHeader.indexOfKey(buffer);
+    if (index < 0) {
+        CLOGW("invalidateBufferID: buffer %u not found", buffer);
+        return;
+    }
+    mBufferHeaderToBufferID.removeItem(mBufferIDToBufferHeader.valueAt(index));
+    mBufferIDToBufferHeader.removeItemsAt(index);
 }
 
 }  // namespace android
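
Illustrative sketch, not from the patch: the fence plumbing added above follows one ownership rule: once a fenceFd is handed to emptyBuffer/fillBuffer, the node either stores it in VideoNativeMetadata.nFenceFd, waits on it (Fence::wait with IOMX::kFenceTimeoutMs), or closes it. A hedged caller-side sketch; acquireInputFence() is hypothetical, the emptyBuffer signature is the one introduced above.

    int fenceFd = acquireInputFence();   // -1 when there is nothing to wait for
    status_t err = omx->emptyBuffer(
            node, bufferId, 0 /* offset */, filledLen,
            OMX_BUFFERFLAG_ENDOFFRAME, timeUs, fenceFd);
    // After this call the fd is owned by the node side (stored in the meta
    // buffer, waited on, or closed); the caller should not close it again.
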
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index 7f99dcd..e6a0c49 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -58,7 +58,7 @@
         OMX_COMMANDTYPE cmd, OMX_U32 param, OMX_PTR data) {
     CHECK(data == NULL);
 
-    sp<AMessage> msg = new AMessage(kWhatSendCommand, mHandler->id());
+    sp<AMessage> msg = new AMessage(kWhatSendCommand, mHandler);
     msg->setInt32("cmd", cmd);
     msg->setInt32("param", param);
     msg->post();
@@ -307,7 +307,7 @@
 
 OMX_ERRORTYPE SimpleSoftOMXComponent::emptyThisBuffer(
         OMX_BUFFERHEADERTYPE *buffer) {
-    sp<AMessage> msg = new AMessage(kWhatEmptyThisBuffer, mHandler->id());
+    sp<AMessage> msg = new AMessage(kWhatEmptyThisBuffer, mHandler);
     msg->setPointer("header", buffer);
     msg->post();
 
@@ -316,7 +316,7 @@
 
 OMX_ERRORTYPE SimpleSoftOMXComponent::fillThisBuffer(
         OMX_BUFFERHEADERTYPE *buffer) {
-    sp<AMessage> msg = new AMessage(kWhatFillThisBuffer, mHandler->id());
+    sp<AMessage> msg = new AMessage(kWhatFillThisBuffer, mHandler);
     msg->setPointer("header", buffer);
     msg->post();
 
@@ -505,7 +505,15 @@
     CHECK_LT(portIndex, mPorts.size());
 
     PortInfo *port = &mPorts.editItemAt(portIndex);
-    CHECK_EQ((int)port->mTransition, (int)PortInfo::NONE);
+    // Ideally, the port should not be in a transitioning state when flushing.
+    // However, in an error-handling case (e.g., the client cannot allocate
+    // buffers when it tries to re-enable the port), the port will be stuck in
+    // ENABLING. The client will then transition the component from Executing
+    // to Idle, which leads to flushing ports. At this time, it is OK to notify
+    // the client of the error and still clear all buffers on the port.
+    if (port->mTransition != PortInfo::NONE) {
+        notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+    }
 
     for (size_t i = 0; i < port->mBuffers.size(); ++i) {
         BufferInfo *buffer = &port->mBuffers.editItemAt(i);
@@ -598,7 +606,7 @@
 
         if (port->mTransition == PortInfo::DISABLING) {
             if (port->mBuffers.empty()) {
-                ALOGV("Port %d now disabled.", i);
+                ALOGV("Port %zu now disabled.", i);
 
                 port->mTransition = PortInfo::NONE;
                 notify(OMX_EventCmdComplete, OMX_CommandPortDisable, i, NULL);
@@ -607,7 +615,7 @@
             }
         } else if (port->mTransition == PortInfo::ENABLING) {
             if (port->mDef.bPopulated == OMX_TRUE) {
-                ALOGV("Port %d now enabled.", i);
+                ALOGV("Port %zu now enabled.", i);
 
                 port->mTransition = PortInfo::NONE;
                 port->mDef.bEnabled = OMX_TRUE;
@@ -628,14 +636,14 @@
     info->mTransition = PortInfo::NONE;
 }
 
-void SimpleSoftOMXComponent::onQueueFilled(OMX_U32 portIndex) {
+void SimpleSoftOMXComponent::onQueueFilled(OMX_U32 portIndex __unused) {
 }
 
-void SimpleSoftOMXComponent::onPortFlushCompleted(OMX_U32 portIndex) {
+void SimpleSoftOMXComponent::onPortFlushCompleted(OMX_U32 portIndex __unused) {
 }
 
 void SimpleSoftOMXComponent::onPortEnableCompleted(
-        OMX_U32 portIndex, bool enabled) {
+        OMX_U32 portIndex __unused, bool enabled __unused) {
 }
 
 List<SimpleSoftOMXComponent::BufferInfo *> &
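
Illustrative sketch, not from the patch: the AMessage changes in this file, and in the rtsp files further down, track the ALooper API change where a message is constructed with the handler itself rather than with handler->id(). New style, using the same names as the code above:

    sp<AMessage> msg = new AMessage(kWhatEmptyThisBuffer, mHandler);
    msg->setPointer("header", buffer);
    msg->post();
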
diff --git a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
index d4d6217..8ea7a6e 100644
--- a/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoEncoderOMXComponent.cpp
@@ -34,6 +34,8 @@
 #include <ui/GraphicBuffer.h>
 #include <ui/GraphicBufferMapper.h>
 
+#include <OMX_IndexExt.h>
+
 namespace android {
 
 const static OMX_COLOR_FORMATTYPE kSupportedColorFormats[] = {
@@ -155,7 +157,7 @@
     uint32_t rawBufferSize =
         inDef->format.video.nStride * inDef->format.video.nSliceHeight * 3 / 2;
     if (inDef->format.video.eColorFormat == OMX_COLOR_FormatAndroidOpaque) {
-        inDef->nBufferSize = 4 + max(sizeof(buffer_handle_t), sizeof(GraphicBuffer *));
+        inDef->nBufferSize = max(sizeof(VideoNativeMetadata), sizeof(VideoGrallocMetadata));
     } else {
         inDef->nBufferSize = rawBufferSize;
     }
@@ -293,7 +295,7 @@
 
 OMX_ERRORTYPE SoftVideoEncoderOMXComponent::internalGetParameter(
         OMX_INDEXTYPE index, OMX_PTR param) {
-    switch (index) {
+    switch ((int)index) {
         case OMX_IndexParamVideoErrorCorrection:
         {
             return OMX_ErrorNotImplemented;
@@ -343,6 +345,13 @@
             return OMX_ErrorNone;
         }
 
+        case OMX_IndexParamConsumerUsageBits:
+        {
+            OMX_U32 *usageBits = (OMX_U32 *)param;
+            *usageBits = GRALLOC_USAGE_SW_READ_OFTEN;
+            return OMX_ErrorNone;
+        }
+
         default:
             return SimpleSoftOMXComponent::internalGetParameter(index, param);
     }
@@ -482,8 +491,8 @@
     size_t dstVStride = height;
 
     MetadataBufferType bufferType = *(MetadataBufferType *)src;
-    bool usingGraphicBuffer = bufferType == kMetadataBufferTypeGraphicBuffer;
-    if (!usingGraphicBuffer && bufferType != kMetadataBufferTypeGrallocSource) {
+    bool usingANWBuffer = bufferType == kMetadataBufferTypeANWBuffer;
+    if (!usingANWBuffer && bufferType != kMetadataBufferTypeGrallocSource) {
         ALOGE("Unsupported metadata type (%d)", bufferType);
         return NULL;
     }
@@ -499,13 +508,14 @@
     int format;
     size_t srcStride;
     size_t srcVStride;
-    if (usingGraphicBuffer) {
-        if (srcSize < sizeof(OMX_U32) + sizeof(GraphicBuffer *)) {
-            ALOGE("Metadata is too small (%zu vs %zu)", srcSize, sizeof(OMX_U32) + sizeof(GraphicBuffer *));
+    if (usingANWBuffer) {
+        if (srcSize < sizeof(VideoNativeMetadata)) {
+            ALOGE("Metadata is too small (%zu vs %zu)", srcSize, sizeof(VideoNativeMetadata));
             return NULL;
         }
 
-        GraphicBuffer *buffer = *(GraphicBuffer **)(src + sizeof(OMX_U32));
+        VideoNativeMetadata &nativeMeta = *(VideoNativeMetadata *)src;
+        ANativeWindowBuffer *buffer = nativeMeta.pBuffer;
         handle = buffer->handle;
         format = buffer->format;
         srcStride = buffer->stride;
@@ -516,15 +526,26 @@
             // TODO do we need to support other formats?
             srcStride *= 4;
         }
+
+        if (nativeMeta.nFenceFd >= 0) {
+            sp<Fence> fence = new Fence(nativeMeta.nFenceFd);
+            nativeMeta.nFenceFd = -1;
+            status_t err = fence->wait(IOMX::kFenceTimeoutMs);
+            if (err != OK) {
+                ALOGE("Timed out waiting on input fence");
+                return NULL;
+            }
+        }
     } else {
         // TODO: remove this part.  Check if anyone uses this.
 
-        if (srcSize < sizeof(OMX_U32) + sizeof(buffer_handle_t)) {
-            ALOGE("Metadata is too small (%zu vs %zu)", srcSize, sizeof(OMX_U32) + sizeof(buffer_handle_t));
+        if (srcSize < sizeof(VideoGrallocMetadata)) {
+            ALOGE("Metadata is too small (%zu vs %zu)", srcSize, sizeof(VideoGrallocMetadata));
             return NULL;
         }
 
-        handle = *(buffer_handle_t *)(src + sizeof(OMX_U32));
+        VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)(src);
+        handle = grallocMeta.pHandle;
         // assume HAL_PIXEL_FORMAT_RGBA_8888
         // there is no way to get the src stride without the graphic buffer
         format = HAL_PIXEL_FORMAT_RGBA_8888;
@@ -606,7 +627,7 @@
 OMX_ERRORTYPE SoftVideoEncoderOMXComponent::getExtensionIndex(
         const char *name, OMX_INDEXTYPE *index) {
     if (!strcmp(name, "OMX.google.android.index.storeMetaDataInBuffers") ||
-        !strcmp(name, "OMX.google.android.index.storeGraphicBufferInMetaData")) {
+        !strcmp(name, "OMX.google.android.index.storeANWBufferInMetadata")) {
         *(int32_t*)index = kStoreMetaDataExtensionIndex;
         return OMX_ErrorNone;
     }
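
Illustrative sketch, not from the patch: with the change above, an opaque-format input buffer is interpreted by reading the leading MetadataBufferType and then casting to the matching struct. A minimal sketch of that dispatch, with src/srcSize as in the function above:

    buffer_handle_t handle = NULL;
    MetadataBufferType bufferType = *(MetadataBufferType *)src;
    if (bufferType == kMetadataBufferTypeANWBuffer
            && srcSize >= sizeof(VideoNativeMetadata)) {
        VideoNativeMetadata &meta = *(VideoNativeMetadata *)src;
        handle = meta.pBuffer->handle;   // wait on meta.nFenceFd first if it is >= 0
    } else if (bufferType == kMetadataBufferTypeGrallocSource
            && srcSize >= sizeof(VideoGrallocMetadata)) {
        handle = ((VideoGrallocMetadata *)src)->pHandle;
    }
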
diff --git a/media/libstagefright/omx/tests/Android.mk b/media/libstagefright/omx/tests/Android.mk
index 447b29e..02e97f1 100644
--- a/media/libstagefright/omx/tests/Android.mk
+++ b/media/libstagefright/omx/tests/Android.mk
@@ -11,7 +11,8 @@
 	$(TOP)/frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE := omx_tests
 
@@ -20,3 +21,24 @@
 LOCAL_32_BIT_ONLY := true
 
 include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := FrameDropper_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	FrameDropper_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libstagefright_omx \
+	libutils \
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/media/libstagefright/omx \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/omx/tests/FrameDropper_test.cpp b/media/libstagefright/omx/tests/FrameDropper_test.cpp
new file mode 100644
index 0000000..f966b5e
--- /dev/null
+++ b/media/libstagefright/omx/tests/FrameDropper_test.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameDropper_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "FrameDropper.h"
+#include <media/stagefright/foundation/ADebug.h>
+
+namespace android {
+
+struct TestFrame {
+  int64_t timeUs;
+  bool shouldDrop;
+};
+
+static const TestFrame testFrames20Fps[] = {
+    {1000000, false}, {1050000, false}, {1100000, false}, {1150000, false},
+    {1200000, false}, {1250000, false}, {1300000, false}, {1350000, false},
+    {1400000, false}, {1450000, false}, {1500000, false}, {1550000, false},
+    {1600000, false}, {1650000, false}, {1700000, false}, {1750000, false},
+    {1800000, false}, {1850000, false}, {1900000, false}, {1950000, false},
+};
+
+static const TestFrame testFrames30Fps[] = {
+    {1000000, false}, {1033333, false}, {1066667, false}, {1100000, false},
+    {1133333, false}, {1166667, false}, {1200000, false}, {1233333, false},
+    {1266667, false}, {1300000, false}, {1333333, false}, {1366667, false},
+    {1400000, false}, {1433333, false}, {1466667, false}, {1500000, false},
+    {1533333, false}, {1566667, false}, {1600000, false}, {1633333, false},
+};
+
+static const TestFrame testFrames40Fps[] = {
+    {1000000, false}, {1025000, true}, {1050000, false}, {1075000, false},
+    {1100000, false}, {1125000, true}, {1150000, false}, {1175000, false},
+    {1200000, false}, {1225000, true}, {1250000, false}, {1275000, false},
+    {1300000, false}, {1325000, true}, {1350000, false}, {1375000, false},
+    {1400000, false}, {1425000, true}, {1450000, false}, {1475000, false},
+};
+
+static const TestFrame testFrames60Fps[] = {
+    {1000000, false}, {1016667, true}, {1033333, false}, {1050000, true},
+    {1066667, false}, {1083333, true}, {1100000, false}, {1116667, true},
+    {1133333, false}, {1150000, true}, {1166667, false}, {1183333, true},
+    {1200000, false}, {1216667, true}, {1233333, false}, {1250000, true},
+    {1266667, false}, {1283333, true}, {1300000, false}, {1316667, true},
+};
+
+static const TestFrame testFramesVariableFps[] = {
+    // 40fps
+    {1000000, false}, {1025000, true}, {1050000, false}, {1075000, false},
+    {1100000, false}, {1125000, true}, {1150000, false}, {1175000, false},
+    {1200000, false}, {1225000, true}, {1250000, false}, {1275000, false},
+    {1300000, false}, {1325000, true}, {1350000, false}, {1375000, false},
+    {1400000, false}, {1425000, true}, {1450000, false}, {1475000, false},
+    // a timestamp jump plus switch to 20fps
+    {2000000, false}, {2050000, false}, {2100000, false}, {2150000, false},
+    {2200000, false}, {2250000, false}, {2300000, false}, {2350000, false},
+    {2400000, false}, {2450000, false}, {2500000, false}, {2550000, false},
+    {2600000, false}, {2650000, false}, {2700000, false}, {2750000, false},
+    {2800000, false}, {2850000, false}, {2900000, false}, {2950000, false},
+    // 60fps
+    {2966667, false}, {2983333, true}, {3000000, false}, {3016667, true},
+    {3033333, false}, {3050000, true}, {3066667, false}, {3083333, true},
+    {3100000, false}, {3116667, true}, {3133333, false}, {3150000, true},
+    {3166667, false}, {3183333, true}, {3200000, false}, {3216667, true},
+    {3233333, false}, {3250000, true}, {3266667, false}, {3283333, true},
+};
+
+static const int kMaxTestJitterUs = 2000;
+// return one of 1000, 0, -1000 as jitter.
+static int GetJitter(size_t i) {
+    return (1 - (i % 3)) * (kMaxTestJitterUs / 2);
+}
+
+class FrameDropperTest : public ::testing::Test {
+public:
+    FrameDropperTest() : mFrameDropper(new FrameDropper()) {
+        EXPECT_EQ(OK, mFrameDropper->setMaxFrameRate(30.0));
+    }
+
+protected:
+    void RunTest(const TestFrame* frames, size_t size) {
+        for (size_t i = 0; i < size; ++i) {
+            int jitter = GetJitter(i);
+            int64_t testTimeUs = frames[i].timeUs + jitter;
+            printf("time %lld, testTime %lld, jitter %d\n",
+                    (long long)frames[i].timeUs, (long long)testTimeUs, jitter);
+            EXPECT_EQ(frames[i].shouldDrop, mFrameDropper->shouldDrop(testTimeUs));
+        }
+    }
+
+    sp<FrameDropper> mFrameDropper;
+};
+
+TEST_F(FrameDropperTest, TestInvalidMaxFrameRate) {
+    EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(-1.0));
+    EXPECT_NE(OK, mFrameDropper->setMaxFrameRate(0));
+}
+
+TEST_F(FrameDropperTest, Test20Fps) {
+    RunTest(testFrames20Fps, ARRAY_SIZE(testFrames20Fps));
+}
+
+TEST_F(FrameDropperTest, Test30Fps) {
+    RunTest(testFrames30Fps, ARRAY_SIZE(testFrames30Fps));
+}
+
+TEST_F(FrameDropperTest, Test40Fps) {
+    RunTest(testFrames40Fps, ARRAY_SIZE(testFrames40Fps));
+}
+
+TEST_F(FrameDropperTest, Test60Fps) {
+    RunTest(testFrames60Fps, ARRAY_SIZE(testFrames60Fps));
+}
+
+TEST_F(FrameDropperTest, TestVariableFps) {
+    RunTest(testFramesVariableFps, ARRAY_SIZE(testFramesVariableFps));
+}
+
+} // namespace android
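
Illustrative sketch, not from the patch: a minimal usage of FrameDropper matching what the test exercises (setMaxFrameRate once, then shouldDrop per buffer timestamp); how GraphicBufferSource wires this up is outside this hunk.

    sp<FrameDropper> dropper = new FrameDropper();
    if (dropper->setMaxFrameRate(30.0) == OK) {
        // frameTimeUs is the buffer timestamp in microseconds
        if (dropper->shouldDrop(frameTimeUs)) {
            // skip queuing this frame to the encoder
        }
    }
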
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index 67ff145..644b6ed 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -64,9 +64,11 @@
     return mOMX != 0 ? OK : NO_INIT;
 }
 
-void Harness::onMessage(const omx_message &msg) {
+void Harness::onMessages(const std::list<omx_message> &messages) {
     Mutex::Autolock autoLock(mLock);
-    mMessageQueue.push_back(msg);
+    for (std::list<omx_message>::const_iterator it = messages.cbegin(); it != messages.cend(); ) {
+        mMessageQueue.push_back(*it++);
+    }
     mMessageAddedCondition.signal();
 }
 
@@ -193,7 +195,7 @@
         CHECK(buffer.mMemory != NULL);
 
         err = mOMX->allocateBufferWithBackup(
-                node, portIndex, buffer.mMemory, &buffer.mID);
+                node, portIndex, buffer.mMemory, &buffer.mID, buffer.mMemory->size());
         EXPECT_SUCCESS(err, "allocateBuffer");
 
         buffers->push(buffer);
diff --git a/media/libstagefright/omx/tests/OMXHarness.h b/media/libstagefright/omx/tests/OMXHarness.h
index bb8fd0c..1ebf3aa 100644
--- a/media/libstagefright/omx/tests/OMXHarness.h
+++ b/media/libstagefright/omx/tests/OMXHarness.h
@@ -74,7 +74,7 @@
 
     status_t testAll();
 
-    virtual void onMessage(const omx_message &msg);
+    virtual void onMessages(const std::list<omx_message> &messages);
 
 protected:
     virtual ~Harness();
diff --git a/media/libstagefright/rtsp/ARTPConnection.cpp b/media/libstagefright/rtsp/ARTPConnection.cpp
index a6bd824..a86ab74 100644
--- a/media/libstagefright/rtsp/ARTPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTPConnection.cpp
@@ -82,7 +82,7 @@
         size_t index,
         const sp<AMessage> &notify,
         bool injected) {
-    sp<AMessage> msg = new AMessage(kWhatAddStream, id());
+    sp<AMessage> msg = new AMessage(kWhatAddStream, this);
     msg->setInt32("rtp-socket", rtpSocket);
     msg->setInt32("rtcp-socket", rtcpSocket);
     msg->setObject("session-desc", sessionDesc);
@@ -93,7 +93,7 @@
 }
 
 void ARTPConnection::removeStream(int rtpSocket, int rtcpSocket) {
-    sp<AMessage> msg = new AMessage(kWhatRemoveStream, id());
+    sp<AMessage> msg = new AMessage(kWhatRemoveStream, this);
     msg->setInt32("rtp-socket", rtpSocket);
     msg->setInt32("rtcp-socket", rtcpSocket);
     msg->post();
@@ -233,7 +233,7 @@
         return;
     }
 
-    sp<AMessage> msg = new AMessage(kWhatPollStreams, id());
+    sp<AMessage> msg = new AMessage(kWhatPollStreams, this);
     msg->post();
 
     mPollEventPending = true;
@@ -639,7 +639,7 @@
 }
 
 void ARTPConnection::injectPacket(int index, const sp<ABuffer> &buffer) {
-    sp<AMessage> msg = new AMessage(kWhatInjectPacket, id());
+    sp<AMessage> msg = new AMessage(kWhatInjectPacket, this);
     msg->setInt32("index", index);
     msg->setBuffer("buffer", buffer);
     msg->post();
diff --git a/media/libstagefright/rtsp/ARTPSession.cpp b/media/libstagefright/rtsp/ARTPSession.cpp
index ba4e33c..e5acb06 100644
--- a/media/libstagefright/rtsp/ARTPSession.cpp
+++ b/media/libstagefright/rtsp/ARTPSession.cpp
@@ -82,7 +82,7 @@
         info->mRTPSocket = rtpSocket;
         info->mRTCPSocket = rtcpSocket;
 
-        sp<AMessage> notify = new AMessage(kWhatAccessUnitComplete, id());
+        sp<AMessage> notify = new AMessage(kWhatAccessUnitComplete, this);
         notify->setSize("track-index", mTracks.size() - 1);
 
         mRTPConn->addStream(
diff --git a/media/libstagefright/rtsp/ARTPWriter.cpp b/media/libstagefright/rtsp/ARTPWriter.cpp
index e1607bf..56c4aa6 100644
--- a/media/libstagefright/rtsp/ARTPWriter.cpp
+++ b/media/libstagefright/rtsp/ARTPWriter.cpp
@@ -146,7 +146,7 @@
         TRESPASS();
     }
 
-    (new AMessage(kWhatStart, mReflector->id()))->post();
+    (new AMessage(kWhatStart, mReflector))->post();
 
     while (!(mFlags & kFlagStarted)) {
         mCondition.wait(mLock);
@@ -161,7 +161,7 @@
         return OK;
     }
 
-    (new AMessage(kWhatStop, mReflector->id()))->post();
+    (new AMessage(kWhatStop, mReflector))->post();
 
     while (mFlags & kFlagStarted) {
         mCondition.wait(mLock);
@@ -213,8 +213,8 @@
                 mCondition.signal();
             }
 
-            (new AMessage(kWhatRead, mReflector->id()))->post();
-            (new AMessage(kWhatSendSR, mReflector->id()))->post();
+            (new AMessage(kWhatRead, mReflector))->post();
+            (new AMessage(kWhatSendSR, mReflector))->post();
             break;
         }
 
diff --git a/media/libstagefright/rtsp/ARTPWriter.h b/media/libstagefright/rtsp/ARTPWriter.h
index fdc8d23..be8bc13 100644
--- a/media/libstagefright/rtsp/ARTPWriter.h
+++ b/media/libstagefright/rtsp/ARTPWriter.h
@@ -32,7 +32,7 @@
 namespace android {
 
 struct ABuffer;
-struct MediaBuffer;
+class MediaBuffer;
 
 struct ARTPWriter : public MediaWriter {
     ARTPWriter(int fd);
diff --git a/media/libstagefright/rtsp/ARTSPConnection.cpp b/media/libstagefright/rtsp/ARTSPConnection.cpp
index 60b3aaf..855ffdc 100644
--- a/media/libstagefright/rtsp/ARTSPConnection.cpp
+++ b/media/libstagefright/rtsp/ARTSPConnection.cpp
@@ -68,28 +68,28 @@
 }
 
 void ARTSPConnection::connect(const char *url, const sp<AMessage> &reply) {
-    sp<AMessage> msg = new AMessage(kWhatConnect, id());
+    sp<AMessage> msg = new AMessage(kWhatConnect, this);
     msg->setString("url", url);
     msg->setMessage("reply", reply);
     msg->post();
 }
 
 void ARTSPConnection::disconnect(const sp<AMessage> &reply) {
-    sp<AMessage> msg = new AMessage(kWhatDisconnect, id());
+    sp<AMessage> msg = new AMessage(kWhatDisconnect, this);
     msg->setMessage("reply", reply);
     msg->post();
 }
 
 void ARTSPConnection::sendRequest(
         const char *request, const sp<AMessage> &reply) {
-    sp<AMessage> msg = new AMessage(kWhatSendRequest, id());
+    sp<AMessage> msg = new AMessage(kWhatSendRequest, this);
     msg->setString("request", request);
     msg->setMessage("reply", reply);
     msg->post();
 }
 
 void ARTSPConnection::observeBinaryData(const sp<AMessage> &reply) {
-    sp<AMessage> msg = new AMessage(kWhatObserveBinaryData, id());
+    sp<AMessage> msg = new AMessage(kWhatObserveBinaryData, this);
     msg->setMessage("reply", reply);
     msg->post();
 }
@@ -286,7 +286,7 @@
 
     if (err < 0) {
         if (errno == EINPROGRESS) {
-            sp<AMessage> msg = new AMessage(kWhatCompleteConnection, id());
+            sp<AMessage> msg = new AMessage(kWhatCompleteConnection, this);
             msg->setMessage("reply", reply);
             msg->setInt32("connection-id", mConnectionID);
             msg->post();
@@ -523,7 +523,7 @@
         return;
     }
 
-    sp<AMessage> msg = new AMessage(kWhatReceiveResponse, id());
+    sp<AMessage> msg = new AMessage(kWhatReceiveResponse, this);
     msg->post();
 
     mReceiveResponseEventPending = true;
@@ -746,7 +746,7 @@
             AString request;
             CHECK(reply->findString("original-request", &request));
 
-            sp<AMessage> msg = new AMessage(kWhatSendRequest, id());
+            sp<AMessage> msg = new AMessage(kWhatSendRequest, this);
             msg->setMessage("reply", reply);
             msg->setString("request", request.c_str(), request.size());
 
diff --git a/media/libstagefright/rtsp/Android.mk b/media/libstagefright/rtsp/Android.mk
index 9fedb71..c5e8c35 100644
--- a/media/libstagefright/rtsp/Android.mk
+++ b/media/libstagefright/rtsp/Android.mk
@@ -31,7 +31,8 @@
     LOCAL_CFLAGS += -Wno-psabi
 endif
 
-LOCAL_CFLAGS += -Werror
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
 
@@ -54,7 +55,8 @@
 	frameworks/av/media/libstagefright \
 	$(TOP)/frameworks/native/include/media/openmax
 
-LOCAL_CFLAGS += -Wno-multichar
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_MODULE_TAGS := optional
 
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index 3bf489b..0d0baf3 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -98,6 +98,7 @@
     enum {
         kWhatConnected                  = 'conn',
         kWhatDisconnected               = 'disc',
+        kWhatSeekPaused                 = 'spau',
         kWhatSeekDone                   = 'sdon',
 
         kWhatAccessUnit                 = 'accU',
@@ -169,10 +170,10 @@
         looper()->registerHandler(mConn);
         (1 ? mNetLooper : looper())->registerHandler(mRTPConn);
 
-        sp<AMessage> notify = new AMessage('biny', id());
+        sp<AMessage> notify = new AMessage('biny', this);
         mConn->observeBinaryData(notify);
 
-        sp<AMessage> reply = new AMessage('conn', id());
+        sp<AMessage> reply = new AMessage('conn', this);
         mConn->connect(mOriginalSessionURL.c_str(), reply);
     }
 
@@ -180,10 +181,10 @@
         looper()->registerHandler(mConn);
         (1 ? mNetLooper : looper())->registerHandler(mRTPConn);
 
-        sp<AMessage> notify = new AMessage('biny', id());
+        sp<AMessage> notify = new AMessage('biny', this);
         mConn->observeBinaryData(notify);
 
-        sp<AMessage> reply = new AMessage('sdpl', id());
+        sp<AMessage> reply = new AMessage('sdpl', this);
         reply->setObject("description", desc);
         mConn->connect(mOriginalSessionURL.c_str(), reply);
     }
@@ -210,29 +211,35 @@
     }
 
     void disconnect() {
-        (new AMessage('abor', id()))->post();
+        (new AMessage('abor', this))->post();
     }
 
     void seek(int64_t timeUs) {
-        sp<AMessage> msg = new AMessage('seek', id());
+        sp<AMessage> msg = new AMessage('seek', this);
         msg->setInt64("time", timeUs);
         mPauseGeneration++;
         msg->post();
     }
 
+    void continueSeekAfterPause(int64_t timeUs) {
+        sp<AMessage> msg = new AMessage('see1', this);
+        msg->setInt64("time", timeUs);
+        msg->post();
+    }
+
     bool isSeekable() const {
         return mSeekable;
     }
 
     void pause() {
-        sp<AMessage> msg = new AMessage('paus', id());
+        sp<AMessage> msg = new AMessage('paus', this);
         mPauseGeneration++;
         msg->setInt32("pausecheck", mPauseGeneration);
         msg->post(kPauseDelayUs);
     }
 
     void resume() {
-        sp<AMessage> msg = new AMessage('resu', id());
+        sp<AMessage> msg = new AMessage('resu', this);
         mPauseGeneration++;
         msg->post();
     }
@@ -454,10 +461,10 @@
                     request.append("Accept: application/sdp\r\n");
                     request.append("\r\n");
 
-                    sp<AMessage> reply = new AMessage('desc', id());
+                    sp<AMessage> reply = new AMessage('desc', this);
                     mConn->sendRequest(request.c_str(), reply);
                 } else {
-                    (new AMessage('disc', id()))->post();
+                    (new AMessage('disc', this))->post();
                 }
                 break;
             }
@@ -468,10 +475,10 @@
 
                 int32_t reconnect;
                 if (msg->findInt32("reconnect", &reconnect) && reconnect) {
-                    sp<AMessage> reply = new AMessage('conn', id());
+                    sp<AMessage> reply = new AMessage('conn', this);
                     mConn->connect(mOriginalSessionURL.c_str(), reply);
                 } else {
-                    (new AMessage('quit', id()))->post();
+                    (new AMessage('quit', this))->post();
                 }
                 break;
             }
@@ -514,7 +521,7 @@
                             ALOGI("rewritten session url: '%s'", mSessionURL.c_str());
                         }
 
-                        sp<AMessage> reply = new AMessage('conn', id());
+                        sp<AMessage> reply = new AMessage('conn', this);
                         mConn->connect(mOriginalSessionURL.c_str(), reply);
                         break;
                     }
@@ -586,7 +593,7 @@
                 }
 
                 if (result != OK) {
-                    sp<AMessage> reply = new AMessage('disc', id());
+                    sp<AMessage> reply = new AMessage('disc', this);
                     mConn->disconnect(reply);
                 }
                 break;
@@ -631,7 +638,7 @@
                 }
 
                 if (result != OK) {
-                    sp<AMessage> reply = new AMessage('disc', id());
+                    sp<AMessage> reply = new AMessage('disc', this);
                     mConn->disconnect(reply);
                 }
                 break;
@@ -651,7 +658,7 @@
                 int32_t result;
                 CHECK(msg->findInt32("result", &result));
 
-                ALOGI("SETUP(%d) completed with result %d (%s)",
+                ALOGI("SETUP(%zu) completed with result %d (%s)",
                      index, result, strerror(-result));
 
                 if (result == OK) {
@@ -703,7 +710,7 @@
                             mSessionID.erase(i, mSessionID.size() - i);
                         }
 
-                        sp<AMessage> notify = new AMessage('accu', id());
+                        sp<AMessage> notify = new AMessage('accu', this);
                         notify->setSize("track-index", trackIndex);
 
                         i = response->mHeaders.indexOfKey("transport");
@@ -769,10 +776,10 @@
 
                     request.append("\r\n");
 
-                    sp<AMessage> reply = new AMessage('play', id());
+                    sp<AMessage> reply = new AMessage('play', this);
                     mConn->sendRequest(request.c_str(), reply);
                 } else {
-                    sp<AMessage> reply = new AMessage('disc', id());
+                    sp<AMessage> reply = new AMessage('disc', this);
                     mConn->disconnect(reply);
                 }
                 break;
@@ -797,7 +804,7 @@
                     } else {
                         parsePlayResponse(response);
 
-                        sp<AMessage> timeout = new AMessage('tiou', id());
+                        sp<AMessage> timeout = new AMessage('tiou', this);
                         mCheckTimeoutGeneration++;
                         timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
                         timeout->post(kStartupTimeoutUs);
@@ -805,7 +812,7 @@
                 }
 
                 if (result != OK) {
-                    sp<AMessage> reply = new AMessage('disc', id());
+                    sp<AMessage> reply = new AMessage('disc', this);
                     mConn->disconnect(reply);
                 }
 
@@ -831,7 +838,7 @@
                 request.append("\r\n");
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('opts', id());
+                sp<AMessage> reply = new AMessage('opts', this);
                 reply->setInt32("generation", mKeepAliveGeneration);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
@@ -894,7 +901,7 @@
                 mPausing = false;
                 mSeekable = true;
 
-                sp<AMessage> reply = new AMessage('tear', id());
+                sp<AMessage> reply = new AMessage('tear', this);
 
                 int32_t reconnect;
                 if (msg->findInt32("reconnect", &reconnect) && reconnect) {
@@ -926,7 +933,7 @@
                 ALOGI("TEARDOWN completed with result %d (%s)",
                      result, strerror(-result));
 
-                sp<AMessage> reply = new AMessage('disc', id());
+                sp<AMessage> reply = new AMessage('disc', this);
 
                 int32_t reconnect;
                 if (msg->findInt32("reconnect", &reconnect) && reconnect) {
@@ -958,7 +965,7 @@
                 if (mNumAccessUnitsReceived == 0) {
 #if 1
                     ALOGI("stream ended? aborting.");
-                    (new AMessage('abor', id()))->post();
+                    (new AMessage('abor', this))->post();
                     break;
 #else
                     ALOGI("haven't seen an AU in a looong time.");
@@ -1012,7 +1019,7 @@
 
                 int32_t eos;
                 if (msg->findInt32("eos", &eos)) {
-                    ALOGI("received BYE on track index %d", trackIndex);
+                    ALOGI("received BYE on track index %zu", trackIndex);
                     if (!mAllTracksHaveTime && dataReceivedOnAllChannels()) {
                         ALOGI("No time established => fake existing data");
 
@@ -1077,7 +1084,7 @@
 
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('pau2', id());
+                sp<AMessage> reply = new AMessage('pau2', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -1114,7 +1121,7 @@
 
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('res2', id());
+                sp<AMessage> reply = new AMessage('res2', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -1143,7 +1150,7 @@
 
                         // Post new timeout in order to make sure to use
                         // fake timestamps if no new Sender Reports arrive
-                        sp<AMessage> timeout = new AMessage('tiou', id());
+                        sp<AMessage> timeout = new AMessage('tiou', this);
                         mCheckTimeoutGeneration++;
                         timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
                         timeout->post(kStartupTimeoutUs);
@@ -1152,7 +1159,7 @@
 
                 if (result != OK) {
                     ALOGE("resume failed, aborting.");
-                    (new AMessage('abor', id()))->post();
+                    (new AMessage('abor', this))->post();
                 }
 
                 mPausing = false;
@@ -1180,7 +1187,7 @@
                 mCheckPending = true;
                 ++mCheckGeneration;
 
-                sp<AMessage> reply = new AMessage('see1', id());
+                sp<AMessage> reply = new AMessage('see0', this);
                 reply->setInt64("time", timeUs);
 
                 if (mPausing) {
@@ -1203,9 +1210,26 @@
                 break;
             }
 
-            case 'see1':
+            case 'see0':
             {
                 // Session is paused now.
+                status_t err = OK;
+                msg->findInt32("result", &err);
+
+                int64_t timeUs;
+                CHECK(msg->findInt64("time", &timeUs));
+
+                sp<AMessage> notify = mNotify->dup();
+                notify->setInt32("what", kWhatSeekPaused);
+                notify->setInt32("err", err);
+                notify->setInt64("time", timeUs);
+                notify->post();
+                break;
+
+            }
+
+            case 'see1':
+            {
                 for (size_t i = 0; i < mTracks.size(); ++i) {
                     TrackInfo *info = &mTracks.editItemAt(i);
 
@@ -1221,7 +1245,7 @@
 
                 // Start new timeoutgeneration to avoid getting timeout
                 // before PLAY response arrive
-                sp<AMessage> timeout = new AMessage('tiou', id());
+                sp<AMessage> timeout = new AMessage('tiou', this);
                 mCheckTimeoutGeneration++;
                 timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
                 timeout->post(kStartupTimeoutUs);
@@ -1243,7 +1267,7 @@
 
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('see2', id());
+                sp<AMessage> reply = new AMessage('see2', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -1277,7 +1301,7 @@
 
                         // Post new timeout in order to make sure to use
                         // fake timestamps if no new Sender Reports arrive
-                        sp<AMessage> timeout = new AMessage('tiou', id());
+                        sp<AMessage> timeout = new AMessage('tiou', this);
                         mCheckTimeoutGeneration++;
                         timeout->setInt32("tioucheck", mCheckTimeoutGeneration);
                         timeout->post(kStartupTimeoutUs);
@@ -1293,7 +1317,7 @@
 
                 if (result != OK) {
                     ALOGE("seek failed, aborting.");
-                    (new AMessage('abor', id()))->post();
+                    (new AMessage('abor', this))->post();
                 }
 
                 mPausing = false;
@@ -1343,12 +1367,12 @@
 
                         mTryTCPInterleaving = true;
 
-                        sp<AMessage> msg = new AMessage('abor', id());
+                        sp<AMessage> msg = new AMessage('abor', this);
                         msg->setInt32("reconnect", true);
                         msg->post();
                     } else {
                         ALOGW("Never received any data, disconnecting.");
-                        (new AMessage('abor', id()))->post();
+                        (new AMessage('abor', this))->post();
                     }
                 } else {
                     if (!mAllTracksHaveTime) {
@@ -1369,7 +1393,7 @@
     }
 
     void postKeepAlive() {
-        sp<AMessage> msg = new AMessage('aliv', id());
+        sp<AMessage> msg = new AMessage('aliv', this);
         msg->setInt32("generation", mKeepAliveGeneration);
         msg->post((mKeepAliveTimeoutUs * 9) / 10);
     }
@@ -1380,7 +1404,7 @@
         }
 
         mCheckPending = true;
-        sp<AMessage> check = new AMessage('chek', id());
+        sp<AMessage> check = new AMessage('chek', this);
         check->setInt32("generation", mCheckGeneration);
         check->post(kAccessUnitTimeoutUs);
     }
@@ -1564,9 +1588,9 @@
             new APacketSource(mSessionDesc, index);
 
         if (source->initCheck() != OK) {
-            ALOGW("Unsupported format. Ignoring track #%d.", index);
+            ALOGW("Unsupported format. Ignoring track #%zu.", index);
 
-            sp<AMessage> reply = new AMessage('setu', id());
+            sp<AMessage> reply = new AMessage('setu', this);
             reply->setSize("index", index);
             reply->setInt32("result", ERROR_UNSUPPORTED);
             reply->post();
@@ -1606,7 +1630,7 @@
         info->mTimeScale = timescale;
         info->mEOSReceived = false;
 
-        ALOGV("track #%d URL=%s", mTracks.size(), trackURL.c_str());
+        ALOGV("track #%zu URL=%s", mTracks.size(), trackURL.c_str());
 
         AString request = "SETUP ";
         request.append(trackURL);
@@ -1652,7 +1676,7 @@
 
         request.append("\r\n");
 
-        sp<AMessage> reply = new AMessage('setu', id());
+        sp<AMessage> reply = new AMessage('setu', this);
         reply->setSize("index", index);
         reply->setSize("track-index", mTracks.size() - 1);
         mConn->sendRequest(request.c_str(), reply);
@@ -1673,21 +1697,11 @@
         }
 
         size_t n = strlen(baseURL);
-        if (baseURL[n - 1] == '/') {
-            out->setTo(baseURL);
-            out->append(url);
-        } else {
-            const char *slashPos = strrchr(baseURL, '/');
-
-            if (slashPos > &baseURL[6]) {
-                out->setTo(baseURL, slashPos - baseURL);
-            } else {
-                out->setTo(baseURL);
-            }
-
+        out->setTo(baseURL);
+        if (baseURL[n - 1] != '/') {
             out->append("/");
-            out->append(url);
         }
+        out->append(url);
 
         return true;
     }
@@ -1731,8 +1745,8 @@
     }
 
     void onTimeUpdate(int32_t trackIndex, uint32_t rtpTime, uint64_t ntpTime) {
-        ALOGV("onTimeUpdate track %d, rtpTime = 0x%08x, ntpTime = 0x%016llx",
-             trackIndex, rtpTime, ntpTime);
+        ALOGV("onTimeUpdate track %d, rtpTime = 0x%08x, ntpTime = %#016llx",
+             trackIndex, rtpTime, (long long)ntpTime);
 
         int64_t ntpTimeUs = (int64_t)(ntpTime * 1E6 / (1ll << 32));
 
@@ -1747,7 +1761,7 @@
         }
 
         if (!mAllTracksHaveTime) {
-            bool allTracksHaveTime = true;
+            bool allTracksHaveTime = (mTracks.size() > 0);
             for (size_t i = 0; i < mTracks.size(); ++i) {
                 TrackInfo *track = &mTracks.editItemAt(i);
                 if (track->mNTPAnchorUs < 0) {
@@ -1851,8 +1865,8 @@
             return false;
         }
 
-        ALOGV("track %d rtpTime=%d mediaTimeUs = %lld us (%.2f secs)",
-             trackIndex, rtpTime, mediaTimeUs, mediaTimeUs / 1E6);
+        ALOGV("track %d rtpTime=%u mediaTimeUs = %lld us (%.2f secs)",
+             trackIndex, rtpTime, (long long)mediaTimeUs, mediaTimeUs / 1E6);
 
         accessUnit->meta()->setInt64("timeUs", mediaTimeUs);
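Note on the MyHandler.h changes above: seeking is now split into two phases. 'seek' only pauses the session and posts kWhatSeekPaused (carrying "err" and "time") through mNotify; the owner of MyHandler is then expected to call continueSeekAfterPause(), which posts 'see1' and performs the actual PLAY with the new range. The consumer of that notification is not part of this patch, so the following is only a minimal, hypothetical owner-side sketch; kWhatSeekPaused and continueSeekAfterPause() come from MyHandler.h above, everything else (the function name, the mHandler member) is illustrative.

// Hypothetical handling of MyHandler's notify message in its owner.
// Assumes: sp<MyHandler> mHandler is the handler that posted the notify.
void onMyHandlerNotify(const sp<AMessage> &msg) {
    int32_t what;
    CHECK(msg->findInt32("what", &what));

    if (what == MyHandler::kWhatSeekPaused) {
        int32_t err;
        int64_t seekTimeUs;
        CHECK(msg->findInt32("err", &err));
        CHECK(msg->findInt64("time", &seekTimeUs));

        // The RTSP session is paused now; ask MyHandler to finish the seek
        // by issuing the PLAY request for the new position ('see1').
        mHandler->continueSeekAfterPause(seekTimeUs);
    }
}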
 
diff --git a/media/libstagefright/rtsp/MyTransmitter.h b/media/libstagefright/rtsp/MyTransmitter.h
index 009a3b1..369f276 100644
--- a/media/libstagefright/rtsp/MyTransmitter.h
+++ b/media/libstagefright/rtsp/MyTransmitter.h
@@ -100,7 +100,7 @@
         mLooper->registerHandler(this);
         mLooper->registerHandler(mConn);
 
-        sp<AMessage> reply = new AMessage('conn', id());
+        sp<AMessage> reply = new AMessage('conn', this);
         mConn->connect(mServerURL.c_str(), reply);
 
 #ifdef ANDROID
@@ -229,7 +229,7 @@
         request.append("\r\n");
         request.append(sdp);
 
-        sp<AMessage> reply = new AMessage('anno', id());
+        sp<AMessage> reply = new AMessage('anno', this);
         mConn->sendRequest(request.c_str(), reply);
     }
 
@@ -350,7 +350,7 @@
                      << result << " (" << strerror(-result) << ")";
 
                 if (result != OK) {
-                    (new AMessage('quit', id()))->post();
+                    (new AMessage('quit', this))->post();
                     break;
                 }
 
@@ -381,7 +381,7 @@
                     if (response->mStatusCode == 401) {
                         if (mAuthType != NONE) {
                             LOG(INFO) << "FAILED to authenticate";
-                            (new AMessage('quit', id()))->post();
+                            (new AMessage('quit', this))->post();
                             break;
                         }
 
@@ -391,14 +391,14 @@
                 }
 
                 if (result != OK || response->mStatusCode != 200) {
-                    (new AMessage('quit', id()))->post();
+                    (new AMessage('quit', this))->post();
                     break;
                 }
 
                 unsigned rtpPort;
                 ARTPConnection::MakePortPair(&mRTPSocket, &mRTCPSocket, &rtpPort);
 
-                // (new AMessage('poll', id()))->post();
+                // (new AMessage('poll', this))->post();
 
                 AString request;
                 request.append("SETUP ");
@@ -414,7 +414,7 @@
                 request.append(";mode=record\r\n");
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('setu', id());
+                sp<AMessage> reply = new AMessage('setu', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -468,7 +468,7 @@
                 }
 
                 if (result != OK || response->mStatusCode != 200) {
-                    (new AMessage('quit', id()))->post();
+                    (new AMessage('quit', this))->post();
                     break;
                 }
 
@@ -535,7 +535,7 @@
                 request.append("\r\n");
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('reco', id());
+                sp<AMessage> reply = new AMessage('reco', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -558,13 +558,13 @@
                 }
 
                 if (result != OK) {
-                    (new AMessage('quit', id()))->post();
+                    (new AMessage('quit', this))->post();
                     break;
                 }
 
-                (new AMessage('more', id()))->post();
-                (new AMessage('sr  ', id()))->post();
-                (new AMessage('aliv', id()))->post(30000000ll);
+                (new AMessage('more', this))->post();
+                (new AMessage('sr  ', this))->post();
+                (new AMessage('aliv', this))->post(30000000ll);
                 break;
             }
 
@@ -586,7 +586,7 @@
                 request.append("\r\n");
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('opts', id());
+                sp<AMessage> reply = new AMessage('opts', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -603,7 +603,7 @@
                     break;
                 }
 
-                (new AMessage('aliv', id()))->post(30000000ll);
+                (new AMessage('aliv', this))->post(30000000ll);
                 break;
             }
 
@@ -702,7 +702,7 @@
                     request.append("\r\n");
                     request.append("\r\n");
 
-                    sp<AMessage> reply = new AMessage('paus', id());
+                    sp<AMessage> reply = new AMessage('paus', this);
                     mConn->sendRequest(request.c_str(), reply);
                 }
                 break;
@@ -753,7 +753,7 @@
                 request.append("\r\n");
                 request.append("\r\n");
 
-                sp<AMessage> reply = new AMessage('tear', id());
+                sp<AMessage> reply = new AMessage('tear', this);
                 mConn->sendRequest(request.c_str(), reply);
                 break;
             }
@@ -775,7 +775,7 @@
                     CHECK(response != NULL);
                 }
 
-                (new AMessage('quit', id()))->post();
+                (new AMessage('quit', this))->post();
                 break;
             }
 
@@ -784,14 +784,14 @@
                 LOG(INFO) << "disconnect completed";
 
                 mConnected = false;
-                (new AMessage('quit', id()))->post();
+                (new AMessage('quit', this))->post();
                 break;
             }
 
             case 'quit':
             {
                 if (mConnected) {
-                    mConn->disconnect(new AMessage('disc', id()));
+                    mConn->disconnect(new AMessage('disc', this));
                     break;
                 }
 
diff --git a/media/libstagefright/rtsp/SDPLoader.cpp b/media/libstagefright/rtsp/SDPLoader.cpp
index a24eb69..0f46c83 100644
--- a/media/libstagefright/rtsp/SDPLoader.cpp
+++ b/media/libstagefright/rtsp/SDPLoader.cpp
@@ -51,7 +51,7 @@
 void SDPLoader::load(const char *url, const KeyedVector<String8, String8> *headers) {
     mNetLooper->registerHandler(this);
 
-    sp<AMessage> msg = new AMessage(kWhatLoad, id());
+    sp<AMessage> msg = new AMessage(kWhatLoad, this);
     msg->setString("url", url);
 
     if (headers != NULL) {
diff --git a/media/libstagefright/rtsp/UDPPusher.cpp b/media/libstagefright/rtsp/UDPPusher.cpp
index 47ea6f1..5c685a1 100644
--- a/media/libstagefright/rtsp/UDPPusher.cpp
+++ b/media/libstagefright/rtsp/UDPPusher.cpp
@@ -65,7 +65,7 @@
     mFirstTimeMs = fromlel(timeMs);
     mFirstTimeUs = ALooper::GetNowUs();
 
-    (new AMessage(kWhatPush, id()))->post();
+    (new AMessage(kWhatPush, this))->post();
 }
 
 bool UDPPusher::onPush() {
@@ -103,7 +103,7 @@
     timeMs -= mFirstTimeMs;
     int64_t whenUs = mFirstTimeUs + timeMs * 1000ll;
     int64_t nowUs = ALooper::GetNowUs();
-    (new AMessage(kWhatPush, id()))->post(whenUs - nowUs);
+    (new AMessage(kWhatPush, this))->post(whenUs - nowUs);
 
     return true;
 }
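The change that recurs through the RTSP files above (and the rest of this patch) is the AMessage retargeting: new AMessage(what, id()) becomes new AMessage(what, this), i.e. a message now holds a strong pointer to its AHandler rather than a looper-assigned id. Below is a minimal standalone sketch of the new pattern, assuming only the libstagefright foundation classes (ALooper, AHandler, AMessage); the class name, message id and payload key are illustrative.

#define LOG_TAG "PingHandler"
#include <utils/Log.h>

#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AMessage.h>

using namespace android;

struct PingHandler : public AHandler {
    void ping() {
        // New style: pass the handler itself instead of id().
        sp<AMessage> msg = new AMessage('ping', this);
        msg->setInt32("count", 1);
        msg->post();
    }

protected:
    virtual void onMessageReceived(const sp<AMessage> &msg) {
        int32_t count;
        CHECK(msg->findInt32("count", &count));
        ALOGV("received 'ping' #%d", count);
    }
};

// Usage: the handler must still be registered with a started looper
// before any message posted to it can be delivered.
//   sp<ALooper> looper = new ALooper;
//   looper->start();
//   sp<PingHandler> handler = new PingHandler;
//   looper->registerHandler(handler);
//   handler->ping();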
diff --git a/media/libstagefright/tests/Android.mk b/media/libstagefright/tests/Android.mk
index 8d6ff5b..111e6c5 100644
--- a/media/libstagefright/tests/Android.mk
+++ b/media/libstagefright/tests/Android.mk
@@ -31,6 +31,9 @@
 	frameworks/av/media/libstagefright/include \
 	$(TOP)/frameworks/native/include/media/openmax \
 
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
 LOCAL_32_BIT_ONLY := true
 
 include $(BUILD_NATIVE_TEST)
@@ -60,6 +63,39 @@
 	frameworks/av/media/libstagefright/include \
 	$(TOP)/frameworks/native/include/media/openmax \
 
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_MODULE := MediaCodecListOverrides_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+	MediaCodecListOverrides_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+	libmedia \
+	libstagefright \
+	libstagefright_foundation \
+	libstagefright_omx \
+	libutils \
+	liblog
+
+LOCAL_C_INCLUDES := \
+	frameworks/av/media/libstagefright \
+	frameworks/av/media/libstagefright/include \
+	frameworks/native/include/media/openmax \
+
+LOCAL_32_BIT_ONLY := true
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
 include $(BUILD_NATIVE_TEST)
 
 # Include subdirectory makefiles
diff --git a/media/libstagefright/tests/DummyRecorder.h b/media/libstagefright/tests/DummyRecorder.h
index 1cbea1b..cd4d0ee 100644
--- a/media/libstagefright/tests/DummyRecorder.h
+++ b/media/libstagefright/tests/DummyRecorder.h
@@ -24,7 +24,7 @@
 
 namespace android {
 
-class MediaSource;
+struct MediaSource;
 class MediaBuffer;
 
 class DummyRecorder {
diff --git a/media/libstagefright/tests/MediaCodecListOverrides_test.cpp b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
new file mode 100644
index 0000000..ab547be
--- /dev/null
+++ b/media/libstagefright/tests/MediaCodecListOverrides_test.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "MediaCodecListOverrides_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "MediaCodecListOverrides.h"
+
+#include <media/MediaCodecInfo.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/MediaCodecList.h>
+
+namespace android {
+
+static const char kTestOverridesStr[] =
+"<MediaCodecs>\n"
+"    <Settings>\n"
+"        <Setting name=\"supports-multiple-secure-codecs\" value=\"false\" />\n"
+"        <Setting name=\"supports-secure-with-non-secure-codec\" value=\"true\" />\n"
+"    </Settings>\n"
+"    <Encoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.avc\" type=\"video/avc\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.encoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"    </Encoders>\n"
+"    <Decoders>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.avc.secure\" type=\"video/avc\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"1\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.h263\" type=\"video/3gpp\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"4\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg2\" type=\"video/mpeg2\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"3\" />\n"
+"        </MediaCodec>\n"
+"        <MediaCodec name=\"OMX.qcom.video.decoder.mpeg4\" type=\"video/mp4v-es\" update=\"true\" >\n"
+"            <Limit name=\"max-supported-instances\" value=\"3\" />\n"
+"        </MediaCodec>\n"
+"    </Decoders>\n"
+"</MediaCodecs>\n";
+
+class MediaCodecListOverridesTest : public ::testing::Test {
+public:
+    MediaCodecListOverridesTest() {}
+
+    void addMaxInstancesSetting(
+            const AString &key,
+            const AString &value,
+            KeyedVector<AString, CodecSettings> *results) {
+        CodecSettings settings;
+        settings.add("max-supported-instances", value);
+        results->add(key, settings);
+    }
+
+    void verifyProfileResults(const KeyedVector<AString, CodecSettings> &results) {
+        EXPECT_LT(0u, results.size());
+        for (size_t i = 0; i < results.size(); ++i) {
+            AString key = results.keyAt(i);
+            CodecSettings settings = results.valueAt(i);
+            EXPECT_EQ(1u, settings.size());
+            EXPECT_TRUE(settings.keyAt(0) == "max-supported-instances");
+            AString valueS = settings.valueAt(0);
+            int32_t value = strtol(valueS.c_str(), NULL, 10);
+            EXPECT_LT(0, value);
+            ALOGV("profileCodecs results %s %s", key.c_str(), valueS.c_str());
+        }
+    }
+
+    void exportTestResultsToXML(const char *fileName) {
+        CodecSettings gR;
+        gR.add("supports-multiple-secure-codecs", "false");
+        gR.add("supports-secure-with-non-secure-codec", "true");
+        KeyedVector<AString, CodecSettings> eR;
+        addMaxInstancesSetting("OMX.qcom.video.encoder.avc video/avc", "4", &eR);
+        addMaxInstancesSetting("OMX.qcom.video.encoder.mpeg4 video/mp4v-es", "4", &eR);
+        KeyedVector<AString, CodecSettings> dR;
+        addMaxInstancesSetting("OMX.qcom.video.decoder.avc.secure video/avc", "1", &dR);
+        addMaxInstancesSetting("OMX.qcom.video.decoder.h263 video/3gpp", "4", &dR);
+        addMaxInstancesSetting("OMX.qcom.video.decoder.mpeg2 video/mpeg2", "3", &dR);
+        addMaxInstancesSetting("OMX.qcom.video.decoder.mpeg4 video/mp4v-es", "3", &dR);
+
+        exportResultsToXML(fileName, gR, eR, dR);
+    }
+};
+
+TEST_F(MediaCodecListOverridesTest, splitString) {
+    AString s = "abc123";
+    AString delimiter = " ";
+    AString s1;
+    AString s2;
+    EXPECT_FALSE(splitString(s, delimiter, &s1, &s2));
+    s = "abc 123";
+    EXPECT_TRUE(splitString(s, delimiter, &s1, &s2));
+    EXPECT_TRUE(s1 == "abc");
+    EXPECT_TRUE(s2 == "123");
+}
+
+// TODO: the codec component never returns OMX_EventCmdComplete in unit test.
+TEST_F(MediaCodecListOverridesTest, DISABLED_profileCodecs) {
+    sp<IMediaCodecList> list = MediaCodecList::getInstance();
+    Vector<sp<MediaCodecInfo>> infos;
+    for (size_t i = 0; i < list->countCodecs(); ++i) {
+        infos.push_back(list->getCodecInfo(i));
+    }
+    CodecSettings global_results;
+    KeyedVector<AString, CodecSettings> encoder_results;
+    KeyedVector<AString, CodecSettings> decoder_results;
+    profileCodecs(
+            infos, &global_results, &encoder_results, &decoder_results, true /* forceToMeasure */);
+    verifyProfileResults(encoder_results);
+    verifyProfileResults(decoder_results);
+}
+
+TEST_F(MediaCodecListOverridesTest, exportTestResultsToXML) {
+    const char *fileName = "/sdcard/mediacodec_list_overrides_test.xml";
+    remove(fileName);
+
+    exportTestResultsToXML(fileName);
+
+    // verify
+    AString overrides;
+    FILE *f = fopen(fileName, "rb");
+    ASSERT_TRUE(f != NULL);
+    fseek(f, 0, SEEK_END);
+    long size = ftell(f);
+    rewind(f);
+
+    char *buf = (char *)malloc(size);
+    EXPECT_EQ((size_t)1, fread(buf, size, 1, f));
+    overrides.setTo(buf, size);
+    fclose(f);
+    free(buf);
+
+    AString expected;
+    expected.append(getProfilingVersionString());
+    expected.append("\n");
+    expected.append(kTestOverridesStr);
+    EXPECT_TRUE(overrides == expected);
+
+    remove(fileName);
+}
+
+} // namespace android
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index fd889f9..3860e9b 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -19,6 +19,7 @@
 
 #include <gtest/gtest.h>
 #include <utils/String8.h>
+#include <utils/String16.h>
 #include <utils/Errors.h>
 #include <fcntl.h>
 #include <unistd.h>
@@ -466,7 +467,7 @@
 // Set up the MediaRecorder which runs in the same process as mediaserver
 sp<MediaRecorder> SurfaceMediaSourceGLTest::setUpMediaRecorder(int fd, int videoSource,
         int outputFormat, int videoEncoder, int width, int height, int fps) {
-    sp<MediaRecorder> mr = new MediaRecorder();
+    sp<MediaRecorder> mr = new MediaRecorder(String16());
     mr->setVideoSource(videoSource);
     mr->setOutputFormat(outputFormat);
     mr->setVideoEncoder(videoEncoder);
diff --git a/media/libstagefright/tests/Utils_test.cpp b/media/libstagefright/tests/Utils_test.cpp
index c1e663c..d736501 100644
--- a/media/libstagefright/tests/Utils_test.cpp
+++ b/media/libstagefright/tests/Utils_test.cpp
@@ -109,21 +109,21 @@
 
 TEST_F(UtilsTest, TestDebug) {
 #define LVL(x) (ADebug::Level)(x)
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("video", "", LVL(5)), LVL(5));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("video", "   \t  \n ", LVL(2)), LVL(2));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("video", "3", LVL(5)), LVL(3));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("video", "3:*deo", LVL(5)), LVL(3));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString(
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "", LVL(5)), LVL(5));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "   \t  \n ", LVL(2)), LVL(2));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3", LVL(5)), LVL(3));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3:*deo", LVL(5)), LVL(3));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString(
             "video", "\t\n 3 \t\n:\t\n video \t\n", LVL(5)), LVL(3));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("video", "3:*deo,2:vid*", LVL(5)), LVL(2));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString(
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "3:*deo,2:vid*", LVL(5)), LVL(2));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString(
             "avideo", "\t\n 3 \t\n:\t\n avideo \t\n,\t\n 2 \t\n:\t\n video \t\n", LVL(5)), LVL(3));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString(
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString(
             "audio.omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(2));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString(
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString(
             "video.omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("video", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
-    ASSERT_EQ(ADebug::GetDebugLevelFromString("omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(4));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("video", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(3));
+    ASSERT_EQ(ADebug::GetLevelFromSettingsString("omx", "4:*omx,3:*d*o*,2:audio*", LVL(5)), LVL(4));
 #undef LVL
 }
 
diff --git a/media/libstagefright/timedtext/Android.mk b/media/libstagefright/timedtext/Android.mk
index 6a8b9fc..58fb12f 100644
--- a/media/libstagefright/timedtext/Android.mk
+++ b/media/libstagefright/timedtext/Android.mk
@@ -9,7 +9,8 @@
         TimedTextSRTSource.cpp    \
         TimedTextPlayer.cpp
 
-LOCAL_CFLAGS += -Wno-multichar -Werror
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_C_INCLUDES:= \
         $(TOP)/frameworks/av/include/media/stagefright/timedtext \
diff --git a/media/libstagefright/timedtext/TimedTextPlayer.cpp b/media/libstagefright/timedtext/TimedTextPlayer.cpp
index a070487..aecf666 100644
--- a/media/libstagefright/timedtext/TimedTextPlayer.cpp
+++ b/media/libstagefright/timedtext/TimedTextPlayer.cpp
@@ -56,25 +56,25 @@
 }
 
 void TimedTextPlayer::start() {
-    (new AMessage(kWhatStart, id()))->post();
+    (new AMessage(kWhatStart, this))->post();
 }
 
 void TimedTextPlayer::pause() {
-    (new AMessage(kWhatPause, id()))->post();
+    (new AMessage(kWhatPause, this))->post();
 }
 
 void TimedTextPlayer::resume() {
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 }
 
 void TimedTextPlayer::seekToAsync(int64_t timeUs) {
-    sp<AMessage> msg = new AMessage(kWhatSeek, id());
+    sp<AMessage> msg = new AMessage(kWhatSeek, this);
     msg->setInt64("seekTimeUs", timeUs);
     msg->post();
 }
 
 void TimedTextPlayer::setDataSource(sp<TimedTextSource> source) {
-    sp<AMessage> msg = new AMessage(kWhatSetSource, id());
+    sp<AMessage> msg = new AMessage(kWhatSetSource, this);
     msg->setObject("source", source);
     msg->post();
 }
@@ -231,7 +231,7 @@
     status_t err = mSource->read(&startTimeUs, &endTimeUs,
                                  &(parcelEvent->parcel), options);
     if (err == WOULD_BLOCK) {
-        sp<AMessage> msg = new AMessage(kWhatRetryRead, id());
+        sp<AMessage> msg = new AMessage(kWhatRetryRead, this);
         if (options != NULL) {
             int64_t seekTimeUs = kInvalidTimeUs;
             MediaSource::ReadOptions::SeekMode seekMode =
@@ -259,7 +259,7 @@
 
 void TimedTextPlayer::postTextEvent(const sp<ParcelEvent>& parcel, int64_t timeUs) {
     int64_t delayUs = delayUsFromCurrentTime(timeUs);
-    sp<AMessage> msg = new AMessage(kWhatSendSubtitle, id());
+    sp<AMessage> msg = new AMessage(kWhatSendSubtitle, this);
     msg->setInt32("generation", mSendSubtitleGeneration);
     if (parcel != NULL) {
         msg->setObject("subtitle", parcel);
diff --git a/media/libstagefright/timedtext/test/Android.mk b/media/libstagefright/timedtext/test/Android.mk
index 9a9fde2..e0e0e0d 100644
--- a/media/libstagefright/timedtext/test/Android.mk
+++ b/media/libstagefright/timedtext/test/Android.mk
@@ -26,4 +26,7 @@
     libstagefright_foundation \
     libutils
 
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
 include $(BUILD_NATIVE_TEST)
diff --git a/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp b/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp
index 3a06d61..211e732 100644
--- a/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp
+++ b/media/libstagefright/timedtext/test/TimedTextSRTSource_test.cpp
@@ -63,10 +63,10 @@
     }
 
     virtual ssize_t readAt(off64_t offset, void *data, size_t size) {
-        if (offset >= mSize) return 0;
+        if ((size_t)offset >= mSize) return 0;
 
         ssize_t avail = mSize - offset;
-        if (avail > size) {
+        if ((size_t)avail > size) {
             avail = size;
         }
         memcpy(data, mData + offset, avail);
diff --git a/media/libstagefright/webm/Android.mk b/media/libstagefright/webm/Android.mk
index 7081463..bc53c56 100644
--- a/media/libstagefright/webm/Android.mk
+++ b/media/libstagefright/webm/Android.mk
@@ -1,8 +1,10 @@
 LOCAL_PATH:= $(call my-dir)
 include $(CLEAR_VARS)
 
-LOCAL_CPPFLAGS += -D__STDINT_LIMITS \
-                  -Werror
+LOCAL_CPPFLAGS += -D__STDINT_LIMITS
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 LOCAL_SRC_FILES:= EbmlUtil.cpp        \
                   WebmElement.cpp     \
diff --git a/media/libstagefright/webm/WebmWriter.cpp b/media/libstagefright/webm/WebmWriter.cpp
index 069961b..737f144 100644
--- a/media/libstagefright/webm/WebmWriter.cpp
+++ b/media/libstagefright/webm/WebmWriter.cpp
@@ -80,38 +80,6 @@
             mCuePoints);
 }
 
-WebmWriter::WebmWriter(const char *filename)
-    : mInitCheck(NO_INIT),
-      mTimeCodeScale(1000000),
-      mStartTimestampUs(0),
-      mStartTimeOffsetMs(0),
-      mSegmentOffset(0),
-      mSegmentDataStart(0),
-      mInfoOffset(0),
-      mInfoSize(0),
-      mTracksOffset(0),
-      mCuesOffset(0),
-      mPaused(false),
-      mStarted(false),
-      mIsFileSizeLimitExplicitlyRequested(false),
-      mIsRealTimeRecording(false),
-      mStreamableFile(true),
-      mEstimatedCuesSize(0) {
-    mFd = open(filename, O_CREAT | O_LARGEFILE | O_TRUNC | O_RDWR, S_IRUSR | S_IWUSR);
-    if (mFd >= 0) {
-        ALOGV("fd %d; flags: %o", mFd, fcntl(mFd, F_GETFL, 0));
-        mInitCheck = OK;
-    }
-    mStreams[kAudioIndex] = WebmStream(kAudioType, "Audio", &WebmWriter::audioTrack);
-    mStreams[kVideoIndex] = WebmStream(kVideoType, "Video", &WebmWriter::videoTrack);
-    mSinkThread = new WebmFrameSinkThread(
-            mFd,
-            mSegmentDataStart,
-            mStreams[kVideoIndex].mSink,
-            mStreams[kAudioIndex].mSink,
-            mCuePoints);
-}
-
 // static
 sp<WebmElement> WebmWriter::videoTrack(const sp<MetaData>& md) {
     int32_t width, height;
diff --git a/media/libstagefright/webm/WebmWriter.h b/media/libstagefright/webm/WebmWriter.h
index 36b6965..4ad770e 100644
--- a/media/libstagefright/webm/WebmWriter.h
+++ b/media/libstagefright/webm/WebmWriter.h
@@ -37,7 +37,6 @@
 class WebmWriter : public MediaWriter {
 public:
     WebmWriter(int fd);
-    WebmWriter(const char *filename);
     ~WebmWriter() { reset(); }
 
 
diff --git a/media/libstagefright/wifi-display/Android.mk b/media/libstagefright/wifi-display/Android.mk
index f70454a..fb28624 100644
--- a/media/libstagefright/wifi-display/Android.mk
+++ b/media/libstagefright/wifi-display/Android.mk
@@ -30,6 +30,9 @@
         libui                           \
         libutils                        \
 
+LOCAL_CFLAGS += -Wno-multichar -Werror -Wall
+LOCAL_CLANG := true
+
 LOCAL_MODULE:= libstagefright_wfd
 
 LOCAL_MODULE_TAGS:= optional
diff --git a/media/libstagefright/wifi-display/MediaSender.cpp b/media/libstagefright/wifi-display/MediaSender.cpp
index b1cdec0..6f0087f 100644
--- a/media/libstagefright/wifi-display/MediaSender.cpp
+++ b/media/libstagefright/wifi-display/MediaSender.cpp
@@ -121,7 +121,7 @@
         }
 
         if (err == OK) {
-            sp<AMessage> notify = new AMessage(kWhatSenderNotify, id());
+            sp<AMessage> notify = new AMessage(kWhatSenderNotify, this);
             notify->setInt32("generation", mGeneration);
             mTSSender = new RTPSender(mNetSession, notify);
             looper()->registerHandler(mTSSender);
@@ -170,7 +170,7 @@
         return INVALID_OPERATION;
     }
 
-    sp<AMessage> notify = new AMessage(kWhatSenderNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatSenderNotify, this);
     notify->setInt32("generation", mGeneration);
     notify->setSize("trackIndex", trackIndex);
 
diff --git a/media/libstagefright/wifi-display/VideoFormats.cpp b/media/libstagefright/wifi-display/VideoFormats.cpp
index 2f4af5b..dbc511c 100644
--- a/media/libstagefright/wifi-display/VideoFormats.cpp
+++ b/media/libstagefright/wifi-display/VideoFormats.cpp
@@ -248,8 +248,8 @@
     }
 
     if (bestProfile == -1 || bestLevel == -1) {
-        ALOGE("Profile or level not set for resolution type %d, index %d",
-              type, index);
+        ALOGE("Profile or level not set for resolution type %d, index %zu",
+                type, index);
         bestProfile = PROFILE_CBP;
         bestLevel = LEVEL_31;
     }
@@ -382,7 +382,6 @@
     disableAll();
 
     unsigned native, dummy;
-    unsigned res[3];
     size_t size = strlen(spec);
     size_t offset = 0;
 
@@ -507,7 +506,7 @@
                 continue;
             }
 
-            ALOGV("type %u, index %u, %u x %u %c%u supported",
+            ALOGV("type %zu, index %zu, %zu x %zu %c%zu supported",
                   i, j, width, height, interlaced ? 'i' : 'p', framesPerSecond);
 
             uint32_t score = width * height * framesPerSecond;
diff --git a/media/libstagefright/wifi-display/rtp/RTPSender.cpp b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
index e88a3bd..c66a898 100644
--- a/media/libstagefright/wifi-display/rtp/RTPSender.cpp
+++ b/media/libstagefright/wifi-display/rtp/RTPSender.cpp
@@ -95,11 +95,11 @@
         return INVALID_OPERATION;
     }
 
-    sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, id());
+    sp<AMessage> rtpNotify = new AMessage(kWhatRTPNotify, this);
 
     sp<AMessage> rtcpNotify;
     if (remoteRTCPPort >= 0) {
-        rtcpNotify = new AMessage(kWhatRTCPNotify, id());
+        rtcpNotify = new AMessage(kWhatRTCPNotify, this);
     }
 
     CHECK_EQ(mRTPSessionID, 0);
@@ -252,8 +252,6 @@
     int64_t timeUs;
     CHECK(tsPackets->meta()->findInt64("timeUs", &timeUs));
 
-    const size_t numTSPackets = tsPackets->size() / 188;
-
     size_t srcOffset = 0;
     while (srcOffset < tsPackets->size()) {
         sp<ABuffer> udpPacket =
@@ -672,8 +670,8 @@
 
             default:
             {
-                ALOGW("Unknown RTCP packet type %u of size %d",
-                     (unsigned)data[1], headerLength);
+                ALOGW("Unknown RTCP packet type %u of size %zu",
+                        (unsigned)data[1], headerLength);
                 break;
             }
         }
@@ -764,7 +762,7 @@
     return OK;
 }
 
-status_t RTPSender::parseAPP(const uint8_t *data, size_t size) {
+status_t RTPSender::parseAPP(const uint8_t *data, size_t size __unused) {
     if (!memcmp("late", &data[8], 4)) {
         int64_t avgLatencyUs = (int64_t)U64_AT(&data[12]);
         int64_t maxLatencyUs = (int64_t)U64_AT(&data[20]);
diff --git a/media/libstagefright/wifi-display/source/Converter.cpp b/media/libstagefright/wifi-display/source/Converter.cpp
index 2834a66..471152e 100644
--- a/media/libstagefright/wifi-display/source/Converter.cpp
+++ b/media/libstagefright/wifi-display/source/Converter.cpp
@@ -93,7 +93,7 @@
 
 void Converter::shutdownAsync() {
     ALOGV("shutdown");
-    (new AMessage(kWhatShutdown, id()))->post();
+    (new AMessage(kWhatShutdown, this))->post();
 }
 
 status_t Converter::init() {
@@ -482,11 +482,11 @@
 
 #if 1
     if (mEncoderActivityNotify == NULL) {
-        mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, id());
+        mEncoderActivityNotify = new AMessage(kWhatEncoderActivity, this);
     }
     mEncoder->requestActivityNotification(mEncoderActivityNotify->dup());
 #else
-    sp<AMessage> notify = new AMessage(kWhatEncoderActivity, id());
+    sp<AMessage> notify = new AMessage(kWhatEncoderActivity, this);
     notify->setInt64("whenUs", ALooper::GetNowUs());
     mEncoder->requestActivityNotification(notify);
 #endif
@@ -731,8 +731,7 @@
 
                 // MediaSender will post the following message when HDCP
                 // is done, to release the output buffer back to encoder.
-                sp<AMessage> notify(new AMessage(
-                        kWhatReleaseOutputBuffer, id()));
+                sp<AMessage> notify(new AMessage(kWhatReleaseOutputBuffer, this));
                 notify->setInt32("bufferIndex", bufferIndex);
 
                 buffer = new ABuffer(
@@ -748,7 +747,7 @@
             buffer->meta()->setInt64("timeUs", timeUs);
 
             ALOGV("[%s] time %lld us (%.2f secs)",
-                  mIsVideo ? "video" : "audio", timeUs, timeUs / 1E6);
+                    mIsVideo ? "video" : "audio", (long long)timeUs, timeUs / 1E6);
 
             memcpy(buffer->data(), outbuf->base() + offset, size);
 
@@ -787,18 +786,18 @@
 }
 
 void Converter::requestIDRFrame() {
-    (new AMessage(kWhatRequestIDRFrame, id()))->post();
+    (new AMessage(kWhatRequestIDRFrame, this))->post();
 }
 
 void Converter::dropAFrame() {
     // Unsupported in surface input mode.
     CHECK(!(mFlags & FLAG_USE_SURFACE_INPUT));
 
-    (new AMessage(kWhatDropAFrame, id()))->post();
+    (new AMessage(kWhatDropAFrame, this))->post();
 }
 
 void Converter::suspendEncoding(bool suspend) {
-    sp<AMessage> msg = new AMessage(kWhatSuspendEncoding, id());
+    sp<AMessage> msg = new AMessage(kWhatSuspendEncoding, this);
     msg->setInt32("suspend", suspend);
     msg->post();
 }
diff --git a/media/libstagefright/wifi-display/source/Converter.h b/media/libstagefright/wifi-display/source/Converter.h
index 5876e07..b182990 100644
--- a/media/libstagefright/wifi-display/source/Converter.h
+++ b/media/libstagefright/wifi-display/source/Converter.h
@@ -23,7 +23,7 @@
 namespace android {
 
 struct ABuffer;
-struct IGraphicBufferProducer;
+class IGraphicBufferProducer;
 struct MediaCodec;
 
 #define ENABLE_SILENCE_DETECTION        0
diff --git a/media/libstagefright/wifi-display/source/MediaPuller.cpp b/media/libstagefright/wifi-display/source/MediaPuller.cpp
index 86b918f..ce07a4e 100644
--- a/media/libstagefright/wifi-display/source/MediaPuller.cpp
+++ b/media/libstagefright/wifi-display/source/MediaPuller.cpp
@@ -63,21 +63,21 @@
 }
 
 status_t MediaPuller::start() {
-    return postSynchronouslyAndReturnError(new AMessage(kWhatStart, id()));
+    return postSynchronouslyAndReturnError(new AMessage(kWhatStart, this));
 }
 
 void MediaPuller::stopAsync(const sp<AMessage> &notify) {
-    sp<AMessage> msg = new AMessage(kWhatStop, id());
+    sp<AMessage> msg = new AMessage(kWhatStop, this);
     msg->setMessage("notify", notify);
     msg->post();
 }
 
 void MediaPuller::pause() {
-    (new AMessage(kWhatPause, id()))->post();
+    (new AMessage(kWhatPause, this))->post();
 }
 
 void MediaPuller::resume() {
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 }
 
 void MediaPuller::onMessageReceived(const sp<AMessage> &msg) {
@@ -105,7 +105,7 @@
             sp<AMessage> response = new AMessage;
             response->setInt32("err", err);
 
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
             response->postReply(replyID);
             break;
@@ -215,7 +215,7 @@
 }
 
 void MediaPuller::schedulePull() {
-    sp<AMessage> msg = new AMessage(kWhatPull, id());
+    sp<AMessage> msg = new AMessage(kWhatPull, this);
     msg->setInt32("generation", mPullGeneration);
     msg->post();
 }
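MediaPuller::start() above also shows the other half of the AMessage migration: synchronous calls now receive an sp<AReplyToken> from senderAwaitsResponse() instead of a raw uint32_t reply id. A minimal sketch of that request/reply round trip follows, using only foundation calls that appear in this patch (postAndAwaitResponse, senderAwaitsResponse, postReply); the function names and the "err" payload key are illustrative.

// Caller side: post the message and block until the handler replies.
status_t postAndReturnError(const sp<AMessage> &msg) {
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err != OK) {
        return err;
    }
    if (!response->findInt32("err", &err)) {
        err = OK;
    }
    return err;
}

// Handler side (inside onMessageReceived): the reply token is now a
// strong pointer, not an integer id.
void replyToSender(const sp<AMessage> &msg, status_t result) {
    sp<AReplyToken> replyID;
    CHECK(msg->senderAwaitsResponse(&replyID));

    sp<AMessage> response = new AMessage;
    response->setInt32("err", result);
    response->postReply(replyID);
}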
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index 2cb4786..ed5a404 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -214,7 +214,7 @@
         mConverter->shutdownAsync();
     }
 
-    sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, id());
+    sp<AMessage> msg = new AMessage(kWhatMediaPullerStopped, this);
 
     if (mStarted && mMediaPuller != NULL) {
         if (mRepeaterSource != NULL) {
@@ -345,12 +345,14 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 WifiDisplaySource::PlaybackSession::PlaybackSession(
+        const String16 &opPackageName,
         const sp<ANetworkSession> &netSession,
         const sp<AMessage> &notify,
         const in_addr &interfaceAddr,
         const sp<IHDCP> &hdcp,
         const char *path)
-    : mNetSession(netSession),
+    : mOpPackageName(opPackageName),
+      mNetSession(netSession),
       mNotify(notify),
       mInterfaceAddr(interfaceAddr),
       mHDCP(hdcp),
@@ -382,7 +384,7 @@
         size_t videoResolutionIndex,
         VideoFormats::ProfileType videoProfileType,
         VideoFormats::LevelType videoLevelType) {
-    sp<AMessage> notify = new AMessage(kWhatMediaSenderNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatMediaSenderNotify, this);
     mMediaSender = new MediaSender(mNetSession, notify);
     looper()->registerHandler(mMediaSender);
 
@@ -440,7 +442,7 @@
 status_t WifiDisplaySource::PlaybackSession::play() {
     updateLiveness();
 
-    (new AMessage(kWhatResume, id()))->post();
+    (new AMessage(kWhatResume, this))->post();
 
     return OK;
 }
@@ -460,7 +462,7 @@
 status_t WifiDisplaySource::PlaybackSession::pause() {
     updateLiveness();
 
-    (new AMessage(kWhatPause, id()))->post();
+    (new AMessage(kWhatPause, this))->post();
 
     return OK;
 }
@@ -508,7 +510,7 @@
             } else if (what == Converter::kWhatEOS) {
                 CHECK_EQ(what, Converter::kWhatEOS);
 
-                ALOGI("output EOS on track %d", trackIndex);
+                ALOGI("output EOS on track %zu", trackIndex);
 
                 ssize_t index = mTracks.indexOfKey(trackIndex);
                 CHECK_GE(index, 0);
@@ -581,7 +583,7 @@
             CHECK(msg->findSize("trackIndex", &trackIndex));
 
             if (what == Track::kWhatStopped) {
-                ALOGI("Track %d stopped", trackIndex);
+                ALOGI("Track %zu stopped", trackIndex);
 
                 sp<Track> track = mTracks.valueFor(trackIndex);
                 looper()->unregisterHandler(track->id());
@@ -786,7 +788,7 @@
 
         size_t trackIndex = mTracks.size();
 
-        sp<AMessage> notify = new AMessage(kWhatTrackNotify, id());
+        sp<AMessage> notify = new AMessage(kWhatTrackNotify, this);
         notify->setSize("trackIndex", trackIndex);
 
         sp<Track> track = new Track(notify, format);
@@ -821,21 +823,27 @@
         return;
     }
 
+    int64_t delayUs = 1000000; // default delay is 1 sec
     int64_t sampleTimeUs;
     status_t err = mExtractor->getSampleTime(&sampleTimeUs);
 
-    int64_t nowUs = ALooper::GetNowUs();
+    if (err == OK) {
+        int64_t nowUs = ALooper::GetNowUs();
 
-    if (mFirstSampleTimeRealUs < 0ll) {
-        mFirstSampleTimeRealUs = nowUs;
-        mFirstSampleTimeUs = sampleTimeUs;
+        if (mFirstSampleTimeRealUs < 0ll) {
+            mFirstSampleTimeRealUs = nowUs;
+            mFirstSampleTimeUs = sampleTimeUs;
+        }
+
+        int64_t whenUs = sampleTimeUs - mFirstSampleTimeUs + mFirstSampleTimeRealUs;
+        delayUs = whenUs - nowUs;
+    } else {
+        ALOGW("could not get sample time (%d)", err);
     }
 
-    int64_t whenUs = sampleTimeUs - mFirstSampleTimeUs + mFirstSampleTimeRealUs;
-
-    sp<AMessage> msg = new AMessage(kWhatPullExtractorSample, id());
+    sp<AMessage> msg = new AMessage(kWhatPullExtractorSample, this);
     msg->setInt32("generation", mPullExtractorGeneration);
-    msg->post(whenUs - nowUs);
+    msg->post(delayUs);
 
     mPullExtractorPending = true;
 }
@@ -857,7 +865,7 @@
     size_t trackIndex;
     CHECK_EQ((status_t)OK, mExtractor->getSampleTrackIndex(&trackIndex));
 
-    sp<AMessage> msg = new AMessage(kWhatConverterNotify, id());
+    sp<AMessage> msg = new AMessage(kWhatConverterNotify, this);
 
     msg->setSize(
             "trackIndex", mExtractorTrackToInternalTrack.valueFor(trackIndex));
@@ -955,7 +963,7 @@
                     ? MEDIA_MIMETYPE_AUDIO_RAW : MEDIA_MIMETYPE_AUDIO_AAC);
     }
 
-    notify = new AMessage(kWhatConverterNotify, id());
+    notify = new AMessage(kWhatConverterNotify, this);
     notify->setSize("trackIndex", trackIndex);
 
     sp<Converter> converter = new Converter(notify, codecLooper, format);
@@ -970,7 +978,7 @@
         return err;
     }
 
-    notify = new AMessage(Converter::kWhatMediaPullerNotify, converter->id());
+    notify = new AMessage(Converter::kWhatMediaPullerNotify, converter);
     notify->setSize("trackIndex", trackIndex);
 
     sp<MediaPuller> puller = new MediaPuller(source, notify);
@@ -980,7 +988,7 @@
         *numInputBuffers = converter->getInputBufferCount();
     }
 
-    notify = new AMessage(kWhatTrackNotify, id());
+    notify = new AMessage(kWhatTrackNotify, this);
     notify->setSize("trackIndex", trackIndex);
 
     sp<Track> track = new Track(
@@ -1063,6 +1071,7 @@
 status_t WifiDisplaySource::PlaybackSession::addAudioSource(bool usePCMAudio) {
     sp<AudioSource> audioSource = new AudioSource(
             AUDIO_SOURCE_REMOTE_SUBMIX,
+            mOpPackageName,
             48000 /* sampleRate */,
             2 /* channelCount */);
 
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.h b/media/libstagefright/wifi-display/source/PlaybackSession.h
index 2824143..f6673df 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.h
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.h
@@ -22,11 +22,13 @@
 #include "VideoFormats.h"
 #include "WifiDisplaySource.h"
 
+#include <utils/String16.h>
+
 namespace android {
 
 struct ABuffer;
 struct IHDCP;
-struct IGraphicBufferProducer;
+class IGraphicBufferProducer;
 struct MediaPuller;
 struct MediaSource;
 struct MediaSender;
@@ -36,6 +38,7 @@
 // display.
 struct WifiDisplaySource::PlaybackSession : public AHandler {
     PlaybackSession(
+            const String16 &opPackageName,
             const sp<ANetworkSession> &netSession,
             const sp<AMessage> &notify,
             const struct in_addr &interfaceAddr,
@@ -96,6 +99,8 @@
         kWhatPullExtractorSample,
     };
 
+    String16 mOpPackageName;
+
     sp<ANetworkSession> mNetSession;
     sp<AMessage> mNotify;
     in_addr mInterfaceAddr;
diff --git a/media/libstagefright/wifi-display/source/RepeaterSource.cpp b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
index 59d7e6e..af6b663 100644
--- a/media/libstagefright/wifi-display/source/RepeaterSource.cpp
+++ b/media/libstagefright/wifi-display/source/RepeaterSource.cpp
@@ -173,7 +173,7 @@
 }
 
 void RepeaterSource::postRead() {
-    (new AMessage(kWhatRead, mReflector->id()))->post();
+    (new AMessage(kWhatRead, mReflector))->post();
 }
 
 void RepeaterSource::onMessageReceived(const sp<AMessage> &msg) {
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
index 7eb8b73..e26165e 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.cpp
@@ -50,14 +50,16 @@
 const AString WifiDisplaySource::sUserAgent = MakeUserAgent();
 
 WifiDisplaySource::WifiDisplaySource(
+        const String16 &opPackageName,
         const sp<ANetworkSession> &netSession,
         const sp<IRemoteDisplayClient> &client,
         const char *path)
-    : mState(INITIALIZED),
+    : mOpPackageName(opPackageName),
+      mState(INITIALIZED),
       mNetSession(netSession),
       mClient(client),
       mSessionID(0),
-      mStopReplyID(0),
+      mStopReplyID(NULL),
       mChosenRTPPort(-1),
       mUsingPCMAudio(false),
       mClientSessionID(0),
@@ -106,7 +108,7 @@
 status_t WifiDisplaySource::start(const char *iface) {
     CHECK_EQ(mState, INITIALIZED);
 
-    sp<AMessage> msg = new AMessage(kWhatStart, id());
+    sp<AMessage> msg = new AMessage(kWhatStart, this);
     msg->setString("iface", iface);
 
     sp<AMessage> response;
@@ -114,21 +116,21 @@
 }
 
 status_t WifiDisplaySource::stop() {
-    sp<AMessage> msg = new AMessage(kWhatStop, id());
+    sp<AMessage> msg = new AMessage(kWhatStop, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t WifiDisplaySource::pause() {
-    sp<AMessage> msg = new AMessage(kWhatPause, id());
+    sp<AMessage> msg = new AMessage(kWhatPause, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
 }
 
 status_t WifiDisplaySource::resume() {
-    sp<AMessage> msg = new AMessage(kWhatResume, id());
+    sp<AMessage> msg = new AMessage(kWhatResume, this);
 
     sp<AMessage> response;
     return PostAndAwaitResponse(msg, &response);
@@ -138,7 +140,7 @@
     switch (msg->what()) {
         case kWhatStart:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             AString iface;
@@ -167,7 +169,7 @@
 
             if (err == OK) {
                 if (inet_aton(iface.c_str(), &mInterfaceAddr) != 0) {
-                    sp<AMessage> notify = new AMessage(kWhatRTSPNotify, id());
+                    sp<AMessage> notify = new AMessage(kWhatRTSPNotify, this);
 
                     err = mNetSession->createRTSPServer(
                             mInterfaceAddr, port, notify, &mSessionID);
@@ -310,7 +312,7 @@
                 if (err == OK) {
                     mState = AWAITING_CLIENT_TEARDOWN;
 
-                    (new AMessage(kWhatTeardownTriggerTimedOut, id()))->post(
+                    (new AMessage(kWhatTeardownTriggerTimedOut, this))->post(
                             kTeardownTriggerTimeouSecs * 1000000ll);
 
                     break;
@@ -325,7 +327,7 @@
 
         case kWhatPause:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             status_t err = OK;
@@ -345,7 +347,7 @@
 
         case kWhatResume:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             CHECK(msg->senderAwaitsResponse(&replyID));
 
             status_t err = OK;
@@ -492,7 +494,7 @@
             if (mState == AWAITING_CLIENT_TEARDOWN) {
                 ALOGI("TEARDOWN trigger timed out, forcing disconnection.");
 
-                CHECK_NE(mStopReplyID, 0);
+                CHECK(mStopReplyID != NULL);
                 finishStop();
                 break;
             }
@@ -529,7 +531,7 @@
                     // HDCPObserver::notify is completely handled before
                     // we clear the HDCP instance and unload the shared
                     // library :(
-                    (new AMessage(kWhatFinishStop2, id()))->post(300000ll);
+                    (new AMessage(kWhatFinishStop2, this))->post(300000ll);
                     break;
                 }
 
@@ -881,7 +883,7 @@
                     &framesPerSecond,
                     &interlaced));
 
-        ALOGI("Picked video resolution %u x %u %c%u",
+        ALOGI("Picked video resolution %zu x %zu %c%zu",
               width, height, interlaced ? 'i' : 'p', framesPerSecond);
 
         ALOGI("Picked AVC profile %d, level %d",
@@ -1027,7 +1029,7 @@
     }
 
     mReaperPending = true;
-    (new AMessage(kWhatReapDeadClients, id()))->post(kReaperIntervalUs);
+    (new AMessage(kWhatReapDeadClients, this))->post(kReaperIntervalUs);
 }
 
 void WifiDisplaySource::scheduleKeepAlive(int32_t sessionID) {
@@ -1035,7 +1037,7 @@
     // expire, make sure the timeout is greater than 5 secs to begin with.
     CHECK_GT(kPlaybackSessionTimeoutUs, 5000000ll);
 
-    sp<AMessage> msg = new AMessage(kWhatKeepAlive, id());
+    sp<AMessage> msg = new AMessage(kWhatKeepAlive, this);
     msg->setInt32("sessionID", sessionID);
     msg->post(kPlaybackSessionTimeoutUs - 5000000ll);
 }
@@ -1239,13 +1241,13 @@
 
     int32_t playbackSessionID = makeUniquePlaybackSessionID();
 
-    sp<AMessage> notify = new AMessage(kWhatPlaybackSessionNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatPlaybackSessionNotify, this);
     notify->setInt32("playbackSessionID", playbackSessionID);
     notify->setInt32("sessionID", sessionID);
 
     sp<PlaybackSession> playbackSession =
         new PlaybackSession(
-                mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str());
+                mOpPackageName, mNetSession, notify, mInterfaceAddr, mHDCP, mMediaPath.c_str());
 
     looper()->registerHandler(playbackSession);
 
@@ -1470,7 +1472,7 @@
     mNetSession->sendRequest(sessionID, response.c_str());
 
     if (mState == AWAITING_CLIENT_TEARDOWN) {
-        CHECK_NE(mStopReplyID, 0);
+        CHECK(mStopReplyID != NULL);
         finishStop();
     } else {
         mClient->onDisplayError(IRemoteDisplayClient::kDisplayErrorUnknown);
@@ -1707,7 +1709,7 @@
         return ERROR_UNSUPPORTED;
     }
 
-    sp<AMessage> notify = new AMessage(kWhatHDCPNotify, id());
+    sp<AMessage> notify = new AMessage(kWhatHDCPNotify, this);
     mHDCPObserver = new HDCPObserver(notify);
 
     status_t err = mHDCP->setObserver(mHDCPObserver);
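The hunks above are part of a wider migration in this patch: AMessage now takes an sp<AHandler> instead of a looper-registered handler id(), and synchronous requests carry an sp<AReplyToken> instead of a uint32_t reply ID. A minimal sketch of the resulting request/reply pattern, assuming only the stagefright foundation API exercised by the changes above (ExampleHandler and kWhatStop are illustrative names, not part of the patch):

#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AMessage.h>
#include <utils/Errors.h>

using namespace android;

struct ExampleHandler : public AHandler {
    enum { kWhatStop = 'stop' };

    status_t doStop() {
        sp<AMessage> msg = new AMessage(kWhatStop, this);  // was: ..., id()
        sp<AMessage> response;
        return msg->postAndAwaitResponse(&response);
    }

protected:
    virtual void onMessageReceived(const sp<AMessage> &msg) {
        switch (msg->what()) {
            case kWhatStop:
            {
                sp<AReplyToken> replyID;                   // was: uint32_t
                CHECK(msg->senderAwaitsResponse(&replyID));

                sp<AMessage> response = new AMessage;
                response->setInt32("err", OK);
                response->postReply(replyID);
                break;
            }
        }
    }
};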
diff --git a/media/libstagefright/wifi-display/source/WifiDisplaySource.h b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
index 750265f..c25a675 100644
--- a/media/libstagefright/wifi-display/source/WifiDisplaySource.h
+++ b/media/libstagefright/wifi-display/source/WifiDisplaySource.h
@@ -25,10 +25,13 @@
 
 #include <netinet/in.h>
 
+#include <utils/String16.h>
+
 namespace android {
 
+struct AReplyToken;
 struct IHDCP;
-struct IRemoteDisplayClient;
+class IRemoteDisplayClient;
 struct ParsedMessage;
 
 // Represents the RTSP server acting as a wifi display source.
@@ -37,6 +40,7 @@
     static const unsigned kWifiDisplayDefaultPort = 7236;
 
     WifiDisplaySource(
+            const String16 &opPackageName,
             const sp<ANetworkSession> &netSession,
             const sp<IRemoteDisplayClient> &client,
             const char *path = NULL);
@@ -113,6 +117,8 @@
 
     static const AString sUserAgent;
 
+    String16 mOpPackageName;
+
     State mState;
     VideoFormats mSupportedSourceVideoFormats;
     sp<ANetworkSession> mNetSession;
@@ -121,7 +127,7 @@
     struct in_addr mInterfaceAddr;
     int32_t mSessionID;
 
-    uint32_t mStopReplyID;
+    sp<AReplyToken> mStopReplyID;
 
     AString mWfdClientRtpPorts;
     int32_t mChosenRTPPort;  // extracted from "wfd_client_rtp_ports"
diff --git a/media/libstagefright/yuv/Android.mk b/media/libstagefright/yuv/Android.mk
index bb86dfc..dc67288 100644
--- a/media/libstagefright/yuv/Android.mk
+++ b/media/libstagefright/yuv/Android.mk
@@ -12,7 +12,7 @@
 LOCAL_MODULE:= libstagefright_yuv
 
 
-LOCAL_CFLAGS += -Werror
-
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
 
 include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libstagefright/yuv/YUVImage.cpp b/media/libstagefright/yuv/YUVImage.cpp
index bb3e2fd..c098135 100644
--- a/media/libstagefright/yuv/YUVImage.cpp
+++ b/media/libstagefright/yuv/YUVImage.cpp
@@ -374,13 +374,13 @@
 
 void YUVImage::yuv2rgb(uint8_t yValue, uint8_t uValue, uint8_t vValue,
         uint8_t *r, uint8_t *g, uint8_t *b) const {
-    *r = yValue + (1.370705 * (vValue-128));
-    *g = yValue - (0.698001 * (vValue-128)) - (0.337633 * (uValue-128));
-    *b = yValue + (1.732446 * (uValue-128));
+    int rTmp = yValue + (1.370705 * (vValue-128));
+    int gTmp = yValue - (0.698001 * (vValue-128)) - (0.337633 * (uValue-128));
+    int bTmp = yValue + (1.732446 * (uValue-128));
 
-    *r = clamp(*r, 0, 255);
-    *g = clamp(*g, 0, 255);
-    *b = clamp(*b, 0, 255);
+    *r = clamp(rTmp, 0, 255);
+    *g = clamp(gTmp, 0, 255);
+    *b = clamp(bTmp, 0, 255);
 }
 
 bool YUVImage::writeToPPM(const char *filename) const {
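The hunk above fixes a silent truncation: the old code stored the unclamped conversion result straight into the uint8_t output before calling clamp(). A quick worked example of why that matters: with yValue = 235 and vValue = 255, the red component evaluates to 235 + 1.370705 × (255 − 128) ≈ 409. Written into a uint8_t first, that value has already been reduced to the 0–255 range (typically 409 − 256 = 153), so clamp() sees 153 and leaves it unchanged; computed into an int temporary, clamp(409, 0, 255) correctly saturates to 255.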
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 3a280f0..b6de0d9 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -11,14 +11,16 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-	main_mediaserver.cpp 
+	main_mediaserver.cpp
 
 LOCAL_SHARED_LIBRARIES := \
 	libaudioflinger \
 	libaudiopolicyservice \
 	libcamera_metadata\
 	libcameraservice \
+	libicuuc \
 	libmedialogservice \
+	libresourcemanagerservice \
 	libcutils \
 	libnbaio \
 	libmedia \
@@ -26,19 +28,27 @@
 	libutils \
 	liblog \
 	libbinder \
-	libsoundtriggerservice
+	libsoundtriggerservice \
+	libradioservice
 
 LOCAL_STATIC_LIBRARIES := \
-	libregistermsext
+        libicuandroid_utils \
+        libregistermsext
 
 LOCAL_C_INCLUDES := \
     frameworks/av/media/libmediaplayerservice \
     frameworks/av/services/medialog \
     frameworks/av/services/audioflinger \
     frameworks/av/services/audiopolicy \
+    frameworks/av/services/audiopolicy/common/managerdefinitions/include \
+    frameworks/av/services/audiopolicy/common/include \
+    frameworks/av/services/audiopolicy/engine/interface \
     frameworks/av/services/camera/libcameraservice \
+    frameworks/av/services/mediaresourcemanager \
     $(call include-path-for, audio-utils) \
-    frameworks/av/services/soundtrigger
+    frameworks/av/services/soundtrigger \
+    frameworks/av/services/radio \
+    external/sonic
 
 LOCAL_MODULE:= mediaserver
 LOCAL_32_BIT_ONLY := true
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index af1c9e6..4a485ed 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -31,10 +31,13 @@
 // from LOCAL_C_INCLUDES
 #include "AudioFlinger.h"
 #include "CameraService.h"
+#include "IcuUtils.h"
 #include "MediaLogService.h"
 #include "MediaPlayerService.h"
-#include "AudioPolicyService.h"
+#include "ResourceManagerService.h"
+#include "service/AudioPolicyService.h"
 #include "SoundTriggerHwService.h"
+#include "RadioService.h"
 
 using namespace android;
 
@@ -122,14 +125,17 @@
             prctl(PR_SET_PDEATHSIG, SIGKILL);   // if parent media.log dies before me, kill me also
             setpgid(0, 0);                      // but if I die first, don't kill my parent
         }
+        InitializeIcuOrDie();
         sp<ProcessState> proc(ProcessState::self());
         sp<IServiceManager> sm = defaultServiceManager();
         ALOGI("ServiceManager: %p", sm.get());
         AudioFlinger::instantiate();
         MediaPlayerService::instantiate();
+        ResourceManagerService::instantiate();
         CameraService::instantiate();
         AudioPolicyService::instantiate();
         SoundTriggerHwService::instantiate();
+        RadioService::instantiate();
         registerExtensions();
         ProcessState::self()->startThreadPool();
         IPCThreadState::self()->joinThreadPool();
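Each ::instantiate() call above publishes the corresponding service with the service manager under a fixed name. A rough sketch of what this amounts to for the new RadioService, assuming the usual BinderService<> helper (the helper itself is not shown in this patch); the name "media.radio" is the one Radio.cpp looks up further below:

#include <binder/IServiceManager.h>
#include <radio/IRadioService.h>
#include "RadioService.h"

using namespace android;

// Roughly what RadioService::instantiate() does (publish side):
static void publishRadioService() {
    defaultServiceManager()->addService(
            String16("media.radio"), new RadioService());
}

// How a client process resolves it, by the same name:
static sp<IRadioService> resolveRadioService() {
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.radio"));
    return interface_cast<IRadioService>(binder);
}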
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index ed00b72..cd0c462 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -116,7 +116,7 @@
 
         case kWhatStopActivityNotifications:
         {
-            uint32_t replyID;
+            sp<AReplyToken> replyID;
             msg->senderAwaitsResponse(&replyID);
 
             mCodec->mGeneration++;
@@ -136,7 +136,7 @@
 
 
 static void requestActivityNotification(AMediaCodec *codec) {
-    (new AMessage(kWhatRequestActivityNotifications, codec->mHandler->id()))->post();
+    (new AMessage(kWhatRequestActivityNotifications, codec->mHandler))->post();
 }
 
 extern "C" {
@@ -154,6 +154,10 @@
     } else {
         mData->mCodec = android::MediaCodec::CreateByComponentName(mData->mLooper, name);
     }
+    if (mData->mCodec == NULL) {  // failed to create codec
+        AMediaCodec_delete(mData);
+        return NULL;
+    }
     mData->mHandler = new CodecHandler(mData);
     mData->mLooper->registerHandler(mData->mHandler);
     mData->mGeneration = 1;
@@ -180,17 +184,21 @@
 
 EXPORT
 media_status_t AMediaCodec_delete(AMediaCodec *mData) {
-    if (mData->mCodec != NULL) {
-        mData->mCodec->release();
-        mData->mCodec.clear();
-    }
+    if (mData != NULL) {
+        if (mData->mCodec != NULL) {
+            mData->mCodec->release();
+            mData->mCodec.clear();
+        }
 
-    if (mData->mLooper != NULL) {
-        mData->mLooper->unregisterHandler(mData->mHandler->id());
-        mData->mLooper->stop();
-        mData->mLooper.clear();
+        if (mData->mLooper != NULL) {
+            if (mData->mHandler != NULL) {
+                mData->mLooper->unregisterHandler(mData->mHandler->id());
+            }
+            mData->mLooper->stop();
+            mData->mLooper.clear();
+        }
+        delete mData;
     }
-    delete mData;
     return AMEDIA_OK;
 }
 
@@ -219,7 +227,7 @@
     if (ret != OK) {
         return translate_error(ret);
     }
-    mData->mActivityNotification = new AMessage(kWhatActivityNotify, mData->mHandler->id());
+    mData->mActivityNotification = new AMessage(kWhatActivityNotify, mData->mHandler);
     mData->mActivityNotification->setInt32("generation", mData->mGeneration);
     requestActivityNotification(mData);
     return AMEDIA_OK;
@@ -229,7 +237,7 @@
 media_status_t AMediaCodec_stop(AMediaCodec *mData) {
     media_status_t ret = translate_error(mData->mCodec->stop());
 
-    sp<AMessage> msg = new AMessage(kWhatStopActivityNotifications, mData->mHandler->id());
+    sp<AMessage> msg = new AMessage(kWhatStopActivityNotifications, mData->mHandler);
     sp<AMessage> response;
     msg->postAndAwaitResponse(&response);
     mData->mActivityNotification.clear();
@@ -352,7 +360,8 @@
 }
 
 //EXPORT
-media_status_t AMediaCodec_setNotificationCallback(AMediaCodec *mData, OnCodecEvent callback, void *userdata) {
+media_status_t AMediaCodec_setNotificationCallback(AMediaCodec *mData, OnCodecEvent callback,
+        void *userdata) {
     mData->mCallback = callback;
     mData->mCallbackUserData = userdata;
     return AMEDIA_OK;
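Caller-side implication of the NdkMediaCodec changes above: creation can now fail and return NULL instead of handing back a half-initialized codec, and AMediaCodec_delete() tolerates NULL. A short sketch (decodeSomething and the mime string are illustrative, not part of the patch):

#include <media/NdkMediaCodec.h>

static bool decodeSomething() {
    AMediaCodec *codec = AMediaCodec_createDecoderByType("video/avc");
    if (codec == NULL) {
        return false;  // codec creation failed; nothing to clean up
    }
    // ... configure/start/use the codec ...
    AMediaCodec_delete(codec);  // now also a no-op if codec were NULL
    return true;
}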
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 7a1048c..83a5ba1 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -312,8 +312,10 @@
                 String8(optionalParameters[i].mValue));
     }
     String8 defaultUrl;
+    DrmPlugin::KeyRequestType keyRequestType;
     status_t status = mObj->mDrm->getKeyRequest(*iter, mdInit, String8(mimeType),
-            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl);
+            mdKeyType, mdOptionalParameters, mObj->mKeyRequest, defaultUrl,
+            &keyRequestType);
     if (status != OK) {
         return translateStatus(status);
     } else {
@@ -725,4 +727,3 @@
 }
 
 } // extern "C"
-
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index db57d0b..0ecd64f 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -70,7 +70,8 @@
 }
 
 EXPORT
-media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor *mData, int fd, off64_t offset, off64_t length) {
+media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor *mData, int fd, off64_t offset,
+        off64_t length) {
     ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
     return translate_error(mData->mImpl->setDataSource(fd, offset, length));
 }
diff --git a/media/utils/Android.mk b/media/utils/Android.mk
new file mode 100644
index 0000000..dfadbc8
--- /dev/null
+++ b/media/utils/Android.mk
@@ -0,0 +1,39 @@
+# Copyright 2015 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+  BatteryNotifier.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libbinder \
+  libcutils \
+  liblog \
+  libutils \
+
+LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
+
+LOCAL_CFLAGS += \
+  -Wall \
+  -Wextra \
+  -Werror \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
+
+LOCAL_MODULE := libmediautils
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/utils/BatteryNotifier.cpp b/media/utils/BatteryNotifier.cpp
new file mode 100644
index 0000000..7f9cd7a
--- /dev/null
+++ b/media/utils/BatteryNotifier.cpp
@@ -0,0 +1,213 @@
+/*
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "include/mediautils/BatteryNotifier.h"
+
+#include <binder/IServiceManager.h>
+#include <utils/Log.h>
+#include <private/android_filesystem_config.h>
+
+namespace android {
+
+void BatteryNotifier::DeathNotifier::binderDied(const wp<IBinder>& /*who*/) {
+    BatteryNotifier::getInstance().onBatteryStatServiceDied();
+}
+
+BatteryNotifier::BatteryNotifier() : mVideoRefCount(0), mAudioRefCount(0) {}
+
+BatteryNotifier::~BatteryNotifier() {
+    Mutex::Autolock _l(mLock);
+    if (mDeathNotifier != nullptr) {
+        IInterface::asBinder(mBatteryStatService)->unlinkToDeath(mDeathNotifier);
+    }
+}
+
+void BatteryNotifier::noteStartVideo() {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    if (mVideoRefCount == 0 && batteryService != nullptr) {
+        batteryService->noteStartVideo(AID_MEDIA);
+    }
+    mVideoRefCount++;
+}
+
+void BatteryNotifier::noteStopVideo() {
+    Mutex::Autolock _l(mLock);
+    if (mVideoRefCount == 0) {
+        ALOGW("%s: video refcount is broken.", __FUNCTION__);
+        return;
+    }
+
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+
+    mVideoRefCount--;
+    if (mVideoRefCount == 0 && batteryService != nullptr) {
+        batteryService->noteStopVideo(AID_MEDIA);
+    }
+}
+
+void BatteryNotifier::noteResetVideo() {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    mVideoRefCount = 0;
+    if (batteryService != nullptr) {
+        batteryService->noteResetVideo();
+    }
+}
+
+void BatteryNotifier::noteStartAudio() {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    if (mAudioRefCount == 0 && batteryService != nullptr) {
+        batteryService->noteStartAudio(AID_MEDIA);
+    }
+    mAudioRefCount++;
+}
+
+void BatteryNotifier::noteStopAudio() {
+    Mutex::Autolock _l(mLock);
+    if (mAudioRefCount == 0) {
+        ALOGW("%s: audio refcount is broken.", __FUNCTION__);
+        return;
+    }
+
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+
+    mAudioRefCount--;
+    if (mAudioRefCount == 0 && batteryService != nullptr) {
+        batteryService->noteStopAudio(AID_MEDIA);
+    }
+}
+
+void BatteryNotifier::noteResetAudio() {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    mAudioRefCount = 0;
+    if (batteryService != nullptr) {
+        batteryService->noteResetAudio();
+    }
+}
+
+void BatteryNotifier::noteFlashlightOn(const String8& id, int uid) {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+
+    std::pair<String8, int> k = std::make_pair(id, uid);
+    if (!mFlashlightState[k]) {
+        mFlashlightState[k] = true;
+        if (batteryService != nullptr) {
+            batteryService->noteFlashlightOn(uid);
+        }
+    }
+}
+
+void BatteryNotifier::noteFlashlightOff(const String8& id, int uid) {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+
+    std::pair<String8, int> k = std::make_pair(id, uid);
+    if (mFlashlightState[k]) {
+        mFlashlightState[k] = false;
+        if (batteryService != nullptr) {
+            batteryService->noteFlashlightOff(uid);
+        }
+    }
+}
+
+void BatteryNotifier::noteResetFlashlight() {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    mFlashlightState.clear();
+    if (batteryService != nullptr) {
+        batteryService->noteResetFlashlight();
+    }
+}
+
+void BatteryNotifier::noteStartCamera(const String8& id, int uid) {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    std::pair<String8, int> k = std::make_pair(id, uid);
+    if (!mCameraState[k]) {
+        mCameraState[k] = true;
+        if (batteryService != nullptr) {
+            batteryService->noteStartCamera(uid);
+        }
+    }
+}
+
+void BatteryNotifier::noteStopCamera(const String8& id, int uid) {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    std::pair<String8, int> k = std::make_pair(id, uid);
+    if (mCameraState[k]) {
+        mCameraState[k] = false;
+        if (batteryService != nullptr) {
+            batteryService->noteStopCamera(uid);
+        }
+    }
+}
+
+void BatteryNotifier::noteResetCamera() {
+    Mutex::Autolock _l(mLock);
+    sp<IBatteryStats> batteryService = getBatteryService_l();
+    mCameraState.clear();
+    if (batteryService != nullptr) {
+        batteryService->noteResetCamera();
+    }
+}
+
+void BatteryNotifier::onBatteryStatServiceDied() {
+    Mutex::Autolock _l(mLock);
+    mBatteryStatService.clear();
+    mDeathNotifier.clear();
+    // Do not reset mVideoRefCount and mAudioRefCount here. The ref
+    // counting is independent of the battery service availability; the
+    // counts are still needed if the battery service comes back up after
+    // media playback has already started.
+}
+
+sp<IBatteryStats> BatteryNotifier::getBatteryService_l() {
+    if (mBatteryStatService != nullptr) {
+        return mBatteryStatService;
+    }
+    // Get battery service from service manager
+    const sp<IServiceManager> sm(defaultServiceManager());
+    if (sm != nullptr) {
+        const String16 name("batterystats");
+        mBatteryStatService = interface_cast<IBatteryStats>(sm->checkService(name));
+        if (mBatteryStatService == nullptr) {
+            ALOGE("batterystats service unavailable!");
+            return nullptr;
+        }
+
+        mDeathNotifier = new DeathNotifier();
+        IInterface::asBinder(mBatteryStatService)->linkToDeath(mDeathNotifier);
+
+        // Notify start now if media already started
+        if (mVideoRefCount > 0) {
+            mBatteryStatService->noteStartVideo(AID_MEDIA);
+        }
+        if (mAudioRefCount > 0) {
+            mBatteryStatService->noteStartAudio(AID_MEDIA);
+        }
+    }
+    return mBatteryStatService;
+}
+
+ANDROID_SINGLETON_STATIC_INSTANCE(BatteryNotifier);
+
+}  // namespace android
diff --git a/media/utils/README b/media/utils/README
new file mode 100644
index 0000000..65ab0b8
--- /dev/null
+++ b/media/utils/README
@@ -0,0 +1,4 @@
+This is a common shared library for media utility classes.
+
+Consider adding your utility class/function here if it will
+be used across several of the media libraries.
diff --git a/media/utils/include/mediautils/BatteryNotifier.h b/media/utils/include/mediautils/BatteryNotifier.h
new file mode 100644
index 0000000..49048042
--- /dev/null
+++ b/media/utils/include/mediautils/BatteryNotifier.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright 2015, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_BATTERY_NOTIFIER_H
+#define MEDIA_BATTERY_NOTIFIER_H
+
+#include <binder/IBatteryStats.h>
+#include <utils/Singleton.h>
+#include <utils/String8.h>
+
+#include <map>
+#include <utility>
+
+namespace android {
+
+/**
+ * Class used for logging battery life events in mediaserver.
+ */
+class BatteryNotifier : public Singleton<BatteryNotifier> {
+
+    friend class Singleton<BatteryNotifier>;
+    BatteryNotifier();
+
+public:
+    ~BatteryNotifier();
+
+    void noteStartVideo();
+    void noteStopVideo();
+    void noteResetVideo();
+    void noteStartAudio();
+    void noteStopAudio();
+    void noteResetAudio();
+    void noteFlashlightOn(const String8& id, int uid);
+    void noteFlashlightOff(const String8& id, int uid);
+    void noteResetFlashlight();
+    void noteStartCamera(const String8& id, int uid);
+    void noteStopCamera(const String8& id, int uid);
+    void noteResetCamera();
+
+private:
+    void onBatteryStatServiceDied();
+
+    class DeathNotifier : public IBinder::DeathRecipient {
+        virtual void binderDied(const wp<IBinder>& /*who*/);
+    };
+
+    Mutex mLock;
+    int mVideoRefCount;
+    int mAudioRefCount;
+    std::map<std::pair<String8, int>, bool> mFlashlightState;
+    std::map<std::pair<String8, int>, bool> mCameraState;
+    sp<IBatteryStats> mBatteryStatService;
+    sp<DeathNotifier> mDeathNotifier;
+
+    sp<IBatteryStats> getBatteryService_l();
+};
+
+}  // namespace android
+
+#endif // MEDIA_BATTERY_NOTIFIER_H
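Typical use of the new class from a media service, as a sketch; it assumes the caller links against libmediautils (which exports the include directory above) and runs as AID_MEDIA, per BatteryNotifier.cpp. The function names are illustrative:

#include <mediautils/BatteryNotifier.h>

static void onPlaybackStarted() {
    android::BatteryNotifier::getInstance().noteStartVideo();
}

static void onPlaybackStopped() {
    android::BatteryNotifier::getInstance().noteStopVideo();
}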
diff --git a/radio/Android.mk b/radio/Android.mk
new file mode 100644
index 0000000..ecbb8fd
--- /dev/null
+++ b/radio/Android.mk
@@ -0,0 +1,39 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+	Radio.cpp \
+	IRadio.cpp \
+	IRadioClient.cpp \
+	IRadioService.cpp
+
+LOCAL_SHARED_LIBRARIES := \
+	libcutils \
+	libutils \
+	liblog \
+	libbinder \
+	libhardware \
+	libradio_metadata
+
+#LOCAL_C_INCLUDES += \
+	system/media/camera/include \
+	system/media/private/camera/include
+
+LOCAL_MODULE:= libradio
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/radio/IRadio.cpp b/radio/IRadio.cpp
new file mode 100644
index 0000000..242df77
--- /dev/null
+++ b/radio/IRadio.cpp
@@ -0,0 +1,344 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "IRadio"
+#include <utils/Log.h>
+#include <utils/Errors.h>
+#include <binder/IMemory.h>
+#include <radio/IRadio.h>
+#include <radio/IRadioService.h>
+#include <radio/IRadioClient.h>
+#include <system/radio.h>
+#include <system/radio_metadata.h>
+
+namespace android {
+
+enum {
+    DETACH = IBinder::FIRST_CALL_TRANSACTION,
+    SET_CONFIGURATION,
+    GET_CONFIGURATION,
+    SET_MUTE,
+    GET_MUTE,
+    SCAN,
+    STEP,
+    TUNE,
+    CANCEL,
+    GET_PROGRAM_INFORMATION,
+    HAS_CONTROL
+};
+
+class BpRadio: public BpInterface<IRadio>
+{
+public:
+    BpRadio(const sp<IBinder>& impl)
+        : BpInterface<IRadio>(impl)
+    {
+    }
+
+    void detach()
+    {
+        ALOGV("detach");
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        remote()->transact(DETACH, data, &reply);
+    }
+
+    virtual status_t setConfiguration(const struct radio_band_config *config)
+    {
+        Parcel data, reply;
+        if (config == NULL) {
+            return BAD_VALUE;
+        }
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        data.write(config, sizeof(struct radio_band_config));
+        status_t status = remote()->transact(SET_CONFIGURATION, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t getConfiguration(struct radio_band_config *config)
+    {
+        Parcel data, reply;
+        if (config == NULL) {
+            return BAD_VALUE;
+        }
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_CONFIGURATION, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            if (status == NO_ERROR) {
+                reply.read(config, sizeof(struct radio_band_config));
+            }
+        }
+        return status;
+    }
+
+    virtual status_t setMute(bool mute)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        data.writeInt32(mute ? 1 : 0);
+        status_t status = remote()->transact(SET_MUTE, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t getMute(bool *mute)
+    {
+        Parcel data, reply;
+        if (mute == NULL) {
+            return BAD_VALUE;
+        }
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_MUTE, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            if (status == NO_ERROR) {
+                int muteread = reply.readInt32();
+                *mute = muteread != 0;
+            }
+        }
+        return status;
+    }
+
+    virtual status_t scan(radio_direction_t direction, bool skipSubChannel)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        data.writeInt32(direction);
+        data.writeInt32(skipSubChannel ? 1 : 0);
+        status_t status = remote()->transact(SCAN, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t step(radio_direction_t direction, bool skipSubChannel)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        data.writeInt32(direction);
+        data.writeInt32(skipSubChannel ? 1 : 0);
+        status_t status = remote()->transact(STEP, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t tune(unsigned int channel, unsigned int subChannel)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        data.writeInt32(channel);
+        data.writeInt32(subChannel);
+        status_t status = remote()->transact(TUNE, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t cancel()
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        status_t status = remote()->transact(CANCEL, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+        }
+        return status;
+    }
+
+    virtual status_t getProgramInformation(struct radio_program_info *info)
+    {
+        Parcel data, reply;
+        if (info == NULL) {
+            return BAD_VALUE;
+        }
+        radio_metadata_t *metadata = info->metadata;
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        status_t status = remote()->transact(GET_PROGRAM_INFORMATION, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            if (status == NO_ERROR) {
+                reply.read(info, sizeof(struct radio_program_info));
+                info->metadata = metadata;
+                if (metadata == NULL) {
+                    return status;
+                }
+                size_t size = (size_t)reply.readInt32();
+                if (size == 0) {
+                    return status;
+                }
+                metadata =
+                    (radio_metadata_t *)calloc(size / sizeof(unsigned int), sizeof(unsigned int));
+                if (metadata == NULL) {
+                    return NO_MEMORY;
+                }
+                reply.read(metadata, size);
+                status = radio_metadata_add_metadata(&info->metadata, metadata);
+                free(metadata);
+            }
+        }
+        return status;
+    }
+
+    virtual status_t hasControl(bool *hasControl)
+    {
+        Parcel data, reply;
+        if (hasControl == NULL) {
+            return BAD_VALUE;
+        }
+        data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
+        status_t status = remote()->transact(HAS_CONTROL, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            if (status == NO_ERROR) {
+                *hasControl = reply.readInt32() != 0;
+            }
+        }
+        return status;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(Radio, "android.hardware.IRadio");
+
+// ----------------------------------------------------------------------
+
+status_t BnRadio::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch(code) {
+        case DETACH: {
+            ALOGV("DETACH");
+            CHECK_INTERFACE(IRadio, data, reply);
+            detach();
+            return NO_ERROR;
+        } break;
+        case SET_CONFIGURATION: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            struct radio_band_config config;
+            data.read(&config, sizeof(struct radio_band_config));
+            status_t status = setConfiguration(&config);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+        case GET_CONFIGURATION: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            struct radio_band_config config;
+            status_t status = getConfiguration(&config);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->write(&config, sizeof(struct radio_band_config));
+            }
+            return NO_ERROR;
+        }
+        case SET_MUTE: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            bool mute = data.readInt32() != 0;
+            status_t status = setMute(mute);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+        case GET_MUTE: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            bool mute;
+            status_t status = getMute(&mute);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(mute ? 1 : 0);
+            }
+            return NO_ERROR;
+        }
+        case SCAN: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            radio_direction_t direction = (radio_direction_t)data.readInt32();
+            bool skipSubChannel = data.readInt32() == 1;
+            status_t status = scan(direction, skipSubChannel);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+        case STEP: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            radio_direction_t direction = (radio_direction_t)data.readInt32();
+            bool skipSubChannel = data.readInt32() == 1;
+            status_t status = step(direction, skipSubChannel);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+        case TUNE: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            unsigned int channel = (unsigned int)data.readInt32();
+            unsigned int subChannel = (unsigned int)data.readInt32();
+            status_t status = tune(channel, subChannel);
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+        case CANCEL: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            status_t status = cancel();
+            reply->writeInt32(status);
+            return NO_ERROR;
+        }
+        case GET_PROGRAM_INFORMATION: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            struct radio_program_info info;
+
+            status_t status = radio_metadata_allocate(&info.metadata, 0, 0);
+            if (status != NO_ERROR) {
+                return status;
+            }
+            status = getProgramInformation(&info);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->write(&info, sizeof(struct radio_program_info));
+                int count = radio_metadata_get_count(info.metadata);
+                if (count > 0) {
+                    size_t size = radio_metadata_get_size(info.metadata);
+                    reply->writeInt32(size);
+                    reply->write(info.metadata, size);
+                } else {
+                    reply->writeInt32(0);
+                }
+            }
+            radio_metadata_deallocate(info.metadata);
+            return NO_ERROR;
+        }
+        case HAS_CONTROL: {
+            CHECK_INTERFACE(IRadio, data, reply);
+            bool control;
+            status_t status = hasControl(&control);
+            reply->writeInt32(status);
+            if (status == NO_ERROR) {
+                reply->writeInt32(control ? 1 : 0);
+            }
+            return NO_ERROR;
+        }
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
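Every call in IRadio.cpp follows the same proxy/stub convention: the stub writes the status first and the payload only on success, and the proxy reads in the same order. A sketch of that contract with a hypothetical out-parameter call (getFoo and GET_FOO are illustrative names, not part of the IRadio interface):

// Proxy-side fragment (would live in BpRadio):
virtual status_t getFoo(int32_t *value) {
    Parcel data, reply;
    if (value == NULL) return BAD_VALUE;
    data.writeInterfaceToken(IRadio::getInterfaceDescriptor());
    status_t status = remote()->transact(GET_FOO, data, &reply);
    if (status == NO_ERROR) {
        status = (status_t)reply.readInt32();      // 1) status
        if (status == NO_ERROR) {
            *value = reply.readInt32();            // 2) payload
        }
    }
    return status;
}

// Stub-side fragment (a case inside BnRadio::onTransact):
case GET_FOO: {
    CHECK_INTERFACE(IRadio, data, reply);
    int32_t value;
    status_t status = getFoo(&value);
    reply->writeInt32(status);                     // 1) status
    if (status == NO_ERROR) {
        reply->writeInt32(value);                  // 2) payload
    }
    return NO_ERROR;
}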
diff --git a/radio/IRadioClient.cpp b/radio/IRadioClient.cpp
new file mode 100644
index 0000000..033ca49
--- /dev/null
+++ b/radio/IRadioClient.cpp
@@ -0,0 +1,75 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <radio/IRadioClient.h>
+
+namespace android {
+
+enum {
+    ON_EVENT = IBinder::FIRST_CALL_TRANSACTION,
+};
+
+class BpRadioClient: public BpInterface<IRadioClient>
+{
+
+public:
+    BpRadioClient(const sp<IBinder>& impl)
+        : BpInterface<IRadioClient>(impl)
+    {
+    }
+
+    virtual void onEvent(const sp<IMemory>& eventMemory)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadioClient::getInterfaceDescriptor());
+        data.writeStrongBinder(IInterface::asBinder(eventMemory));
+        remote()->transact(ON_EVENT,
+                           data,
+                           &reply);
+    }
+};
+
+IMPLEMENT_META_INTERFACE(RadioClient,
+                         "android.hardware.IRadioClient");
+
+// ----------------------------------------------------------------------
+
+status_t BnRadioClient::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch(code) {
+        case ON_EVENT: {
+            CHECK_INTERFACE(IRadioClient, data, reply);
+            sp<IMemory> eventMemory = interface_cast<IMemory>(
+                data.readStrongBinder());
+            onEvent(eventMemory);
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+    return NO_ERROR;
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
diff --git a/radio/IRadioService.cpp b/radio/IRadioService.cpp
new file mode 100644
index 0000000..8c2b3ef
--- /dev/null
+++ b/radio/IRadioService.cpp
@@ -0,0 +1,181 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "BpRadioService"
+#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Errors.h>
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <binder/IMemory.h>
+#include <binder/Parcel.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+
+#include <radio/IRadioService.h>
+#include <radio/IRadio.h>
+#include <radio/IRadioClient.h>
+
+namespace android {
+
+enum {
+    LIST_MODULES = IBinder::FIRST_CALL_TRANSACTION,
+    ATTACH,
+};
+
+#define MAX_ITEMS_PER_LIST 1024
+
+class BpRadioService: public BpInterface<IRadioService>
+{
+public:
+    BpRadioService(const sp<IBinder>& impl)
+        : BpInterface<IRadioService>(impl)
+    {
+    }
+
+    virtual status_t listModules(struct radio_properties *properties,
+                                 uint32_t *numModules)
+    {
+        if (numModules == NULL || (*numModules != 0 && properties == NULL)) {
+            return BAD_VALUE;
+        }
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadioService::getInterfaceDescriptor());
+        unsigned int numModulesReq = (properties == NULL) ? 0 : *numModules;
+        data.writeInt32(numModulesReq);
+        status_t status = remote()->transact(LIST_MODULES, data, &reply);
+        if (status == NO_ERROR) {
+            status = (status_t)reply.readInt32();
+            *numModules = (unsigned int)reply.readInt32();
+        }
+        ALOGV("listModules() status %d got *numModules %d", status, *numModules);
+        if (status == NO_ERROR) {
+            if (numModulesReq > *numModules) {
+                numModulesReq = *numModules;
+            }
+            if (numModulesReq > 0) {
+                reply.read(properties, numModulesReq * sizeof(struct radio_properties));
+            }
+        }
+        return status;
+    }
+
+    virtual status_t attach(radio_handle_t handle,
+                            const sp<IRadioClient>& client,
+                            const struct radio_band_config *config,
+                            bool withAudio,
+                            sp<IRadio>& radio)
+    {
+        Parcel data, reply;
+        data.writeInterfaceToken(IRadioService::getInterfaceDescriptor());
+        data.writeInt32(handle);
+        data.writeStrongBinder(IInterface::asBinder(client));
+        ALOGV("attach() config %p withAudio %d region %d type %d", config, withAudio, config->region, config->band.type);
+        if (config == NULL) {
+            data.writeInt32(0);
+        } else {
+            data.writeInt32(1);
+            data.write(config, sizeof(struct radio_band_config));
+        }
+        data.writeInt32(withAudio ? 1 : 0);
+        status_t status = remote()->transact(ATTACH, data, &reply);
+        if (status != NO_ERROR) {
+            return status;
+        }
+        status = reply.readInt32();
+        if (reply.readInt32() != 0) {
+            radio = interface_cast<IRadio>(reply.readStrongBinder());
+        }
+        return status;
+    }
+};
+
+IMPLEMENT_META_INTERFACE(RadioService, "android.hardware.IRadioService");
+
+// ----------------------------------------------------------------------
+
+status_t BnRadioService::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
+{
+    switch(code) {
+        case LIST_MODULES: {
+            CHECK_INTERFACE(IRadioService, data, reply);
+            unsigned int numModulesReq = data.readInt32();
+            if (numModulesReq > MAX_ITEMS_PER_LIST) {
+                numModulesReq = MAX_ITEMS_PER_LIST;
+            }
+            unsigned int numModules = numModulesReq;
+            struct radio_properties *properties =
+                    (struct radio_properties *)calloc(numModulesReq,
+                                                      sizeof(struct radio_properties));
+            if (properties == NULL) {
+                reply->writeInt32(NO_MEMORY);
+                reply->writeInt32(0);
+                return NO_ERROR;
+            }
+
+            status_t status = listModules(properties, &numModules);
+            reply->writeInt32(status);
+            reply->writeInt32(numModules);
+            ALOGV("LIST_MODULES status %d got numModules %d", status, numModules);
+
+            if (status == NO_ERROR) {
+                if (numModulesReq > numModules) {
+                    numModulesReq = numModules;
+                }
+                reply->write(properties,
+                             numModulesReq * sizeof(struct radio_properties));
+            }
+            free(properties);
+            return NO_ERROR;
+        } break;
+
+        case ATTACH: {
+            CHECK_INTERFACE(IRadioService, data, reply);
+            radio_handle_t handle = data.readInt32();
+            sp<IRadioClient> client =
+                    interface_cast<IRadioClient>(data.readStrongBinder());
+            struct radio_band_config config;
+            struct radio_band_config *configPtr = NULL;
+            if (data.readInt32() != 0) {
+                data.read(&config, sizeof(struct radio_band_config));
+                configPtr = &config;
+            }
+            bool withAudio = data.readInt32() != 0;
+            ALOGV("ATTACH configPtr %p withAudio %d", configPtr, withAudio);
+            sp<IRadio> radio;
+            status_t status = attach(handle, client, configPtr, withAudio, radio);
+            reply->writeInt32(status);
+            if (radio != 0) {
+                reply->writeInt32(1);
+                reply->writeStrongBinder(IInterface::asBinder(radio));
+            } else {
+                reply->writeInt32(0);
+            }
+            return NO_ERROR;
+        } break;
+        default:
+            return BBinder::onTransact(code, data, reply, flags);
+    }
+}
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
diff --git a/radio/Radio.cpp b/radio/Radio.cpp
new file mode 100644
index 0000000..e3554c2
--- /dev/null
+++ b/radio/Radio.cpp
@@ -0,0 +1,283 @@
+/*
+**
+** Copyright (C) 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "Radio"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/threads.h>
+#include <binder/IPCThreadState.h>
+#include <binder/IServiceManager.h>
+#include <binder/IMemory.h>
+
+#include <radio/Radio.h>
+#include <radio/IRadio.h>
+#include <radio/IRadioService.h>
+#include <radio/IRadioClient.h>
+#include <radio/RadioCallback.h>
+
+namespace android {
+
+namespace {
+    sp<IRadioService>          gRadioService;
+    const int                  kRadioServicePollDelay = 500000; // 0.5s
+    const char*                kRadioServiceName      = "media.radio";
+    Mutex                      gLock;
+
+    class DeathNotifier : public IBinder::DeathRecipient
+    {
+    public:
+        DeathNotifier() {
+        }
+
+        virtual void binderDied(const wp<IBinder>& who __unused) {
+            ALOGV("binderDied");
+            Mutex::Autolock _l(gLock);
+            gRadioService.clear();
+            ALOGW("Radio service died!");
+        }
+    };
+
+    sp<DeathNotifier>         gDeathNotifier;
+}; // namespace anonymous
+
+const sp<IRadioService>& Radio::getRadioService()
+{
+    Mutex::Autolock _l(gLock);
+    if (gRadioService.get() == 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder;
+        do {
+            binder = sm->getService(String16(kRadioServiceName));
+            if (binder != 0) {
+                break;
+            }
+            ALOGW("RadioService not published, waiting...");
+            usleep(kRadioServicePollDelay);
+        } while(true);
+        if (gDeathNotifier == NULL) {
+            gDeathNotifier = new DeathNotifier();
+        }
+        binder->linkToDeath(gDeathNotifier);
+        gRadioService = interface_cast<IRadioService>(binder);
+    }
+    ALOGE_IF(gRadioService == 0, "no RadioService!?");
+    return gRadioService;
+}
+
+// Static methods
+status_t Radio::listModules(struct radio_properties *properties,
+                            uint32_t *numModules)
+{
+    ALOGV("listModules()");
+    const sp<IRadioService>& service = getRadioService();
+    if (service == 0) {
+        return NO_INIT;
+    }
+    return service->listModules(properties, numModules);
+}
+
+sp<Radio> Radio::attach(radio_handle_t handle,
+                        const struct radio_band_config *config,
+                        bool withAudio,
+                        const sp<RadioCallback>& callback)
+{
+    ALOGV("attach()");
+    sp<Radio> radio;
+    const sp<IRadioService>& service = getRadioService();
+    if (service == 0) {
+        return radio;
+    }
+    radio = new Radio(handle, callback);
+    status_t status = service->attach(handle, radio, config, withAudio, radio->mIRadio);
+
+    if (status == NO_ERROR && radio->mIRadio != 0) {
+        IInterface::asBinder(radio->mIRadio)->linkToDeath(radio);
+    } else {
+        ALOGW("Error %d connecting to radio service", status);
+        radio.clear();
+    }
+    return radio;
+}
+
+
+
+// Radio
+Radio::Radio(radio_handle_t handle, const sp<RadioCallback>& callback)
+    : mHandle(handle), mCallback(callback)
+{
+}
+
+Radio::~Radio()
+{
+    if (mIRadio != 0) {
+        mIRadio->detach();
+    }
+}
+
+
+void Radio::detach() {
+    ALOGV("detach()");
+    Mutex::Autolock _l(mLock);
+    mCallback.clear();
+    if (mIRadio != 0) {
+        mIRadio->detach();
+        IInterface::asBinder(mIRadio)->unlinkToDeath(this);
+        mIRadio = 0;
+    }
+}
+
+status_t Radio::setConfiguration(const struct radio_band_config *config)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->setConfiguration(config);
+}
+
+status_t Radio::getConfiguration(struct radio_band_config *config)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->getConfiguration(config);
+}
+
+status_t Radio::setMute(bool mute)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->setMute(mute);
+}
+
+status_t Radio::getMute(bool *mute)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->getMute(mute);
+}
+
+status_t Radio::scan(radio_direction_t direction, bool skipSubchannel)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->scan(direction, skipSubchannel);
+}
+
+status_t Radio::step(radio_direction_t direction, bool skipSubchannel)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->step(direction, skipSubchannel);
+}
+
+status_t Radio::tune(unsigned int channel, unsigned int subChannel)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->tune(channel, subChannel);
+}
+
+status_t Radio::cancel()
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->cancel();
+}
+
+status_t Radio::getProgramInformation(struct radio_program_info *info)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->getProgramInformation(info);
+}
+
+status_t Radio::hasControl(bool *hasControl)
+{
+    Mutex::Autolock _l(mLock);
+    if (mIRadio == 0) {
+        return NO_INIT;
+    }
+    return mIRadio->hasControl(hasControl);
+}
+
+
+// BpRadioClient
+void Radio::onEvent(const sp<IMemory>& eventMemory)
+{
+    Mutex::Autolock _l(mLock);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    struct radio_event *event = (struct radio_event *)eventMemory->pointer();
+    // restore local metadata pointer from offset
+    switch (event->type) {
+    case RADIO_EVENT_TUNED:
+    case RADIO_EVENT_AF_SWITCH:
+        if (event->info.metadata != NULL) {
+            event->info.metadata =
+                    (radio_metadata_t *)((char *)event + (size_t)event->info.metadata);
+        }
+        break;
+    case RADIO_EVENT_METADATA:
+        if (event->metadata != NULL) {
+            event->metadata =
+                    (radio_metadata_t *)((char *)event + (size_t)event->metadata);
+        }
+        break;
+    default:
+        break;
+    }
+
+    if (mCallback != 0) {
+        mCallback->onEvent(event);
+    }
+}
+
+
+//IBinder::DeathRecipient
+void Radio::binderDied(const wp<IBinder>& who __unused) {
+    Mutex::Autolock _l(mLock);
+    ALOGW("Radio server binder Died ");
+    mIRadio = 0;
+    struct radio_event event;
+    memset(&event, 0, sizeof(struct radio_event));
+    event.type = RADIO_EVENT_SERVER_DIED;
+    event.status = DEAD_OBJECT;
+    if (mCallback != 0) {
+        mCallback->onEvent(&event);
+    }
+}
+
+}; // namespace android
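
The onEvent() handler above relies on a convention where radio_metadata_t pointers crossing the
binder boundary are carried as byte offsets from the start of the shared-memory event. A minimal
standalone sketch of both sides of that convention, assuming the metadata is copied into the same
buffer directly after the event; the reduced radio_event definition is an illustrative stand-in,
not the HAL type:

    #include <cstddef>

    struct radio_metadata_t;                 // opaque, as in the radio HAL

    struct radio_event {                     // reduced stand-in for the HAL struct
        int type;
        radio_metadata_t *metadata;          // pointer on the sender, byte offset in transit
    };

    // Sender side: before handing the event to the client, replace the metadata
    // pointer with its offset from the start of the event (the metadata must have
    // been copied into the same shared buffer, right after the event).
    static void flatten_metadata(struct radio_event *event) {
        if (event->metadata != NULL) {
            event->metadata = (radio_metadata_t *)
                    (size_t)((char *)event->metadata - (char *)event);
        }
    }

    // Receiver side: mirror of Radio::onEvent() above, turning the offset back
    // into a pointer that is valid in this process.
    static void restore_metadata(struct radio_event *event) {
        if (event->metadata != NULL) {
            event->metadata = (radio_metadata_t *)
                    ((char *)event + (size_t)event->metadata);
        }
    }
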
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 44d2553..debcdf9 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -29,29 +29,28 @@
 
 include $(CLEAR_VARS)
 
-# Clang++ aborts on AudioMixer.cpp,
-# b/18373866, "do not know how to split this operator."
-ifeq ($(filter $(TARGET_ARCH),arm arm64),$(TARGET_ARCH))
-    LOCAL_CLANG := false
-endif
-
 LOCAL_SRC_FILES:=               \
     AudioFlinger.cpp            \
     Threads.cpp                 \
     Tracks.cpp                  \
+    AudioHwDevice.cpp           \
+    AudioStreamOut.cpp          \
+    SpdifStreamOut.cpp          \
     Effects.cpp                 \
     AudioMixer.cpp.arm          \
-    PatchPanel.cpp
-
-LOCAL_SRC_FILES += StateQueue.cpp
+    BufferProviders.cpp         \
+    PatchPanel.cpp              \
+    StateQueue.cpp
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audiopolicy \
+    $(TOPDIR)external/sonic \
     $(call include-path-for, audio-effects) \
     $(call include-path-for, audio-utils)
 
 LOCAL_SHARED_LIBRARIES := \
     libaudioresampler \
+    libaudiospdif \
     libaudioutils \
     libcommon_time_client \
     libcutils \
@@ -64,7 +63,8 @@
     libhardware_legacy \
     libeffects \
     libpowermanager \
-    libserviceutility
+    libserviceutility \
+    libsonic
 
 LOCAL_STATIC_LIBRARIES := \
     libscheduling_policy \
@@ -74,9 +74,17 @@
 LOCAL_MODULE:= libaudioflinger
 LOCAL_32_BIT_ONLY := true
 
-LOCAL_SRC_FILES += FastMixer.cpp FastMixerState.cpp AudioWatchdog.cpp
-LOCAL_SRC_FILES += FastThread.cpp FastThreadState.cpp
-LOCAL_SRC_FILES += FastCapture.cpp FastCaptureState.cpp
+LOCAL_SRC_FILES += \
+    AudioWatchdog.cpp        \
+    FastCapture.cpp          \
+    FastCaptureDumpState.cpp \
+    FastCaptureState.cpp     \
+    FastMixer.cpp            \
+    FastMixerDumpState.cpp   \
+    FastMixerState.cpp       \
+    FastThread.cpp           \
+    FastThreadDumpState.cpp  \
+    FastThreadState.cpp
 
 LOCAL_CFLAGS += -DSTATE_QUEUE_INSTANTIATIONS='"StateQueueInstantiations.cpp"'
 
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 18ba1ae..9ec5802 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -45,6 +45,8 @@
 #include "AudioFlinger.h"
 #include "ServiceUtilities.h"
 
+#include <media/AudioResamplerPublic.h>
+
 #include <media/EffectsFactoryApi.h>
 #include <audio_effects/effect_visualizer.h>
 #include <audio_effects/effect_ns.h>
@@ -179,13 +181,14 @@
       mIsLowRamDevice(true),
       mIsDeviceTypeKnown(false),
       mGlobalEffectEnableTime(0),
-      mPrimaryOutputSampleRate(0)
+      mSystemReady(false)
 {
     getpid_cached = getpid();
     char value[PROPERTY_VALUE_MAX];
     bool doLog = (property_get("ro.test_harness", value, "0") > 0) && (atoi(value) == 1);
     if (doLog) {
-        mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters", MemoryHeapBase::READ_ONLY);
+        mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
+                MemoryHeapBase::READ_ONLY);
     }
 
 #ifdef TEE_SINK
@@ -271,7 +274,7 @@
 };
 #define ARRAY_SIZE(x) (sizeof((x))/sizeof(((x)[0])))
 
-AudioFlinger::AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
+AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
         audio_module_handle_t module,
         audio_devices_t devices)
 {
@@ -401,6 +404,9 @@
             String8 result(kClientLockedString);
             write(fd, result.string(), result.size());
         }
+
+        EffectDumpEffects(fd);
+
         dumpClients(fd, args);
         if (clientLocked) {
             mClientLock.unlock();
@@ -826,14 +832,20 @@
     if (ret != NO_ERROR) {
         return false;
     }
-
+    bool mute = true;
     bool state = AUDIO_MODE_INVALID;
     AutoMutex lock(mHardwareLock);
-    audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
     mHardwareStatus = AUDIO_HW_GET_MIC_MUTE;
-    dev->get_mic_mute(dev, &state);
+    for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+        audio_hw_device_t *dev = mAudioHwDevs.valueAt(i)->hwDevice();
+        status_t result = dev->get_mic_mute(dev, &state);
+        if (result == NO_ERROR) {
+            mute = mute && state;
+        }
+    }
     mHardwareStatus = AUDIO_HW_IDLE;
-    return state;
+
+    return mute;
 }
 
 status_t AudioFlinger::setMasterMute(bool muted)
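
For reference, the mute aggregation introduced above can be isolated: the microphone is reported
muted only if every HAL device that answers the query reports mute. A small sketch under that
assumption; the DevState struct is a hypothetical stand-in for a per-HAL query result:

    #include <vector>

    struct DevState { bool ok; bool muted; };   // hypothetical per-HAL query result

    // Start from "muted" and AND in every successful answer, so a single device
    // reporting unmuted makes the aggregate unmuted, and failed queries are ignored.
    static bool aggregateMicMute(const std::vector<DevState>& devs) {
        bool mute = true;
        for (const DevState& d : devs) {
            if (d.ok) {
                mute = mute && d.muted;
            }
        }
        return mute;
    }
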
@@ -1009,6 +1021,14 @@
     return streamMute_l(stream);
 }
 
+
+void AudioFlinger::broadcastParametersToRecordThreads_l(const String8& keyValuePairs)
+{
+    for (size_t i = 0; i < mRecordThreads.size(); i++) {
+        mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
+    }
+}
+
 status_t AudioFlinger::setParameters(audio_io_handle_t ioHandle, const String8& keyValuePairs)
 {
     ALOGV("setParameters(): io %d, keyvalue %s, calling pid %d",
@@ -1083,9 +1103,7 @@
             int value;
             if ((param.getInt(String8(AudioParameter::keyRouting), value) == NO_ERROR) &&
                     (value != 0)) {
-                for (size_t i = 0; i < mRecordThreads.size(); i++) {
-                    mRecordThreads.valueAt(i)->setParameters(keyValuePairs);
-                }
+                broadcastParametersToRecordThreads_l(keyValuePairs);
             }
         }
     }
@@ -1138,19 +1156,46 @@
     if (ret != NO_ERROR) {
         return 0;
     }
+    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
+        return 0;
+    }
 
     AutoMutex lock(mHardwareLock);
     mHardwareStatus = AUDIO_HW_GET_INPUT_BUFFER_SIZE;
-    audio_config_t config;
-    memset(&config, 0, sizeof(config));
-    config.sample_rate = sampleRate;
-    config.channel_mask = channelMask;
-    config.format = format;
+    audio_config_t config, proposed;
+    memset(&proposed, 0, sizeof(proposed));
+    proposed.sample_rate = sampleRate;
+    proposed.channel_mask = channelMask;
+    proposed.format = format;
 
     audio_hw_device_t *dev = mPrimaryHardwareDev->hwDevice();
-    size_t size = dev->get_input_buffer_size(dev, &config);
+    size_t frames;
+    for (;;) {
+        // Note: config is currently a const parameter for get_input_buffer_size()
+        // but we use a copy from proposed in case config changes from the call.
+        config = proposed;
+        frames = dev->get_input_buffer_size(dev, &config);
+        if (frames != 0) {
+            break; // hal success, config is the result
+        }
+        // change one parameter of the configuration each iteration to a more "common" value
+        // to see if the device will support it.
+        if (proposed.format != AUDIO_FORMAT_PCM_16_BIT) {
+            proposed.format = AUDIO_FORMAT_PCM_16_BIT;
+        } else if (proposed.sample_rate != 44100) { // 44.1 kHz is required by the CDD as well as
+            proposed.sample_rate = 44100;           // by legacy AudioRecord.java. TODO: query hw?
+        } else {
+            ALOGW("getInputBufferSize failed with minimum buffer size sampleRate %u, "
+                    "format %#x, channelMask 0x%X",
+                    sampleRate, format, channelMask);
+            break; // retries failed, break out of loop with frames == 0.
+        }
+    }
     mHardwareStatus = AUDIO_HW_IDLE;
-    return size;
+    if (frames > 0 && config.sample_rate != sampleRate) {
+        frames = destinationFramesPossible(frames, sampleRate, config.sample_rate);
+    }
+    return frames; // may be converted to bytes at the Java level.
 }
 
 uint32_t AudioFlinger::getInputFramesLost(audio_io_handle_t ioHandle) const
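
The negotiation loop in getInputBufferSize() above relaxes exactly one field of the proposed
configuration per retry until the HAL accepts it. The same pattern in isolation; Config, kPcm16
and the queryHal callback are simplified placeholders, not the HAL API:

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    struct Config { uint32_t sampleRate; int format; };   // simplified stand-in
    constexpr int kPcm16 = 1;                              // placeholder for AUDIO_FORMAT_PCM_16_BIT

    // Try the caller's configuration first, then fall back to more "common" values
    // (16-bit PCM, then 44.1 kHz), changing a single parameter per iteration.
    static size_t negotiateBufferFrames(Config proposed,
                                        const std::function<size_t(const Config&)>& queryHal) {
        for (;;) {
            size_t frames = queryHal(proposed);
            if (frames != 0) {
                return frames;                     // HAL accepted this configuration
            }
            if (proposed.format != kPcm16) {
                proposed.format = kPcm16;          // first fallback: canonical format
            } else if (proposed.sampleRate != 44100) {
                proposed.sampleRate = 44100;       // second fallback: canonical rate
            } else {
                return 0;                          // nothing left to relax, give up
            }
        }
    }
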
@@ -1206,11 +1251,9 @@
     if (client == 0) {
         return;
     }
-    bool clientAdded = false;
+    pid_t pid = IPCThreadState::self()->getCallingPid();
     {
         Mutex::Autolock _cl(mClientLock);
-
-        pid_t pid = IPCThreadState::self()->getCallingPid();
         if (mNotificationClients.indexOfKey(pid) < 0) {
             sp<NotificationClient> notificationClient = new NotificationClient(this,
                                                                                 client,
@@ -1221,22 +1264,19 @@
 
             sp<IBinder> binder = IInterface::asBinder(client);
             binder->linkToDeath(notificationClient);
-            clientAdded = true;
         }
     }
 
     // mClientLock should not be held here because ThreadBase::sendIoConfigEvent() will lock the
     // ThreadBase mutex and the locking order is ThreadBase::mLock then AudioFlinger::mClientLock.
-    if (clientAdded) {
-        // the config change is always sent from playback or record threads to avoid deadlock
-        // with AudioSystem::gLock
-        for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
-            mPlaybackThreads.valueAt(i)->sendIoConfigEvent(AudioSystem::OUTPUT_OPENED);
-        }
+    // the config change is always sent from playback or record threads to avoid deadlock
+    // with AudioSystem::gLock
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        mPlaybackThreads.valueAt(i)->sendIoConfigEvent(AUDIO_OUTPUT_OPENED, pid);
+    }
 
-        for (size_t i = 0; i < mRecordThreads.size(); i++) {
-            mRecordThreads.valueAt(i)->sendIoConfigEvent(AudioSystem::INPUT_OPENED);
-        }
+    for (size_t i = 0; i < mRecordThreads.size(); i++) {
+        mRecordThreads.valueAt(i)->sendIoConfigEvent(AUDIO_INPUT_OPENED, pid);
     }
 }
 
@@ -1269,14 +1309,16 @@
     }
 }
 
-void AudioFlinger::audioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2)
+void AudioFlinger::ioConfigChanged(audio_io_config_event event,
+                                   const sp<AudioIoDescriptor>& ioDesc,
+                                   pid_t pid)
 {
     Mutex::Autolock _l(mClientLock);
     size_t size = mNotificationClients.size();
     for (size_t i = 0; i < size; i++) {
-        mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event,
-                                                                              ioHandle,
-                                                                              param2);
+        if ((pid == 0) || (mNotificationClients.keyAt(i) == pid)) {
+            mNotificationClients.valueAt(i)->audioFlingerClient()->ioConfigChanged(event, ioDesc);
+        }
     }
 }
 
@@ -1385,9 +1427,11 @@
         uint32_t sampleRate,
         audio_format_t format,
         audio_channel_mask_t channelMask,
+        const String16& opPackageName,
         size_t *frameCount,
         IAudioFlinger::track_flags_t *flags,
         pid_t tid,
+        int clientUid,
         int *sessionId,
         size_t *notificationFrames,
         sp<IMemory>& cblk,
@@ -1404,7 +1448,7 @@
     buffers.clear();
 
     // check calling permissions
-    if (!recordingAllowed()) {
+    if (!recordingAllowed(opPackageName)) {
         ALOGE("openRecord() permission denied: recording not allowed");
         lStatus = PERMISSION_DENIED;
         goto Exit;
@@ -1417,9 +1461,8 @@
         goto Exit;
     }
 
-    // we don't yet support anything other than 16-bit PCM
-    if (!(audio_is_valid_format(format) &&
-            audio_is_linear_pcm(format) && format == AUDIO_FORMAT_PCM_16_BIT)) {
+    // we don't yet support anything other than linear PCM
+    if (!audio_is_valid_format(format) || !audio_is_linear_pcm(format)) {
         ALOGE("openRecord() invalid format %#x", format);
         lStatus = BAD_VALUE;
         goto Exit;
@@ -1458,8 +1501,7 @@
         // TODO: the uid should be passed in as a parameter to openRecord
         recordTrack = thread->createRecordTrack_l(client, sampleRate, format, channelMask,
                                                   frameCount, lSessionId, notificationFrames,
-                                                  IPCThreadState::self()->getCallingUid(),
-                                                  flags, tid, &lStatus);
+                                                  clientUid, flags, tid, &lStatus);
         LOG_ALWAYS_FATAL_IF((lStatus == NO_ERROR) && (recordTrack == 0));
 
         if (lStatus == NO_ERROR) {
@@ -1678,6 +1720,26 @@
     return (audio_hw_sync_t)value;
 }
 
+status_t AudioFlinger::systemReady()
+{
+    Mutex::Autolock _l(mLock);
+    ALOGI("%s", __FUNCTION__);
+    if (mSystemReady) {
+        ALOGW("%s called twice", __FUNCTION__);
+        return NO_ERROR;
+    }
+    mSystemReady = true;
+    for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+        ThreadBase *thread = (ThreadBase *)mPlaybackThreads.valueAt(i).get();
+        thread->systemReady();
+    }
+    for (size_t i = 0; i < mRecordThreads.size(); i++) {
+        ThreadBase *thread = (ThreadBase *)mRecordThreads.valueAt(i).get();
+        thread->systemReady();
+    }
+    return NO_ERROR;
+}
+
 // setAudioHwSyncForSession_l() must be called with AudioFlinger::mLock held
 void AudioFlinger::setAudioHwSyncForSession_l(PlaybackThread *thread, audio_session_t sessionId)
 {
@@ -1714,8 +1776,6 @@
 
     mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
 
-    audio_stream_out_t *outStream = NULL;
-
     // FOR TESTING ONLY:
     // This if statement allows overriding the audio policy settings
     // and forcing a specific format or channel mask to the HAL/Sink device for testing.
@@ -1737,37 +1797,30 @@
         }
     }
 
-    status_t status = hwDevHal->open_output_stream(hwDevHal,
-                                                   *output,
-                                                   devices,
-                                                   flags,
-                                                   config,
-                                                   &outStream,
-                                                   address.string());
+    AudioStreamOut *outputStream = NULL;
+    status_t status = outHwDev->openOutputStream(
+            &outputStream,
+            *output,
+            devices,
+            flags,
+            config,
+            address.string());
 
     mHardwareStatus = AUDIO_HW_IDLE;
-    ALOGV("openOutput_l() openOutputStream returned output %p, sampleRate %d, Format %#x, "
-            "channelMask %#x, status %d",
-            outStream,
-            config->sample_rate,
-            config->format,
-            config->channel_mask,
-            status);
 
-    if (status == NO_ERROR && outStream != NULL) {
-        AudioStreamOut *outputStream = new AudioStreamOut(outHwDev, outStream, flags);
+    if (status == NO_ERROR) {
 
         PlaybackThread *thread;
         if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
-            thread = new OffloadThread(this, outputStream, *output, devices);
+            thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
             ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
         } else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
                 || !isValidPcmSinkFormat(config->format)
                 || !isValidPcmSinkChannelMask(config->channel_mask)) {
-            thread = new DirectOutputThread(this, outputStream, *output, devices);
+            thread = new DirectOutputThread(this, outputStream, *output, devices, mSystemReady);
             ALOGV("openOutput_l() created direct output: ID %d thread %p", *output, thread);
         } else {
-            thread = new MixerThread(this, outputStream, *output, devices);
+            thread = new MixerThread(this, outputStream, *output, devices, mSystemReady);
             ALOGV("openOutput_l() created mixer output: ID %d thread %p", *output, thread);
         }
         mPlaybackThreads.add(*output, thread);
@@ -1785,7 +1838,7 @@
                                   uint32_t *latencyMs,
                                   audio_output_flags_t flags)
 {
-    ALOGV("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
+    ALOGI("openOutput(), module %d Device %x, SamplingRate %d, Format %#08x, Channels %x, flags %x",
               module,
               (devices != NULL) ? *devices : 0,
               config->sample_rate,
@@ -1804,7 +1857,7 @@
         *latencyMs = thread->latency();
 
         // notify client processes of the new output creation
-        thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED);
+        thread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
 
         // the first primary output opened designates the primary hw device
         if ((mPrimaryHardwareDev == NULL) && (flags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
@@ -1815,8 +1868,6 @@
             mHardwareStatus = AUDIO_HW_SET_MODE;
             mPrimaryHardwareDev->hwDevice()->set_mode(mPrimaryHardwareDev->hwDevice(), mMode);
             mHardwareStatus = AUDIO_HW_IDLE;
-
-            mPrimaryOutputSampleRate = config->sample_rate;
         }
         return NO_ERROR;
     }
@@ -1838,11 +1889,11 @@
     }
 
     audio_io_handle_t id = nextUniqueId();
-    DuplicatingThread *thread = new DuplicatingThread(this, thread1, id);
+    DuplicatingThread *thread = new DuplicatingThread(this, thread1, id, mSystemReady);
     thread->addOutputTrack(thread2);
     mPlaybackThreads.add(id, thread);
     // notify client processes of the new output creation
-    thread->audioConfigChanged(AudioSystem::OUTPUT_OPENED);
+    thread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
     return id;
 }
 
@@ -1891,7 +1942,9 @@
                 }
             }
         }
-        audioConfigChanged(AudioSystem::OUTPUT_CLOSED, output, NULL);
+        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
+        ioDesc->mIoHandle = output;
+        ioConfigChanged(AUDIO_OUTPUT_CLOSED, ioDesc);
     }
     thread->exit();
     // The thread entity (active unit of execution) is no longer running here,
@@ -1954,22 +2007,22 @@
 status_t AudioFlinger::openInput(audio_module_handle_t module,
                                           audio_io_handle_t *input,
                                           audio_config_t *config,
-                                          audio_devices_t *device,
+                                          audio_devices_t *devices,
                                           const String8& address,
                                           audio_source_t source,
                                           audio_input_flags_t flags)
 {
     Mutex::Autolock _l(mLock);
 
-    if (*device == AUDIO_DEVICE_NONE) {
+    if (*devices == AUDIO_DEVICE_NONE) {
         return BAD_VALUE;
     }
 
-    sp<RecordThread> thread = openInput_l(module, input, config, *device, address, source, flags);
+    sp<RecordThread> thread = openInput_l(module, input, config, *devices, address, source, flags);
 
     if (thread != 0) {
         // notify client processes of the new input creation
-        thread->audioConfigChanged(AudioSystem::INPUT_OPENED);
+        thread->ioConfigChanged(AUDIO_INPUT_OPENED);
         return NO_ERROR;
     }
     return NO_INIT;
@@ -1978,12 +2031,12 @@
 sp<AudioFlinger::RecordThread> AudioFlinger::openInput_l(audio_module_handle_t module,
                                                          audio_io_handle_t *input,
                                                          audio_config_t *config,
-                                                         audio_devices_t device,
+                                                         audio_devices_t devices,
                                                          const String8& address,
                                                          audio_source_t source,
                                                          audio_input_flags_t flags)
 {
-    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, device);
+    AudioHwDevice *inHwDev = findSuitableHwDev_l(module, devices);
     if (inHwDev == NULL) {
         *input = AUDIO_IO_HANDLE_NONE;
         return 0;
@@ -1996,7 +2049,7 @@
     audio_config_t halconfig = *config;
     audio_hw_device_t *inHwHal = inHwDev->hwDevice();
     audio_stream_in_t *inStream = NULL;
-    status_t status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
+    status_t status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
                                         &inStream, flags, address.string(), source);
     ALOGV("openInput_l() openInputStream returned input %p, SamplingRate %d"
            ", Format %#x, Channels %x, flags %#x, status %d addr %s",
@@ -2008,17 +2061,17 @@
             status, address.string());
 
     // If the input could not be opened with the requested parameters and we can handle the
-    // conversion internally, try to open again with the proposed parameters. The AudioFlinger can
-    // resample the input and do mono to stereo or stereo to mono conversions on 16 bit PCM inputs.
+    // conversion internally, try to open again with the proposed parameters.
     if (status == BAD_VALUE &&
-            config->format == halconfig.format && halconfig.format == AUDIO_FORMAT_PCM_16_BIT &&
-        (halconfig.sample_rate <= 2 * config->sample_rate) &&
+        audio_is_linear_pcm(config->format) &&
+        audio_is_linear_pcm(halconfig.format) &&
+        (halconfig.sample_rate <= AUDIO_RESAMPLER_DOWN_RATIO_MAX * config->sample_rate) &&
         (audio_channel_count_from_in_mask(halconfig.channel_mask) <= FCC_2) &&
         (audio_channel_count_from_in_mask(config->channel_mask) <= FCC_2)) {
         // FIXME describe the change proposed by HAL (save old values so we can log them here)
         ALOGV("openInput_l() reopening with proposed sampling rate and channel mask");
         inStream = NULL;
-        status = inHwHal->open_input_stream(inHwHal, *input, device, &halconfig,
+        status = inHwHal->open_input_stream(inHwHal, *input, devices, &halconfig,
                                             &inStream, flags, address.string(), source);
         // FIXME log this new status; HAL should not propose any further changes
     }
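
The acceptance test above for a HAL counter-proposal can be read on its own: the proposed input
configuration is usable only if both formats are linear PCM, the proposed rate is within the
resampler's maximum down-sampling ratio, and both sides are mono or stereo. A sketch under those
assumptions; PcmConfig is a stand-in, and the value of kMaxDownRatio (standing in for
AUDIO_RESAMPLER_DOWN_RATIO_MAX) is illustrative:

    #include <cstdint>

    struct PcmConfig { uint32_t sampleRate; uint32_t channels; bool linearPcm; };  // stand-in

    static bool canConvertInternally(const PcmConfig& requested, const PcmConfig& proposed) {
        constexpr uint32_t kMaxDownRatio = 6;   // assumed value for AUDIO_RESAMPLER_DOWN_RATIO_MAX
        constexpr uint32_t kFcc2 = 2;           // fixed channel count 2 (see FCC_2)
        return requested.linearPcm && proposed.linearPcm
                && proposed.sampleRate <= kMaxDownRatio * requested.sampleRate
                && proposed.channels <= kFcc2
                && requested.channels <= kFcc2;
    }
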
@@ -2083,7 +2136,8 @@
                                   inputStream,
                                   *input,
                                   primaryOutputDevice_l(),
-                                  device
+                                  devices,
+                                  mSystemReady
 #ifdef TEE_SINK
                                   , teeSink
 #endif
@@ -2152,7 +2206,9 @@
                 putOrphanEffectChain_l(chain);
             }
         }
-        audioConfigChanged(AudioSystem::INPUT_CLOSED, input, NULL);
+        const sp<AudioIoDescriptor> ioDesc = new AudioIoDescriptor();
+        ioDesc->mIoHandle = input;
+        ioConfigChanged(AUDIO_INPUT_CLOSED, ioDesc);
         mRecordThreads.removeItem(input);
     }
     // FIXME: calling thread->exit() without mLock held should not be needed anymore now that
@@ -2428,6 +2484,7 @@
         int32_t priority,
         audio_io_handle_t io,
         int sessionId,
+        const String16& opPackageName,
         status_t *status,
         int *id,
         int *enabled)
@@ -2524,7 +2581,7 @@
 
         // check recording permission for visualizer
         if ((memcmp(&desc.type, SL_IID_VISUALIZATION, sizeof(effect_uuid_t)) == 0) &&
-            !recordingAllowed()) {
+            !recordingAllowed(opPackageName)) {
             lStatus = PERMISSION_DENIED;
             goto Exit;
         }
@@ -2809,13 +2866,13 @@
 
 
 struct Entry {
-#define MAX_NAME 32     // %Y%m%d%H%M%S_%d.wav
-    char mName[MAX_NAME];
+#define TEE_MAX_FILENAME 32 // %Y%m%d%H%M%S_%d.wav = 4+2+2+2+2+2+1+1+4+1 = 21
+    char mFileName[TEE_MAX_FILENAME];
 };
 
 int comparEntry(const void *p1, const void *p2)
 {
-    return strcmp(((const Entry *) p1)->mName, ((const Entry *) p2)->mName);
+    return strcmp(((const Entry *) p1)->mFileName, ((const Entry *) p2)->mFileName);
 }
 
 #ifdef TEE_SINK
@@ -2834,11 +2891,11 @@
         DIR *dir = opendir(teePath);
         teePath[teePathLen++] = '/';
         if (dir != NULL) {
-#define MAX_SORT 20 // number of entries to sort
-#define MAX_KEEP 10 // number of entries to keep
-            struct Entry entries[MAX_SORT];
+#define TEE_MAX_SORT 20 // number of entries to sort
+#define TEE_MAX_KEEP 10 // number of entries to keep
+            struct Entry entries[TEE_MAX_SORT];
             size_t entryCount = 0;
-            while (entryCount < MAX_SORT) {
+            while (entryCount < TEE_MAX_SORT) {
                 struct dirent de;
                 struct dirent *result = NULL;
                 int rc = readdir_r(dir, &de, &result);
@@ -2855,17 +2912,17 @@
                 }
                 // ignore non .wav file entries
                 size_t nameLen = strlen(de.d_name);
-                if (nameLen <= 4 || nameLen >= MAX_NAME ||
+                if (nameLen <= 4 || nameLen >= TEE_MAX_FILENAME ||
                         strcmp(&de.d_name[nameLen - 4], ".wav")) {
                     continue;
                 }
-                strcpy(entries[entryCount++].mName, de.d_name);
+                strcpy(entries[entryCount++].mFileName, de.d_name);
             }
             (void) closedir(dir);
-            if (entryCount > MAX_KEEP) {
+            if (entryCount > TEE_MAX_KEEP) {
                 qsort(entries, entryCount, sizeof(Entry), comparEntry);
-                for (size_t i = 0; i < entryCount - MAX_KEEP; ++i) {
-                    strcpy(&teePath[teePathLen], entries[i].mName);
+                for (size_t i = 0; i < entryCount - TEE_MAX_KEEP; ++i) {
+                    strcpy(&teePath[teePathLen], entries[i].mFileName);
                     (void) unlink(teePath);
                 }
             }
@@ -2949,4 +3006,4 @@
     return BnAudioFlinger::onTransact(code, data, reply, flags);
 }
 
-}; // namespace android
+} // namespace android
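
The reworked ioConfigChanged() above filters by pid: pid == 0 broadcasts to every registered
notification client, while a non-zero pid targets only the client registered from that process.
A reduced sketch of that dispatch rule; the std::map and callback type are illustrative stand-ins
for mNotificationClients and IAudioFlingerClient:

    #include <functional>
    #include <map>
    #include <sys/types.h>

    using ConfigCallback = std::function<void(int /*event*/)>;   // stand-in for the binder callback

    // pid == 0 notifies everyone; otherwise only the client whose process id matches.
    static void notifyClients(const std::map<pid_t, ConfigCallback>& clients,
                              int event, pid_t pid = 0) {
        for (const auto& entry : clients) {
            if (pid == 0 || entry.first == pid) {
                entry.second(event);
            }
        }
    }
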
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index aa0af1f..20c34ef 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -56,6 +56,9 @@
 #include <media/nbaio/NBAIO.h>
 #include "AudioWatchdog.h"
 #include "AudioMixer.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+#include "AudioHwDevice.h"
 
 #include <powermanager/IPowerManager.h>
 
@@ -70,18 +73,18 @@
 class AudioBuffer;
 class AudioResampler;
 class FastMixer;
+class PassthruBufferProvider;
 class ServerProxy;
 
 // ----------------------------------------------------------------------------
 
-// AudioFlinger has a hard-coded upper limit of 2 channels for capture and playback.
-// There is support for > 2 channel tracks down-mixed to 2 channel output via a down-mix effect.
-// Adding full support for > 2 channel capture or playback would require more than simply changing
-// this #define.  There is an independent hard-coded upper limit in AudioMixer;
-// removing that AudioMixer limit would be necessary but insufficient to support > 2 channels.
-// The macro FCC_2 highlights some (but not all) places where there is are 2-channel assumptions.
+// The macro FCC_2 highlights some (but not all) places where there are 2-channel assumptions.
+// This is typically due to legacy implementation of stereo input or output.
 // Search also for "2", "left", "right", "[0]", "[1]", ">> 16", "<< 16", etc.
 #define FCC_2 2     // FCC_2 = Fixed Channel Count 2
+// The macro FCC_8 highlights places where there are 8-channel assumptions.
+// This is typically due to audio mixer and resampler limitations.
+#define FCC_8 8     // FCC_8 = Fixed Channel Count 8
 
 static const nsecs_t kDefaultStandbyTimeInNsecs = seconds(3);
 
@@ -117,9 +120,11 @@
                                 uint32_t sampleRate,
                                 audio_format_t format,
                                 audio_channel_mask_t channelMask,
+                                const String16& opPackageName,
                                 size_t *pFrameCount,
                                 IAudioFlinger::track_flags_t *flags,
                                 pid_t tid,
+                                int clientUid,
                                 int *sessionId,
                                 size_t *notificationFrames,
                                 sp<IMemory>& cblk,
@@ -213,6 +218,7 @@
                         int32_t priority,
                         audio_io_handle_t io,
                         int sessionId,
+                        const String16& opPackageName,
                         status_t *status /*non-NULL*/,
                         int *id,
                         int *enabled);
@@ -251,6 +257,9 @@
     /* Get the HW synchronization source used for an audio session */
     virtual audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId);
 
+    /* Indicate JAVA services are ready (scheduling, power management ...) */
+    virtual status_t systemReady();
+
     virtual     status_t    onTransact(
                                 uint32_t code,
                                 const Parcel& data,
@@ -311,7 +320,6 @@
                                         wp<RefBase> cookie);
 
 private:
-    class AudioHwDevice;    // fwd declaration for findSuitableHwDev_l
 
                audio_mode_t getMode() const { return mMode; }
 
@@ -351,6 +359,15 @@
             // check that channelMask is the "canonical" one we expect for the channelCount.
             return channelMask == audio_channel_out_mask_from_count(channelCount);
             }
+        case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+            if (kEnableExtendedChannels) {
+                const uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
+                if (channelCount >= FCC_2 // mono is not supported at this time
+                        && channelCount <= AudioMixer::MAX_NUM_CHANNELS) {
+                    return true;
+                }
+            }
+            return false;
         default:
             return false;
         }
@@ -449,7 +466,7 @@
     class EffectModule;
     class EffectHandle;
     class EffectChain;
-    struct AudioStreamOut;
+
     struct AudioStreamIn;
 
     struct  stream_type_t {
@@ -541,7 +558,9 @@
               // no range check, doesn't check per-thread stream volume, AudioFlinger::mLock held
               float streamVolume_l(audio_stream_type_t stream) const
                                 { return mStreamTypes[stream].volume; }
-              void audioConfigChanged(int event, audio_io_handle_t ioHandle, const void *param2);
+              void ioConfigChanged(audio_io_config_event event,
+                                   const sp<AudioIoDescriptor>& ioDesc,
+                                   pid_t pid = 0);
 
               // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t.
               // They all share the same ID space, but the namespaces are actually independent
@@ -586,57 +605,12 @@
                 // Return true if the effect was found in mOrphanEffectChains, false otherwise.
                 bool            updateOrphanEffectChains(const sp<EffectModule>& effect);
 
-    class AudioHwDevice {
-    public:
-        enum Flags {
-            AHWD_CAN_SET_MASTER_VOLUME  = 0x1,
-            AHWD_CAN_SET_MASTER_MUTE    = 0x2,
-        };
+                void broadcastParametersToRecordThreads_l(const String8& keyValuePairs);
 
-        AudioHwDevice(audio_module_handle_t handle,
-                      const char *moduleName,
-                      audio_hw_device_t *hwDevice,
-                      Flags flags)
-            : mHandle(handle), mModuleName(strdup(moduleName))
-            , mHwDevice(hwDevice)
-            , mFlags(flags) { }
-        /*virtual*/ ~AudioHwDevice() { free((void *)mModuleName); }
-
-        bool canSetMasterVolume() const {
-            return (0 != (mFlags & AHWD_CAN_SET_MASTER_VOLUME));
-        }
-
-        bool canSetMasterMute() const {
-            return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
-        }
-
-        audio_module_handle_t handle() const { return mHandle; }
-        const char *moduleName() const { return mModuleName; }
-        audio_hw_device_t *hwDevice() const { return mHwDevice; }
-        uint32_t version() const { return mHwDevice->common.version; }
-
-    private:
-        const audio_module_handle_t mHandle;
-        const char * const mModuleName;
-        audio_hw_device_t * const mHwDevice;
-        const Flags mFlags;
-    };
-
-    // AudioStreamOut and AudioStreamIn are immutable, so their fields are const.
+    // AudioStreamIn is immutable, so its fields are const.
     // For emphasis, we could also make all pointers to them be "const *",
     // but that would clutter the code unnecessarily.
 
-    struct AudioStreamOut {
-        AudioHwDevice* const audioHwDev;
-        audio_stream_out_t* const stream;
-        const audio_output_flags_t flags;
-
-        audio_hw_device_t* hwDev() const { return audioHwDev->hwDevice(); }
-
-        AudioStreamOut(AudioHwDevice *dev, audio_stream_out_t *out, audio_output_flags_t flags) :
-            audioHwDev(dev), stream(out), flags(flags) {}
-    };
-
     struct AudioStreamIn {
         AudioHwDevice* const audioHwDev;
         audio_stream_in_t* const stream;
@@ -789,16 +763,19 @@
 
     sp<PatchPanel> mPatchPanel;
 
-    uint32_t    mPrimaryOutputSampleRate;   // sample rate of the primary output, or zero if none
-                                            // protected by mHardwareLock
+    bool        mSystemReady;
 };
 
 #undef INCLUDING_FROM_AUDIOFLINGER_H
 
 const char *formatToString(audio_format_t format);
+String8 inputFlagsToString(audio_input_flags_t flags);
+String8 outputFlagsToString(audio_output_flags_t flags);
+String8 devicesToString(audio_devices_t devices);
+const char *sourceToString(audio_source_t source);
 
 // ----------------------------------------------------------------------------
 
-}; // namespace android
+} // namespace android
 
 #endif // ANDROID_AUDIO_FLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.cpp b/services/audioflinger/AudioHwDevice.cpp
new file mode 100644
index 0000000..3191598
--- /dev/null
+++ b/services/audioflinger/AudioHwDevice.cpp
@@ -0,0 +1,97 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioHwDevice"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include <audio_utils/spdif/SPDIFEncoder.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+status_t AudioHwDevice::openOutputStream(
+        AudioStreamOut **ppStreamOut,
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        audio_output_flags_t flags,
+        struct audio_config *config,
+        const char *address)
+{
+
+    struct audio_config originalConfig = *config;
+    AudioStreamOut *outputStream = new AudioStreamOut(this, flags);
+
+    // Try to open the HAL first using the current format.
+    ALOGV("openOutputStream(), try "
+            " sampleRate %d, Format %#x, "
+            "channelMask %#x",
+            config->sample_rate,
+            config->format,
+            config->channel_mask);
+    status_t status = outputStream->open(handle, devices, config, address);
+
+    if (status != NO_ERROR) {
+        delete outputStream;
+        outputStream = NULL;
+
+        // FIXME Look at any modification to the config.
+        // The HAL might modify the config to suggest a wrapped format.
+        // Log this so we can see what the HALs are doing.
+        ALOGI("openOutputStream(), HAL returned"
+            " sampleRate %d, Format %#x, "
+            "channelMask %#x, status %d",
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
+            status);
+
+        // If the data is encoded then try again using wrapped PCM.
+        bool wrapperNeeded = !audio_is_linear_pcm(originalConfig.format)
+                && ((flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0)
+                && ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0);
+
+        if (wrapperNeeded) {
+            if (SPDIFEncoder::isFormatSupported(originalConfig.format)) {
+                outputStream = new SpdifStreamOut(this, flags, originalConfig.format);
+                status = outputStream->open(handle, devices, &originalConfig, address);
+                if (status != NO_ERROR) {
+                    ALOGE("ERROR - openOutputStream(), SPDIF open returned %d",
+                        status);
+                    delete outputStream;
+                    outputStream = NULL;
+                }
+            } else {
+                ALOGE("ERROR - openOutputStream(), SPDIFEncoder does not support format 0x%08x",
+                    originalConfig.format);
+            }
+        }
+    }
+
+    *ppStreamOut = outputStream;
+    return status;
+}
+
+
+}; // namespace android
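
The fallback in openOutputStream() above only wraps the stream in an SPDIF encoder when the data
is encoded (non-PCM), the output is direct, and it is not compress-offloaded. That predicate in
isolation; the flag bit values here are simplified placeholders for the real audio.h definitions:

    #include <cstdint>

    constexpr uint32_t kFlagDirect  = 0x1;   // placeholder for AUDIO_OUTPUT_FLAG_DIRECT
    constexpr uint32_t kFlagOffload = 0x2;   // placeholder for AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD

    // Wrap with an SPDIF encoder only for encoded data on a direct, non-offloaded output.
    static bool spdifWrapperNeeded(bool formatIsLinearPcm, uint32_t flags) {
        return !formatIsLinearPcm
                && (flags & kFlagDirect) != 0
                && (flags & kFlagOffload) == 0;
    }
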
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
new file mode 100644
index 0000000..b9f65c1
--- /dev/null
+++ b/services/audioflinger/AudioHwDevice.h
@@ -0,0 +1,88 @@
+/*
+**
+** Copyright 2007, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_HW_DEVICE_H
+#define ANDROID_AUDIO_HW_DEVICE_H
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/types.h>
+
+#include <hardware/audio.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+
+
+namespace android {
+
+class AudioStreamOut;
+
+class AudioHwDevice {
+public:
+    enum Flags {
+        AHWD_CAN_SET_MASTER_VOLUME  = 0x1,
+        AHWD_CAN_SET_MASTER_MUTE    = 0x2,
+    };
+
+    AudioHwDevice(audio_module_handle_t handle,
+                  const char *moduleName,
+                  audio_hw_device_t *hwDevice,
+                  Flags flags)
+        : mHandle(handle)
+        , mModuleName(strdup(moduleName))
+        , mHwDevice(hwDevice)
+        , mFlags(flags) { }
+    virtual ~AudioHwDevice() { free((void *)mModuleName); }
+
+    bool canSetMasterVolume() const {
+        return (0 != (mFlags & AHWD_CAN_SET_MASTER_VOLUME));
+    }
+
+    bool canSetMasterMute() const {
+        return (0 != (mFlags & AHWD_CAN_SET_MASTER_MUTE));
+    }
+
+    audio_module_handle_t handle() const { return mHandle; }
+    const char *moduleName() const { return mModuleName; }
+    audio_hw_device_t *hwDevice() const { return mHwDevice; }
+    uint32_t version() const { return mHwDevice->common.version; }
+
+    /** This method creates and opens the audio hardware output stream.
+     * The "address" parameter qualifies the "devices" audio device type if needed.
+     * The address format depends on the device type:
+     * - Bluetooth devices use the MAC address of the device in the form "00:11:22:AA:BB:CC"
+     * - USB devices use the ALSA card and device numbers in the form "card=X;device=Y"
+     * - Other devices may use a number or any other string.
+     */
+    status_t openOutputStream(
+            AudioStreamOut **ppStreamOut,
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            audio_output_flags_t flags,
+            struct audio_config *config,
+            const char *address);
+
+private:
+    const audio_module_handle_t mHandle;
+    const char * const          mModuleName;
+    audio_hw_device_t * const   mHwDevice;
+    const Flags                 mFlags;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_HW_DEVICE_H
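
A hedged usage sketch of the openOutputStream() API declared above, showing how the address string
qualifies the device type; the Bluetooth device choice, the handle value and the config fields are
assumptions for illustration, and error handling is trimmed:

    #include <string.h>
    #include "AudioHwDevice.h"     // pulls in system/audio.h and utils/Errors.h

    using namespace android;

    static AudioStreamOut *openBluetoothOutput(AudioHwDevice *hwDev)
    {
        audio_config_t config;
        memset(&config, 0, sizeof(config));
        config.sample_rate  = 44100;
        config.format       = AUDIO_FORMAT_PCM_16_BIT;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;

        AudioStreamOut *out = NULL;
        // The address qualifies the device: a MAC address for Bluetooth,
        // "card=X;device=Y" for USB, as documented in the header above.
        status_t status = hwDev->openOutputStream(
                &out,
                1 /* audio_io_handle_t, assumed */,
                AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
                AUDIO_OUTPUT_FLAG_NONE,
                &config,
                "00:11:22:AA:BB:CC");
        return status == NO_ERROR ? out : NULL;
    }
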
diff --git a/services/audioflinger/AudioMixer.cpp b/services/audioflinger/AudioMixer.cpp
index fd28ea1..8a9a837 100644
--- a/services/audioflinger/AudioMixer.cpp
+++ b/services/audioflinger/AudioMixer.cpp
@@ -39,9 +39,6 @@
 #include <common_time/local_clock.h>
 #include <common_time/cc_helper.h>
 
-#include <media/EffectsFactoryApi.h>
-#include <audio_effects/effect_downmix.h>
-
 #include "AudioMixerOps.h"
 #include "AudioMixer.h"
 
@@ -69,9 +66,16 @@
 #define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
 #endif
 
-// Set kUseNewMixer to true to use the new mixer engine. Otherwise the
-// original code will be used.  This is false for now.
-static const bool kUseNewMixer = false;
+// TODO: Move these macro/inlines to a header file.
+template <typename T>
+static inline
+T max(const T& x, const T& y) {
+    return x > y ? x : y;
+}
+
+// Set kUseNewMixer to true to use the new mixer engine always. Otherwise the
+// original code will be used for stereo sinks, the new mixer for multichannel.
+static const bool kUseNewMixer = true;
 
 // Set kUseFloat to true to allow floating input into the mixer engine.
 // If kUseNewMixer is false, this is ignored or may be overridden internally
@@ -91,288 +95,6 @@
     return a < b ? a : b;
 }
 
-AudioMixer::CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
-        size_t outputFrameSize, size_t bufferFrameCount) :
-        mInputFrameSize(inputFrameSize),
-        mOutputFrameSize(outputFrameSize),
-        mLocalBufferFrameCount(bufferFrameCount),
-        mLocalBufferData(NULL),
-        mConsumed(0)
-{
-    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
-            inputFrameSize, outputFrameSize, bufferFrameCount);
-    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
-            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
-            inputFrameSize, outputFrameSize);
-    if (mLocalBufferFrameCount) {
-        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
-    }
-    mBuffer.frameCount = 0;
-}
-
-AudioMixer::CopyBufferProvider::~CopyBufferProvider()
-{
-    ALOGV("~CopyBufferProvider(%p)", this);
-    if (mBuffer.frameCount != 0) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-    }
-    free(mLocalBufferData);
-}
-
-status_t AudioMixer::CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
-        int64_t pts)
-{
-    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
-    //        this, pBuffer, pBuffer->frameCount, pts);
-    if (mLocalBufferFrameCount == 0) {
-        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
-        if (res == OK) {
-            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
-        }
-        return res;
-    }
-    if (mBuffer.frameCount == 0) {
-        mBuffer.frameCount = pBuffer->frameCount;
-        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
-        // At one time an upstream buffer provider had
-        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
-        //
-        // By API spec, if res != OK, then mBuffer.frameCount == 0.
-        // but there may be improper implementations.
-        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
-        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
-            pBuffer->raw = NULL;
-            pBuffer->frameCount = 0;
-            return res;
-        }
-        mConsumed = 0;
-    }
-    ALOG_ASSERT(mConsumed < mBuffer.frameCount);
-    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
-    count = min(count, pBuffer->frameCount);
-    pBuffer->raw = mLocalBufferData;
-    pBuffer->frameCount = count;
-    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
-            pBuffer->frameCount);
-    return OK;
-}
-
-void AudioMixer::CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
-{
-    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
-    //        this, pBuffer, pBuffer->frameCount);
-    if (mLocalBufferFrameCount == 0) {
-        mTrackBufferProvider->releaseBuffer(pBuffer);
-        return;
-    }
-    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
-    mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
-    if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-        ALOG_ASSERT(mBuffer.frameCount == 0);
-    }
-    pBuffer->raw = NULL;
-    pBuffer->frameCount = 0;
-}
-
-void AudioMixer::CopyBufferProvider::reset()
-{
-    if (mBuffer.frameCount != 0) {
-        mTrackBufferProvider->releaseBuffer(&mBuffer);
-    }
-    mConsumed = 0;
-}
-
-AudioMixer::DownmixerBufferProvider::DownmixerBufferProvider(
-        audio_channel_mask_t inputChannelMask,
-        audio_channel_mask_t outputChannelMask, audio_format_t format,
-        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
-        CopyBufferProvider(
-            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
-            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
-            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
-{
-    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
-            this, inputChannelMask, outputChannelMask, format,
-            sampleRate, sessionId);
-    if (!sIsMultichannelCapable
-            || EffectCreate(&sDwnmFxDesc.uuid,
-                    sessionId,
-                    SESSION_ID_INVALID_AND_IGNORED,
-                    &mDownmixHandle) != 0) {
-         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
-         mDownmixHandle = NULL;
-         return;
-     }
-     // channel input configuration will be overridden per-track
-     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
-     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
-     mDownmixConfig.inputCfg.format = format;
-     mDownmixConfig.outputCfg.format = format;
-     mDownmixConfig.inputCfg.samplingRate = sampleRate;
-     mDownmixConfig.outputCfg.samplingRate = sampleRate;
-     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
-     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
-     // input and output buffer provider, and frame count will not be used as the downmix effect
-     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
-     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
-             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
-     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
-
-     int cmdStatus;
-     uint32_t replySize = sizeof(int);
-
-     // Configure downmixer
-     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
-             &mDownmixConfig /*pCmdData*/,
-             &replySize, &cmdStatus /*pReplyData*/);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-
-     // Enable downmixer
-     replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
-             &replySize, &cmdStatus /*pReplyData*/);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-
-     // Set downmix type
-     // parameter size rounded for padding on 32bit boundary
-     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
-     const int downmixParamSize =
-             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
-     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
-     param->psize = sizeof(downmix_params_t);
-     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
-     memcpy(param->data, &downmixParam, param->psize);
-     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
-     param->vsize = sizeof(downmix_type_t);
-     memcpy(param->data + psizePadded, &downmixType, param->vsize);
-     replySize = sizeof(int);
-     status = (*mDownmixHandle)->command(mDownmixHandle,
-             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
-             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
-     free(param);
-     if (status != 0 || cmdStatus != 0) {
-         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
-                 status, cmdStatus);
-         EffectRelease(mDownmixHandle);
-         mDownmixHandle = NULL;
-         return;
-     }
-     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
-}
-
-AudioMixer::DownmixerBufferProvider::~DownmixerBufferProvider()
-{
-    ALOGV("~DownmixerBufferProvider (%p)", this);
-    EffectRelease(mDownmixHandle);
-    mDownmixHandle = NULL;
-}
-
-void AudioMixer::DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    mDownmixConfig.inputCfg.buffer.frameCount = frames;
-    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
-    mDownmixConfig.outputCfg.buffer.frameCount = frames;
-    mDownmixConfig.outputCfg.buffer.raw = dst;
-    // may be in-place if src == dst.
-    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
-            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
-    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
-}
-
-/* call once in a pthread_once handler. */
-/*static*/ status_t AudioMixer::DownmixerBufferProvider::init()
-{
-    // find multichannel downmix effect if we have to play multichannel content
-    uint32_t numEffects = 0;
-    int ret = EffectQueryNumberEffects(&numEffects);
-    if (ret != 0) {
-        ALOGE("AudioMixer() error %d querying number of effects", ret);
-        return NO_INIT;
-    }
-    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
-
-    for (uint32_t i = 0 ; i < numEffects ; i++) {
-        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
-            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
-            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
-                ALOGI("found effect \"%s\" from %s",
-                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
-                sIsMultichannelCapable = true;
-                break;
-            }
-        }
-    }
-    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
-    return NO_INIT;
-}
-
-/*static*/ bool AudioMixer::DownmixerBufferProvider::sIsMultichannelCapable = false;
-/*static*/ effect_descriptor_t AudioMixer::DownmixerBufferProvider::sDwnmFxDesc;
-
-AudioMixer::RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
-        audio_channel_mask_t outputChannelMask, audio_format_t format,
-        size_t bufferFrameCount) :
-        CopyBufferProvider(
-                audio_bytes_per_sample(format)
-                    * audio_channel_count_from_out_mask(inputChannelMask),
-                audio_bytes_per_sample(format)
-                    * audio_channel_count_from_out_mask(outputChannelMask),
-                bufferFrameCount),
-        mFormat(format),
-        mSampleSize(audio_bytes_per_sample(format)),
-        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
-        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
-{
-    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
-            this, format, inputChannelMask, outputChannelMask,
-            mInputChannels, mOutputChannels);
-    // TODO: consider channel representation in index array formulation
-    // We ignore channel representation, and just use the bits.
-    memcpy_by_index_array_initialization(mIdxAry, ARRAY_SIZE(mIdxAry),
-            audio_channel_mask_get_bits(outputChannelMask),
-            audio_channel_mask_get_bits(inputChannelMask));
-}
-
-void AudioMixer::RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    memcpy_by_index_array(dst, mOutputChannels,
-            src, mInputChannels, mIdxAry, mSampleSize, frames);
-}
-
-AudioMixer::ReformatBufferProvider::ReformatBufferProvider(int32_t channels,
-        audio_format_t inputFormat, audio_format_t outputFormat,
-        size_t bufferFrameCount) :
-        CopyBufferProvider(
-            channels * audio_bytes_per_sample(inputFormat),
-            channels * audio_bytes_per_sample(outputFormat),
-            bufferFrameCount),
-        mChannels(channels),
-        mInputFormat(inputFormat),
-        mOutputFormat(outputFormat)
-{
-    ALOGV("ReformatBufferProvider(%p)(%d, %#x, %#x)", this, channels, inputFormat, outputFormat);
-}
-
-void AudioMixer::ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
-    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannels);
-}
-
 // ----------------------------------------------------------------------------
 
 // Ensure mConfiguredNames bitmask is initialized properly on all architectures.
@@ -407,6 +129,7 @@
         t->resampler = NULL;
         t->downmixerBufferProvider = NULL;
         t->mReformatBufferProvider = NULL;
+        t->mTimestretchBufferProvider = NULL;
         t++;
     }
 
@@ -419,6 +142,7 @@
         delete t->resampler;
         delete t->downmixerBufferProvider;
         delete t->mReformatBufferProvider;
+        delete t->mTimestretchBufferProvider;
         t++;
     }
     delete [] mState.outputTemp;
@@ -430,6 +154,10 @@
     mState.mLog = log;
 }
 
+static inline audio_format_t selectMixerInFormat(audio_format_t inputFormat __unused) {
+    return kUseFloat && kUseNewMixer ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+}
+
 int AudioMixer::getTrackName(audio_channel_mask_t channelMask,
         audio_format_t format, int sessionId)
 {
@@ -492,24 +220,25 @@
         t->mInputBufferProvider = NULL;
         t->mReformatBufferProvider = NULL;
         t->downmixerBufferProvider = NULL;
+        t->mPostDownmixReformatBufferProvider = NULL;
+        t->mTimestretchBufferProvider = NULL;
         t->mMixerFormat = AUDIO_FORMAT_PCM_16_BIT;
         t->mFormat = format;
-        t->mMixerInFormat = kUseFloat && kUseNewMixer
-                ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
+        t->mMixerInFormat = selectMixerInFormat(format);
+        t->mDownmixRequiresFormat = AUDIO_FORMAT_INVALID; // no format required
         t->mMixerChannelMask = audio_channel_mask_from_representation_and_bits(
                 AUDIO_CHANNEL_REPRESENTATION_POSITION, AUDIO_CHANNEL_OUT_STEREO);
         t->mMixerChannelCount = audio_channel_count_from_out_mask(t->mMixerChannelMask);
+        t->mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
         // Check the downmixing (or upmixing) requirements.
-        status_t status = initTrackDownmix(t, n);
+        status_t status = t->prepareForDownmix();
         if (status != OK) {
             ALOGE("AudioMixer::getTrackName invalid channelMask (%#x)", channelMask);
             return -1;
         }
-        // initTrackDownmix() may change the input format requirement.
-        // If you desire floating point input to the mixer, it may change
-        // to integer because the downmixer requires integer to process.
+        // prepareForDownmix() may change mDownmixRequiresFormat
         ALOGVV("mMixerFormat:%#x  mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
-        prepareTrackForReformat(t, n);
+        t->prepareForReformat();
         mTrackNames |= 1 << n;
         return TRACK0 + n;
     }
@@ -526,7 +255,7 @@
  }
 
 // Called when channel masks have changed for a track name
-// TODO: Fix Downmixbufferprofider not to (possibly) change mixer input format,
+// TODO: Fix DownmixerBufferProvider not to (possibly) change mixer input format,
 // which will simplify this logic.
 bool AudioMixer::setChannelMasks(int name,
         audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask) {
@@ -551,21 +280,18 @@
 
     // channel masks have changed, does this track need a downmixer?
     // update to try using our desired format (if we aren't already using it)
-    const audio_format_t prevMixerInFormat = track.mMixerInFormat;
-    track.mMixerInFormat = kUseFloat && kUseNewMixer
-            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-    const status_t status = initTrackDownmix(&mState.tracks[name], name);
+    const audio_format_t prevDownmixerFormat = track.mDownmixRequiresFormat;
+    const status_t status = mState.tracks[name].prepareForDownmix();
     ALOGE_IF(status != OK,
-            "initTrackDownmix error %d, track channel mask %#x, mixer channel mask %#x",
+            "prepareForDownmix error %d, track channel mask %#x, mixer channel mask %#x",
             status, track.channelMask, track.mMixerChannelMask);
 
-    const bool mixerInFormatChanged = prevMixerInFormat != track.mMixerInFormat;
-    if (mixerInFormatChanged) {
-        prepareTrackForReformat(&track, name); // because of downmixer, track format may change!
+    if (prevDownmixerFormat != track.mDownmixRequiresFormat) {
+        track.prepareForReformat(); // because of downmixer, track format may change!
     }
 
-    if (track.resampler && (mixerInFormatChanged || mixerChannelCountChanged)) {
-        // resampler input format or channels may have changed.
+    if (track.resampler && mixerChannelCountChanged) {
+        // resampler channels may have changed.
         const uint32_t resetToSampleRate = track.sampleRate;
         delete track.resampler;
         track.resampler = NULL;
@@ -576,99 +302,129 @@
     return true;
 }
 
-status_t AudioMixer::initTrackDownmix(track_t* pTrack, int trackName)
-{
-    // Only remix (upmix or downmix) if the track and mixer/device channel masks
-    // are not the same and not handled internally, as mono -> stereo currently is.
-    if (pTrack->channelMask != pTrack->mMixerChannelMask
-            && !(pTrack->channelMask == AUDIO_CHANNEL_OUT_MONO
-                    && pTrack->mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
-        return prepareTrackForDownmix(pTrack, trackName);
-    }
-    // no remix necessary
-    unprepareTrackForDownmix(pTrack, trackName);
-    return NO_ERROR;
-}
+void AudioMixer::track_t::unprepareForDownmix() {
+    ALOGV("AudioMixer::unprepareForDownmix(%p)", this);
 
-void AudioMixer::unprepareTrackForDownmix(track_t* pTrack, int trackName __unused) {
-    ALOGV("AudioMixer::unprepareTrackForDownmix(%d)", trackName);
-
-    if (pTrack->downmixerBufferProvider != NULL) {
+    mDownmixRequiresFormat = AUDIO_FORMAT_INVALID;
+    if (downmixerBufferProvider != NULL) {
         // this track had previously been configured with a downmixer, delete it
         ALOGV(" deleting old downmixer");
-        delete pTrack->downmixerBufferProvider;
-        pTrack->downmixerBufferProvider = NULL;
-        reconfigureBufferProviders(pTrack);
+        delete downmixerBufferProvider;
+        downmixerBufferProvider = NULL;
+        reconfigureBufferProviders();
     } else {
         ALOGV(" nothing to do, no downmixer to delete");
     }
 }
 
-status_t AudioMixer::prepareTrackForDownmix(track_t* pTrack, int trackName)
+status_t AudioMixer::track_t::prepareForDownmix()
 {
-    ALOGV("AudioMixer::prepareTrackForDownmix(%d) with mask 0x%x", trackName, pTrack->channelMask);
+    ALOGV("AudioMixer::prepareForDownmix(%p) with mask 0x%x",
+            this, channelMask);
 
     // discard the previous downmixer if there was one
-    unprepareTrackForDownmix(pTrack, trackName);
-    if (DownmixerBufferProvider::isMultichannelCapable()) {
-        DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(pTrack->channelMask,
-                pTrack->mMixerChannelMask,
-                AUDIO_FORMAT_PCM_16_BIT /* TODO: use pTrack->mMixerInFormat, now only PCM 16 */,
-                pTrack->sampleRate, pTrack->sessionId, kCopyBufferFrameCount);
+    unprepareForDownmix();
+    // MONO_HACK Only remix (upmix or downmix) if the track and mixer/device channel masks
+    // are not the same and not handled internally, as mono -> stereo currently is.
+    if (channelMask == mMixerChannelMask
+            || (channelMask == AUDIO_CHANNEL_OUT_MONO
+                    && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO)) {
+        return NO_ERROR;
+    }
+    // DownmixerBufferProvider is only used for position masks.
+    if (audio_channel_mask_get_representation(channelMask)
+                == AUDIO_CHANNEL_REPRESENTATION_POSITION
+            && DownmixerBufferProvider::isMultichannelCapable()) {
+        DownmixerBufferProvider* pDbp = new DownmixerBufferProvider(channelMask,
+                mMixerChannelMask,
+                AUDIO_FORMAT_PCM_16_BIT /* TODO: use mMixerInFormat, now only PCM 16 */,
+                sampleRate, sessionId, kCopyBufferFrameCount);
 
         if (pDbp->isValid()) { // if constructor completed properly
-            pTrack->mMixerInFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
-            pTrack->downmixerBufferProvider = pDbp;
-            reconfigureBufferProviders(pTrack);
+            mDownmixRequiresFormat = AUDIO_FORMAT_PCM_16_BIT; // PCM 16 bit required for downmix
+            downmixerBufferProvider = pDbp;
+            reconfigureBufferProviders();
             return NO_ERROR;
         }
         delete pDbp;
     }
 
     // Effect downmixer does not accept the channel conversion.  Let's use our remixer.
-    RemixBufferProvider* pRbp = new RemixBufferProvider(pTrack->channelMask,
-            pTrack->mMixerChannelMask, pTrack->mMixerInFormat, kCopyBufferFrameCount);
+    RemixBufferProvider* pRbp = new RemixBufferProvider(channelMask,
+            mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount);
     // Remix always finds a conversion whereas Downmixer effect above may fail.
-    pTrack->downmixerBufferProvider = pRbp;
-    reconfigureBufferProviders(pTrack);
+    downmixerBufferProvider = pRbp;
+    reconfigureBufferProviders();
     return NO_ERROR;
 }
 
-void AudioMixer::unprepareTrackForReformat(track_t* pTrack, int trackName __unused) {
-    ALOGV("AudioMixer::unprepareTrackForReformat(%d)", trackName);
-    if (pTrack->mReformatBufferProvider != NULL) {
-        delete pTrack->mReformatBufferProvider;
-        pTrack->mReformatBufferProvider = NULL;
-        reconfigureBufferProviders(pTrack);
+void AudioMixer::track_t::unprepareForReformat() {
+    ALOGV("AudioMixer::unprepareForReformat(%p)", this);
+    bool requiresReconfigure = false;
+    if (mReformatBufferProvider != NULL) {
+        delete mReformatBufferProvider;
+        mReformatBufferProvider = NULL;
+        requiresReconfigure = true;
+    }
+    if (mPostDownmixReformatBufferProvider != NULL) {
+        delete mPostDownmixReformatBufferProvider;
+        mPostDownmixReformatBufferProvider = NULL;
+        requiresReconfigure = true;
+    }
+    if (requiresReconfigure) {
+        reconfigureBufferProviders();
     }
 }
 
-status_t AudioMixer::prepareTrackForReformat(track_t* pTrack, int trackName)
+status_t AudioMixer::track_t::prepareForReformat()
 {
-    ALOGV("AudioMixer::prepareTrackForReformat(%d) with format %#x", trackName, pTrack->mFormat);
-    // discard the previous reformatter if there was one
-    unprepareTrackForReformat(pTrack, trackName);
-    // only configure reformatter if needed
-    if (pTrack->mFormat != pTrack->mMixerInFormat) {
-        pTrack->mReformatBufferProvider = new ReformatBufferProvider(
-                audio_channel_count_from_out_mask(pTrack->channelMask),
-                pTrack->mFormat, pTrack->mMixerInFormat,
+    ALOGV("AudioMixer::prepareForReformat(%p) with format %#x", this, mFormat);
+    // discard previous reformatters
+    unprepareForReformat();
+    // only configure reformatters as needed
+    const audio_format_t targetFormat = mDownmixRequiresFormat != AUDIO_FORMAT_INVALID
+            ? mDownmixRequiresFormat : mMixerInFormat;
+    bool requiresReconfigure = false;
+    if (mFormat != targetFormat) {
+        mReformatBufferProvider = new ReformatBufferProvider(
+                audio_channel_count_from_out_mask(channelMask),
+                mFormat,
+                targetFormat,
                 kCopyBufferFrameCount);
-        reconfigureBufferProviders(pTrack);
+        requiresReconfigure = true;
+    }
+    if (targetFormat != mMixerInFormat) {
+        mPostDownmixReformatBufferProvider = new ReformatBufferProvider(
+                audio_channel_count_from_out_mask(mMixerChannelMask),
+                targetFormat,
+                mMixerInFormat,
+                kCopyBufferFrameCount);
+        requiresReconfigure = true;
+    }
+    if (requiresReconfigure) {
+        reconfigureBufferProviders();
     }
     return NO_ERROR;
 }
 
-void AudioMixer::reconfigureBufferProviders(track_t* pTrack)
+void AudioMixer::track_t::reconfigureBufferProviders()
 {
-    pTrack->bufferProvider = pTrack->mInputBufferProvider;
-    if (pTrack->mReformatBufferProvider) {
-        pTrack->mReformatBufferProvider->setBufferProvider(pTrack->bufferProvider);
-        pTrack->bufferProvider = pTrack->mReformatBufferProvider;
+    bufferProvider = mInputBufferProvider;
+    if (mReformatBufferProvider) {
+        mReformatBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mReformatBufferProvider;
     }
-    if (pTrack->downmixerBufferProvider) {
-        pTrack->downmixerBufferProvider->setBufferProvider(pTrack->bufferProvider);
-        pTrack->bufferProvider = pTrack->downmixerBufferProvider;
+    if (downmixerBufferProvider) {
+        downmixerBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = downmixerBufferProvider;
+    }
+    if (mPostDownmixReformatBufferProvider) {
+        mPostDownmixReformatBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mPostDownmixReformatBufferProvider;
+    }
+    if (mTimestretchBufferProvider) {
+        mTimestretchBufferProvider->setBufferProvider(bufferProvider);
+        bufferProvider = mTimestretchBufferProvider;
     }
 }
 
@@ -687,10 +443,12 @@
     delete track.resampler;
     track.resampler = NULL;
     // delete the downmixer
-    unprepareTrackForDownmix(&mState.tracks[name], name);
+    mState.tracks[name].unprepareForDownmix();
     // delete the reformatter
-    unprepareTrackForReformat(&mState.tracks[name], name);
-
+    mState.tracks[name].unprepareForReformat();
+    // delete the timestretch provider
+    delete track.mTimestretchBufferProvider;
+    track.mTimestretchBufferProvider = NULL;
     mTrackNames &= ~(1<<name);
 }
 
@@ -748,41 +506,99 @@
 static inline bool setVolumeRampVariables(float newVolume, int32_t ramp,
         int16_t *pIntSetVolume, int32_t *pIntPrevVolume, int32_t *pIntVolumeInc,
         float *pSetVolume, float *pPrevVolume, float *pVolumeInc) {
+    // check floating point volume to see if it is identical to the previously
+    // set volume.
+    // We do not use a tolerance here (and reject changes too small)
+    // as it may be confusing to use a different value than the one set.
+    // If the resulting volume is too small to ramp, it is a direct set of the volume.
     if (newVolume == *pSetVolume) {
         return false;
     }
-    /* set the floating point volume variables */
-    if (ramp != 0) {
-        *pVolumeInc = (newVolume - *pSetVolume) / ramp;
-        *pPrevVolume = *pSetVolume;
+    if (newVolume < 0) {
+        newVolume = 0; // should not have negative volumes
     } else {
-        *pVolumeInc = 0;
-        *pPrevVolume = newVolume;
+        switch (fpclassify(newVolume)) {
+        case FP_SUBNORMAL:
+        case FP_NAN:
+            newVolume = 0;
+            break;
+        case FP_ZERO:
+            break; // zero volume is fine
+        case FP_INFINITE:
+            // Infinite volume could be handled consistently since
+            // floating point math saturates at infinities,
+            // but we limit volume to unity gain float.
+            // ramp = 0; break;
+            //
+            newVolume = AudioMixer::UNITY_GAIN_FLOAT;
+            break;
+        case FP_NORMAL:
+        default:
+            // Floating point does not have problems with overflow wrap
+            // that integer has.  However, we limit the volume to
+            // unity gain here.
+            // TODO: Revisit the volume limitation and perhaps parameterize.
+            if (newVolume > AudioMixer::UNITY_GAIN_FLOAT) {
+                newVolume = AudioMixer::UNITY_GAIN_FLOAT;
+            }
+            break;
+        }
     }
-    *pSetVolume = newVolume;
 
-    /* set the legacy integer volume variables */
-    int32_t intVolume = newVolume * AudioMixer::UNITY_GAIN_INT;
-    if (intVolume > AudioMixer::UNITY_GAIN_INT) {
-        intVolume = AudioMixer::UNITY_GAIN_INT;
-    } else if (intVolume < 0) {
-        ALOGE("negative volume %.7g", newVolume);
-        intVolume = 0; // should never happen, but for safety check.
+    // set floating point volume ramp
+    if (ramp != 0) {
+        // when the ramp completes, *pPrevVolume is set to *pSetVolume, so there
+        // is no computational mismatch; hence equality is checked here.
+        ALOGD_IF(*pPrevVolume != *pSetVolume, "previous float ramp hasn't finished,"
+                " prev:%f  set_to:%f", *pPrevVolume, *pSetVolume);
+        const float inc = (newVolume - *pPrevVolume) / ramp; // could be inf, nan, subnormal
+        const float maxv = max(newVolume, *pPrevVolume); // could be inf, cannot be nan, subnormal
+
+        if (isnormal(inc) // inc must be a normal number (no subnormals, infinite, nan)
+                && maxv + inc != maxv) { // inc must make forward progress
+            *pVolumeInc = inc;
+            // ramp is set now.
+            // Note: if newVolume is 0, then near the end of the ramp,
+            // it may be possible that the ramped volume may be subnormal or
+            // temporarily negative by a small amount or subnormal due to floating
+            // point inaccuracies.
+        } else {
+            ramp = 0; // ramp not allowed
+        }
     }
-    if (intVolume == *pIntSetVolume) {
-        *pIntVolumeInc = 0;
-        /* TODO: integer/float workaround: ignore floating volume ramp */
+
+    // compute and check integer volume, no need to check negative values
+    // The integer volume is limited to "unity_gain" to avoid wrapping and other
+    // audio artifacts, so it never reaches the range limit of U4.28.
+    // We safely use signed 16 and 32 bit integers here.
+    const float scaledVolume = newVolume * AudioMixer::UNITY_GAIN_INT; // not neg, subnormal, nan
+    const int32_t intVolume = (scaledVolume >= (float)AudioMixer::UNITY_GAIN_INT) ?
+            AudioMixer::UNITY_GAIN_INT : (int32_t)scaledVolume;
+
+    // set integer volume ramp
+    if (ramp != 0) {
+        // integer volume is U4.12 (to use 16 bit multiplies), but ramping uses U4.28.
+        // when the ramp completes, *pIntPrevVolume is set to *pIntSetVolume << 16, so there
+        // is no computational mismatch; hence equality is checked here.
+        ALOGD_IF(*pIntPrevVolume != *pIntSetVolume << 16, "previous int ramp hasn't finished,"
+                " prev:%d  set_to:%d", *pIntPrevVolume, *pIntSetVolume << 16);
+        const int32_t inc = ((intVolume << 16) - *pIntPrevVolume) / ramp;
+
+        if (inc != 0) { // inc must make forward progress
+            *pIntVolumeInc = inc;
+        } else {
+            ramp = 0; // ramp not allowed
+        }
+    }
+
+    // if no ramp, or ramp not allowed, then clear float and integer increments
+    if (ramp == 0) {
         *pVolumeInc = 0;
         *pPrevVolume = newVolume;
-        return true;
-    }
-    if (ramp != 0) {
-        *pIntVolumeInc = ((intVolume - *pIntSetVolume) << 16) / ramp;
-        *pIntPrevVolume = (*pIntVolumeInc == 0 ? intVolume : *pIntSetVolume) << 16;
-    } else {
         *pIntVolumeInc = 0;
         *pIntPrevVolume = intVolume << 16;
     }
+    *pSetVolume = newVolume;
     *pIntSetVolume = intVolume;
     return true;
 }
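
A quick worked example of the two ramp increments computed above, using the same U4.12 set-volume / U4.28 ramp-state conventions described in the comments (values chosen purely for illustration):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Illustrative values only: ramp from 0.25f up to 1.0f over 256 frames.
        const float prevVolume = 0.25f, newVolume = 1.0f;
        const int32_t ramp = 256;

        // float ramp: per-frame increment, as in the code above
        const float inc = (newVolume - prevVolume) / ramp;          // 0.75 / 256 ~= 0.00293

        // integer ramp: set volume is U4.12 (0x1000 == unity), ramp state is U4.28 (<< 16)
        const int32_t UNITY_GAIN_INT = 0x1000;
        const int32_t intPrev = (int32_t)(prevVolume * UNITY_GAIN_INT) << 16;
        const int32_t intNew  = (int32_t)(newVolume * UNITY_GAIN_INT);
        const int32_t intInc  = ((intNew << 16) - intPrev) / ramp;  // per-frame U4.28 step

        printf("float inc %f, int inc %d (U4.28)\n", inc, (int)intInc);
        return 0;
    }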
@@ -828,7 +644,7 @@
                 ALOG_ASSERT(audio_is_linear_pcm(format), "Invalid format %#x", format);
                 track.mFormat = format;
                 ALOGV("setParameter(TRACK, FORMAT, %#x)", format);
-                prepareTrackForReformat(&track, name);
+                track.prepareForReformat();
                 invalidateState(1 << name);
             }
             } break;
@@ -912,6 +728,28 @@
             }
         }
         break;
+        case TIMESTRETCH:
+            switch (param) {
+            case PLAYBACK_RATE: {
+                const AudioPlaybackRate *playbackRate =
+                        reinterpret_cast<AudioPlaybackRate*>(value);
+                ALOGW_IF(!isAudioPlaybackRateValid(*playbackRate),
+                        "bad parameters speed %f, pitch %f",playbackRate->mSpeed,
+                        playbackRate->mPitch);
+                if (track.setPlaybackRate(*playbackRate)) {
+                    ALOGV("setParameter(TIMESTRETCH, PLAYBACK_RATE, STRETCH_MODE, FALLBACK_MODE "
+                            "%f %f %d %d",
+                            playbackRate->mSpeed,
+                            playbackRate->mPitch,
+                            playbackRate->mStretchMode,
+                            playbackRate->mFallbackMode);
+                    // invalidateState(1 << name);
+                }
+            } break;
+            default:
+                LOG_ALWAYS_FATAL("setParameter timestretch: bad param %d", param);
+            }
+            break;
 
     default:
         LOG_ALWAYS_FATAL("setParameter: bad target %d", target);
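
For reference, a hedged sketch of how a caller (normally AudioFlinger's mixer thread) would drive the new TIMESTRETCH target; the mixer pointer and track name here are assumed to exist:

    // Assumes 'mixer' is an AudioMixer* and 'name' a valid track name from getTrackName().
    AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;  // default also used at track setup above
    rate.mSpeed = 1.5f;   // play 1.5x faster
    rate.mPitch = 1.0f;   // keep the original pitch
    mixer->setParameter(name, AudioMixer::TIMESTRETCH, AudioMixer::PLAYBACK_RATE, &rate);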
@@ -931,11 +769,10 @@
                 // FIXME this is flawed for dynamic sample rates, as we choose the resampler
                 // quality level based on the initial ratio, but that could change later.
                 // Should have a way to distinguish tracks with static ratios vs. dynamic ratios.
-                if (!((trackSampleRate == 44100 && devSampleRate == 48000) ||
-                      (trackSampleRate == 48000 && devSampleRate == 44100))) {
-                    quality = AudioResampler::DYN_LOW_QUALITY;
-                } else {
+                if (isMusicRate(trackSampleRate)) {
                     quality = AudioResampler::DEFAULT_QUALITY;
+                } else {
+                    quality = AudioResampler::DYN_LOW_QUALITY;
                 }
 
                 // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
@@ -957,6 +794,30 @@
     return false;
 }
 
+bool AudioMixer::track_t::setPlaybackRate(const AudioPlaybackRate &playbackRate)
+{
+    if ((mTimestretchBufferProvider == NULL &&
+            fabs(playbackRate.mSpeed - mPlaybackRate.mSpeed) < AUDIO_TIMESTRETCH_SPEED_MIN_DELTA &&
+            fabs(playbackRate.mPitch - mPlaybackRate.mPitch) < AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) ||
+            isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
+        return false;
+    }
+    mPlaybackRate = playbackRate;
+    if (mTimestretchBufferProvider == NULL) {
+        // TODO: Remove MONO_HACK. Resampler sees #channels after the downmixer
+        // but if none exists, it is the channel count (1 for mono).
+        const int timestretchChannelCount = downmixerBufferProvider != NULL
+                ? mMixerChannelCount : channelCount;
+        mTimestretchBufferProvider = new TimestretchBufferProvider(timestretchChannelCount,
+                mMixerInFormat, sampleRate, playbackRate);
+        reconfigureBufferProviders();
+    } else {
+        reinterpret_cast<TimestretchBufferProvider*>(mTimestretchBufferProvider)
+                ->setPlaybackRate(playbackRate);
+    }
+    return true;
+}
+
 /* Checks to see if the volume ramp has completed and clears the increment
  * variables appropriately.
  *
@@ -974,7 +835,8 @@
 {
     if (useFloat) {
         for (uint32_t i = 0; i < MAX_NUM_VOLUMES; i++) {
-            if (mVolumeInc[i] != 0 && fabs(mVolume[i] - mPrevVolume[i]) <= fabs(mVolumeInc[i])) {
+            if ((mVolumeInc[i] > 0 && mPrevVolume[i] + mVolumeInc[i] >= mVolume[i]) ||
+                     (mVolumeInc[i] < 0 && mPrevVolume[i] + mVolumeInc[i] <= mVolume[i])) {
                 volumeInc[i] = 0;
                 prevVolume[i] = volume[i] << 16;
                 mVolumeInc[i] = 0.;
@@ -1032,10 +894,15 @@
     if (mState.tracks[name].mReformatBufferProvider != NULL) {
         mState.tracks[name].mReformatBufferProvider->reset();
     } else if (mState.tracks[name].downmixerBufferProvider != NULL) {
+        mState.tracks[name].downmixerBufferProvider->reset();
+    } else if (mState.tracks[name].mPostDownmixReformatBufferProvider != NULL) {
+        mState.tracks[name].mPostDownmixReformatBufferProvider->reset();
+    } else if (mState.tracks[name].mTimestretchBufferProvider != NULL) {
+        mState.tracks[name].mTimestretchBufferProvider->reset();
     }
 
     mState.tracks[name].mInputBufferProvider = bufferProvider;
-    reconfigureBufferProviders(&mState.tracks[name]);
+    mState.tracks[name].reconfigureBufferProviders();
 }
 
 
@@ -1114,7 +981,8 @@
             } else {
                 if ((n & NEEDS_CHANNEL_COUNT__MASK) == NEEDS_CHANNEL_1){
                     t.hook = getTrackHook(
-                            t.mMixerChannelCount == 2 // TODO: MONO_HACK.
+                            (t.mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO  // TODO: MONO_HACK
+                                    && t.channelMask == AUDIO_CHANNEL_OUT_MONO)
                                 ? TRACKTYPE_NORESAMPLEMONO : TRACKTYPE_NORESAMPLE,
                             t.mMixerChannelCount,
                             t.mMixerInFormat, t.mMixerFormat);
@@ -2236,4 +2104,4 @@
 }
 
 // ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace android
diff --git a/services/audioflinger/AudioMixer.h b/services/audioflinger/AudioMixer.h
index f4f142b..7165c6c 100644
--- a/services/audioflinger/AudioMixer.h
+++ b/services/audioflinger/AudioMixer.h
@@ -23,12 +23,14 @@
 
 #include <hardware/audio_effect.h>
 #include <media/AudioBufferProvider.h>
+#include <media/AudioResamplerPublic.h>
 #include <media/nbaio/NBLog.h>
 #include <system/audio.h>
 #include <utils/Compat.h>
 #include <utils/threads.h>
 
 #include "AudioResampler.h"
+#include "BufferProviders.h"
 
 // FIXME This is actually unity gain, which might not be max in future, expressed in U.12
 #define MAX_GAIN_INT AudioMixer::UNITY_GAIN_INT
@@ -72,6 +74,7 @@
         RESAMPLE        = 0x3001,
         RAMP_VOLUME     = 0x3002, // ramp to new volume
         VOLUME          = 0x3003, // don't ramp
+        TIMESTRETCH     = 0x3004,
 
         // set Parameter names
         // for target TRACK
@@ -99,6 +102,9 @@
         VOLUME0         = 0x4200,
         VOLUME1         = 0x4201,
         AUXLEVEL        = 0x4210,
+        // for target TIMESTRETCH
+        PLAYBACK_RATE   = 0x4300, // Configure timestretch on this track name;
+                                  // parameter 'value' is a pointer to the new playback rate.
     };
 
 
@@ -127,10 +133,16 @@
     size_t      getUnreleasedFrames(int name) const;
 
     static inline bool isValidPcmTrackFormat(audio_format_t format) {
-        return format == AUDIO_FORMAT_PCM_16_BIT ||
-                format == AUDIO_FORMAT_PCM_24_BIT_PACKED ||
-                format == AUDIO_FORMAT_PCM_32_BIT ||
-                format == AUDIO_FORMAT_PCM_FLOAT;
+        switch (format) {
+        case AUDIO_FORMAT_PCM_8_BIT:
+        case AUDIO_FORMAT_PCM_16_BIT:
+        case AUDIO_FORMAT_PCM_24_BIT_PACKED:
+        case AUDIO_FORMAT_PCM_32_BIT:
+        case AUDIO_FORMAT_PCM_FLOAT:
+            return true;
+        default:
+            return false;
+        }
     }
 
 private:
@@ -153,7 +165,6 @@
 
     struct state_t;
     struct track_t;
-    class CopyBufferProvider;
 
     typedef void (*hook_t)(track_t* t, int32_t* output, size_t numOutFrames, int32_t* temp,
                            int32_t* aux);
@@ -205,17 +216,38 @@
         int32_t*           auxBuffer;
 
         // 16-byte boundary
+
+        /* Buffer providers are constructed to translate the track input data as needed.
+         *
+         * TODO: perhaps make a single PlaybackConverterProvider class to move
+         * all pre-mixer track buffer conversions outside the AudioMixer class.
+         *
+         * 1) mInputBufferProvider: The AudioTrack buffer provider.
+         * 2) mReformatBufferProvider: If not NULL, performs the audio reformat to
+         *    match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
+         *    requires reformat. For example, it may convert floating point input to
+         *    PCM_16_bit if that's required by the downmixer.
+         * 3) downmixerBufferProvider: If not NULL, performs the channel remixing to match
+         *    the number of channels required by the mixer sink.
+         * 4) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+         *    the downmixer requirements to the mixer engine input requirements.
+         * 5) mTimestretchBufferProvider: Adds timestretching for playback rate
+         */
         AudioBufferProvider*     mInputBufferProvider;    // externally provided buffer provider.
-        CopyBufferProvider*      mReformatBufferProvider; // provider wrapper for reformatting.
-        CopyBufferProvider*      downmixerBufferProvider; // wrapper for channel conversion.
+        PassthruBufferProvider*  mReformatBufferProvider; // provider wrapper for reformatting.
+        PassthruBufferProvider*  downmixerBufferProvider; // wrapper for channel conversion.
+        PassthruBufferProvider*  mPostDownmixReformatBufferProvider;
+        PassthruBufferProvider*  mTimestretchBufferProvider;
 
         int32_t     sessionId;
 
-        // 16-byte boundary
         audio_format_t mMixerFormat;     // output mix format: AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
         audio_format_t mFormat;          // input track format
         audio_format_t mMixerInFormat;   // mix internal format AUDIO_FORMAT_PCM_(FLOAT|16_BIT)
                                          // each track must be converted to this format.
+        audio_format_t mDownmixRequiresFormat;  // required downmixer format
+                                                // AUDIO_FORMAT_PCM_16_BIT if 16 bit necessary
+                                                // AUDIO_FORMAT_INVALID if no required format
 
         float          mVolume[MAX_NUM_VOLUMES];     // floating point set volume
         float          mPrevVolume[MAX_NUM_VOLUMES]; // floating point previous volume
@@ -225,10 +257,11 @@
         float          mPrevAuxLevel;                 // floating point prev aux level
         float          mAuxInc;                       // floating point aux increment
 
-        // 16-byte boundary
         audio_channel_mask_t mMixerChannelMask;
         uint32_t             mMixerChannelCount;
 
+        AudioPlaybackRate    mPlaybackRate;
+
         bool        needsRamp() { return (volumeInc[0] | volumeInc[1] | auxInc) != 0; }
         bool        setResampler(uint32_t trackSampleRate, uint32_t devSampleRate);
         bool        doesResample() const { return resampler != NULL; }
@@ -236,6 +269,13 @@
         void        adjustVolumeRamp(bool aux, bool useFloat = false);
         size_t      getUnreleasedFrames() const { return resampler != NULL ?
                                                     resampler->getUnreleasedFrames() : 0; };
+
+        status_t    prepareForDownmix();
+        void        unprepareForDownmix();
+        status_t    prepareForReformat();
+        void        unprepareForReformat();
+        bool        setPlaybackRate(const AudioPlaybackRate &playbackRate);
+        void        reconfigureBufferProviders();
     };
 
     typedef void (*process_hook_t)(state_t* state, int64_t pts);
@@ -254,112 +294,6 @@
         track_t         tracks[MAX_NUM_TRACKS] __attribute__((aligned(32)));
     };
 
-    // Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
-    // and ReformatBufferProvider.
-    // It handles a private buffer for use in converting format or channel masks from the
-    // input data to a form acceptable by the mixer.
-    // TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
-    // processing pipeline.
-    class CopyBufferProvider : public AudioBufferProvider {
-    public:
-        // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
-        // If bufferFrameCount is 0, no private buffer is created and in-place modification of
-        // the upstream buffer provider's buffers is performed by copyFrames().
-        CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
-                size_t bufferFrameCount);
-        virtual ~CopyBufferProvider();
-
-        // Overrides AudioBufferProvider methods
-        virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
-        virtual void releaseBuffer(Buffer* buffer);
-
-        // Other public methods
-
-        // call this to release the buffer to the upstream provider.
-        // treat it as an audio discontinuity for future samples.
-        virtual void reset();
-
-        // this function should be supplied by the derived class.  It converts
-        // #frames in the *src pointer to the *dst pointer.  It is public because
-        // some providers will allow this to work on arbitrary buffers outside
-        // of the internal buffers.
-        virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
-
-        // set the upstream buffer provider. Consider calling "reset" before this function.
-        void setBufferProvider(AudioBufferProvider *p) {
-            mTrackBufferProvider = p;
-        }
-
-    protected:
-        AudioBufferProvider* mTrackBufferProvider;
-        const size_t         mInputFrameSize;
-        const size_t         mOutputFrameSize;
-    private:
-        AudioBufferProvider::Buffer mBuffer;
-        const size_t         mLocalBufferFrameCount;
-        void*                mLocalBufferData;
-        size_t               mConsumed;
-    };
-
-    // DownmixerBufferProvider wraps a track AudioBufferProvider to provide
-    // position dependent downmixing by an Audio Effect.
-    class DownmixerBufferProvider : public CopyBufferProvider {
-    public:
-        DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
-                audio_channel_mask_t outputChannelMask, audio_format_t format,
-                uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
-        virtual ~DownmixerBufferProvider();
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-        bool isValid() const { return mDownmixHandle != NULL; }
-
-        static status_t init();
-        static bool isMultichannelCapable() { return sIsMultichannelCapable; }
-
-    protected:
-        effect_handle_t    mDownmixHandle;
-        effect_config_t    mDownmixConfig;
-
-        // effect descriptor for the downmixer used by the mixer
-        static effect_descriptor_t sDwnmFxDesc;
-        // indicates whether a downmix effect has been found and is usable by this mixer
-        static bool                sIsMultichannelCapable;
-        // FIXME: should we allow effects outside of the framework?
-        // We need to here. A special ioId that must be <= -2 so it does not map to a session.
-        static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
-    };
-
-    // RemixBufferProvider wraps a track AudioBufferProvider to perform an
-    // upmix or downmix to the proper channel count and mask.
-    class RemixBufferProvider : public CopyBufferProvider {
-    public:
-        RemixBufferProvider(audio_channel_mask_t inputChannelMask,
-                audio_channel_mask_t outputChannelMask, audio_format_t format,
-                size_t bufferFrameCount);
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-    protected:
-        const audio_format_t mFormat;
-        const size_t         mSampleSize;
-        const size_t         mInputChannels;
-        const size_t         mOutputChannels;
-        int8_t               mIdxAry[sizeof(uint32_t)*8]; // 32 bits => channel indices
-    };
-
-    // ReformatBufferProvider wraps a track AudioBufferProvider to convert the input data
-    // to an acceptable mixer input format type.
-    class ReformatBufferProvider : public CopyBufferProvider {
-    public:
-        ReformatBufferProvider(int32_t channels,
-                audio_format_t inputFormat, audio_format_t outputFormat,
-                size_t bufferFrameCount);
-        virtual void copyFrames(void *dst, const void *src, size_t frames);
-
-    protected:
-        const int32_t        mChannels;
-        const audio_format_t mInputFormat;
-        const audio_format_t mOutputFormat;
-    };
-
     // bitmask of allocated track names, where bit 0 corresponds to TRACK0 etc.
     uint32_t        mTrackNames;
 
@@ -382,14 +316,6 @@
     bool setChannelMasks(int name,
             audio_channel_mask_t trackChannelMask, audio_channel_mask_t mixerChannelMask);
 
-    // TODO: remove unused trackName/trackNum from functions below.
-    static status_t initTrackDownmix(track_t* pTrack, int trackName);
-    static status_t prepareTrackForDownmix(track_t* pTrack, int trackNum);
-    static void unprepareTrackForDownmix(track_t* pTrack, int trackName);
-    static status_t prepareTrackForReformat(track_t* pTrack, int trackNum);
-    static void unprepareTrackForReformat(track_t* pTrack, int trackName);
-    static void reconfigureBufferProviders(track_t* pTrack);
-
     static void track__genericResample(track_t* t, int32_t* out, size_t numFrames, int32_t* temp,
             int32_t* aux);
     static void track__nop(track_t* t, int32_t* out, size_t numFrames, int32_t* temp, int32_t* aux);
@@ -465,6 +391,6 @@
 };
 
 // ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace android
 
 #endif // ANDROID_AUDIO_MIXER_H
diff --git a/services/audioflinger/AudioMixerOps.h b/services/audioflinger/AudioMixerOps.h
index 2678857..8d74024 100644
--- a/services/audioflinger/AudioMixerOps.h
+++ b/services/audioflinger/AudioMixerOps.h
@@ -164,13 +164,12 @@
 template <>
 inline int16_t MixMul<int16_t, int16_t, float>(int16_t value, float volume) {
     LOG_ALWAYS_FATAL("MixMul<int16_t, int16_t, float> Runtime Should not be here");
-    return value * volume;
+    return clamp16_from_float(MixMul<float, int16_t, float>(value, volume));
 }
 
 template <>
 inline int16_t MixMul<int16_t, float, float>(float value, float volume) {
-    static const float q_15_from_float = (1 << 15);
-    return value * volume * q_15_from_float;
+    return clamp16_from_float(value * volume);
 }
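
Both specializations now route through clamp16_from_float() (from audio_utils/primitives.h) instead of an unclamped multiply. A behavioral sketch of that conversion, not the optimized bit-twiddling implementation (rounding details differ):

    #include <stdint.h>

    // Scale a float in the nominal [-1.0, 1.0) range to Q15 and saturate
    // instead of wrapping (illustrative only).
    static inline int16_t clamp16_from_float_sketch(float f) {
        float scaled = f * 32768.f;             // 1 << 15
        if (scaled >= 32767.f) return 32767;    // saturate positive
        if (scaled <= -32768.f) return -32768;  // saturate negative
        return (int16_t)scaled;
    }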
 
 /*
diff --git a/services/audioflinger/AudioResampler.cpp b/services/audioflinger/AudioResampler.cpp
index 46e3d6c..e49b7b1 100644
--- a/services/audioflinger/AudioResampler.cpp
+++ b/services/audioflinger/AudioResampler.cpp
@@ -41,7 +41,7 @@
     AudioResamplerOrder1(int inChannelCount, int32_t sampleRate) :
         AudioResampler(inChannelCount, sampleRate, LOW_QUALITY), mX0L(0), mX0R(0) {
     }
-    virtual void resample(int32_t* out, size_t outFrameCount,
+    virtual size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 private:
     // number of bits used in interpolation multiply - 15 bits avoids overflow
@@ -51,9 +51,9 @@
     static const int kPreInterpShift = kNumPhaseBits - kNumInterpBits;
 
     void init() {}
-    void resampleMono16(int32_t* out, size_t outFrameCount,
+    size_t resampleMono16(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
-    void resampleStereo16(int32_t* out, size_t outFrameCount,
+    size_t resampleStereo16(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 #ifdef ASM_ARM_RESAMP1  // asm optimisation for ResamplerOrder1
     void AsmMono16Loop(int16_t *in, int32_t* maxOutPt, int32_t maxInIdx,
@@ -329,7 +329,7 @@
 
 // ----------------------------------------------------------------------------
 
-void AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerOrder1::resample(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider) {
 
     // should never happen, but we overflow if it does
@@ -338,15 +338,16 @@
     // select the appropriate resampler
     switch (mChannelCount) {
     case 1:
-        resampleMono16(out, outFrameCount, provider);
-        break;
+        return resampleMono16(out, outFrameCount, provider);
     case 2:
-        resampleStereo16(out, outFrameCount, provider);
-        break;
+        return resampleStereo16(out, outFrameCount, provider);
+    default:
+        LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
+        return 0;
     }
 }
 
-void AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerOrder1::resampleStereo16(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider) {
 
     int32_t vl = mVolume[0];
@@ -442,9 +443,10 @@
     // save state
     mInputIndex = inputIndex;
     mPhaseFraction = phaseFraction;
+    return outputIndex / 2 /* channels for stereo */;
 }
 
-void AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerOrder1::resampleMono16(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider) {
 
     int32_t vl = mVolume[0];
@@ -538,6 +540,7 @@
     // save state
     mInputIndex = inputIndex;
     mPhaseFraction = phaseFraction;
+    return outputIndex;
 }
 
 #ifdef ASM_ARM_RESAMP1  // asm optimisation for ResamplerOrder1
diff --git a/services/audioflinger/AudioResampler.h b/services/audioflinger/AudioResampler.h
index 069d946..a8e3e6f 100644
--- a/services/audioflinger/AudioResampler.h
+++ b/services/audioflinger/AudioResampler.h
@@ -67,12 +67,18 @@
     // Resample int16_t samples from provider and accumulate into 'out'.
     // A mono provider delivers a sequence of samples.
     // A stereo provider delivers a sequence of interleaved pairs of samples.
-    // Multi-channel providers are not supported.
+    //
     // In either case, 'out' holds interleaved pairs of fixed-point Q4.27.
     // That is, for a mono provider, there is an implicit up-channeling.
     // Since this method accumulates, the caller is responsible for clearing 'out' initially.
-    // FIXME assumes provider is always successful; it should return the actual frame count.
-    virtual void resample(int32_t* out, size_t outFrameCount,
+    //
+    // For a float resampler, 'out' holds interleaved pairs of float samples.
+    //
+    // Multichannel interleaved frames for n > 2 is supported for quality DYN_LOW_QUALITY,
+    // DYN_MED_QUALITY, and DYN_HIGH_QUALITY.
+    //
+    // Returns the number of frames resampled into the out buffer.
+    virtual size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider) = 0;
 
     virtual void reset();
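
Since resample() now reports how many frames it actually produced, callers no longer have to assume the provider always delivers outFrameCount frames. A minimal hedged usage sketch (the resampler, provider, and cleared Q4.27 accumulation buffer are assumed to be set up elsewhere):

    // Assumes: AudioResampler* resampler; AudioBufferProvider* provider;
    // int32_t* accum cleared to zero; size_t wantedFrames requested.
    size_t producedFrames = resampler->resample(accum, wantedFrames, provider);
    if (producedFrames < wantedFrames) {
        // The provider ran dry; only 'producedFrames' interleaved stereo frames
        // of Q4.27 data were accumulated into 'accum'.
    }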
@@ -170,7 +176,6 @@
 };
 
 // ----------------------------------------------------------------------------
-}
-; // namespace android
+} // namespace android
 
 #endif // ANDROID_AUDIO_RESAMPLER_H
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 8f14ff9..172c2a5 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#define LOG_TAG "AudioSRC"
+#define LOG_TAG "AudioResamplerCubic"
 
 #include <stdint.h>
 #include <string.h>
@@ -32,7 +32,7 @@
     memset(&right, 0, sizeof(state));
 }
 
-void AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerCubic::resample(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider) {
 
     // should never happen, but we overflow if it does
@@ -41,15 +41,16 @@
     // select the appropriate resampler
     switch (mChannelCount) {
     case 1:
-        resampleMono16(out, outFrameCount, provider);
-        break;
+        return resampleMono16(out, outFrameCount, provider);
     case 2:
-        resampleStereo16(out, outFrameCount, provider);
-        break;
+        return resampleStereo16(out, outFrameCount, provider);
+    default:
+        LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
+        return 0;
     }
 }
 
-void AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerCubic::resampleStereo16(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider) {
 
     int32_t vl = mVolume[0];
@@ -67,7 +68,7 @@
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL) {
-            return;
+            return 0;
         }
         // ALOGW("New buffer: offset=%p, frames=%dn", mBuffer.raw, mBuffer.frameCount);
     }
@@ -115,9 +116,10 @@
     // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction);
     mInputIndex = inputIndex;
     mPhaseFraction = phaseFraction;
+    return outputIndex / 2 /* channels for stereo */;
 }
 
-void AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerCubic::resampleMono16(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider) {
 
     int32_t vl = mVolume[0];
@@ -135,7 +137,7 @@
         mBuffer.frameCount = inFrameCount;
         provider->getNextBuffer(&mBuffer, mPTS);
         if (mBuffer.raw == NULL) {
-            return;
+            return 0;
         }
         // ALOGW("New buffer: offset=%p, frames=%d", mBuffer.raw, mBuffer.frameCount);
     }
@@ -182,8 +184,8 @@
     // ALOGW("Done: index=%d, fraction=%u", inputIndex, phaseFraction);
     mInputIndex = inputIndex;
     mPhaseFraction = phaseFraction;
+    return outputIndex;
 }
 
 // ----------------------------------------------------------------------------
-}
-; // namespace android
+} // namespace android
diff --git a/services/audioflinger/AudioResamplerCubic.h b/services/audioflinger/AudioResamplerCubic.h
index b315da5..4b45b0b 100644
--- a/services/audioflinger/AudioResamplerCubic.h
+++ b/services/audioflinger/AudioResamplerCubic.h
@@ -31,7 +31,7 @@
     AudioResamplerCubic(int inChannelCount, int32_t sampleRate) :
         AudioResampler(inChannelCount, sampleRate, MED_QUALITY) {
     }
-    virtual void resample(int32_t* out, size_t outFrameCount,
+    virtual size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 private:
     // number of bits used in interpolation multiply - 14 bits avoids overflow
@@ -43,9 +43,9 @@
         int32_t a, b, c, y0, y1, y2, y3;
     } state;
     void init();
-    void resampleMono16(int32_t* out, size_t outFrameCount,
+    size_t resampleMono16(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
-    void resampleStereo16(int32_t* out, size_t outFrameCount,
+    size_t resampleStereo16(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
     static inline int32_t interp(state* p, int32_t x) {
         return (((((p->a * x >> 14) + p->b) * x >> 14) + p->c) * x >> 14) + p->y1;
@@ -63,6 +63,6 @@
 };
 
 // ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_CUBIC_H*/
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index 0eeb201..6481b85 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -477,15 +477,15 @@
 }
 
 template<typename TC, typename TI, typename TO>
-void AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerDyn<TC, TI, TO>::resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider)
 {
-    (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider);
+    return (this->*mResampleFunc)(reinterpret_cast<TO*>(out), outFrameCount, provider);
 }
 
 template<typename TC, typename TI, typename TO>
 template<int CHANNELS, bool LOCKED, int STRIDE>
-void AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount,
+size_t AudioResamplerDyn<TC, TI, TO>::resample(TO* out, size_t outFrameCount,
         AudioBufferProvider* provider)
 {
     // TODO Mono -> Mono is not supported. OUTPUT_CHANNELS reflects minimum of stereo out.
@@ -610,6 +610,7 @@
     ALOG_ASSERT(mBuffer.frameCount == 0); // there must be no frames in the buffer
     mInBuffer.setImpulse(impulse);
     mPhaseFraction = phaseFraction;
+    return outputIndex / OUTPUT_CHANNELS;
 }
 
 /* instantiate templates used by AudioResampler::create */
@@ -618,4 +619,4 @@
 template class AudioResamplerDyn<int32_t, int16_t, int32_t>;
 
 // ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace android
diff --git a/services/audioflinger/AudioResamplerDyn.h b/services/audioflinger/AudioResamplerDyn.h
index e886a68..3b1c381 100644
--- a/services/audioflinger/AudioResamplerDyn.h
+++ b/services/audioflinger/AudioResamplerDyn.h
@@ -52,7 +52,7 @@
 
     virtual void setVolume(float left, float right);
 
-    virtual void resample(int32_t* out, size_t outFrameCount,
+    virtual size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 
 private:
@@ -111,10 +111,10 @@
             int inSampleRate, int outSampleRate, double tbwCheat);
 
     template<int CHANNELS, bool LOCKED, int STRIDE>
-    void resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider);
+    size_t resample(TO* out, size_t outFrameCount, AudioBufferProvider* provider);
 
     // define a pointer to member function type for resample
-    typedef void (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out,
+    typedef size_t (AudioResamplerDyn<TC, TI, TO>::*resample_ABP_t)(TO* out,
             size_t outFrameCount, AudioBufferProvider* provider);
 
     // data - the contiguous storage and layout of these is important.
@@ -127,6 +127,6 @@
               void* mCoefBuffer;       // if a filter is created, this is not null
 };
 
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_DYN_H*/
diff --git a/services/audioflinger/AudioResamplerFirGen.h b/services/audioflinger/AudioResamplerFirGen.h
index f3718b6..ad18965 100644
--- a/services/audioflinger/AudioResamplerFirGen.h
+++ b/services/audioflinger/AudioResamplerFirGen.h
@@ -204,7 +204,8 @@
 
 template <>
 struct I0ATerm<0> { // 1/sqrt(2*PI);
-    static const CONSTEXPR double value = 0.398942280401432677939946059934381868475858631164934657665925;
+    static const CONSTEXPR double value =
+            0.398942280401432677939946059934381868475858631164934657665925;
 };
 
 #if USE_HORNERS_METHOD
@@ -706,6 +707,6 @@
     }
 }
 
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_FIR_GEN_H*/
diff --git a/services/audioflinger/AudioResamplerFirOps.h b/services/audioflinger/AudioResamplerFirOps.h
index bf2163f..658285d 100644
--- a/services/audioflinger/AudioResamplerFirOps.h
+++ b/services/audioflinger/AudioResamplerFirOps.h
@@ -25,7 +25,7 @@
 #define USE_INLINE_ASSEMBLY (false)
 #endif
 
-#if USE_INLINE_ASSEMBLY && defined(__ARM_NEON__)
+#if defined(__aarch64__) || defined(__ARM_NEON__)
 #define USE_NEON (true)
 #include <arm_neon.h>
 #else
@@ -158,6 +158,6 @@
 #endif
 }
 
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_FIR_OPS_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcess.h b/services/audioflinger/AudioResamplerFirProcess.h
index efc8055..176202e 100644
--- a/services/audioflinger/AudioResamplerFirProcess.h
+++ b/services/audioflinger/AudioResamplerFirProcess.h
@@ -174,7 +174,8 @@
  * Process() calls ProcessBase() with TFUNC = InterpCompute, for interpolated phase.
  */
 
-template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO, typename TINTERP>
+template <int CHANNELS, int STRIDE, typename TFUNC, typename TC, typename TI, typename TO,
+        typename TINTERP>
 static inline
 void ProcessBase(TO* const out,
         size_t count,
@@ -242,6 +243,9 @@
     }
 }
 
+/* Calculates a single output frame from a polyphase resampling filter.
+ * See Process() for parameter details.
+ */
 template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO>
 static inline
 void ProcessL(TO* const out,
@@ -255,6 +259,39 @@
     ProcessBase<CHANNELS, STRIDE, InterpNull>(out, count, coefsP, coefsN, sP, sN, 0, volumeLR);
 }
 
+/*
+ * Calculates a single output frame from a polyphase resampling filter,
+ * with filter phase interpolation.
+ *
+ * @param out should point to the output buffer with space for at least one output frame.
+ *
+ * @param count should be half the size of the total filter length (halfNumCoefs), as we
+ * use symmetry in filter coefficients to evaluate two dot products.
+ *
+ * @param coefsP is one phase of the polyphase filter bank of size halfNumCoefs, corresponding
+ * to the positive sP.
+ *
+ * @param coefsN is one phase of the polyphase filter bank of size halfNumCoefs, corresponding
+ * to the negative sN.
+ *
+ * @param coefsP1 is the next phase of coefsP (used for interpolation).
+ *
+ * @param coefsN1 is the next phase of coefsN (used for interpolation).
+ *
+ * @param sP is the positive half of the coefficients (as viewed by a convolution),
+ * starting at the original samples pointer and decrementing (by CHANNELS).
+ *
+ * @param sN is the negative half of the samples (as viewed by a convolution),
+ * starting at the original samples pointer + CHANNELS and incrementing (by CHANNELS).
+ *
+ * @param lerpP The fractional siting between the polyphase indices is given by the bits
+ * below coefShift. See fir() for details.
+ *
+ * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
+ * expressed as a S32 integer or float.  A negative value inverts the channel 180 degrees.
+ * The pointer volumeLR should be aligned to a minimum of 8 bytes.
+ * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
+ */
 template <int CHANNELS, int STRIDE, typename TC, typename TI, typename TO, typename TINTERP>
 static inline
 void Process(TO* const out,
@@ -268,11 +305,12 @@
         TINTERP lerpP,
         const TO* const volumeLR)
 {
-    ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP, volumeLR);
+    ProcessBase<CHANNELS, STRIDE, InterpCompute>(out, count, coefsP, coefsN, sP, sN, lerpP,
+            volumeLR);
 }
 
 /*
- * Calculates a single output frame (two samples) from input sample pointer.
+ * Calculates a single output frame from input sample pointer.
  *
  * This sets up the params for the accelerated Process() and ProcessL()
  * functions to do the appropriate dot products.
@@ -307,7 +345,7 @@
  * the positive half of the filter is dot product from samples to samples-halfNumCoefs+1.
  *
  * @param volumeLR is a pointer to an array of two 32 bit volume values, one per stereo channel,
- * expressed as a S32 integer.  A negative value inverts the channel 180 degrees.
+ * expressed as a S32 integer or float.  A negative value inverts the channel 180 degrees.
  * The pointer volumeLR should be aligned to a minimum of 8 bytes.
  * A typical value for volume is 0x1000 to align to a unity gain output of 20.12.
  *
@@ -396,6 +434,6 @@
     }
 }
 
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_H*/
diff --git a/services/audioflinger/AudioResamplerFirProcessNeon.h b/services/audioflinger/AudioResamplerFirProcessNeon.h
index f311cef..3de9edd 100644
--- a/services/audioflinger/AudioResamplerFirProcessNeon.h
+++ b/services/audioflinger/AudioResamplerFirProcessNeon.h
@@ -22,14 +22,35 @@
 // depends on AudioResamplerFirOps.h, AudioResamplerFirProcess.h
 
 #if USE_NEON
+
+// use intrinsics if inline arm32 assembly is not possible
+#if !USE_INLINE_ASSEMBLY
+#define USE_INTRINSIC
+#endif
+
+// following intrinsics available only on ARM 64 bit ACLE
+#ifndef __aarch64__
+#undef vld1q_f32_x2
+#undef vld1q_s32_x2
+#endif
+
+#define TO_STRING2(x) #x
+#define TO_STRING(x) TO_STRING2(x)
+// uncomment to print GCC version, may be relevant for intrinsic optimizations
+/* #pragma message ("GCC version: " TO_STRING(__GNUC__) \
+        "." TO_STRING(__GNUC_MINOR__) \
+        "." TO_STRING(__GNUC_PATCHLEVEL__)) */
+
 //
-// NEON specializations are enabled for Process() and ProcessL()
+// NEON specializations are enabled for Process() and ProcessL() in AudioResamplerFirProcess.h
 //
-// TODO: Stride 16 and Stride 8 can be combined with one pass stride 8 (if necessary)
-// and looping stride 16 (or vice versa). This has some polyphase coef data alignment
-// issues with S16 coefs. Consider this later.
+// Two variants are presented here:
+// ARM NEON inline assembly which appears up to 10-15% faster than intrinsics (gcc 4.9) for arm32.
+// ARM NEON intrinsics which can also be used by arm64 and x86/64 with NEON header.
+//
 
 // Macros to save a mono/stereo accumulator sample in q0 (and q4) as stereo out.
+// These are only used for inline assembly.
 #define ASSEMBLY_ACCUMULATE_MONO \
         "vld1.s32       {d2}, [%[vLR]:64]        \n"/* (1) load volumes */\
         "vld1.s32       {d3}, %[out]             \n"/* (2) unaligned load the output */\
@@ -49,6 +70,458 @@
         "vqadd.s32      d3, d3, d0               \n"/* (1+4d) accumulate result (saturating)*/\
         "vst1.s32       {d3}, %[out]             \n"/* (2+2d)store result*/
 
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessNeonIntrinsic(int32_t* out,
+        int count,
+        const int16_t* coefsP,
+        const int16_t* coefsN,
+        const int16_t* sP,
+        const int16_t* sN,
+        const int32_t* volumeLR,
+        uint32_t lerpP,
+        const int16_t* coefsP1,
+        const int16_t* coefsN1)
+{
+    ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    coefsP = (const int16_t*)__builtin_assume_aligned(coefsP, 16);
+    coefsN = (const int16_t*)__builtin_assume_aligned(coefsN, 16);
+
+    int16x4_t interp;
+    if (!FIXED) {
+        interp = vdup_n_s16(lerpP);
+        //interp = (int16x4_t)vset_lane_s32 ((int32x2_t)lerpP, interp, 0);
+        coefsP1 = (const int16_t*)__builtin_assume_aligned(coefsP1, 16);
+        coefsN1 = (const int16_t*)__builtin_assume_aligned(coefsN1, 16);
+    }
+    int32x4_t accum, accum2;
+    // warning uninitialized if we use veorq_s32
+    // (alternative to below) accum = veorq_s32(accum, accum);
+    accum = vdupq_n_s32(0);
+    if (CHANNELS == 2) {
+        // (alternative to below) accum2 = veorq_s32(accum2, accum2);
+        accum2 = vdupq_n_s32(0);
+    }
+    do {
+        int16x8_t posCoef = vld1q_s16(coefsP);
+        coefsP += 8;
+        int16x8_t negCoef = vld1q_s16(coefsN);
+        coefsN += 8;
+        if (!FIXED) { // interpolate
+            int16x8_t posCoef1 = vld1q_s16(coefsP1);
+            coefsP1 += 8;
+            int16x8_t negCoef1 = vld1q_s16(coefsN1);
+            coefsN1 += 8;
+
+            posCoef1 = vsubq_s16(posCoef1, posCoef);
+            negCoef = vsubq_s16(negCoef, negCoef1);
+
+            posCoef1 = vqrdmulhq_lane_s16(posCoef1, interp, 0);
+            negCoef = vqrdmulhq_lane_s16(negCoef, interp, 0);
+
+            posCoef = vaddq_s16(posCoef, posCoef1);
+            negCoef = vaddq_s16(negCoef, negCoef1);
+        }
+        switch (CHANNELS) {
+        case 1: {
+            int16x8_t posSamp = vld1q_s16(sP);
+            int16x8_t negSamp = vld1q_s16(sN);
+            sN += 8;
+            posSamp = vrev64q_s16(posSamp);
+
+            // dot product
+            accum = vmlal_s16(accum, vget_low_s16(posSamp), vget_high_s16(posCoef)); // reversed
+            accum = vmlal_s16(accum, vget_high_s16(posSamp), vget_low_s16(posCoef)); // reversed
+            accum = vmlal_s16(accum, vget_low_s16(negSamp), vget_low_s16(negCoef));
+            accum = vmlal_s16(accum, vget_high_s16(negSamp), vget_high_s16(negCoef));
+            sP -= 8;
+        } break;
+        case 2: {
+            int16x8x2_t posSamp = vld2q_s16(sP);
+            int16x8x2_t negSamp = vld2q_s16(sN);
+            sN += 16;
+            posSamp.val[0] = vrev64q_s16(posSamp.val[0]);
+            posSamp.val[1] = vrev64q_s16(posSamp.val[1]);
+
+            // dot product
+            accum = vmlal_s16(accum, vget_low_s16(posSamp.val[0]), vget_high_s16(posCoef)); // r
+            accum = vmlal_s16(accum, vget_high_s16(posSamp.val[0]), vget_low_s16(posCoef)); // r
+            accum2 = vmlal_s16(accum2, vget_low_s16(posSamp.val[1]), vget_high_s16(posCoef)); // r
+            accum2 = vmlal_s16(accum2, vget_high_s16(posSamp.val[1]), vget_low_s16(posCoef)); // r
+            accum = vmlal_s16(accum, vget_low_s16(negSamp.val[0]), vget_low_s16(negCoef));
+            accum = vmlal_s16(accum, vget_high_s16(negSamp.val[0]), vget_high_s16(negCoef));
+            accum2 = vmlal_s16(accum2, vget_low_s16(negSamp.val[1]), vget_low_s16(negCoef));
+            accum2 = vmlal_s16(accum2, vget_high_s16(negSamp.val[1]), vget_high_s16(negCoef));
+            sP -= 16;
+        } break;
+        }
+    } while (count -= 8);
+
+    // multiply by volume and save
+    volumeLR = (const int32_t*)__builtin_assume_aligned(volumeLR, 8);
+    int32x2_t vLR = vld1_s32(volumeLR);
+    int32x2_t outSamp = vld1_s32(out);
+    // combine and funnel down accumulator
+    int32x2_t outAccum = vpadd_s32(vget_low_s32(accum), vget_high_s32(accum));
+    if (CHANNELS == 1) {
+        // duplicate accum to both L and R
+        outAccum = vpadd_s32(outAccum, outAccum);
+    } else if (CHANNELS == 2) {
+        // accum2 contains R, fold in
+        int32x2_t outAccum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
+        outAccum = vpadd_s32(outAccum, outAccum2);
+    }
+    outAccum = vqrdmulh_s32(outAccum, vLR);
+    outSamp = vqadd_s32(outSamp, outAccum);
+    vst1_s32(out, outSamp);
+}
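
[Editorial sketch, not part of the patch: a scalar equivalent of what the mono (CHANNELS == 1), fixed-coefficient path above accumulates, before the volume scaling and saturating add into out[]. The function name is illustrative, the Q15 rounding/saturation of vmlal/vqrdmulh is omitted, and sP/sN here are the caller's pointers before the internal sP adjustment at the top of the function.]

    #include <stdint.h>

    // Positive-side samples are consumed in reverse order against coefsP,
    // negative-side samples in forward order against coefsN.
    static int64_t firDotProductMonoRef(const int16_t* coefsP, const int16_t* coefsN,
                                        const int16_t* sP, const int16_t* sN,
                                        int count /* multiple of 8 */)
    {
        int64_t accum = 0;
        for (int i = 0; i < count; ++i) {
            accum += (int32_t)sP[-i] * coefsP[i];   // reversed positive half
            accum += (int32_t)sN[i]  * coefsN[i];   // forward negative half
        }
        return accum;
    }
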
+
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessNeonIntrinsic(int32_t* out,
+        int count,
+        const int32_t* coefsP,
+        const int32_t* coefsN,
+        const int16_t* sP,
+        const int16_t* sN,
+        const int32_t* volumeLR,
+        uint32_t lerpP,
+        const int32_t* coefsP1,
+        const int32_t* coefsN1)
+{
+    ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    coefsP = (const int32_t*)__builtin_assume_aligned(coefsP, 16);
+    coefsN = (const int32_t*)__builtin_assume_aligned(coefsN, 16);
+
+    int32x2_t interp;
+    if (!FIXED) {
+        interp = vdup_n_s32(lerpP);
+        coefsP1 = (const int32_t*)__builtin_assume_aligned(coefsP1, 16);
+        coefsN1 = (const int32_t*)__builtin_assume_aligned(coefsN1, 16);
+    }
+    int32x4_t accum, accum2;
+    // warning uninitialized if we use veorq_s32
+    // (alternative to below) accum = veorq_s32(accum, accum);
+    accum = vdupq_n_s32(0);
+    if (CHANNELS == 2) {
+        // (alternative to below) accum2 = veorq_s32(accum2, accum2);
+        accum2 = vdupq_n_s32(0);
+    }
+    do {
+#ifdef vld1q_s32_x2
+        int32x4x2_t posCoef = vld1q_s32_x2(coefsP);
+        coefsP += 8;
+        int32x4x2_t negCoef = vld1q_s32_x2(coefsN);
+        coefsN += 8;
+#else
+        int32x4x2_t posCoef;
+        posCoef.val[0] = vld1q_s32(coefsP);
+        coefsP += 4;
+        posCoef.val[1] = vld1q_s32(coefsP);
+        coefsP += 4;
+        int32x4x2_t negCoef;
+        negCoef.val[0] = vld1q_s32(coefsN);
+        coefsN += 4;
+        negCoef.val[1] = vld1q_s32(coefsN);
+        coefsN += 4;
+#endif
+        if (!FIXED) { // interpolate
+#ifdef vld1q_s32_x2
+            int32x4x2_t posCoef1 = vld1q_s32_x2(coefsP1);
+            coefsP1 += 8;
+            int32x4x2_t negCoef1 = vld1q_s32_x2(coefsN1);
+            coefsN1 += 8;
+#else
+            int32x4x2_t posCoef1;
+            posCoef1.val[0] = vld1q_s32(coefsP1);
+            coefsP1 += 4;
+            posCoef1.val[1] = vld1q_s32(coefsP1);
+            coefsP1 += 4;
+            int32x4x2_t negCoef1;
+            negCoef1.val[0] = vld1q_s32(coefsN1);
+            coefsN1 += 4;
+            negCoef1.val[1] = vld1q_s32(coefsN1);
+            coefsN1 += 4;
+#endif
+
+            posCoef1.val[0] = vsubq_s32(posCoef1.val[0], posCoef.val[0]);
+            posCoef1.val[1] = vsubq_s32(posCoef1.val[1], posCoef.val[1]);
+            negCoef.val[0] = vsubq_s32(negCoef.val[0], negCoef1.val[0]);
+            negCoef.val[1] = vsubq_s32(negCoef.val[1], negCoef1.val[1]);
+
+            posCoef1.val[0] = vqrdmulhq_lane_s32(posCoef1.val[0], interp, 0);
+            posCoef1.val[1] = vqrdmulhq_lane_s32(posCoef1.val[1], interp, 0);
+            negCoef.val[0] = vqrdmulhq_lane_s32(negCoef.val[0], interp, 0);
+            negCoef.val[1] = vqrdmulhq_lane_s32(negCoef.val[1], interp, 0);
+
+            posCoef.val[0] = vaddq_s32(posCoef.val[0], posCoef1.val[0]);
+            posCoef.val[1] = vaddq_s32(posCoef.val[1], posCoef1.val[1]);
+            negCoef.val[0] = vaddq_s32(negCoef.val[0], negCoef1.val[0]);
+            negCoef.val[1] = vaddq_s32(negCoef.val[1], negCoef1.val[1]);
+        }
+        switch (CHANNELS) {
+        case 1: {
+            int16x8_t posSamp = vld1q_s16(sP);
+            int16x8_t negSamp = vld1q_s16(sN);
+            sN += 8;
+            posSamp = vrev64q_s16(posSamp);
+
+            int32x4_t posSamp0 = vshll_n_s16(vget_low_s16(posSamp), 15);
+            int32x4_t posSamp1 = vshll_n_s16(vget_high_s16(posSamp), 15);
+            int32x4_t negSamp0 = vshll_n_s16(vget_low_s16(negSamp), 15);
+            int32x4_t negSamp1 = vshll_n_s16(vget_high_s16(negSamp), 15);
+
+            // dot product
+            posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
+            posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
+            negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
+            negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
+
+            accum = vaddq_s32(accum, posSamp0);
+            negSamp0 = vaddq_s32(negSamp0, negSamp1);
+            accum = vaddq_s32(accum, posSamp1);
+            accum = vaddq_s32(accum, negSamp0);
+
+            sP -= 8;
+        } break;
+        case 2: {
+            int16x8x2_t posSamp = vld2q_s16(sP);
+            int16x8x2_t negSamp = vld2q_s16(sN);
+            sN += 16;
+            posSamp.val[0] = vrev64q_s16(posSamp.val[0]);
+            posSamp.val[1] = vrev64q_s16(posSamp.val[1]);
+
+            // left
+            int32x4_t posSamp0 = vshll_n_s16(vget_low_s16(posSamp.val[0]), 15);
+            int32x4_t posSamp1 = vshll_n_s16(vget_high_s16(posSamp.val[0]), 15);
+            int32x4_t negSamp0 = vshll_n_s16(vget_low_s16(negSamp.val[0]), 15);
+            int32x4_t negSamp1 = vshll_n_s16(vget_high_s16(negSamp.val[0]), 15);
+
+            // dot product
+            posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
+            posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
+            negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
+            negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
+
+            accum = vaddq_s32(accum, posSamp0);
+            negSamp0 = vaddq_s32(negSamp0, negSamp1);
+            accum = vaddq_s32(accum, posSamp1);
+            accum = vaddq_s32(accum, negSamp0);
+
+            // right
+            posSamp0 = vshll_n_s16(vget_low_s16(posSamp.val[1]), 15);
+            posSamp1 = vshll_n_s16(vget_high_s16(posSamp.val[1]), 15);
+            negSamp0 = vshll_n_s16(vget_low_s16(negSamp.val[1]), 15);
+            negSamp1 = vshll_n_s16(vget_high_s16(negSamp.val[1]), 15);
+
+            // dot product
+            posSamp0 = vqrdmulhq_s32(posSamp0, posCoef.val[1]); // reversed
+            posSamp1 = vqrdmulhq_s32(posSamp1, posCoef.val[0]); // reversed
+            negSamp0 = vqrdmulhq_s32(negSamp0, negCoef.val[0]);
+            negSamp1 = vqrdmulhq_s32(negSamp1, negCoef.val[1]);
+
+            accum2 = vaddq_s32(accum2, posSamp0);
+            negSamp0 = vaddq_s32(negSamp0, negSamp1);
+            accum2 = vaddq_s32(accum2, posSamp1);
+            accum2 = vaddq_s32(accum2, negSamp0);
+
+            sP -= 16;
+        } break;
+        }
+    } while (count -= 8);
+
+    // multiply by volume and save
+    volumeLR = (const int32_t*)__builtin_assume_aligned(volumeLR, 8);
+    int32x2_t vLR = vld1_s32(volumeLR);
+    int32x2_t outSamp = vld1_s32(out);
+    // combine and funnel down accumulator
+    int32x2_t outAccum = vpadd_s32(vget_low_s32(accum), vget_high_s32(accum));
+    if (CHANNELS == 1) {
+        // duplicate accum to both L and R
+        outAccum = vpadd_s32(outAccum, outAccum);
+    } else if (CHANNELS == 2) {
+        // accum2 contains R, fold in
+        int32x2_t outAccum2 = vpadd_s32(vget_low_s32(accum2), vget_high_s32(accum2));
+        outAccum = vpadd_s32(outAccum, outAccum2);
+    }
+    outAccum = vqrdmulh_s32(outAccum, vLR);
+    outSamp = vqadd_s32(outSamp, outAccum);
+    vst1_s32(out, outSamp);
+}
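
[Editorial sketch, not part of the patch: the non-FIXED variants first interpolate between two adjacent polyphase coefficient sets using the fractional phase lerpP; the negative-side set is interpolated in the opposite direction (from coefsN1 toward coefsN), as the existing assembly also does. Below is a scalar view of the per-lane interpolation, with vqrdmulh modeled by an illustrative helper; saturation and the INT32_MIN edge case are omitted, and the 16-bit coefficient variant works the same way in Q15.]

    #include <stdint.h>

    // Rough model of vqrdmulh on 32-bit lanes: rounding doubling multiply,
    // keeping the high half; approximately (a * b) >> 31 with rounding.
    static int32_t mulhr32(int32_t a, int32_t b)
    {
        int64_t p = (int64_t)a * b;
        return (int32_t)((2 * p + (1LL << 31)) >> 32);
    }

    // Per-lane coefficient interpolation: coef = c0 + frac * (c1 - c0),
    // with frac = lerpP treated as a Q31 fraction.
    static int32_t interpCoef(int32_t c0, int32_t c1, int32_t frac)
    {
        return c0 + mulhr32(c1 - c0, frac);
    }
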
+
+template <int CHANNELS, int STRIDE, bool FIXED>
+static inline void ProcessNeonIntrinsic(float* out,
+        int count,
+        const float* coefsP,
+        const float* coefsN,
+        const float* sP,
+        const float* sN,
+        const float* volumeLR,
+        float lerpP,
+        const float* coefsP1,
+        const float* coefsN1)
+{
+    ALOG_ASSERT(count > 0 && (count & 7) == 0); // multiple of 8
+    COMPILE_TIME_ASSERT_FUNCTION_SCOPE(CHANNELS == 1 || CHANNELS == 2);
+
+    sP -= CHANNELS*((STRIDE>>1)-1);
+    coefsP = (const float*)__builtin_assume_aligned(coefsP, 16);
+    coefsN = (const float*)__builtin_assume_aligned(coefsN, 16);
+
+    float32x2_t interp;
+    if (!FIXED) {
+        interp = vdup_n_f32(lerpP);
+        coefsP1 = (const float*)__builtin_assume_aligned(coefsP1, 16);
+        coefsN1 = (const float*)__builtin_assume_aligned(coefsN1, 16);
+    }
+    float32x4_t accum, accum2;
+    // warning uninitialized if we use veorq_s32
+    // (alternative to below) accum = veorq_s32(accum, accum);
+    accum = vdupq_n_f32(0);
+    if (CHANNELS == 2) {
+        // (alternative to below) accum2 = veorq_s32(accum2, accum2);
+        accum2 = vdupq_n_f32(0);
+    }
+    do {
+#ifdef vld1q_f32_x2
+        float32x4x2_t posCoef = vld1q_f32_x2(coefsP);
+        coefsP += 8;
+        float32x4x2_t negCoef = vld1q_f32_x2(coefsN);
+        coefsN += 8;
+#else
+        float32x4x2_t posCoef;
+        posCoef.val[0] = vld1q_f32(coefsP);
+        coefsP += 4;
+        posCoef.val[1] = vld1q_f32(coefsP);
+        coefsP += 4;
+        float32x4x2_t negCoef;
+        negCoef.val[0] = vld1q_f32(coefsN);
+        coefsN += 4;
+        negCoef.val[1] = vld1q_f32(coefsN);
+        coefsN += 4;
+#endif
+        if (!FIXED) { // interpolate
+#ifdef vld1q_f32_x2
+            float32x4x2_t posCoef1 = vld1q_f32_x2(coefsP1);
+            coefsP1 += 8;
+            float32x4x2_t negCoef1 = vld1q_f32_x2(coefsN1);
+            coefsN1 += 8;
+#else
+            float32x4x2_t posCoef1;
+            posCoef1.val[0] = vld1q_f32(coefsP1);
+            coefsP1 += 4;
+            posCoef1.val[1] = vld1q_f32(coefsP1);
+            coefsP1 += 4;
+            float32x4x2_t negCoef1;
+            negCoef1.val[0] = vld1q_f32(coefsN1);
+            coefsN1 += 4;
+            negCoef1.val[1] = vld1q_f32(coefsN1);
+            coefsN1 += 4;
+#endif
+            posCoef1.val[0] = vsubq_f32(posCoef1.val[0], posCoef.val[0]);
+            posCoef1.val[1] = vsubq_f32(posCoef1.val[1], posCoef.val[1]);
+            negCoef.val[0] = vsubq_f32(negCoef.val[0], negCoef1.val[0]);
+            negCoef.val[1] = vsubq_f32(negCoef.val[1], negCoef1.val[1]);
+
+            posCoef.val[0] = vmlaq_lane_f32(posCoef.val[0], posCoef1.val[0], interp, 0);
+            posCoef.val[1] = vmlaq_lane_f32(posCoef.val[1], posCoef1.val[1], interp, 0);
+            negCoef.val[0] = vmlaq_lane_f32(negCoef1.val[0], negCoef.val[0], interp, 0); // rev
+            negCoef.val[1] = vmlaq_lane_f32(negCoef1.val[1], negCoef.val[1], interp, 0); // rev
+        }
+        switch (CHANNELS) {
+        case 1: {
+#ifdef vld1q_f32_x2
+            float32x4x2_t posSamp = vld1q_f32_x2(sP);
+            float32x4x2_t negSamp = vld1q_f32_x2(sN);
+            sN += 8;
+            sP -= 8;
+#else
+            float32x4x2_t posSamp;
+            posSamp.val[0] = vld1q_f32(sP);
+            sP += 4;
+            posSamp.val[1] = vld1q_f32(sP);
+            sP -= 12;
+            float32x4x2_t negSamp;
+            negSamp.val[0] = vld1q_f32(sN);
+            sN += 4;
+            negSamp.val[1] = vld1q_f32(sN);
+            sN += 4;
+#endif
+            // effectively we want a vrev128q_f32()
+            posSamp.val[0] = vrev64q_f32(posSamp.val[0]);
+            posSamp.val[1] = vrev64q_f32(posSamp.val[1]);
+            posSamp.val[0] = vcombine_f32(
+                    vget_high_f32(posSamp.val[0]), vget_low_f32(posSamp.val[0]));
+            posSamp.val[1] = vcombine_f32(
+                    vget_high_f32(posSamp.val[1]), vget_low_f32(posSamp.val[1]));
+
+            accum = vmlaq_f32(accum, posSamp.val[0], posCoef.val[1]);
+            accum = vmlaq_f32(accum, posSamp.val[1], posCoef.val[0]);
+            accum = vmlaq_f32(accum, negSamp.val[0], negCoef.val[0]);
+            accum = vmlaq_f32(accum, negSamp.val[1], negCoef.val[1]);
+        } break;
+        case 2: {
+            float32x4x2_t posSamp0 = vld2q_f32(sP);
+            sP += 8;
+            float32x4x2_t negSamp0 = vld2q_f32(sN);
+            sN += 8;
+            posSamp0.val[0] = vrev64q_f32(posSamp0.val[0]);
+            posSamp0.val[1] = vrev64q_f32(posSamp0.val[1]);
+            posSamp0.val[0] = vcombine_f32(
+                    vget_high_f32(posSamp0.val[0]), vget_low_f32(posSamp0.val[0]));
+            posSamp0.val[1] = vcombine_f32(
+                    vget_high_f32(posSamp0.val[1]), vget_low_f32(posSamp0.val[1]));
+
+            float32x4x2_t posSamp1 = vld2q_f32(sP);
+            sP -= 24;
+            float32x4x2_t negSamp1 = vld2q_f32(sN);
+            sN += 8;
+            posSamp1.val[0] = vrev64q_f32(posSamp1.val[0]);
+            posSamp1.val[1] = vrev64q_f32(posSamp1.val[1]);
+            posSamp1.val[0] = vcombine_f32(
+                    vget_high_f32(posSamp1.val[0]), vget_low_f32(posSamp1.val[0]));
+            posSamp1.val[1] = vcombine_f32(
+                    vget_high_f32(posSamp1.val[1]), vget_low_f32(posSamp1.val[1]));
+
+            // Note: speed is affected by accumulation order.
+            // Also, speed appears slower using vmul/vadd instead of vmla for
+            // stereo case, comparable for mono.
+
+            accum = vmlaq_f32(accum, negSamp0.val[0], negCoef.val[0]);
+            accum = vmlaq_f32(accum, negSamp1.val[0], negCoef.val[1]);
+            accum2 = vmlaq_f32(accum2, negSamp0.val[1], negCoef.val[0]);
+            accum2 = vmlaq_f32(accum2, negSamp1.val[1], negCoef.val[1]);
+
+            accum = vmlaq_f32(accum, posSamp0.val[0], posCoef.val[1]); // reversed
+            accum = vmlaq_f32(accum, posSamp1.val[0], posCoef.val[0]); // reversed
+            accum2 = vmlaq_f32(accum2, posSamp0.val[1], posCoef.val[1]); // reversed
+            accum2 = vmlaq_f32(accum2, posSamp1.val[1], posCoef.val[0]); // reversed
+        } break;
+        }
+    } while (count -= 8);
+
+    // multiply by volume and save
+    volumeLR = (const float*)__builtin_assume_aligned(volumeLR, 8);
+    float32x2_t vLR = vld1_f32(volumeLR);
+    float32x2_t outSamp = vld1_f32(out);
+    // combine and funnel down accumulator
+    float32x2_t outAccum = vpadd_f32(vget_low_f32(accum), vget_high_f32(accum));
+    if (CHANNELS == 1) {
+        // duplicate accum to both L and R
+        outAccum = vpadd_f32(outAccum, outAccum);
+    } else if (CHANNELS == 2) {
+        // accum2 contains R, fold in
+        float32x2_t outAccum2 = vpadd_f32(vget_low_f32(accum2), vget_high_f32(accum2));
+        outAccum = vpadd_f32(outAccum, outAccum2);
+    }
+    outSamp = vmla_f32(outSamp, outAccum, vLR);
+    vst1_f32(out, outSamp);
+}
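
[Editorial sketch, not part of the patch: the "effectively we want a vrev128q_f32()" comment above refers to reversing all four lanes of a 128-bit vector. NEON only reverses within 64-bit halves, so the code composes vrev64q_f32 with a half swap. The same idiom in isolation, assuming <arm_neon.h> and a NEON-capable target:]

    #include <arm_neon.h>

    // Reverse all four float lanes of a 128-bit vector: [a b c d] -> [d c b a].
    // NEON has no vrev128q_f32, so reverse within each 64-bit half, then swap
    // the halves.
    static inline float32x4_t vrev128q_f32_emulated(float32x4_t v)
    {
        v = vrev64q_f32(v);                                      // [b a d c]
        return vcombine_f32(vget_high_f32(v), vget_low_f32(v));  // [d c b a]
    }
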
+
 template <>
 inline void ProcessL<1, 16>(int32_t* const out,
         int count,
@@ -58,6 +531,10 @@
         const int16_t* sN,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
     const int CHANNELS = 1; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -99,6 +576,7 @@
           "q0", "q1", "q2", "q3",
           "q8", "q10"
     );
+#endif
 }
 
 template <>
@@ -110,6 +588,10 @@
         const int16_t* sN,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
     const int CHANNELS = 2; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -119,13 +601,13 @@
 
         "1:                                      \n"
 
-        "vld2.16        {q2, q3}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo samples
-        "vld2.16        {q5, q6}, [%[sN]]!       \n"// (3) load 8 16-bits stereo samples
+        "vld2.16        {q2, q3}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo frames
+        "vld2.16        {q5, q6}, [%[sN]]!       \n"// (3) load 8 16-bits stereo frames
         "vld1.16        {q8}, [%[coefsP0]:128]!  \n"// (1) load 8 16-bits coefs
         "vld1.16        {q10}, [%[coefsN0]:128]! \n"// (1) load 8 16-bits coefs
 
-        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
-        "vrev64.16      q3, q3                   \n"// (0 combines+) reverse right positive
+        "vrev64.16      q2, q2                   \n"// (1) reverse 8 samples of positive left
+        "vrev64.16      q3, q3                   \n"// (0 combines+) reverse positive right
 
         "vmlal.s16      q0, d4, d17              \n"// (1) multiply (reversed) samples left
         "vmlal.s16      q0, d5, d16              \n"// (1) multiply (reversed) samples left
@@ -157,6 +639,7 @@
           "q4", "q5", "q6",
           "q8", "q10"
      );
+#endif
 }
 
 template <>
@@ -171,6 +654,11 @@
         uint32_t lerpP,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            lerpP, coefsP1, coefsN1);
+#else
+
     const int CHANNELS = 1; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -227,6 +715,7 @@
           "q0", "q1", "q2", "q3",
           "q8", "q9", "q10", "q11"
     );
+#endif
 }
 
 template <>
@@ -241,6 +730,10 @@
         uint32_t lerpP,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            lerpP, coefsP1, coefsN1);
+#else
     const int CHANNELS = 2; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -251,8 +744,8 @@
 
         "1:                                      \n"
 
-        "vld2.16        {q2, q3}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo samples
-        "vld2.16        {q5, q6}, [%[sN]]!       \n"// (3) load 8 16-bits stereo samples
+        "vld2.16        {q2, q3}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo frames
+        "vld2.16        {q5, q6}, [%[sN]]!       \n"// (3) load 8 16-bits stereo frames
         "vld1.16        {q8}, [%[coefsP0]:128]!  \n"// (1) load 8 16-bits coefs
         "vld1.16        {q9}, [%[coefsP1]:128]!  \n"// (1) load 8 16-bits coefs for interpolation
         "vld1.16        {q10}, [%[coefsN1]:128]! \n"// (1) load 8 16-bits coefs
@@ -264,8 +757,8 @@
         "vqrdmulh.s16   q9, q9, d2[0]            \n"// (2) interpolate (step2) 1st set of coefs
         "vqrdmulh.s16   q11, q11, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs
 
-        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
-        "vrev64.16      q3, q3                   \n"// (1) reverse 8 frames of the right positive
+        "vrev64.16      q2, q2                   \n"// (1) reverse 8 samples of positive left
+        "vrev64.16      q3, q3                   \n"// (1) reverse 8 samples of positive right
 
         "vadd.s16       q8, q8, q9               \n"// (1+1d) interpolate (step3) 1st set
         "vadd.s16       q10, q10, q11            \n"// (1+1d) interpolate (step3) 2nd set
@@ -303,6 +796,7 @@
           "q4", "q5", "q6",
           "q8", "q9", "q10", "q11"
     );
+#endif
 }
 
 template <>
@@ -314,6 +808,10 @@
         const int16_t* sN,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
     const int CHANNELS = 1; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -327,7 +825,7 @@
         "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
         "vld1.32        {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
 
-        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
+        "vrev64.16      q2, q2                        \n"// reverse 8 samples of the positive side
 
         "vshll.s16      q12, d4, #15                  \n"// extend samples to 31 bits
         "vshll.s16      q13, d5, #15                  \n"// extend samples to 31 bits
@@ -335,10 +833,10 @@
         "vshll.s16      q14, d6, #15                  \n"// extend samples to 31 bits
         "vshll.s16      q15, d7, #15                  \n"// extend samples to 31 bits
 
-        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples
+        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples
+        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples
+        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples
 
         "vadd.s32       q0, q0, q12                   \n"// accumulate result
         "vadd.s32       q13, q13, q14                 \n"// accumulate result
@@ -364,6 +862,7 @@
           "q8", "q9", "q10", "q11",
           "q12", "q13", "q14", "q15"
     );
+#endif
 }
 
 template <>
@@ -375,6 +874,10 @@
         const int16_t* sN,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
+#else
     const int CHANNELS = 2; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -384,13 +887,13 @@
 
         "1:                                           \n"
 
-        "vld2.16        {q2, q3}, [%[sP]]             \n"// load 4 16-bits stereo samples
-        "vld2.16        {q5, q6}, [%[sN]]!            \n"// load 4 16-bits stereo samples
-        "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 4 32-bits coefs
-        "vld1.32        {q10, q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs
+        "vld2.16        {q2, q3}, [%[sP]]             \n"// load 8 16-bits stereo frames
+        "vld2.16        {q5, q6}, [%[sN]]!            \n"// load 8 16-bits stereo frames
+        "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
+        "vld1.32        {q10, q11}, [%[coefsN0]:128]! \n"// load 8 32-bits coefs
 
-        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
-        "vrev64.16      q3, q3                        \n"// reverse 8 frames of the positive side
+        "vrev64.16      q2, q2                        \n"// reverse 8 samples of positive left
+        "vrev64.16      q3, q3                        \n"// reverse 8 samples of positive right
 
         "vshll.s16      q12,  d4, #15                 \n"// extend samples to 31 bits
         "vshll.s16      q13,  d5, #15                 \n"// extend samples to 31 bits
@@ -398,15 +901,15 @@
         "vshll.s16      q14,  d10, #15                \n"// extend samples to 31 bits
         "vshll.s16      q15,  d11, #15                \n"// extend samples to 31 bits
 
-        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by coef
+        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by coef
+        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by coef
+        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by coef
 
         "vadd.s32       q0, q0, q12                   \n"// accumulate result
         "vadd.s32       q13, q13, q14                 \n"// accumulate result
-        "vadd.s32       q0, q0, q15                   \n"// (+1) accumulate result
-        "vadd.s32       q0, q0, q13                   \n"// (+1) accumulate result
+        "vadd.s32       q0, q0, q15                   \n"// accumulate result
+        "vadd.s32       q0, q0, q13                   \n"// accumulate result
 
         "vshll.s16      q12,  d6, #15                 \n"// extend samples to 31 bits
         "vshll.s16      q13,  d7, #15                 \n"// extend samples to 31 bits
@@ -414,15 +917,15 @@
         "vshll.s16      q14,  d12, #15                \n"// extend samples to 31 bits
         "vshll.s16      q15,  d13, #15                \n"// extend samples to 31 bits
 
-        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by interpolated coef
+        "vqrdmulh.s32   q12, q12, q9                  \n"// multiply samples by coef
+        "vqrdmulh.s32   q13, q13, q8                  \n"// multiply samples by coef
+        "vqrdmulh.s32   q14, q14, q10                 \n"// multiply samples by coef
+        "vqrdmulh.s32   q15, q15, q11                 \n"// multiply samples by coef
 
         "vadd.s32       q4, q4, q12                   \n"// accumulate result
         "vadd.s32       q13, q13, q14                 \n"// accumulate result
-        "vadd.s32       q4, q4, q15                   \n"// (+1) accumulate result
-        "vadd.s32       q4, q4, q13                   \n"// (+1) accumulate result
+        "vadd.s32       q4, q4, q15                   \n"// accumulate result
+        "vadd.s32       q4, q4, q13                   \n"// accumulate result
 
         "subs           %[count], %[count], #8        \n"// update loop counter
         "sub            %[sP], %[sP], #32             \n"// move pointer to next set of samples
@@ -444,6 +947,7 @@
           "q8", "q9", "q10", "q11",
           "q12", "q13", "q14", "q15"
     );
+#endif
 }
 
 template <>
@@ -458,6 +962,10 @@
         uint32_t lerpP,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            lerpP, coefsP1, coefsN1);
+#else
     const int CHANNELS = 1; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -489,7 +997,7 @@
         "vadd.s32       q10, q10, q14                 \n"// interpolate (step3)
         "vadd.s32       q11, q11, q15                 \n"// interpolate (step3)
 
-        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
+        "vrev64.16      q2, q2                        \n"// reverse 8 samples of the positive side
 
         "vshll.s16      q12,  d4, #15                 \n"// extend samples to 31 bits
         "vshll.s16      q13,  d5, #15                 \n"// extend samples to 31 bits
@@ -529,6 +1037,7 @@
           "q8", "q9", "q10", "q11",
           "q12", "q13", "q14", "q15"
     );
+#endif
 }
 
 template <>
@@ -543,6 +1052,10 @@
         uint32_t lerpP,
         const int32_t* const volumeLR)
 {
+#ifdef USE_INTRINSIC
+    ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            lerpP, coefsP1, coefsN1);
+#else
     const int CHANNELS = 2; // template specialization does not preserve params
     const int STRIDE = 16;
     sP -= CHANNELS*((STRIDE>>1)-1);
@@ -553,8 +1066,8 @@
 
         "1:                                           \n"
 
-        "vld2.16        {q2, q3}, [%[sP]]             \n"// load 4 16-bits stereo samples
-        "vld2.16        {q5, q6}, [%[sN]]!            \n"// load 4 16-bits stereo samples
+        "vld2.16        {q2, q3}, [%[sP]]             \n"// load 8 16-bits stereo frames
+        "vld2.16        {q5, q6}, [%[sN]]!            \n"// load 8 16-bits stereo frames
         "vld1.32        {q8, q9}, [%[coefsP0]:128]!   \n"// load 8 32-bits coefs
         "vld1.32        {q12, q13}, [%[coefsP1]:128]! \n"// load 8 32-bits coefs
         "vld1.32        {q10, q11}, [%[coefsN1]:128]! \n"// load 8 32-bits coefs
@@ -575,8 +1088,8 @@
         "vadd.s32       q10, q10, q14                 \n"// interpolate (step3)
         "vadd.s32       q11, q11, q15                 \n"// interpolate (step3)
 
-        "vrev64.16      q2, q2                        \n"// reverse 8 frames of the positive side
-        "vrev64.16      q3, q3                        \n"// reverse 8 frames of the positive side
+        "vrev64.16      q2, q2                        \n"// reverse 8 samples of positive left
+        "vrev64.16      q3, q3                        \n"// reverse 8 samples of positive right
 
         "vshll.s16      q12,  d4, #15                 \n"// extend samples to 31 bits
         "vshll.s16      q13,  d5, #15                 \n"// extend samples to 31 bits
@@ -591,8 +1104,8 @@
 
         "vadd.s32       q0, q0, q12                   \n"// accumulate result
         "vadd.s32       q13, q13, q14                 \n"// accumulate result
-        "vadd.s32       q0, q0, q15                   \n"// (+1) accumulate result
-        "vadd.s32       q0, q0, q13                   \n"// (+1) accumulate result
+        "vadd.s32       q0, q0, q15                   \n"// accumulate result
+        "vadd.s32       q0, q0, q13                   \n"// accumulate result
 
         "vshll.s16      q12,  d6, #15                 \n"// extend samples to 31 bits
         "vshll.s16      q13,  d7, #15                 \n"// extend samples to 31 bits
@@ -607,8 +1120,8 @@
 
         "vadd.s32       q4, q4, q12                   \n"// accumulate result
         "vadd.s32       q13, q13, q14                 \n"// accumulate result
-        "vadd.s32       q4, q4, q15                   \n"// (+1) accumulate result
-        "vadd.s32       q4, q4, q13                   \n"// (+1) accumulate result
+        "vadd.s32       q4, q4, q15                   \n"// accumulate result
+        "vadd.s32       q4, q4, q13                   \n"// accumulate result
 
         "subs           %[count], %[count], #8        \n"// update loop counter
         "sub            %[sP], %[sP], #32             \n"// move pointer to next set of samples
@@ -633,517 +1146,69 @@
           "q8", "q9", "q10", "q11",
           "q12", "q13", "q14", "q15"
     );
+#endif
 }
 
-template <>
-inline void ProcessL<1, 8>(int32_t* const out,
+template<>
+inline void ProcessL<1, 16>(float* const out,
         int count,
-        const int16_t* coefsP,
-        const int16_t* coefsN,
-        const int16_t* sP,
-        const int16_t* sN,
-        const int32_t* const volumeLR)
+        const float* coefsP,
+        const float* coefsN,
+        const float* sP,
+        const float* sN,
+        const float* const volumeLR)
 {
-    const int CHANNELS = 1; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "veor           q0, q0, q0               \n"// (0 - combines+) accumulator = 0
-
-        "1:                                      \n"
-
-        "vld1.16        {d4}, [%[sP]]            \n"// (2+0d) load 4 16-bits mono samples
-        "vld1.16        {d6}, [%[sN]]!           \n"// (2) load 4 16-bits mono samples
-        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 4 16-bits coefs
-        "vld1.16        {d20}, [%[coefsN0]:64]!  \n"// (1) load 4 16-bits coefs
-
-        "vrev64.16      d4, d4                   \n"// (1) reversed s3, s2, s1, s0, s7, s6, s5, s4
-
-        // reordering the vmal to do d6, d7 before d4, d5 is slower(?)
-        "vmlal.s16      q0, d4, d16              \n"// (1) multiply (reversed)samples by coef
-        "vmlal.s16      q0, d6, d20              \n"// (1) multiply neg samples
-
-        // moving these ARM instructions before neon above seems to be slower
-        "subs           %[count], %[count], #4   \n"// (1) update loop counter
-        "sub            %[sP], %[sP], #8         \n"// (0) move pointer to next set of samples
-
-        // sP used after branch (warning)
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_MONO
-
-        : [out]     "=Uv" (out[0]),
-          [count]   "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsN0] "+r" (coefsN),
-          [sP]      "+r" (sP),
-          [sN]      "+r" (sN)
-        : [vLR]     "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3",
-          "q8", "q10"
-    );
+    ProcessNeonIntrinsic<1, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
 }
 
-template <>
-inline void ProcessL<2, 8>(int32_t* const out,
+template<>
+inline void ProcessL<2, 16>(float* const out,
         int count,
-        const int16_t* coefsP,
-        const int16_t* coefsN,
-        const int16_t* sP,
-        const int16_t* sN,
-        const int32_t* const volumeLR)
+        const float* coefsP,
+        const float* coefsN,
+        const float* sP,
+        const float* sN,
+        const float* const volumeLR)
 {
-    const int CHANNELS = 2; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "veor           q0, q0, q0               \n"// (1) acc_L = 0
-        "veor           q4, q4, q4               \n"// (0 combines+) acc_R = 0
-
-        "1:                                      \n"
-
-        "vld2.16        {d4, d5}, [%[sP]]        \n"// (2+0d) load 8 16-bits stereo samples
-        "vld2.16        {d6, d7}, [%[sN]]!       \n"// (2) load 8 16-bits stereo samples
-        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 8 16-bits coefs
-        "vld1.16        {d20}, [%[coefsN0]:64]!  \n"// (1) load 8 16-bits coefs
-
-        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
-
-        "vmlal.s16      q0, d4, d16              \n"// (1) multiply (reversed) samples left
-        "vmlal.s16      q4, d5, d16              \n"// (1) multiply (reversed) samples right
-        "vmlal.s16      q0, d6, d20              \n"// (1) multiply samples left
-        "vmlal.s16      q4, d7, d20              \n"// (1) multiply samples right
-
-        // moving these ARM before neon seems to be slower
-        "subs           %[count], %[count], #4   \n"// (1) update loop counter
-        "sub            %[sP], %[sP], #16        \n"// (0) move pointer to next set of samples
-
-        // sP used after branch (warning)
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_STEREO
-
-        : [out] "=Uv" (out[0]),
-          [count] "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsN0] "+r" (coefsN),
-          [sP] "+r" (sP),
-          [sN] "+r" (sN)
-        : [vLR] "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3",
-          "q4", "q5", "q6",
-          "q8", "q10"
-     );
+    ProcessNeonIntrinsic<2, 16, true>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            0 /*lerpP*/, NULL /*coefsP1*/, NULL /*coefsN1*/);
 }
 
-template <>
-inline void Process<1, 8>(int32_t* const out,
+template<>
+inline void Process<1, 16>(float* const out,
         int count,
-        const int16_t* coefsP,
-        const int16_t* coefsN,
-        const int16_t* coefsP1,
-        const int16_t* coefsN1,
-        const int16_t* sP,
-        const int16_t* sN,
-        uint32_t lerpP,
-        const int32_t* const volumeLR)
+        const float* coefsP,
+        const float* coefsN,
+        const float* coefsP1,
+        const float* coefsN1,
+        const float* sP,
+        const float* sN,
+        float lerpP,
+        const float* const volumeLR)
 {
-    const int CHANNELS = 1; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase S32 Q15
-        "veor           q0, q0, q0               \n"// (0 - combines+) accumulator = 0
-
-        "1:                                      \n"
-
-        "vld1.16        {d4}, [%[sP]]            \n"// (2+0d) load 4 16-bits mono samples
-        "vld1.16        {d6}, [%[sN]]!           \n"// (2) load 4 16-bits mono samples
-        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 4 16-bits coefs
-        "vld1.16        {d17}, [%[coefsP1]:64]!  \n"// (1) load 4 16-bits coefs for interpolation
-        "vld1.16        {d20}, [%[coefsN1]:64]!  \n"// (1) load 4 16-bits coefs
-        "vld1.16        {d21}, [%[coefsN0]:64]!  \n"// (1) load 4 16-bits coefs for interpolation
-
-        "vsub.s16       d17, d17, d16            \n"// (1) interpolate (step1) 1st set of coefs
-        "vsub.s16       d21, d21, d20            \n"// (1) interpolate (step1) 2nd set of coets
-
-        "vqrdmulh.s16   d17, d17, d2[0]          \n"// (2) interpolate (step2) 1st set of coefs
-        "vqrdmulh.s16   d21, d21, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs
-
-        "vrev64.16      d4, d4                   \n"// (1) reverse s3, s2, s1, s0, s7, s6, s5, s4
-
-        "vadd.s16       d16, d16, d17            \n"// (1+2d) interpolate (step3) 1st set
-        "vadd.s16       d20, d20, d21            \n"// (1+1d) interpolate (step3) 2nd set
-
-        // reordering the vmal to do d6, d7 before d4, d5 is slower(?)
-        "vmlal.s16      q0, d4, d16              \n"// (1+0d) multiply (reversed)by coef
-        "vmlal.s16      q0, d6, d20              \n"// (1) multiply neg samples
-
-        // moving these ARM instructions before neon above seems to be slower
-        "subs           %[count], %[count], #4   \n"// (1) update loop counter
-        "sub            %[sP], %[sP], #8        \n"// move pointer to next set of samples
-
-        // sP used after branch (warning)
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_MONO
-
-        : [out]     "=Uv" (out[0]),
-          [count]   "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsN0] "+r" (coefsN),
-          [coefsP1] "+r" (coefsP1),
-          [coefsN1] "+r" (coefsN1),
-          [sP]      "+r" (sP),
-          [sN]      "+r" (sN)
-        : [lerpP]   "r" (lerpP),
-          [vLR]     "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3",
-          "q8", "q9", "q10", "q11"
-    );
+    ProcessNeonIntrinsic<1, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            lerpP, coefsP1, coefsN1);
 }
 
-template <>
-inline void Process<2, 8>(int32_t* const out,
+template<>
+inline void Process<2, 16>(float* const out,
         int count,
-        const int16_t* coefsP,
-        const int16_t* coefsN,
-        const int16_t* coefsP1,
-        const int16_t* coefsN1,
-        const int16_t* sP,
-        const int16_t* sN,
-        uint32_t lerpP,
-        const int32_t* const volumeLR)
+        const float* coefsP,
+        const float* coefsN,
+        const float* coefsP1,
+        const float* coefsN1,
+        const float* sP,
+        const float* sN,
+        float lerpP,
+        const float* const volumeLR)
 {
-    const int CHANNELS = 2; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase
-        "veor           q0, q0, q0               \n"// (1) acc_L = 0
-        "veor           q4, q4, q4               \n"// (0 combines+) acc_R = 0
-
-        "1:                                      \n"
-
-        "vld2.16        {d4, d5}, [%[sP]]        \n"// (3+0d) load 8 16-bits stereo samples
-        "vld2.16        {d6, d7}, [%[sN]]!       \n"// (3) load 8 16-bits stereo samples
-        "vld1.16        {d16}, [%[coefsP0]:64]!  \n"// (1) load 8 16-bits coefs
-        "vld1.16        {d17}, [%[coefsP1]:64]!  \n"// (1) load 8 16-bits coefs for interpolation
-        "vld1.16        {d20}, [%[coefsN1]:64]!  \n"// (1) load 8 16-bits coefs
-        "vld1.16        {d21}, [%[coefsN0]:64]!  \n"// (1) load 8 16-bits coefs for interpolation
-
-        "vsub.s16       d17, d17, d16            \n"// (1) interpolate (step1) 1st set of coefs
-        "vsub.s16       d21, d21, d20            \n"// (1) interpolate (step1) 2nd set of coets
-
-        "vqrdmulh.s16   d17, d17, d2[0]          \n"// (2) interpolate (step2) 1st set of coefs
-        "vqrdmulh.s16   d21, d21, d2[0]          \n"// (2) interpolate (step2) 2nd set of coefs
-
-        "vrev64.16      q2, q2                   \n"// (1) reverse 8 frames of the left positive
-
-        "vadd.s16       d16, d16, d17            \n"// (1+1d) interpolate (step3) 1st set
-        "vadd.s16       d20, d20, d21            \n"// (1+1d) interpolate (step3) 2nd set
-
-        "vmlal.s16      q0, d4, d16              \n"// (1) multiply (reversed) samples left
-        "vmlal.s16      q4, d5, d16              \n"// (1) multiply (reversed) samples right
-        "vmlal.s16      q0, d6, d20              \n"// (1) multiply samples left
-        "vmlal.s16      q4, d7, d20              \n"// (1) multiply samples right
-
-        // moving these ARM before neon seems to be slower
-        "subs           %[count], %[count], #4   \n"// (1) update loop counter
-        "sub            %[sP], %[sP], #16        \n"// move pointer to next set of samples
-
-        // sP used after branch (warning)
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_STEREO
-
-        : [out] "=Uv" (out[0]),
-          [count] "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsN0] "+r" (coefsN),
-          [coefsP1] "+r" (coefsP1),
-          [coefsN1] "+r" (coefsN1),
-          [sP] "+r" (sP),
-          [sN] "+r" (sN)
-        : [lerpP]   "r" (lerpP),
-          [vLR] "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3",
-          "q4", "q5", "q6",
-          "q8", "q9", "q10", "q11"
-    );
-}
-
-template <>
-inline void ProcessL<1, 8>(int32_t* const out,
-        int count,
-        const int32_t* coefsP,
-        const int32_t* coefsN,
-        const int16_t* sP,
-        const int16_t* sN,
-        const int32_t* const volumeLR)
-{
-    const int CHANNELS = 1; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "veor           q0, q0, q0               \n"// result, initialize to 0
-
-        "1:                                      \n"
-
-        "vld1.16        {d4}, [%[sP]]            \n"// load 4 16-bits mono samples
-        "vld1.16        {d6}, [%[sN]]!           \n"// load 4 16-bits mono samples
-        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
-        "vld1.32        {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs
-
-        "vrev64.16      d4, d4                   \n"// reverse 2 frames of the positive side
-
-        "vshll.s16      q12, d4, #15             \n"// (stall) extend samples to 31 bits
-        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
-
-        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by interpolated coef
-
-        "vadd.s32       q0, q0, q12              \n"// accumulate result
-        "vadd.s32       q0, q0, q14              \n"// (stall) accumulate result
-
-        "subs           %[count], %[count], #4   \n"// update loop counter
-        "sub            %[sP], %[sP], #8         \n"// move pointer to next set of samples
-
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_MONO
-
-        : [out] "=Uv" (out[0]),
-          [count] "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsN0] "+r" (coefsN),
-          [sP] "+r" (sP),
-          [sN] "+r" (sN)
-        : [vLR] "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3",
-          "q8", "q9", "q10", "q11",
-          "q12", "q14"
-    );
-}
-
-template <>
-inline void ProcessL<2, 8>(int32_t* const out,
-        int count,
-        const int32_t* coefsP,
-        const int32_t* coefsN,
-        const int16_t* sP,
-        const int16_t* sN,
-        const int32_t* const volumeLR)
-{
-    const int CHANNELS = 2; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "veor           q0, q0, q0               \n"// result, initialize to 0
-        "veor           q4, q4, q4               \n"// result, initialize to 0
-
-        "1:                                      \n"
-
-        "vld2.16        {d4, d5}, [%[sP]]        \n"// load 4 16-bits stereo samples
-        "vld2.16        {d6, d7}, [%[sN]]!       \n"// load 4 16-bits stereo samples
-        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
-        "vld1.32        {q10}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs
-
-        "vrev64.16      q2, q2                   \n"// reverse 2 frames of the positive side
-
-        "vshll.s16      q12, d4, #15             \n"// extend samples to 31 bits
-        "vshll.s16      q13, d5, #15             \n"// extend samples to 31 bits
-
-        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
-        "vshll.s16      q15, d7, #15             \n"// extend samples to 31 bits
-
-        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by coef
-        "vqrdmulh.s32   q13, q13, q8             \n"// multiply samples by coef
-        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by coef
-        "vqrdmulh.s32   q15, q15, q10            \n"// multiply samples by coef
-
-        "vadd.s32       q0, q0, q12              \n"// accumulate result
-        "vadd.s32       q4, q4, q13              \n"// accumulate result
-        "vadd.s32       q0, q0, q14              \n"// accumulate result
-        "vadd.s32       q4, q4, q15              \n"// accumulate result
-
-        "subs           %[count], %[count], #4   \n"// update loop counter
-        "sub            %[sP], %[sP], #16        \n"// move pointer to next set of samples
-
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_STEREO
-
-        : [out]     "=Uv" (out[0]),
-          [count]   "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsN0] "+r" (coefsN),
-          [sP]      "+r" (sP),
-          [sN]      "+r" (sN)
-        : [vLR]     "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3", "q4",
-          "q8", "q9", "q10", "q11",
-          "q12", "q13", "q14", "q15"
-    );
-}
-
-template <>
-inline void Process<1, 8>(int32_t* const out,
-        int count,
-        const int32_t* coefsP,
-        const int32_t* coefsN,
-        const int32_t* coefsP1,
-        const int32_t* coefsN1,
-        const int16_t* sP,
-        const int16_t* sN,
-        uint32_t lerpP,
-        const int32_t* const volumeLR)
-{
-    const int CHANNELS = 1; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase
-        "veor           q0, q0, q0               \n"// result, initialize to 0
-
-        "1:                                      \n"
-
-        "vld1.16        {d4}, [%[sP]]            \n"// load 4 16-bits mono samples
-        "vld1.16        {d6}, [%[sN]]!           \n"// load 4 16-bits mono samples
-        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
-        "vld1.32        {q9}, [%[coefsP1]:128]!  \n"// load 4 32-bits coefs for interpolation
-        "vld1.32        {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs
-        "vld1.32        {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation
-
-        "vrev64.16      d4, d4                   \n"// reverse 2 frames of the positive side
-
-        "vsub.s32       q9, q9, q8               \n"// interpolate (step1) 1st set of coefs
-        "vsub.s32       q11, q11, q10            \n"// interpolate (step1) 2nd set of coets
-        "vshll.s16      q12, d4, #15             \n"// extend samples to 31 bits
-
-        "vqrdmulh.s32   q9, q9, d2[0]            \n"// interpolate (step2) 1st set of coefs
-        "vqrdmulh.s32   q11, q11, d2[0]          \n"// interpolate (step2) 2nd set of coefs
-        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
-
-        "vadd.s32       q8, q8, q9               \n"// interpolate (step3) 1st set
-        "vadd.s32       q10, q10, q11            \n"// interpolate (step4) 2nd set
-
-        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by interpolated coef
-
-        "vadd.s32       q0, q0, q12              \n"// accumulate result
-        "vadd.s32       q0, q0, q14              \n"// accumulate result
-
-        "subs           %[count], %[count], #4   \n"// update loop counter
-        "sub            %[sP], %[sP], #8         \n"// move pointer to next set of samples
-
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_MONO
-
-        : [out]     "=Uv" (out[0]),
-          [count]   "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsP1] "+r" (coefsP1),
-          [coefsN0] "+r" (coefsN),
-          [coefsN1] "+r" (coefsN1),
-          [sP]      "+r" (sP),
-          [sN]      "+r" (sN)
-        : [lerpP]   "r" (lerpP),
-          [vLR]     "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3",
-          "q8", "q9", "q10", "q11",
-          "q12", "q14"
-    );
-}
-
-template <>
-inline
-void Process<2, 8>(int32_t* const out,
-        int count,
-        const int32_t* coefsP,
-        const int32_t* coefsN,
-        const int32_t* coefsP1,
-        const int32_t* coefsN1,
-        const int16_t* sP,
-        const int16_t* sN,
-        uint32_t lerpP,
-        const int32_t* const volumeLR)
-{
-    const int CHANNELS = 2; // template specialization does not preserve params
-    const int STRIDE = 8;
-    sP -= CHANNELS*((STRIDE>>1)-1);
-    asm (
-        "vmov.32        d2[0], %[lerpP]          \n"// load the positive phase
-        "veor           q0, q0, q0               \n"// result, initialize to 0
-        "veor           q4, q4, q4               \n"// result, initialize to 0
-
-        "1:                                      \n"
-        "vld2.16        {d4, d5}, [%[sP]]        \n"// load 4 16-bits stereo samples
-        "vld2.16        {d6, d7}, [%[sN]]!       \n"// load 4 16-bits stereo samples
-        "vld1.32        {q8}, [%[coefsP0]:128]!  \n"// load 4 32-bits coefs
-        "vld1.32        {q9}, [%[coefsP1]:128]!  \n"// load 4 32-bits coefs for interpolation
-        "vld1.32        {q10}, [%[coefsN1]:128]! \n"// load 4 32-bits coefs
-        "vld1.32        {q11}, [%[coefsN0]:128]! \n"// load 4 32-bits coefs for interpolation
-
-        "vrev64.16      q2, q2                   \n"// (reversed) 2 frames of the positive side
-
-        "vsub.s32       q9, q9, q8               \n"// interpolate (step1) 1st set of coefs
-        "vsub.s32       q11, q11, q10            \n"// interpolate (step1) 2nd set of coets
-        "vshll.s16      q12, d4, #15             \n"// extend samples to 31 bits
-        "vshll.s16      q13, d5, #15             \n"// extend samples to 31 bits
-
-        "vqrdmulh.s32   q9, q9, d2[0]            \n"// interpolate (step2) 1st set of coefs
-        "vqrdmulh.s32   q11, q11, d2[1]          \n"// interpolate (step3) 2nd set of coefs
-        "vshll.s16      q14, d6, #15             \n"// extend samples to 31 bits
-        "vshll.s16      q15, d7, #15             \n"// extend samples to 31 bits
-
-        "vadd.s32       q8, q8, q9               \n"// interpolate (step3) 1st set
-        "vadd.s32       q10, q10, q11            \n"// interpolate (step4) 2nd set
-
-        "vqrdmulh.s32   q12, q12, q8             \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q13, q13, q8             \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q14, q14, q10            \n"// multiply samples by interpolated coef
-        "vqrdmulh.s32   q15, q15, q10            \n"// multiply samples by interpolated coef
-
-        "vadd.s32       q0, q0, q12              \n"// accumulate result
-        "vadd.s32       q4, q4, q13              \n"// accumulate result
-        "vadd.s32       q0, q0, q14              \n"// accumulate result
-        "vadd.s32       q4, q4, q15              \n"// accumulate result
-
-        "subs           %[count], %[count], #4   \n"// update loop counter
-        "sub            %[sP], %[sP], #16        \n"// move pointer to next set of samples
-
-        "bne            1b                       \n"// loop
-
-        ASSEMBLY_ACCUMULATE_STEREO
-
-        : [out]     "=Uv" (out[0]),
-          [count]   "+r" (count),
-          [coefsP0] "+r" (coefsP),
-          [coefsP1] "+r" (coefsP1),
-          [coefsN0] "+r" (coefsN),
-          [coefsN1] "+r" (coefsN1),
-          [sP]      "+r" (sP),
-          [sN]      "+r" (sN)
-        : [lerpP]   "r" (lerpP),
-          [vLR]     "r" (volumeLR)
-        : "cc", "memory",
-          "q0", "q1", "q2", "q3", "q4",
-          "q8", "q9", "q10", "q11",
-          "q12", "q13", "q14", "q15"
-    );
+    ProcessNeonIntrinsic<2, 16, false>(out, count, coefsP, coefsN, sP, sN, volumeLR,
+            lerpP, coefsP1, coefsN1);
 }
 
 #endif //USE_NEON
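
Editor's note: the hunk above swaps the hand-written NEON assembly (which interleaves coefficient
interpolation with the multiply-accumulate) for a call to the ProcessNeonIntrinsic template. As a
rough model of what that kernel computes per tap -- a hedged scalar sketch, not the actual AOSP
intrinsic implementation, with illustrative names -- the fixed-point interpolate-then-accumulate
step looks roughly like this:

    #include <cstddef>
    #include <cstdint>

    // Illustrative scalar model of the interpolated-coefficient FIR step the NEON
    // kernel vectorizes. coefs0/coefs1 are two adjacent polyphase coefficient sets;
    // lerpQ31 is the Q31 phase fraction between them.
    static inline int32_t interpolateCoef(int32_t c0, int32_t c1, int32_t lerpQ31) {
        // c0 + (c1 - c0) * lerp, keeping the result in Q31, much like vqrdmulh does.
        return c0 + (int32_t)(((int64_t)(c1 - c0) * lerpQ31) >> 31);
    }

    static void firAccumulate(int32_t* accum, const int16_t* samples,
                              const int32_t* coefs0, const int32_t* coefs1,
                              int32_t lerpQ31, size_t taps) {
        for (size_t i = 0; i < taps; ++i) {
            int32_t coef = interpolateCoef(coefs0[i], coefs1[i], lerpQ31);
            // Extend the 16-bit sample to Q31 and keep the high half of the product,
            // mirroring vshll.s16 #15 followed by vqrdmulh.s32 in the removed assembly.
            int64_t sample = (int64_t)samples[i] << 15;
            *accum += (int32_t)((sample * coef) >> 31);
        }
    }
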
 
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_FIR_PROCESS_NEON_H*/
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index e6fb76c..41730ee 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -61,135 +61,7 @@
  * cmd-line: fir -l 7 -s 48000 -c 20478
  */
 const uint32_t AudioResamplerSinc::mFirCoefsUp[] __attribute__ ((aligned (32))) = {
-        0x6d374bc7, 0x111c6ba0, 0xf3240e61, 0x07d14a38, 0xfc509e64, 0x0139cee9, 0xffc8c866, 0xfffcc300,
-        0x6d35278a, 0x103e8192, 0xf36b9dfd, 0x07bdfaa5, 0xfc5102d0, 0x013d618d, 0xffc663b9, 0xfffd9592,
-        0x6d2ebafe, 0x0f62811a, 0xf3b3d8ac, 0x07a9f399, 0xfc51d9a6, 0x0140bea5, 0xffc41212, 0xfffe631e,
-        0x6d24069d, 0x0e8875ad, 0xf3fcb43e, 0x07953976, 0xfc53216f, 0x0143e67c, 0xffc1d373, 0xffff2b9f,
-        0x6d150b35, 0x0db06a89, 0xf4462690, 0x077fd0ac, 0xfc54d8ae, 0x0146d965, 0xffbfa7d9, 0xffffef10,
-        0x6d01c9e3, 0x0cda6ab5, 0xf4902587, 0x0769bdaf, 0xfc56fdda, 0x014997bb, 0xffbd8f40, 0x0000ad6e,
-        0x6cea4418, 0x0c0680fe, 0xf4daa718, 0x07530501, 0xfc598f60, 0x014c21db, 0xffbb89a1, 0x000166b6,
-        0x6cce7b97, 0x0b34b7f5, 0xf525a143, 0x073bab28, 0xfc5c8ba5, 0x014e782a, 0xffb996f3, 0x00021ae5,
-        0x6cae7272, 0x0a6519f4, 0xf5710a17, 0x0723b4b4, 0xfc5ff105, 0x01509b14, 0xffb7b728, 0x0002c9fd,
-        0x6c8a2b0f, 0x0997b116, 0xf5bcd7b1, 0x070b2639, 0xfc63bdd3, 0x01528b08, 0xffb5ea31, 0x000373fb,
-        0x6c61a823, 0x08cc873c, 0xf609003f, 0x06f20453, 0xfc67f05a, 0x0154487b, 0xffb42ffc, 0x000418e2,
-        0x6c34ecb5, 0x0803a60a, 0xf6557a00, 0x06d853a2, 0xfc6c86dd, 0x0155d3e8, 0xffb28876, 0x0004b8b3,
-        0x6c03fc1c, 0x073d16e7, 0xf6a23b44, 0x06be18cd, 0xfc717f97, 0x01572dcf, 0xffb0f388, 0x00055371,
-        0x6bced9ff, 0x0678e2fc, 0xf6ef3a6e, 0x06a3587e, 0xfc76d8bc, 0x015856b6, 0xffaf7118, 0x0005e921,
-        0x6b958a54, 0x05b71332, 0xf73c6df4, 0x06881761, 0xfc7c9079, 0x01594f25, 0xffae010b, 0x000679c5,
-        0x6b581163, 0x04f7b037, 0xf789cc61, 0x066c5a27, 0xfc82a4f4, 0x015a17ab, 0xffaca344, 0x00070564,
-        0x6b1673c1, 0x043ac276, 0xf7d74c53, 0x06502583, 0xfc89144d, 0x015ab0db, 0xffab57a1, 0x00078c04,
-        0x6ad0b652, 0x0380521c, 0xf824e480, 0x06337e2a, 0xfc8fdc9f, 0x015b1b4e, 0xffaa1e02, 0x00080dab,
-        0x6a86de48, 0x02c86715, 0xf8728bb3, 0x061668d2, 0xfc96fbfc, 0x015b579e, 0xffa8f641, 0x00088a62,
-        0x6a38f123, 0x0213090c, 0xf8c038d0, 0x05f8ea30, 0xfc9e7074, 0x015b666c, 0xffa7e039, 0x00090230,
-        0x69e6f4b1, 0x01603f6e, 0xf90de2d1, 0x05db06fc, 0xfca63810, 0x015b485b, 0xffa6dbc0, 0x0009751e,
-        0x6990ef0b, 0x00b01162, 0xf95b80cb, 0x05bcc3ed, 0xfcae50d6, 0x015afe14, 0xffa5e8ad, 0x0009e337,
-        0x6936e697, 0x000285d0, 0xf9a909ea, 0x059e25b5, 0xfcb6b8c4, 0x015a8843, 0xffa506d2, 0x000a4c85,
-        0x68d8e206, 0xff57a35e, 0xf9f67577, 0x057f310a, 0xfcbf6dd8, 0x0159e796, 0xffa43603, 0x000ab112,
-        0x6876e855, 0xfeaf706f, 0xfa43bad2, 0x055fea9d, 0xfcc86e09, 0x01591cc0, 0xffa3760e, 0x000b10ec,
-        0x681100c9, 0xfe09f323, 0xfa90d17b, 0x0540571a, 0xfcd1b74c, 0x01582878, 0xffa2c6c2, 0x000b6c1d,
-        0x67a732f4, 0xfd673159, 0xfaddb10c, 0x05207b2f, 0xfcdb4793, 0x01570b77, 0xffa227ec, 0x000bc2b3,
-        0x673986ac, 0xfcc730aa, 0xfb2a513b, 0x05005b82, 0xfce51ccb, 0x0155c678, 0xffa19957, 0x000c14bb,
-        0x66c80413, 0xfc29f670, 0xfb76a9dd, 0x04dffcb6, 0xfcef34e1, 0x01545a3c, 0xffa11acb, 0x000c6244,
-        0x6652b392, 0xfb8f87bd, 0xfbc2b2e4, 0x04bf6369, 0xfcf98dbe, 0x0152c783, 0xffa0ac11, 0x000cab5c,
-        0x65d99dd5, 0xfaf7e963, 0xfc0e6461, 0x049e9433, 0xfd04254a, 0x01510f13, 0xffa04cf0, 0x000cf012,
-        0x655ccbd3, 0xfa631fef, 0xfc59b685, 0x047d93a8, 0xfd0ef969, 0x014f31b2, 0xff9ffd2c, 0x000d3075,
-        0x64dc46c3, 0xf9d12fab, 0xfca4a19f, 0x045c6654, 0xfd1a0801, 0x014d3029, 0xff9fbc89, 0x000d6c97,
-        0x64581823, 0xf9421c9d, 0xfcef1e20, 0x043b10bd, 0xfd254ef4, 0x014b0b45, 0xff9f8ac9, 0x000da486,
-        0x63d049b4, 0xf8b5ea87, 0xfd392498, 0x04199760, 0xfd30cc24, 0x0148c3d2, 0xff9f67ae, 0x000dd854,
-        0x6344e578, 0xf82c9ce7, 0xfd82adba, 0x03f7feb4, 0xfd3c7d73, 0x01465a9f, 0xff9f52f7, 0x000e0812,
-        0x62b5f5b2, 0xf7a636fa, 0xfdcbb25a, 0x03d64b27, 0xfd4860c2, 0x0143d07f, 0xff9f4c65, 0x000e33d3,
-        0x622384e8, 0xf722bbb5, 0xfe142b6e, 0x03b4811d, 0xfd5473f3, 0x01412643, 0xff9f53b4, 0x000e5ba7,
-        0x618d9ddc, 0xf6a22dcf, 0xfe5c120f, 0x0392a4f4, 0xfd60b4e7, 0x013e5cc0, 0xff9f68a1, 0x000e7fa1,
-        0x60f44b91, 0xf6248fb6, 0xfea35f79, 0x0370bafc, 0xfd6d2180, 0x013b74ca, 0xff9f8ae9, 0x000e9fd5,
-        0x60579947, 0xf5a9e398, 0xfeea0d0c, 0x034ec77f, 0xfd79b7a1, 0x01386f3a, 0xff9fba47, 0x000ebc54,
-        0x5fb79278, 0xf5322b61, 0xff30144a, 0x032ccebb, 0xfd86752e, 0x01354ce7, 0xff9ff674, 0x000ed533,
-        0x5f1442dc, 0xf4bd68b6, 0xff756edc, 0x030ad4e1, 0xfd93580d, 0x01320ea9, 0xffa03f2b, 0x000eea84,
-        0x5e6db665, 0xf44b9cfe, 0xffba168d, 0x02e8de19, 0xfda05e23, 0x012eb55a, 0xffa09425, 0x000efc5c,
-        0x5dc3f93c, 0xf3dcc959, 0xfffe054e, 0x02c6ee7f, 0xfdad855b, 0x012b41d3, 0xffa0f519, 0x000f0ace,
-        0x5d1717c4, 0xf370eea9, 0x00413536, 0x02a50a22, 0xfdbacb9e, 0x0127b4f1, 0xffa161bf, 0x000f15ef,
-        0x5c671e96, 0xf3080d8c, 0x0083a081, 0x02833506, 0xfdc82edb, 0x01240f8e, 0xffa1d9cf, 0x000f1dd2,
-        0x5bb41a80, 0xf2a2265e, 0x00c54190, 0x02617321, 0xfdd5ad01, 0x01205285, 0xffa25cfe, 0x000f228d,
-        0x5afe1886, 0xf23f393b, 0x010612eb, 0x023fc85c, 0xfde34403, 0x011c7eb2, 0xffa2eb04, 0x000f2434,
-        0x5a4525df, 0xf1df45fd, 0x01460f41, 0x021e3891, 0xfdf0f1d6, 0x011894f0, 0xffa38395, 0x000f22dc,
-        0x59894ff3, 0xf1824c3e, 0x01853165, 0x01fcc78f, 0xfdfeb475, 0x0114961b, 0xffa42668, 0x000f1e99,
-        0x58caa45b, 0xf1284b58, 0x01c37452, 0x01db7914, 0xfe0c89db, 0x0110830f, 0xffa4d332, 0x000f1781,
-        0x580930e1, 0xf0d14267, 0x0200d32c, 0x01ba50d2, 0xfe1a7009, 0x010c5ca6, 0xffa589a6, 0x000f0da8,
-        0x5745037c, 0xf07d3043, 0x023d493c, 0x0199526b, 0xfe286505, 0x010823ba, 0xffa6497c, 0x000f0125,
-        0x567e2a51, 0xf02c138a, 0x0278d1f2, 0x01788170, 0xfe3666d5, 0x0103d927, 0xffa71266, 0x000ef20b,
-        0x55b4b3af, 0xefddea9a, 0x02b368e6, 0x0157e166, 0xfe447389, 0x00ff7dc4, 0xffa7e41a, 0x000ee070,
-        0x54e8ae13, 0xef92b393, 0x02ed09d7, 0x013775bf, 0xfe528931, 0x00fb126b, 0xffa8be4c, 0x000ecc69,
-        0x541a281e, 0xef4a6c58, 0x0325b0ad, 0x011741df, 0xfe60a5e5, 0x00f697f3, 0xffa9a0b1, 0x000eb60b,
-        0x5349309e, 0xef051290, 0x035d5977, 0x00f7491a, 0xfe6ec7c0, 0x00f20f32, 0xffaa8afe, 0x000e9d6b,
-        0x5275d684, 0xeec2a3a3, 0x0394006a, 0x00d78eb3, 0xfe7cece2, 0x00ed78ff, 0xffab7ce7, 0x000e829e,
-        0x51a028e8, 0xee831cc3, 0x03c9a1e5, 0x00b815da, 0xfe8b1373, 0x00e8d62d, 0xffac7621, 0x000e65ba,
-        0x50c83704, 0xee467ae1, 0x03fe3a6f, 0x0098e1b3, 0xfe99399f, 0x00e4278f, 0xffad7662, 0x000e46d3,
-        0x4fee1037, 0xee0cbab9, 0x0431c6b5, 0x0079f54c, 0xfea75d97, 0x00df6df7, 0xffae7d5f, 0x000e25fd,
-        0x4f11c3fe, 0xedd5d8ca, 0x0464438c, 0x005b53a4, 0xfeb57d92, 0x00daaa34, 0xffaf8acd, 0x000e034f,
-        0x4e3361f7, 0xeda1d15c, 0x0495adf2, 0x003cffa9, 0xfec397cf, 0x00d5dd16, 0xffb09e63, 0x000ddedb,
-        0x4d52f9df, 0xed70a07d, 0x04c6030d, 0x001efc35, 0xfed1aa92, 0x00d10769, 0xffb1b7d8, 0x000db8b7,
-        0x4c709b8e, 0xed424205, 0x04f54029, 0x00014c12, 0xfedfb425, 0x00cc29f7, 0xffb2d6e1, 0x000d90f6,
-        0x4b8c56f8, 0xed16b196, 0x052362ba, 0xffe3f1f7, 0xfeedb2da, 0x00c7458a, 0xffb3fb37, 0x000d67ae,
-        0x4aa63c2c, 0xecedea99, 0x0550685d, 0xffc6f08a, 0xfefba508, 0x00c25ae8, 0xffb52490, 0x000d3cf1,
-        0x49be5b50, 0xecc7e845, 0x057c4ed4, 0xffaa4a5d, 0xff09890f, 0x00bd6ad7, 0xffb652a7, 0x000d10d5,
-        0x48d4c4a2, 0xeca4a59b, 0x05a7140b, 0xff8e01f1, 0xff175d53, 0x00b87619, 0xffb78533, 0x000ce36b,
-        0x47e98874, 0xec841d68, 0x05d0b612, 0xff7219b3, 0xff252042, 0x00b37d70, 0xffb8bbed, 0x000cb4c8,
-        0x46fcb72d, 0xec664a48, 0x05f93324, 0xff5693fe, 0xff32d04f, 0x00ae8198, 0xffb9f691, 0x000c84ff,
-        0x460e6148, 0xec4b26a2, 0x0620899e, 0xff3b731b, 0xff406bf8, 0x00a9834e, 0xffbb34d8, 0x000c5422,
-        0x451e9750, 0xec32acb0, 0x0646b808, 0xff20b93e, 0xff4df1be, 0x00a4834c, 0xffbc767f, 0x000c2245,
-        0x442d69de, 0xec1cd677, 0x066bbd0d, 0xff066889, 0xff5b602c, 0x009f8249, 0xffbdbb42, 0x000bef79,
-        0x433ae99c, 0xec099dcf, 0x068f9781, 0xfeec830d, 0xff68b5d5, 0x009a80f8, 0xffbf02dd, 0x000bbbd2,
-        0x4247273f, 0xebf8fc64, 0x06b2465b, 0xfed30ac5, 0xff75f153, 0x0095800c, 0xffc04d0f, 0x000b8760,
-        0x41523389, 0xebeaebaf, 0x06d3c8bb, 0xfeba0199, 0xff831148, 0x00908034, 0xffc19996, 0x000b5235,
-        0x405c1f43, 0xebdf6500, 0x06f41de3, 0xfea16960, 0xff90145e, 0x008b821b, 0xffc2e832, 0x000b1c64,
-        0x3f64fb40, 0xebd6617b, 0x0713453d, 0xfe8943dc, 0xff9cf947, 0x0086866b, 0xffc438a3, 0x000ae5fc,
-        0x3e6cd85b, 0xebcfda19, 0x07313e56, 0xfe7192bd, 0xffa9bebe, 0x00818dcb, 0xffc58aaa, 0x000aaf0f,
-        0x3d73c772, 0xebcbc7a7, 0x074e08e0, 0xfe5a579d, 0xffb66386, 0x007c98de, 0xffc6de09, 0x000a77ac,
-        0x3c79d968, 0xebca22cc, 0x0769a4b2, 0xfe439407, 0xffc2e669, 0x0077a845, 0xffc83285, 0x000a3fe5,
-        0x3b7f1f23, 0xebcae405, 0x078411c7, 0xfe2d496f, 0xffcf463a, 0x0072bc9d, 0xffc987e0, 0x000a07c9,
-        0x3a83a989, 0xebce03aa, 0x079d503b, 0xfe177937, 0xffdb81d6, 0x006dd680, 0xffcadde1, 0x0009cf67,
-        0x3987897f, 0xebd379eb, 0x07b56051, 0xfe0224b0, 0xffe79820, 0x0068f687, 0xffcc344c, 0x000996ce,
-        0x388acfe9, 0xebdb3ed5, 0x07cc426c, 0xfded4d13, 0xfff38806, 0x00641d44, 0xffcd8aeb, 0x00095e0e,
-        0x378d8da8, 0xebe54a4f, 0x07e1f712, 0xfdd8f38b, 0xffff507b, 0x005f4b4a, 0xffcee183, 0x00092535,
-        0x368fd397, 0xebf1941f, 0x07f67eec, 0xfdc5192d, 0x000af07f, 0x005a8125, 0xffd037e0, 0x0008ec50,
-        0x3591b28b, 0xec0013e8, 0x0809dac3, 0xfdb1befc, 0x00166718, 0x0055bf60, 0xffd18dcc, 0x0008b36e,
-        0x34933b50, 0xec10c12c, 0x081c0b84, 0xfd9ee5e7, 0x0021b355, 0x00510682, 0xffd2e311, 0x00087a9c,
-        0x33947eab, 0xec23934f, 0x082d1239, 0xfd8c8ecc, 0x002cd44d, 0x004c570f, 0xffd4377d, 0x000841e8,
-        0x32958d55, 0xec388194, 0x083cf010, 0xfd7aba74, 0x0037c922, 0x0047b186, 0xffd58ade, 0x0008095d,
-        0x319677fa, 0xec4f8322, 0x084ba654, 0xfd696998, 0x004290fc, 0x00431666, 0xffd6dd02, 0x0007d108,
-        0x30974f3b, 0xec688f02, 0x08593671, 0xfd589cdc, 0x004d2b0e, 0x003e8628, 0xffd82dba, 0x000798f5,
-        0x2f9823a8, 0xec839c22, 0x0865a1f1, 0xfd4854d3, 0x00579691, 0x003a0141, 0xffd97cd6, 0x00076130,
-        0x2e9905c1, 0xeca0a156, 0x0870ea7e, 0xfd3891fd, 0x0061d2ca, 0x00358824, 0xffdaca2a, 0x000729c4,
-        0x2d9a05f4, 0xecbf9558, 0x087b11de, 0xfd2954c8, 0x006bdf05, 0x00311b41, 0xffdc1588, 0x0006f2bb,
-        0x2c9b349e, 0xece06ecb, 0x088419f6, 0xfd1a9d91, 0x0075ba95, 0x002cbb03, 0xffdd5ec6, 0x0006bc21,
-        0x2b9ca203, 0xed032439, 0x088c04c8, 0xfd0c6ca2, 0x007f64da, 0x002867d2, 0xffdea5bb, 0x000685ff,
-        0x2a9e5e57, 0xed27ac16, 0x0892d470, 0xfcfec233, 0x0088dd38, 0x00242213, 0xffdfea3c, 0x0006505f,
-        0x29a079b2, 0xed4dfcc2, 0x08988b2a, 0xfcf19e6b, 0x0092231e, 0x001fea27, 0xffe12c22, 0x00061b4b,
-        0x28a30416, 0xed760c88, 0x089d2b4a, 0xfce50161, 0x009b3605, 0x001bc06b, 0xffe26b48, 0x0005e6cb,
-        0x27a60d6a, 0xed9fd1a2, 0x08a0b740, 0xfcd8eb17, 0x00a4156b, 0x0017a53b, 0xffe3a788, 0x0005b2e8,
-        0x26a9a57b, 0xedcb4237, 0x08a33196, 0xfccd5b82, 0x00acc0da, 0x001398ec, 0xffe4e0bf, 0x00057faa,
-        0x25addbf9, 0xedf8545b, 0x08a49cf0, 0xfcc25285, 0x00b537e1, 0x000f9bd2, 0xffe616c8, 0x00054d1a,
-        0x24b2c075, 0xee26fe17, 0x08a4fc0d, 0xfcb7cff0, 0x00bd7a1c, 0x000bae3c, 0xffe74984, 0x00051b3e,
-        0x23b86263, 0xee573562, 0x08a451c0, 0xfcadd386, 0x00c5872a, 0x0007d075, 0xffe878d3, 0x0004ea1d,
-        0x22bed116, 0xee88f026, 0x08a2a0f8, 0xfca45cf7, 0x00cd5eb7, 0x000402c8, 0xffe9a494, 0x0004b9c0,
-        0x21c61bc0, 0xeebc2444, 0x089fecbb, 0xfc9b6be5, 0x00d50075, 0x00004579, 0xffeaccaa, 0x00048a2b,
-        0x20ce516f, 0xeef0c78d, 0x089c3824, 0xfc92ffe1, 0x00dc6c1e, 0xfffc98c9, 0xffebf0fa, 0x00045b65,
-        0x1fd7810f, 0xef26cfca, 0x08978666, 0xfc8b186d, 0x00e3a175, 0xfff8fcf7, 0xffed1166, 0x00042d74,
-        0x1ee1b965, 0xef5e32bd, 0x0891dac8, 0xfc83b4fc, 0x00eaa045, 0xfff5723d, 0xffee2dd7, 0x0004005e,
-        0x1ded0911, 0xef96e61c, 0x088b38a9, 0xfc7cd4f0, 0x00f16861, 0xfff1f8d2, 0xffef4632, 0x0003d426,
-        0x1cf97e8b, 0xefd0df9a, 0x0883a378, 0xfc76779e, 0x00f7f9a3, 0xffee90eb, 0xfff05a60, 0x0003a8d2,
-        0x1c072823, 0xf00c14e1, 0x087b1ebc, 0xfc709c4d, 0x00fe53ef, 0xffeb3ab8, 0xfff16a4a, 0x00037e65,
-        0x1b1613ff, 0xf0487b98, 0x0871ae0d, 0xfc6b4233, 0x0104772e, 0xffe7f666, 0xfff275db, 0x000354e5,
-        0x1a26501b, 0xf0860962, 0x08675516, 0xfc66687a, 0x010a6353, 0xffe4c41e, 0xfff37d00, 0x00032c54,
-        0x1937ea47, 0xf0c4b3e0, 0x085c1794, 0xfc620e3d, 0x01101858, 0xffe1a408, 0xfff47fa5, 0x000304b7,
-        0x184af025, 0xf10470b0, 0x084ff957, 0xfc5e328c, 0x0115963d, 0xffde9646, 0xfff57db8, 0x0002de0e,
-        0x175f6f2b, 0xf1453571, 0x0842fe3d, 0xfc5ad465, 0x011add0b, 0xffdb9af8, 0xfff67729, 0x0002b85f,
-        0x1675749e, 0xf186f7c0, 0x08352a35, 0xfc57f2be, 0x011fecd3, 0xffd8b23b, 0xfff76be9, 0x000293aa,
-        0x158d0d95, 0xf1c9ad40, 0x0826813e, 0xfc558c7c, 0x0124c5ab, 0xffd5dc28, 0xfff85be8, 0x00026ff2,
-        0x14a646f6, 0xf20d4b92, 0x08170767, 0xfc53a07b, 0x012967b1, 0xffd318d6, 0xfff9471b, 0x00024d39,
-        0x13c12d73, 0xf251c85d, 0x0806c0cb, 0xfc522d88, 0x012dd30a, 0xffd06858, 0xfffa2d74, 0x00022b7f,
-        0x12ddcd8f, 0xf297194d, 0x07f5b193, 0xfc513266, 0x013207e4, 0xffcdcabe, 0xfffb0ee9, 0x00020ac7,
-        0x11fc3395, 0xf2dd3411, 0x07e3ddf7, 0xfc50adcc, 0x01360670, 0xffcb4014, 0xfffbeb70, 0x0001eb10,
-        0x111c6ba0, 0xf3240e61, 0x07d14a38, 0xfc509e64, 0x0139cee9, 0xffc8c866, 0xfffcc300, 0x0001cc5c,
+#include "AudioResamplerSincUp.h"
 };
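
Editor's note: the coefficient literals are not deleted here; they move into
AudioResamplerSincUp.h / AudioResamplerSincDown.h, which are textually included inside the
brace initializer. A minimal standalone sketch of that pattern (the file name below is
hypothetical) keeps generated tables out of the .cpp while leaving the array definition in place:

    // coefs_table.h (hypothetical) contains nothing but comma-separated initializers:
    //     0x6d374bc7, 0x111c6ba0, /* ... */ 0x0001cc5c,

    #include <cstdint>

    const uint32_t kFirCoefs[] __attribute__ ((aligned (32))) = {
    #include "coefs_table.h"
    };
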
 
 /*
@@ -197,135 +69,7 @@
  * cmd-line: fir -l 7 -s 48000 -c 17189
  */
 const uint32_t AudioResamplerSinc::mFirCoefsDown[] __attribute__ ((aligned (32))) = {
-        0x5bacb6f4, 0x1ded1a1d, 0xf0398d56, 0x0394f674, 0x0193a5f9, 0xfe66dbeb, 0x00791043, 0xfffe6631,
-        0x5bab6c81, 0x1d3ddccd, 0xf0421d2c, 0x03af9995, 0x01818dc9, 0xfe6bb63e, 0x0079812a, 0xfffdc37d,
-        0x5ba78d37, 0x1c8f2cf9, 0xf04beb1d, 0x03c9a04a, 0x016f8aca, 0xfe70a511, 0x0079e34d, 0xfffd2545,
-        0x5ba1194f, 0x1be11231, 0xf056f2c7, 0x03e309fe, 0x015d9e64, 0xfe75a79f, 0x007a36e2, 0xfffc8b86,
-        0x5b981122, 0x1b3393f8, 0xf0632fb7, 0x03fbd625, 0x014bc9fa, 0xfe7abd23, 0x007a7c20, 0xfffbf639,
-        0x5b8c7530, 0x1a86b9bf, 0xf0709d74, 0x04140449, 0x013a0ee9, 0xfe7fe4db, 0x007ab33d, 0xfffb655b,
-        0x5b7e461a, 0x19da8ae5, 0xf07f3776, 0x042b93fd, 0x01286e86, 0xfe851e05, 0x007adc72, 0xfffad8e4,
-        0x5b6d84a8, 0x192f0eb7, 0xf08ef92d, 0x044284e6, 0x0116ea22, 0xfe8a67dd, 0x007af7f6, 0xfffa50ce,
-        0x5b5a31c6, 0x18844c70, 0xf09fddfe, 0x0458d6b7, 0x01058306, 0xfe8fc1a5, 0x007b0603, 0xfff9cd12,
-        0x5b444e81, 0x17da4b37, 0xf0b1e143, 0x046e8933, 0x00f43a74, 0xfe952a9b, 0x007b06d4, 0xfff94da9,
-        0x5b2bdc0e, 0x17311222, 0xf0c4fe50, 0x04839c29, 0x00e311a9, 0xfe9aa201, 0x007afaa1, 0xfff8d28c,
-        0x5b10dbc2, 0x1688a832, 0xf0d9306d, 0x04980f79, 0x00d209db, 0xfea02719, 0x007ae1a7, 0xfff85bb1,
-        0x5af34f18, 0x15e11453, 0xf0ee72db, 0x04abe310, 0x00c12439, 0xfea5b926, 0x007abc20, 0xfff7e910,
-        0x5ad337af, 0x153a5d5e, 0xf104c0d2, 0x04bf16e9, 0x00b061eb, 0xfeab576d, 0x007a8a49, 0xfff77a9f,
-        0x5ab09748, 0x14948a16, 0xf11c1583, 0x04d1ab0d, 0x009fc413, 0xfeb10134, 0x007a4c5d, 0xfff71057,
-        0x5a8b6fc7, 0x13efa12c, 0xf1346c17, 0x04e39f93, 0x008f4bcb, 0xfeb6b5c0, 0x007a029a, 0xfff6aa2b,
-        0x5a63c336, 0x134ba937, 0xf14dbfb1, 0x04f4f4a2, 0x007efa29, 0xfebc745c, 0x0079ad3d, 0xfff64812,
-        0x5a3993c0, 0x12a8a8bb, 0xf1680b6e, 0x0505aa6a, 0x006ed038, 0xfec23c50, 0x00794c82, 0xfff5ea02,
-        0x5a0ce3b2, 0x1206a625, 0xf1834a63, 0x0515c12d, 0x005ecf01, 0xfec80ce8, 0x0078e0a9, 0xfff58ff0,
-        0x59ddb57f, 0x1165a7cc, 0xf19f77a0, 0x05253938, 0x004ef782, 0xfecde571, 0x007869ee, 0xfff539cf,
-        0x59ac0bba, 0x10c5b3ef, 0xf1bc8e31, 0x053412e4, 0x003f4ab4, 0xfed3c538, 0x0077e891, 0xfff4e794,
-        0x5977e919, 0x1026d0b8, 0xf1da891b, 0x05424e9b, 0x002fc98a, 0xfed9ab8f, 0x00775ccf, 0xfff49934,
-        0x59415075, 0x0f890437, 0xf1f96360, 0x054feccf, 0x002074ed, 0xfedf97c6, 0x0076c6e8, 0xfff44ea3,
-        0x590844c9, 0x0eec5465, 0xf21917ff, 0x055cee03, 0x00114dc3, 0xfee58932, 0x00762719, 0xfff407d2,
-        0x58ccc930, 0x0e50c723, 0xf239a1ef, 0x056952c3, 0x000254e8, 0xfeeb7f27, 0x00757da3, 0xfff3c4b7,
-        0x588ee0ea, 0x0db6623b, 0xf25afc29, 0x05751baa, 0xfff38b32, 0xfef178fc, 0x0074cac4, 0xfff38542,
-        0x584e8f56, 0x0d1d2b5d, 0xf27d219f, 0x0580495c, 0xffe4f171, 0xfef7760c, 0x00740ebb, 0xfff34968,
-        0x580bd7f4, 0x0c85281f, 0xf2a00d43, 0x058adc8d, 0xffd6886d, 0xfefd75af, 0x007349c7, 0xfff3111b,
-        0x57c6be67, 0x0bee5dff, 0xf2c3ba04, 0x0594d5fa, 0xffc850e6, 0xff037744, 0x00727c27, 0xfff2dc4c,
-        0x577f4670, 0x0b58d262, 0xf2e822ce, 0x059e366c, 0xffba4b98, 0xff097a29, 0x0071a61b, 0xfff2aaef,
-        0x573573f2, 0x0ac48a92, 0xf30d428e, 0x05a6feb9, 0xffac7936, 0xff0f7dbf, 0x0070c7e1, 0xfff27cf3,
-        0x56e94af1, 0x0a318bc1, 0xf333142f, 0x05af2fbf, 0xff9eda6d, 0xff15816a, 0x006fe1b8, 0xfff2524c,
-        0x569acf90, 0x099fdb04, 0xf359929a, 0x05b6ca6b, 0xff916fe1, 0xff1b848e, 0x006ef3df, 0xfff22aea,
-        0x564a0610, 0x090f7d57, 0xf380b8ba, 0x05bdcfb2, 0xff843a32, 0xff218692, 0x006dfe94, 0xfff206bf,
-        0x55f6f2d3, 0x0880779d, 0xf3a88179, 0x05c44095, 0xff7739f7, 0xff2786e1, 0x006d0217, 0xfff1e5bb,
-        0x55a19a5c, 0x07f2ce9b, 0xf3d0e7c2, 0x05ca1e1f, 0xff6a6fc1, 0xff2d84e5, 0x006bfea4, 0xfff1c7d0,
-        0x554a0148, 0x076686fc, 0xf3f9e680, 0x05cf6965, 0xff5ddc1a, 0xff33800e, 0x006af47b, 0xfff1acef,
-        0x54f02c56, 0x06dba551, 0xf42378a0, 0x05d42387, 0xff517f86, 0xff3977cb, 0x0069e3d9, 0xfff19508,
-        0x54942061, 0x06522e0f, 0xf44d9912, 0x05d84daf, 0xff455a80, 0xff3f6b8f, 0x0068ccfa, 0xfff1800b,
-        0x5435e263, 0x05ca258f, 0xf47842c5, 0x05dbe90f, 0xff396d7f, 0xff455acf, 0x0067b01e, 0xfff16de9,
-        0x53d57774, 0x0543900d, 0xf4a370ad, 0x05def6e4, 0xff2db8f2, 0xff4b4503, 0x00668d80, 0xfff15e93,
-        0x5372e4c6, 0x04be71ab, 0xf4cf1dbf, 0x05e17873, 0xff223d40, 0xff5129a3, 0x0065655d, 0xfff151f9,
-        0x530e2fac, 0x043ace6e, 0xf4fb44f4, 0x05e36f0d, 0xff16faca, 0xff57082e, 0x006437f1, 0xfff1480b,
-        0x52a75d90, 0x03b8aa40, 0xf527e149, 0x05e4dc08, 0xff0bf1ed, 0xff5ce021, 0x00630577, 0xfff140b9,
-        0x523e73fd, 0x033808eb, 0xf554edbd, 0x05e5c0c6, 0xff0122fc, 0xff62b0fd, 0x0061ce2c, 0xfff13bf3,
-        0x51d37897, 0x02b8ee22, 0xf5826555, 0x05e61eae, 0xfef68e45, 0xff687a47, 0x00609249, 0xfff139aa,
-        0x5166711c, 0x023b5d76, 0xf5b0431a, 0x05e5f733, 0xfeec340f, 0xff6e3b84, 0x005f520a, 0xfff139cd,
-        0x50f76368, 0x01bf5a5e, 0xf5de8218, 0x05e54bcd, 0xfee2149b, 0xff73f43d, 0x005e0da8, 0xfff13c4c,
-        0x5086556f, 0x0144e834, 0xf60d1d63, 0x05e41dfe, 0xfed83023, 0xff79a3fe, 0x005cc55c, 0xfff14119,
-        0x50134d3e, 0x00cc0a36, 0xf63c1012, 0x05e26f4e, 0xfece86db, 0xff7f4a54, 0x005b7961, 0xfff14821,
-        0x4f9e50ff, 0x0054c382, 0xf66b5544, 0x05e0414d, 0xfec518f1, 0xff84e6d0, 0x005a29ed, 0xfff15156,
-        0x4f2766f2, 0xffdf171b, 0xf69ae81d, 0x05dd9593, 0xfebbe68c, 0xff8a7905, 0x0058d738, 0xfff15ca8,
-        0x4eae9571, 0xff6b07e7, 0xf6cac3c7, 0x05da6dbe, 0xfeb2efcd, 0xff900089, 0x0057817b, 0xfff16a07,
-        0x4e33e2ee, 0xfef898ae, 0xf6fae373, 0x05d6cb72, 0xfeaa34d0, 0xff957cf4, 0x005628ec, 0xfff17962,
-        0x4db755f3, 0xfe87cc1b, 0xf72b425b, 0x05d2b05c, 0xfea1b5a9, 0xff9aede0, 0x0054cdc0, 0xfff18aab,
-        0x4d38f520, 0xfe18a4bc, 0xf75bdbbd, 0x05ce1e2d, 0xfe997268, 0xffa052ec, 0x0053702d, 0xfff19dd1,
-        0x4cb8c72e, 0xfdab2501, 0xf78caae0, 0x05c9169d, 0xfe916b15, 0xffa5abb8, 0x00521068, 0xfff1b2c5,
-        0x4c36d2eb, 0xfd3f4f3d, 0xf7bdab16, 0x05c39b6a, 0xfe899fb2, 0xffaaf7e6, 0x0050aea5, 0xfff1c976,
-        0x4bb31f3c, 0xfcd525a5, 0xf7eed7b4, 0x05bdae57, 0xfe82103f, 0xffb0371c, 0x004f4b17, 0xfff1e1d6,
-        0x4b2db31a, 0xfc6caa53, 0xf8202c1c, 0x05b7512e, 0xfe7abcb1, 0xffb56902, 0x004de5f1, 0xfff1fbd5,
-        0x4aa69594, 0xfc05df40, 0xf851a3b6, 0x05b085bc, 0xfe73a4fb, 0xffba8d44, 0x004c7f66, 0xfff21764,
-        0x4a1dcdce, 0xfba0c64b, 0xf88339f5, 0x05a94dd5, 0xfe6cc909, 0xffbfa38d, 0x004b17a6, 0xfff23473,
-        0x499362ff, 0xfb3d6133, 0xf8b4ea55, 0x05a1ab52, 0xfe6628c1, 0xffc4ab8f, 0x0049aee3, 0xfff252f3,
-        0x49075c72, 0xfadbb19a, 0xf8e6b059, 0x0599a00e, 0xfe5fc405, 0xffc9a4fc, 0x0048454b, 0xfff272d6,
-        0x4879c185, 0xfa7bb908, 0xf9188793, 0x05912dea, 0xfe599aaf, 0xffce8f8a, 0x0046db0f, 0xfff2940b,
-        0x47ea99a9, 0xfa1d78e3, 0xf94a6b9b, 0x058856cd, 0xfe53ac97, 0xffd36af1, 0x0045705c, 0xfff2b686,
-        0x4759ec60, 0xf9c0f276, 0xf97c5815, 0x057f1c9e, 0xfe4df98e, 0xffd836eb, 0x00440561, 0xfff2da36,
-        0x46c7c140, 0xf96626f0, 0xf9ae48af, 0x0575814c, 0xfe48815e, 0xffdcf336, 0x00429a4a, 0xfff2ff0d,
-        0x46341fed, 0xf90d1761, 0xf9e03924, 0x056b86c6, 0xfe4343d0, 0xffe19f91, 0x00412f43, 0xfff324fd,
-        0x459f101d, 0xf8b5c4be, 0xfa122537, 0x05612f00, 0xfe3e40a6, 0xffe63bc0, 0x003fc478, 0xfff34bf9,
-        0x45089996, 0xf8602fdc, 0xfa4408ba, 0x05567bf1, 0xfe39779a, 0xffeac787, 0x003e5a12, 0xfff373f0,
-        0x4470c42d, 0xf80c5977, 0xfa75df87, 0x054b6f92, 0xfe34e867, 0xffef42af, 0x003cf03d, 0xfff39cd7,
-        0x43d797c7, 0xf7ba422b, 0xfaa7a586, 0x05400be1, 0xfe3092bf, 0xfff3ad01, 0x003b871f, 0xfff3c69f,
-        0x433d1c56, 0xf769ea78, 0xfad956ab, 0x053452dc, 0xfe2c7650, 0xfff8064b, 0x003a1ee3, 0xfff3f13a,
-        0x42a159dc, 0xf71b52c4, 0xfb0aeef6, 0x05284685, 0xfe2892c5, 0xfffc4e5c, 0x0038b7ae, 0xfff41c9c,
-        0x42045865, 0xf6ce7b57, 0xfb3c6a73, 0x051be8dd, 0xfe24e7c3, 0x00008507, 0x003751a7, 0xfff448b7,
-        0x4166200e, 0xf683645a, 0xfb6dc53c, 0x050f3bec, 0xfe2174ec, 0x0004aa1f, 0x0035ecf4, 0xfff4757e,
-        0x40c6b8fd, 0xf63a0ddf, 0xfb9efb77, 0x050241b6, 0xfe1e39da, 0x0008bd7c, 0x003489b9, 0xfff4a2e5,
-        0x40262b65, 0xf5f277d9, 0xfbd00956, 0x04f4fc46, 0xfe1b3628, 0x000cbef7, 0x0033281a, 0xfff4d0de,
-        0x3f847f83, 0xf5aca21f, 0xfc00eb1b, 0x04e76da3, 0xfe18696a, 0x0010ae6e, 0x0031c83a, 0xfff4ff5d,
-        0x3ee1bda2, 0xf5688c6d, 0xfc319d13, 0x04d997d8, 0xfe15d32f, 0x00148bbd, 0x00306a3b, 0xfff52e57,
-        0x3e3dee13, 0xf5263665, 0xfc621b9a, 0x04cb7cf2, 0xfe137304, 0x001856c7, 0x002f0e3f, 0xfff55dbf,
-        0x3d991932, 0xf4e59f8a, 0xfc926319, 0x04bd1efb, 0xfe114872, 0x001c0f6e, 0x002db466, 0xfff58d89,
-        0x3cf34766, 0xf4a6c748, 0xfcc27008, 0x04ae8000, 0xfe0f52fc, 0x001fb599, 0x002c5cd0, 0xfff5bdaa,
-        0x3c4c811c, 0xf469aced, 0xfcf23eec, 0x049fa20f, 0xfe0d9224, 0x0023492f, 0x002b079a, 0xfff5ee17,
-        0x3ba4cec9, 0xf42e4faf, 0xfd21cc59, 0x04908733, 0xfe0c0567, 0x0026ca1c, 0x0029b4e4, 0xfff61ec5,
-        0x3afc38eb, 0xf3f4aea6, 0xfd5114f0, 0x0481317a, 0xfe0aac3f, 0x002a384c, 0x002864c9, 0xfff64fa8,
-        0x3a52c805, 0xf3bcc8d3, 0xfd801564, 0x0471a2ef, 0xfe098622, 0x002d93ae, 0x00271766, 0xfff680b5,
-        0x39a884a1, 0xf3869d1a, 0xfdaeca73, 0x0461dda0, 0xfe089283, 0x0030dc34, 0x0025ccd7, 0xfff6b1e4,
-        0x38fd774e, 0xf3522a49, 0xfddd30eb, 0x0451e396, 0xfe07d0d3, 0x003411d2, 0x00248535, 0xfff6e329,
-        0x3851a8a2, 0xf31f6f0f, 0xfe0b45aa, 0x0441b6dd, 0xfe07407d, 0x0037347d, 0x0023409a, 0xfff7147a,
-        0x37a52135, 0xf2ee6a07, 0xfe39059b, 0x0431597d, 0xfe06e0eb, 0x003a442e, 0x0021ff1f, 0xfff745cd,
-        0x36f7e9a4, 0xf2bf19ae, 0xfe666dbc, 0x0420cd80, 0xfe06b184, 0x003d40e0, 0x0020c0dc, 0xfff7771a,
-        0x364a0a90, 0xf2917c6d, 0xfe937b15, 0x041014eb, 0xfe06b1ac, 0x00402a8e, 0x001f85e6, 0xfff7a857,
-        0x359b8c9d, 0xf265908f, 0xfec02ac2, 0x03ff31c3, 0xfe06e0c4, 0x00430137, 0x001e4e56, 0xfff7d97a,
-        0x34ec786f, 0xf23b544b, 0xfeec79ec, 0x03ee260d, 0xfe073e2a, 0x0045c4dd, 0x001d1a3f, 0xfff80a7c,
-        0x343cd6af, 0xf212c5be, 0xff1865cd, 0x03dcf3ca, 0xfe07c93a, 0x00487582, 0x001be9b7, 0xfff83b52,
-        0x338cb004, 0xf1ebe2ec, 0xff43ebac, 0x03cb9cf9, 0xfe08814e, 0x004b132b, 0x001abcd0, 0xfff86bf6,
-        0x32dc0d17, 0xf1c6a9c3, 0xff6f08e4, 0x03ba2398, 0xfe0965bc, 0x004d9dde, 0x0019939d, 0xfff89c60,
-        0x322af693, 0xf1a3181a, 0xff99badb, 0x03a889a1, 0xfe0a75da, 0x005015a5, 0x00186e31, 0xfff8cc86,
-        0x3179751f, 0xf1812bb0, 0xffc3ff0c, 0x0396d10c, 0xfe0bb0f9, 0x00527a8a, 0x00174c9c, 0xfff8fc62,
-        0x30c79163, 0xf160e22d, 0xffedd2fd, 0x0384fbd1, 0xfe0d166b, 0x0054cc9a, 0x00162eef, 0xfff92bec,
-        0x30155404, 0xf1423924, 0x00173447, 0x03730be0, 0xfe0ea57e, 0x00570be4, 0x00151538, 0xfff95b1e,
-        0x2f62c5a7, 0xf1252e0f, 0x00402092, 0x0361032a, 0xfe105d7e, 0x00593877, 0x0013ff88, 0xfff989ef,
-        0x2eafeeed, 0xf109be56, 0x00689598, 0x034ee39b, 0xfe123db6, 0x005b5267, 0x0012edea, 0xfff9b85b,
-        0x2dfcd873, 0xf0efe748, 0x0090911f, 0x033caf1d, 0xfe144570, 0x005d59c6, 0x0011e06d, 0xfff9e65a,
-        0x2d498ad3, 0xf0d7a622, 0x00b81102, 0x032a6796, 0xfe1673f2, 0x005f4eac, 0x0010d71d, 0xfffa13e5,
-        0x2c960ea3, 0xf0c0f808, 0x00df1328, 0x03180ee7, 0xfe18c884, 0x0061312e, 0x000fd205, 0xfffa40f8,
-        0x2be26c73, 0xf0abda0e, 0x0105958c, 0x0305a6f0, 0xfe1b4268, 0x00630167, 0x000ed130, 0xfffa6d8d,
-        0x2b2eaccf, 0xf0984931, 0x012b9635, 0x02f3318a, 0xfe1de0e2, 0x0064bf71, 0x000dd4a7, 0xfffa999d,
-        0x2a7ad83c, 0xf086425a, 0x0151133e, 0x02e0b08d, 0xfe20a335, 0x00666b68, 0x000cdc74, 0xfffac525,
-        0x29c6f738, 0xf075c260, 0x01760ad1, 0x02ce25ca, 0xfe2388a1, 0x0068056b, 0x000be89f, 0xfffaf01e,
-        0x2913123c, 0xf066c606, 0x019a7b27, 0x02bb9310, 0xfe269065, 0x00698d98, 0x000af931, 0xfffb1a84,
-        0x285f31b7, 0xf05949fb, 0x01be628c, 0x02a8fa2a, 0xfe29b9c1, 0x006b0411, 0x000a0e2f, 0xfffb4453,
-        0x27ab5e12, 0xf04d4ade, 0x01e1bf58, 0x02965cdb, 0xfe2d03f2, 0x006c68f8, 0x000927a0, 0xfffb6d86,
-        0x26f79fab, 0xf042c539, 0x02048ff8, 0x0283bce6, 0xfe306e35, 0x006dbc71, 0x00084589, 0xfffb961a,
-        0x2643feda, 0xf039b587, 0x0226d2e6, 0x02711c05, 0xfe33f7c7, 0x006efea0, 0x000767f0, 0xfffbbe09,
-        0x259083eb, 0xf032182f, 0x024886ad, 0x025e7bf0, 0xfe379fe3, 0x00702fae, 0x00068ed8, 0xfffbe552,
-        0x24dd3721, 0xf02be98a, 0x0269a9e9, 0x024bde5a, 0xfe3b65c4, 0x00714fc0, 0x0005ba46, 0xfffc0bef,
-        0x242a20b3, 0xf02725dc, 0x028a3b44, 0x023944ee, 0xfe3f48a5, 0x00725f02, 0x0004ea3a, 0xfffc31df,
-        0x237748cf, 0xf023c95d, 0x02aa397b, 0x0226b156, 0xfe4347c0, 0x00735d9c, 0x00041eb9, 0xfffc571e,
-        0x22c4b795, 0xf021d031, 0x02c9a359, 0x02142533, 0xfe476250, 0x00744bba, 0x000357c2, 0xfffc7ba9,
-        0x2212751a, 0xf0213671, 0x02e877b9, 0x0201a223, 0xfe4b978e, 0x0075298a, 0x00029558, 0xfffc9f7e,
-        0x21608968, 0xf021f823, 0x0306b586, 0x01ef29be, 0xfe4fe6b3, 0x0075f739, 0x0001d779, 0xfffcc29a,
-        0x20aefc79, 0xf0241140, 0x03245bbc, 0x01dcbd96, 0xfe544efb, 0x0076b4f5, 0x00011e26, 0xfffce4fc,
-        0x1ffdd63b, 0xf0277db1, 0x03416966, 0x01ca5f37, 0xfe58cf9d, 0x007762f0, 0x0000695e, 0xfffd06a1,
-        0x1f4d1e8e, 0xf02c3953, 0x035ddd9e, 0x01b81028, 0xfe5d67d4, 0x0078015a, 0xffffb91f, 0xfffd2787,
-        0x1e9cdd43, 0xf0323ff5, 0x0379b790, 0x01a5d1ea, 0xfe6216db, 0x00789065, 0xffff0d66, 0xfffd47ae,
-        0x1ded1a1d, 0xf0398d56, 0x0394f674, 0x0193a5f9, 0xfe66dbeb, 0x00791043, 0xfffe6631, 0xfffd6713,
+#include "AudioResamplerSincDown.h"
 };
 
 // we use 15 bits to interpolate between these samples
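
Editor's note: the comment above refers to the Q15 phase fraction used to blend between adjacent
coefficient rows of the tables. A worked sketch of that 15-bit linear interpolation, assuming a
standalone helper rather than the resampler's exact code:

    #include <cstdint>

    // Blend two adjacent table entries with a 15-bit fraction (0..32767).
    // frac == 0 returns a; frac == 1 << 14 returns the midpoint of a and b.
    static inline int32_t lerpQ15(int32_t a, int32_t b, int32_t frac) {
        return a + (int32_t)(((int64_t)(b - a) * frac) >> 15);
    }
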
@@ -512,7 +256,7 @@
     mVolumeSIMD[1] = u4_28_from_float(clampFloatVol(right));
 }
 
-void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider)
 {
     // FIXME store current state (up or down sample) and only load the coefs when the state
@@ -521,23 +265,25 @@
     if (mConstants == &veryHighQualityConstants && readResampleCoefficients) {
         mFirCoefs = readResampleCoefficients( mInSampleRate <= mSampleRate );
     } else {
-        mFirCoefs = (const int32_t *) ((mInSampleRate <= mSampleRate) ? mFirCoefsUp : mFirCoefsDown);
+        mFirCoefs = (const int32_t *)
+                ((mInSampleRate <= mSampleRate) ? mFirCoefsUp : mFirCoefsDown);
     }
 
     // select the appropriate resampler
     switch (mChannelCount) {
     case 1:
-        resample<1>(out, outFrameCount, provider);
-        break;
+        return resample<1>(out, outFrameCount, provider);
     case 2:
-        resample<2>(out, outFrameCount, provider);
-        break;
+        return resample<2>(out, outFrameCount, provider);
+    default:
+        LOG_ALWAYS_FATAL("invalid channel count: %d", mChannelCount);
+        return 0;
     }
 }
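
Editor's note: with this change resample() both dispatches to the channel-specialized template and
propagates its return value, so callers learn how many output frames were actually produced (which
can be fewer than requested if the provider runs dry). A hedged caller-side sketch, with Resampler
and Provider standing in for AudioResamplerSinc and AudioBufferProvider:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical caller: drain up to `frames` output frames, retrying until the
    // resampler reports that no more frames could be produced.
    template <typename Resampler, typename Provider>
    size_t drain(Resampler& resampler, Provider* provider,
                 int32_t* out, size_t frames, int channelCount) {
        size_t total = 0;
        while (total < frames) {
            size_t produced = resampler.resample(out + total * channelCount,
                                                 frames - total, provider);
            if (produced == 0) {
                break;  // provider has no more input for now
            }
            total += produced;
        }
        return total;
    }
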
 
 
 template<int CHANNELS>
-void AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
+size_t AudioResamplerSinc::resample(int32_t* out, size_t outFrameCount,
         AudioBufferProvider* provider)
 {
     const Constants& c(*mConstants);
@@ -612,6 +358,7 @@
     mImpulse = impulse;
     mInputIndex = inputIndex;
     mPhaseFraction = phaseFraction;
+    return outputIndex / CHANNELS;
 }
 
 template<int CHANNELS>
@@ -856,4 +603,4 @@
     }
 }
 // ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace android
diff --git a/services/audioflinger/AudioResamplerSinc.h b/services/audioflinger/AudioResamplerSinc.h
index 4691d0a..0fbeac8 100644
--- a/services/audioflinger/AudioResamplerSinc.h
+++ b/services/audioflinger/AudioResamplerSinc.h
@@ -39,7 +39,7 @@
 
     virtual ~AudioResamplerSinc();
 
-    virtual void resample(int32_t* out, size_t outFrameCount,
+    virtual size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 private:
     void init();
@@ -47,7 +47,7 @@
     virtual void setVolume(float left, float right);
 
     template<int CHANNELS>
-    void resample(int32_t* out, size_t outFrameCount,
+    size_t resample(int32_t* out, size_t outFrameCount,
             AudioBufferProvider* provider);
 
     template<int CHANNELS>
@@ -95,6 +95,6 @@
 };
 
 // ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace android
 
 #endif /*ANDROID_AUDIO_RESAMPLER_SINC_H*/
diff --git a/services/audioflinger/AudioResamplerSincDown.h b/services/audioflinger/AudioResamplerSincDown.h
new file mode 100644
index 0000000..2d0fb86
--- /dev/null
+++ b/services/audioflinger/AudioResamplerSincDown.h
@@ -0,0 +1,131 @@
+// cmd-line: fir -l 7 -s48000 -c 17189
+
+    0x5bacb6f4, 0x1ded1a1d, 0xf0398d56, 0x0394f674, 0x0193a5f9, 0xfe66dbeb, 0x00791043, 0xfffe6631,
+    0x5bab6c81, 0x1d3ddccd, 0xf0421d2c, 0x03af9995, 0x01818dc9, 0xfe6bb63e, 0x0079812a, 0xfffdc37d,
+    0x5ba78d37, 0x1c8f2cf9, 0xf04beb1d, 0x03c9a04a, 0x016f8aca, 0xfe70a511, 0x0079e34d, 0xfffd2545,
+    0x5ba1194f, 0x1be11231, 0xf056f2c7, 0x03e309fe, 0x015d9e64, 0xfe75a79f, 0x007a36e2, 0xfffc8b86,
+    0x5b981122, 0x1b3393f8, 0xf0632fb7, 0x03fbd625, 0x014bc9fa, 0xfe7abd23, 0x007a7c20, 0xfffbf639,
+    0x5b8c7530, 0x1a86b9bf, 0xf0709d74, 0x04140449, 0x013a0ee9, 0xfe7fe4db, 0x007ab33d, 0xfffb655b,
+    0x5b7e461a, 0x19da8ae5, 0xf07f3776, 0x042b93fd, 0x01286e86, 0xfe851e05, 0x007adc72, 0xfffad8e4,
+    0x5b6d84a8, 0x192f0eb7, 0xf08ef92d, 0x044284e6, 0x0116ea22, 0xfe8a67dd, 0x007af7f6, 0xfffa50ce,
+    0x5b5a31c6, 0x18844c70, 0xf09fddfe, 0x0458d6b7, 0x01058306, 0xfe8fc1a5, 0x007b0603, 0xfff9cd12,
+    0x5b444e81, 0x17da4b37, 0xf0b1e143, 0x046e8933, 0x00f43a74, 0xfe952a9b, 0x007b06d4, 0xfff94da9,
+    0x5b2bdc0e, 0x17311222, 0xf0c4fe50, 0x04839c29, 0x00e311a9, 0xfe9aa201, 0x007afaa1, 0xfff8d28c,
+    0x5b10dbc2, 0x1688a832, 0xf0d9306d, 0x04980f79, 0x00d209db, 0xfea02719, 0x007ae1a7, 0xfff85bb1,
+    0x5af34f18, 0x15e11453, 0xf0ee72db, 0x04abe310, 0x00c12439, 0xfea5b926, 0x007abc20, 0xfff7e910,
+    0x5ad337af, 0x153a5d5e, 0xf104c0d2, 0x04bf16e9, 0x00b061eb, 0xfeab576d, 0x007a8a49, 0xfff77a9f,
+    0x5ab09748, 0x14948a16, 0xf11c1583, 0x04d1ab0d, 0x009fc413, 0xfeb10134, 0x007a4c5d, 0xfff71057,
+    0x5a8b6fc7, 0x13efa12c, 0xf1346c17, 0x04e39f93, 0x008f4bcb, 0xfeb6b5c0, 0x007a029a, 0xfff6aa2b,
+    0x5a63c336, 0x134ba937, 0xf14dbfb1, 0x04f4f4a2, 0x007efa29, 0xfebc745c, 0x0079ad3d, 0xfff64812,
+    0x5a3993c0, 0x12a8a8bb, 0xf1680b6e, 0x0505aa6a, 0x006ed038, 0xfec23c50, 0x00794c82, 0xfff5ea02,
+    0x5a0ce3b2, 0x1206a625, 0xf1834a63, 0x0515c12d, 0x005ecf01, 0xfec80ce8, 0x0078e0a9, 0xfff58ff0,
+    0x59ddb57f, 0x1165a7cc, 0xf19f77a0, 0x05253938, 0x004ef782, 0xfecde571, 0x007869ee, 0xfff539cf,
+    0x59ac0bba, 0x10c5b3ef, 0xf1bc8e31, 0x053412e4, 0x003f4ab4, 0xfed3c538, 0x0077e891, 0xfff4e794,
+    0x5977e919, 0x1026d0b8, 0xf1da891b, 0x05424e9b, 0x002fc98a, 0xfed9ab8f, 0x00775ccf, 0xfff49934,
+    0x59415075, 0x0f890437, 0xf1f96360, 0x054feccf, 0x002074ed, 0xfedf97c6, 0x0076c6e8, 0xfff44ea3,
+    0x590844c9, 0x0eec5465, 0xf21917ff, 0x055cee03, 0x00114dc3, 0xfee58932, 0x00762719, 0xfff407d2,
+    0x58ccc930, 0x0e50c723, 0xf239a1ef, 0x056952c3, 0x000254e8, 0xfeeb7f27, 0x00757da3, 0xfff3c4b7,
+    0x588ee0ea, 0x0db6623b, 0xf25afc29, 0x05751baa, 0xfff38b32, 0xfef178fc, 0x0074cac4, 0xfff38542,
+    0x584e8f56, 0x0d1d2b5d, 0xf27d219f, 0x0580495c, 0xffe4f171, 0xfef7760c, 0x00740ebb, 0xfff34968,
+    0x580bd7f4, 0x0c85281f, 0xf2a00d43, 0x058adc8d, 0xffd6886d, 0xfefd75af, 0x007349c7, 0xfff3111b,
+    0x57c6be67, 0x0bee5dff, 0xf2c3ba04, 0x0594d5fa, 0xffc850e6, 0xff037744, 0x00727c27, 0xfff2dc4c,
+    0x577f4670, 0x0b58d262, 0xf2e822ce, 0x059e366c, 0xffba4b98, 0xff097a29, 0x0071a61b, 0xfff2aaef,
+    0x573573f2, 0x0ac48a92, 0xf30d428e, 0x05a6feb9, 0xffac7936, 0xff0f7dbf, 0x0070c7e1, 0xfff27cf3,
+    0x56e94af1, 0x0a318bc1, 0xf333142f, 0x05af2fbf, 0xff9eda6d, 0xff15816a, 0x006fe1b8, 0xfff2524c,
+    0x569acf90, 0x099fdb04, 0xf359929a, 0x05b6ca6b, 0xff916fe1, 0xff1b848e, 0x006ef3df, 0xfff22aea,
+    0x564a0610, 0x090f7d57, 0xf380b8ba, 0x05bdcfb2, 0xff843a32, 0xff218692, 0x006dfe94, 0xfff206bf,
+    0x55f6f2d3, 0x0880779d, 0xf3a88179, 0x05c44095, 0xff7739f7, 0xff2786e1, 0x006d0217, 0xfff1e5bb,
+    0x55a19a5c, 0x07f2ce9b, 0xf3d0e7c2, 0x05ca1e1f, 0xff6a6fc1, 0xff2d84e5, 0x006bfea4, 0xfff1c7d0,
+    0x554a0148, 0x076686fc, 0xf3f9e680, 0x05cf6965, 0xff5ddc1a, 0xff33800e, 0x006af47b, 0xfff1acef,
+    0x54f02c56, 0x06dba551, 0xf42378a0, 0x05d42387, 0xff517f86, 0xff3977cb, 0x0069e3d9, 0xfff19508,
+    0x54942061, 0x06522e0f, 0xf44d9912, 0x05d84daf, 0xff455a80, 0xff3f6b8f, 0x0068ccfa, 0xfff1800b,
+    0x5435e263, 0x05ca258f, 0xf47842c5, 0x05dbe90f, 0xff396d7f, 0xff455acf, 0x0067b01e, 0xfff16de9,
+    0x53d57774, 0x0543900d, 0xf4a370ad, 0x05def6e4, 0xff2db8f2, 0xff4b4503, 0x00668d80, 0xfff15e93,
+    0x5372e4c6, 0x04be71ab, 0xf4cf1dbf, 0x05e17873, 0xff223d40, 0xff5129a3, 0x0065655d, 0xfff151f9,
+    0x530e2fac, 0x043ace6e, 0xf4fb44f4, 0x05e36f0d, 0xff16faca, 0xff57082e, 0x006437f1, 0xfff1480b,
+    0x52a75d90, 0x03b8aa40, 0xf527e149, 0x05e4dc08, 0xff0bf1ed, 0xff5ce021, 0x00630577, 0xfff140b9,
+    0x523e73fd, 0x033808eb, 0xf554edbd, 0x05e5c0c6, 0xff0122fc, 0xff62b0fd, 0x0061ce2c, 0xfff13bf3,
+    0x51d37897, 0x02b8ee22, 0xf5826555, 0x05e61eae, 0xfef68e45, 0xff687a47, 0x00609249, 0xfff139aa,
+    0x5166711c, 0x023b5d76, 0xf5b0431a, 0x05e5f733, 0xfeec340f, 0xff6e3b84, 0x005f520a, 0xfff139cd,
+    0x50f76368, 0x01bf5a5e, 0xf5de8218, 0x05e54bcd, 0xfee2149b, 0xff73f43d, 0x005e0da8, 0xfff13c4c,
+    0x5086556f, 0x0144e834, 0xf60d1d63, 0x05e41dfe, 0xfed83023, 0xff79a3fe, 0x005cc55c, 0xfff14119,
+    0x50134d3e, 0x00cc0a36, 0xf63c1012, 0x05e26f4e, 0xfece86db, 0xff7f4a54, 0x005b7961, 0xfff14821,
+    0x4f9e50ff, 0x0054c382, 0xf66b5544, 0x05e0414d, 0xfec518f1, 0xff84e6d0, 0x005a29ed, 0xfff15156,
+    0x4f2766f2, 0xffdf171b, 0xf69ae81d, 0x05dd9593, 0xfebbe68c, 0xff8a7905, 0x0058d738, 0xfff15ca8,
+    0x4eae9571, 0xff6b07e7, 0xf6cac3c7, 0x05da6dbe, 0xfeb2efcd, 0xff900089, 0x0057817b, 0xfff16a07,
+    0x4e33e2ee, 0xfef898ae, 0xf6fae373, 0x05d6cb72, 0xfeaa34d0, 0xff957cf4, 0x005628ec, 0xfff17962,
+    0x4db755f3, 0xfe87cc1b, 0xf72b425b, 0x05d2b05c, 0xfea1b5a9, 0xff9aede0, 0x0054cdc0, 0xfff18aab,
+    0x4d38f520, 0xfe18a4bc, 0xf75bdbbd, 0x05ce1e2d, 0xfe997268, 0xffa052ec, 0x0053702d, 0xfff19dd1,
+    0x4cb8c72e, 0xfdab2501, 0xf78caae0, 0x05c9169d, 0xfe916b15, 0xffa5abb8, 0x00521068, 0xfff1b2c5,
+    0x4c36d2eb, 0xfd3f4f3d, 0xf7bdab16, 0x05c39b6a, 0xfe899fb2, 0xffaaf7e6, 0x0050aea5, 0xfff1c976,
+    0x4bb31f3c, 0xfcd525a5, 0xf7eed7b4, 0x05bdae57, 0xfe82103f, 0xffb0371c, 0x004f4b17, 0xfff1e1d6,
+    0x4b2db31a, 0xfc6caa53, 0xf8202c1c, 0x05b7512e, 0xfe7abcb1, 0xffb56902, 0x004de5f1, 0xfff1fbd5,
+    0x4aa69594, 0xfc05df40, 0xf851a3b6, 0x05b085bc, 0xfe73a4fb, 0xffba8d44, 0x004c7f66, 0xfff21764,
+    0x4a1dcdce, 0xfba0c64b, 0xf88339f5, 0x05a94dd5, 0xfe6cc909, 0xffbfa38d, 0x004b17a6, 0xfff23473,
+    0x499362ff, 0xfb3d6133, 0xf8b4ea55, 0x05a1ab52, 0xfe6628c1, 0xffc4ab8f, 0x0049aee3, 0xfff252f3,
+    0x49075c72, 0xfadbb19a, 0xf8e6b059, 0x0599a00e, 0xfe5fc405, 0xffc9a4fc, 0x0048454b, 0xfff272d6,
+    0x4879c185, 0xfa7bb908, 0xf9188793, 0x05912dea, 0xfe599aaf, 0xffce8f8a, 0x0046db0f, 0xfff2940b,
+    0x47ea99a9, 0xfa1d78e3, 0xf94a6b9b, 0x058856cd, 0xfe53ac97, 0xffd36af1, 0x0045705c, 0xfff2b686,
+    0x4759ec60, 0xf9c0f276, 0xf97c5815, 0x057f1c9e, 0xfe4df98e, 0xffd836eb, 0x00440561, 0xfff2da36,
+    0x46c7c140, 0xf96626f0, 0xf9ae48af, 0x0575814c, 0xfe48815e, 0xffdcf336, 0x00429a4a, 0xfff2ff0d,
+    0x46341fed, 0xf90d1761, 0xf9e03924, 0x056b86c6, 0xfe4343d0, 0xffe19f91, 0x00412f43, 0xfff324fd,
+    0x459f101d, 0xf8b5c4be, 0xfa122537, 0x05612f00, 0xfe3e40a6, 0xffe63bc0, 0x003fc478, 0xfff34bf9,
+    0x45089996, 0xf8602fdc, 0xfa4408ba, 0x05567bf1, 0xfe39779a, 0xffeac787, 0x003e5a12, 0xfff373f0,
+    0x4470c42d, 0xf80c5977, 0xfa75df87, 0x054b6f92, 0xfe34e867, 0xffef42af, 0x003cf03d, 0xfff39cd7,
+    0x43d797c7, 0xf7ba422b, 0xfaa7a586, 0x05400be1, 0xfe3092bf, 0xfff3ad01, 0x003b871f, 0xfff3c69f,
+    0x433d1c56, 0xf769ea78, 0xfad956ab, 0x053452dc, 0xfe2c7650, 0xfff8064b, 0x003a1ee3, 0xfff3f13a,
+    0x42a159dc, 0xf71b52c4, 0xfb0aeef6, 0x05284685, 0xfe2892c5, 0xfffc4e5c, 0x0038b7ae, 0xfff41c9c,
+    0x42045865, 0xf6ce7b57, 0xfb3c6a73, 0x051be8dd, 0xfe24e7c3, 0x00008507, 0x003751a7, 0xfff448b7,
+    0x4166200e, 0xf683645a, 0xfb6dc53c, 0x050f3bec, 0xfe2174ec, 0x0004aa1f, 0x0035ecf4, 0xfff4757e,
+    0x40c6b8fd, 0xf63a0ddf, 0xfb9efb77, 0x050241b6, 0xfe1e39da, 0x0008bd7c, 0x003489b9, 0xfff4a2e5,
+    0x40262b65, 0xf5f277d9, 0xfbd00956, 0x04f4fc46, 0xfe1b3628, 0x000cbef7, 0x0033281a, 0xfff4d0de,
+    0x3f847f83, 0xf5aca21f, 0xfc00eb1b, 0x04e76da3, 0xfe18696a, 0x0010ae6e, 0x0031c83a, 0xfff4ff5d,
+    0x3ee1bda2, 0xf5688c6d, 0xfc319d13, 0x04d997d8, 0xfe15d32f, 0x00148bbd, 0x00306a3b, 0xfff52e57,
+    0x3e3dee13, 0xf5263665, 0xfc621b9a, 0x04cb7cf2, 0xfe137304, 0x001856c7, 0x002f0e3f, 0xfff55dbf,
+    0x3d991932, 0xf4e59f8a, 0xfc926319, 0x04bd1efb, 0xfe114872, 0x001c0f6e, 0x002db466, 0xfff58d89,
+    0x3cf34766, 0xf4a6c748, 0xfcc27008, 0x04ae8000, 0xfe0f52fc, 0x001fb599, 0x002c5cd0, 0xfff5bdaa,
+    0x3c4c811c, 0xf469aced, 0xfcf23eec, 0x049fa20f, 0xfe0d9224, 0x0023492f, 0x002b079a, 0xfff5ee17,
+    0x3ba4cec9, 0xf42e4faf, 0xfd21cc59, 0x04908733, 0xfe0c0567, 0x0026ca1c, 0x0029b4e4, 0xfff61ec5,
+    0x3afc38eb, 0xf3f4aea6, 0xfd5114f0, 0x0481317a, 0xfe0aac3f, 0x002a384c, 0x002864c9, 0xfff64fa8,
+    0x3a52c805, 0xf3bcc8d3, 0xfd801564, 0x0471a2ef, 0xfe098622, 0x002d93ae, 0x00271766, 0xfff680b5,
+    0x39a884a1, 0xf3869d1a, 0xfdaeca73, 0x0461dda0, 0xfe089283, 0x0030dc34, 0x0025ccd7, 0xfff6b1e4,
+    0x38fd774e, 0xf3522a49, 0xfddd30eb, 0x0451e396, 0xfe07d0d3, 0x003411d2, 0x00248535, 0xfff6e329,
+    0x3851a8a2, 0xf31f6f0f, 0xfe0b45aa, 0x0441b6dd, 0xfe07407d, 0x0037347d, 0x0023409a, 0xfff7147a,
+    0x37a52135, 0xf2ee6a07, 0xfe39059b, 0x0431597d, 0xfe06e0eb, 0x003a442e, 0x0021ff1f, 0xfff745cd,
+    0x36f7e9a4, 0xf2bf19ae, 0xfe666dbc, 0x0420cd80, 0xfe06b184, 0x003d40e0, 0x0020c0dc, 0xfff7771a,
+    0x364a0a90, 0xf2917c6d, 0xfe937b15, 0x041014eb, 0xfe06b1ac, 0x00402a8e, 0x001f85e6, 0xfff7a857,
+    0x359b8c9d, 0xf265908f, 0xfec02ac2, 0x03ff31c3, 0xfe06e0c4, 0x00430137, 0x001e4e56, 0xfff7d97a,
+    0x34ec786f, 0xf23b544b, 0xfeec79ec, 0x03ee260d, 0xfe073e2a, 0x0045c4dd, 0x001d1a3f, 0xfff80a7c,
+    0x343cd6af, 0xf212c5be, 0xff1865cd, 0x03dcf3ca, 0xfe07c93a, 0x00487582, 0x001be9b7, 0xfff83b52,
+    0x338cb004, 0xf1ebe2ec, 0xff43ebac, 0x03cb9cf9, 0xfe08814e, 0x004b132b, 0x001abcd0, 0xfff86bf6,
+    0x32dc0d17, 0xf1c6a9c3, 0xff6f08e4, 0x03ba2398, 0xfe0965bc, 0x004d9dde, 0x0019939d, 0xfff89c60,
+    0x322af693, 0xf1a3181a, 0xff99badb, 0x03a889a1, 0xfe0a75da, 0x005015a5, 0x00186e31, 0xfff8cc86,
+    0x3179751f, 0xf1812bb0, 0xffc3ff0c, 0x0396d10c, 0xfe0bb0f9, 0x00527a8a, 0x00174c9c, 0xfff8fc62,
+    0x30c79163, 0xf160e22d, 0xffedd2fd, 0x0384fbd1, 0xfe0d166b, 0x0054cc9a, 0x00162eef, 0xfff92bec,
+    0x30155404, 0xf1423924, 0x00173447, 0x03730be0, 0xfe0ea57e, 0x00570be4, 0x00151538, 0xfff95b1e,
+    0x2f62c5a7, 0xf1252e0f, 0x00402092, 0x0361032a, 0xfe105d7e, 0x00593877, 0x0013ff88, 0xfff989ef,
+    0x2eafeeed, 0xf109be56, 0x00689598, 0x034ee39b, 0xfe123db6, 0x005b5267, 0x0012edea, 0xfff9b85b,
+    0x2dfcd873, 0xf0efe748, 0x0090911f, 0x033caf1d, 0xfe144570, 0x005d59c6, 0x0011e06d, 0xfff9e65a,
+    0x2d498ad3, 0xf0d7a622, 0x00b81102, 0x032a6796, 0xfe1673f2, 0x005f4eac, 0x0010d71d, 0xfffa13e5,
+    0x2c960ea3, 0xf0c0f808, 0x00df1328, 0x03180ee7, 0xfe18c884, 0x0061312e, 0x000fd205, 0xfffa40f8,
+    0x2be26c73, 0xf0abda0e, 0x0105958c, 0x0305a6f0, 0xfe1b4268, 0x00630167, 0x000ed130, 0xfffa6d8d,
+    0x2b2eaccf, 0xf0984931, 0x012b9635, 0x02f3318a, 0xfe1de0e2, 0x0064bf71, 0x000dd4a7, 0xfffa999d,
+    0x2a7ad83c, 0xf086425a, 0x0151133e, 0x02e0b08d, 0xfe20a335, 0x00666b68, 0x000cdc74, 0xfffac525,
+    0x29c6f738, 0xf075c260, 0x01760ad1, 0x02ce25ca, 0xfe2388a1, 0x0068056b, 0x000be89f, 0xfffaf01e,
+    0x2913123c, 0xf066c606, 0x019a7b27, 0x02bb9310, 0xfe269065, 0x00698d98, 0x000af931, 0xfffb1a84,
+    0x285f31b7, 0xf05949fb, 0x01be628c, 0x02a8fa2a, 0xfe29b9c1, 0x006b0411, 0x000a0e2f, 0xfffb4453,
+    0x27ab5e12, 0xf04d4ade, 0x01e1bf58, 0x02965cdb, 0xfe2d03f2, 0x006c68f8, 0x000927a0, 0xfffb6d86,
+    0x26f79fab, 0xf042c539, 0x02048ff8, 0x0283bce6, 0xfe306e35, 0x006dbc71, 0x00084589, 0xfffb961a,
+    0x2643feda, 0xf039b587, 0x0226d2e6, 0x02711c05, 0xfe33f7c7, 0x006efea0, 0x000767f0, 0xfffbbe09,
+    0x259083eb, 0xf032182f, 0x024886ad, 0x025e7bf0, 0xfe379fe3, 0x00702fae, 0x00068ed8, 0xfffbe552,
+    0x24dd3721, 0xf02be98a, 0x0269a9e9, 0x024bde5a, 0xfe3b65c4, 0x00714fc0, 0x0005ba46, 0xfffc0bef,
+    0x242a20b3, 0xf02725dc, 0x028a3b44, 0x023944ee, 0xfe3f48a5, 0x00725f02, 0x0004ea3a, 0xfffc31df,
+    0x237748cf, 0xf023c95d, 0x02aa397b, 0x0226b156, 0xfe4347c0, 0x00735d9c, 0x00041eb9, 0xfffc571e,
+    0x22c4b795, 0xf021d031, 0x02c9a359, 0x02142533, 0xfe476250, 0x00744bba, 0x000357c2, 0xfffc7ba9,
+    0x2212751a, 0xf0213671, 0x02e877b9, 0x0201a223, 0xfe4b978e, 0x0075298a, 0x00029558, 0xfffc9f7e,
+    0x21608968, 0xf021f823, 0x0306b586, 0x01ef29be, 0xfe4fe6b3, 0x0075f739, 0x0001d779, 0xfffcc29a,
+    0x20aefc79, 0xf0241140, 0x03245bbc, 0x01dcbd96, 0xfe544efb, 0x0076b4f5, 0x00011e26, 0xfffce4fc,
+    0x1ffdd63b, 0xf0277db1, 0x03416966, 0x01ca5f37, 0xfe58cf9d, 0x007762f0, 0x0000695e, 0xfffd06a1,
+    0x1f4d1e8e, 0xf02c3953, 0x035ddd9e, 0x01b81028, 0xfe5d67d4, 0x0078015a, 0xffffb91f, 0xfffd2787,
+    0x1e9cdd43, 0xf0323ff5, 0x0379b790, 0x01a5d1ea, 0xfe6216db, 0x00789065, 0xffff0d66, 0xfffd47ae,
+    0x1ded1a1d, 0xf0398d56, 0x0394f674, 0x0193a5f9, 0xfe66dbeb, 0x00791043, 0xfffe6631, 0xfffd6713,
diff --git a/services/audioflinger/AudioResamplerSincUp.h b/services/audioflinger/AudioResamplerSincUp.h
new file mode 100644
index 0000000..fd5367e
--- /dev/null
+++ b/services/audioflinger/AudioResamplerSincUp.h
@@ -0,0 +1,131 @@
+// cmd-line: fir -l 7 -s48000 -c 20478
+
+    0x6d374bc7, 0x111c6ba0, 0xf3240e61, 0x07d14a38, 0xfc509e64, 0x0139cee9, 0xffc8c866, 0xfffcc300,
+    0x6d35278a, 0x103e8192, 0xf36b9dfd, 0x07bdfaa5, 0xfc5102d0, 0x013d618d, 0xffc663b9, 0xfffd9592,
+    0x6d2ebafe, 0x0f62811a, 0xf3b3d8ac, 0x07a9f399, 0xfc51d9a6, 0x0140bea5, 0xffc41212, 0xfffe631e,
+    0x6d24069d, 0x0e8875ad, 0xf3fcb43e, 0x07953976, 0xfc53216f, 0x0143e67c, 0xffc1d373, 0xffff2b9f,
+    0x6d150b35, 0x0db06a89, 0xf4462690, 0x077fd0ac, 0xfc54d8ae, 0x0146d965, 0xffbfa7d9, 0xffffef10,
+    0x6d01c9e3, 0x0cda6ab5, 0xf4902587, 0x0769bdaf, 0xfc56fdda, 0x014997bb, 0xffbd8f40, 0x0000ad6e,
+    0x6cea4418, 0x0c0680fe, 0xf4daa718, 0x07530501, 0xfc598f60, 0x014c21db, 0xffbb89a1, 0x000166b6,
+    0x6cce7b97, 0x0b34b7f5, 0xf525a143, 0x073bab28, 0xfc5c8ba5, 0x014e782a, 0xffb996f3, 0x00021ae5,
+    0x6cae7272, 0x0a6519f4, 0xf5710a17, 0x0723b4b4, 0xfc5ff105, 0x01509b14, 0xffb7b728, 0x0002c9fd,
+    0x6c8a2b0f, 0x0997b116, 0xf5bcd7b1, 0x070b2639, 0xfc63bdd3, 0x01528b08, 0xffb5ea31, 0x000373fb,
+    0x6c61a823, 0x08cc873c, 0xf609003f, 0x06f20453, 0xfc67f05a, 0x0154487b, 0xffb42ffc, 0x000418e2,
+    0x6c34ecb5, 0x0803a60a, 0xf6557a00, 0x06d853a2, 0xfc6c86dd, 0x0155d3e8, 0xffb28876, 0x0004b8b3,
+    0x6c03fc1c, 0x073d16e7, 0xf6a23b44, 0x06be18cd, 0xfc717f97, 0x01572dcf, 0xffb0f388, 0x00055371,
+    0x6bced9ff, 0x0678e2fc, 0xf6ef3a6e, 0x06a3587e, 0xfc76d8bc, 0x015856b6, 0xffaf7118, 0x0005e921,
+    0x6b958a54, 0x05b71332, 0xf73c6df4, 0x06881761, 0xfc7c9079, 0x01594f25, 0xffae010b, 0x000679c5,
+    0x6b581163, 0x04f7b037, 0xf789cc61, 0x066c5a27, 0xfc82a4f4, 0x015a17ab, 0xffaca344, 0x00070564,
+    0x6b1673c1, 0x043ac276, 0xf7d74c53, 0x06502583, 0xfc89144d, 0x015ab0db, 0xffab57a1, 0x00078c04,
+    0x6ad0b652, 0x0380521c, 0xf824e480, 0x06337e2a, 0xfc8fdc9f, 0x015b1b4e, 0xffaa1e02, 0x00080dab,
+    0x6a86de48, 0x02c86715, 0xf8728bb3, 0x061668d2, 0xfc96fbfc, 0x015b579e, 0xffa8f641, 0x00088a62,
+    0x6a38f123, 0x0213090c, 0xf8c038d0, 0x05f8ea30, 0xfc9e7074, 0x015b666c, 0xffa7e039, 0x00090230,
+    0x69e6f4b1, 0x01603f6e, 0xf90de2d1, 0x05db06fc, 0xfca63810, 0x015b485b, 0xffa6dbc0, 0x0009751e,
+    0x6990ef0b, 0x00b01162, 0xf95b80cb, 0x05bcc3ed, 0xfcae50d6, 0x015afe14, 0xffa5e8ad, 0x0009e337,
+    0x6936e697, 0x000285d0, 0xf9a909ea, 0x059e25b5, 0xfcb6b8c4, 0x015a8843, 0xffa506d2, 0x000a4c85,
+    0x68d8e206, 0xff57a35e, 0xf9f67577, 0x057f310a, 0xfcbf6dd8, 0x0159e796, 0xffa43603, 0x000ab112,
+    0x6876e855, 0xfeaf706f, 0xfa43bad2, 0x055fea9d, 0xfcc86e09, 0x01591cc0, 0xffa3760e, 0x000b10ec,
+    0x681100c9, 0xfe09f323, 0xfa90d17b, 0x0540571a, 0xfcd1b74c, 0x01582878, 0xffa2c6c2, 0x000b6c1d,
+    0x67a732f4, 0xfd673159, 0xfaddb10c, 0x05207b2f, 0xfcdb4793, 0x01570b77, 0xffa227ec, 0x000bc2b3,
+    0x673986ac, 0xfcc730aa, 0xfb2a513b, 0x05005b82, 0xfce51ccb, 0x0155c678, 0xffa19957, 0x000c14bb,
+    0x66c80413, 0xfc29f670, 0xfb76a9dd, 0x04dffcb6, 0xfcef34e1, 0x01545a3c, 0xffa11acb, 0x000c6244,
+    0x6652b392, 0xfb8f87bd, 0xfbc2b2e4, 0x04bf6369, 0xfcf98dbe, 0x0152c783, 0xffa0ac11, 0x000cab5c,
+    0x65d99dd5, 0xfaf7e963, 0xfc0e6461, 0x049e9433, 0xfd04254a, 0x01510f13, 0xffa04cf0, 0x000cf012,
+    0x655ccbd3, 0xfa631fef, 0xfc59b685, 0x047d93a8, 0xfd0ef969, 0x014f31b2, 0xff9ffd2c, 0x000d3075,
+    0x64dc46c3, 0xf9d12fab, 0xfca4a19f, 0x045c6654, 0xfd1a0801, 0x014d3029, 0xff9fbc89, 0x000d6c97,
+    0x64581823, 0xf9421c9d, 0xfcef1e20, 0x043b10bd, 0xfd254ef4, 0x014b0b45, 0xff9f8ac9, 0x000da486,
+    0x63d049b4, 0xf8b5ea87, 0xfd392498, 0x04199760, 0xfd30cc24, 0x0148c3d2, 0xff9f67ae, 0x000dd854,
+    0x6344e578, 0xf82c9ce7, 0xfd82adba, 0x03f7feb4, 0xfd3c7d73, 0x01465a9f, 0xff9f52f7, 0x000e0812,
+    0x62b5f5b2, 0xf7a636fa, 0xfdcbb25a, 0x03d64b27, 0xfd4860c2, 0x0143d07f, 0xff9f4c65, 0x000e33d3,
+    0x622384e8, 0xf722bbb5, 0xfe142b6e, 0x03b4811d, 0xfd5473f3, 0x01412643, 0xff9f53b4, 0x000e5ba7,
+    0x618d9ddc, 0xf6a22dcf, 0xfe5c120f, 0x0392a4f4, 0xfd60b4e7, 0x013e5cc0, 0xff9f68a1, 0x000e7fa1,
+    0x60f44b91, 0xf6248fb6, 0xfea35f79, 0x0370bafc, 0xfd6d2180, 0x013b74ca, 0xff9f8ae9, 0x000e9fd5,
+    0x60579947, 0xf5a9e398, 0xfeea0d0c, 0x034ec77f, 0xfd79b7a1, 0x01386f3a, 0xff9fba47, 0x000ebc54,
+    0x5fb79278, 0xf5322b61, 0xff30144a, 0x032ccebb, 0xfd86752e, 0x01354ce7, 0xff9ff674, 0x000ed533,
+    0x5f1442dc, 0xf4bd68b6, 0xff756edc, 0x030ad4e1, 0xfd93580d, 0x01320ea9, 0xffa03f2b, 0x000eea84,
+    0x5e6db665, 0xf44b9cfe, 0xffba168d, 0x02e8de19, 0xfda05e23, 0x012eb55a, 0xffa09425, 0x000efc5c,
+    0x5dc3f93c, 0xf3dcc959, 0xfffe054e, 0x02c6ee7f, 0xfdad855b, 0x012b41d3, 0xffa0f519, 0x000f0ace,
+    0x5d1717c4, 0xf370eea9, 0x00413536, 0x02a50a22, 0xfdbacb9e, 0x0127b4f1, 0xffa161bf, 0x000f15ef,
+    0x5c671e96, 0xf3080d8c, 0x0083a081, 0x02833506, 0xfdc82edb, 0x01240f8e, 0xffa1d9cf, 0x000f1dd2,
+    0x5bb41a80, 0xf2a2265e, 0x00c54190, 0x02617321, 0xfdd5ad01, 0x01205285, 0xffa25cfe, 0x000f228d,
+    0x5afe1886, 0xf23f393b, 0x010612eb, 0x023fc85c, 0xfde34403, 0x011c7eb2, 0xffa2eb04, 0x000f2434,
+    0x5a4525df, 0xf1df45fd, 0x01460f41, 0x021e3891, 0xfdf0f1d6, 0x011894f0, 0xffa38395, 0x000f22dc,
+    0x59894ff3, 0xf1824c3e, 0x01853165, 0x01fcc78f, 0xfdfeb475, 0x0114961b, 0xffa42668, 0x000f1e99,
+    0x58caa45b, 0xf1284b58, 0x01c37452, 0x01db7914, 0xfe0c89db, 0x0110830f, 0xffa4d332, 0x000f1781,
+    0x580930e1, 0xf0d14267, 0x0200d32c, 0x01ba50d2, 0xfe1a7009, 0x010c5ca6, 0xffa589a6, 0x000f0da8,
+    0x5745037c, 0xf07d3043, 0x023d493c, 0x0199526b, 0xfe286505, 0x010823ba, 0xffa6497c, 0x000f0125,
+    0x567e2a51, 0xf02c138a, 0x0278d1f2, 0x01788170, 0xfe3666d5, 0x0103d927, 0xffa71266, 0x000ef20b,
+    0x55b4b3af, 0xefddea9a, 0x02b368e6, 0x0157e166, 0xfe447389, 0x00ff7dc4, 0xffa7e41a, 0x000ee070,
+    0x54e8ae13, 0xef92b393, 0x02ed09d7, 0x013775bf, 0xfe528931, 0x00fb126b, 0xffa8be4c, 0x000ecc69,
+    0x541a281e, 0xef4a6c58, 0x0325b0ad, 0x011741df, 0xfe60a5e5, 0x00f697f3, 0xffa9a0b1, 0x000eb60b,
+    0x5349309e, 0xef051290, 0x035d5977, 0x00f7491a, 0xfe6ec7c0, 0x00f20f32, 0xffaa8afe, 0x000e9d6b,
+    0x5275d684, 0xeec2a3a3, 0x0394006a, 0x00d78eb3, 0xfe7cece2, 0x00ed78ff, 0xffab7ce7, 0x000e829e,
+    0x51a028e8, 0xee831cc3, 0x03c9a1e5, 0x00b815da, 0xfe8b1373, 0x00e8d62d, 0xffac7621, 0x000e65ba,
+    0x50c83704, 0xee467ae1, 0x03fe3a6f, 0x0098e1b3, 0xfe99399f, 0x00e4278f, 0xffad7662, 0x000e46d3,
+    0x4fee1037, 0xee0cbab9, 0x0431c6b5, 0x0079f54c, 0xfea75d97, 0x00df6df7, 0xffae7d5f, 0x000e25fd,
+    0x4f11c3fe, 0xedd5d8ca, 0x0464438c, 0x005b53a4, 0xfeb57d92, 0x00daaa34, 0xffaf8acd, 0x000e034f,
+    0x4e3361f7, 0xeda1d15c, 0x0495adf2, 0x003cffa9, 0xfec397cf, 0x00d5dd16, 0xffb09e63, 0x000ddedb,
+    0x4d52f9df, 0xed70a07d, 0x04c6030d, 0x001efc35, 0xfed1aa92, 0x00d10769, 0xffb1b7d8, 0x000db8b7,
+    0x4c709b8e, 0xed424205, 0x04f54029, 0x00014c12, 0xfedfb425, 0x00cc29f7, 0xffb2d6e1, 0x000d90f6,
+    0x4b8c56f8, 0xed16b196, 0x052362ba, 0xffe3f1f7, 0xfeedb2da, 0x00c7458a, 0xffb3fb37, 0x000d67ae,
+    0x4aa63c2c, 0xecedea99, 0x0550685d, 0xffc6f08a, 0xfefba508, 0x00c25ae8, 0xffb52490, 0x000d3cf1,
+    0x49be5b50, 0xecc7e845, 0x057c4ed4, 0xffaa4a5d, 0xff09890f, 0x00bd6ad7, 0xffb652a7, 0x000d10d5,
+    0x48d4c4a2, 0xeca4a59b, 0x05a7140b, 0xff8e01f1, 0xff175d53, 0x00b87619, 0xffb78533, 0x000ce36b,
+    0x47e98874, 0xec841d68, 0x05d0b612, 0xff7219b3, 0xff252042, 0x00b37d70, 0xffb8bbed, 0x000cb4c8,
+    0x46fcb72d, 0xec664a48, 0x05f93324, 0xff5693fe, 0xff32d04f, 0x00ae8198, 0xffb9f691, 0x000c84ff,
+    0x460e6148, 0xec4b26a2, 0x0620899e, 0xff3b731b, 0xff406bf8, 0x00a9834e, 0xffbb34d8, 0x000c5422,
+    0x451e9750, 0xec32acb0, 0x0646b808, 0xff20b93e, 0xff4df1be, 0x00a4834c, 0xffbc767f, 0x000c2245,
+    0x442d69de, 0xec1cd677, 0x066bbd0d, 0xff066889, 0xff5b602c, 0x009f8249, 0xffbdbb42, 0x000bef79,
+    0x433ae99c, 0xec099dcf, 0x068f9781, 0xfeec830d, 0xff68b5d5, 0x009a80f8, 0xffbf02dd, 0x000bbbd2,
+    0x4247273f, 0xebf8fc64, 0x06b2465b, 0xfed30ac5, 0xff75f153, 0x0095800c, 0xffc04d0f, 0x000b8760,
+    0x41523389, 0xebeaebaf, 0x06d3c8bb, 0xfeba0199, 0xff831148, 0x00908034, 0xffc19996, 0x000b5235,
+    0x405c1f43, 0xebdf6500, 0x06f41de3, 0xfea16960, 0xff90145e, 0x008b821b, 0xffc2e832, 0x000b1c64,
+    0x3f64fb40, 0xebd6617b, 0x0713453d, 0xfe8943dc, 0xff9cf947, 0x0086866b, 0xffc438a3, 0x000ae5fc,
+    0x3e6cd85b, 0xebcfda19, 0x07313e56, 0xfe7192bd, 0xffa9bebe, 0x00818dcb, 0xffc58aaa, 0x000aaf0f,
+    0x3d73c772, 0xebcbc7a7, 0x074e08e0, 0xfe5a579d, 0xffb66386, 0x007c98de, 0xffc6de09, 0x000a77ac,
+    0x3c79d968, 0xebca22cc, 0x0769a4b2, 0xfe439407, 0xffc2e669, 0x0077a845, 0xffc83285, 0x000a3fe5,
+    0x3b7f1f23, 0xebcae405, 0x078411c7, 0xfe2d496f, 0xffcf463a, 0x0072bc9d, 0xffc987e0, 0x000a07c9,
+    0x3a83a989, 0xebce03aa, 0x079d503b, 0xfe177937, 0xffdb81d6, 0x006dd680, 0xffcadde1, 0x0009cf67,
+    0x3987897f, 0xebd379eb, 0x07b56051, 0xfe0224b0, 0xffe79820, 0x0068f687, 0xffcc344c, 0x000996ce,
+    0x388acfe9, 0xebdb3ed5, 0x07cc426c, 0xfded4d13, 0xfff38806, 0x00641d44, 0xffcd8aeb, 0x00095e0e,
+    0x378d8da8, 0xebe54a4f, 0x07e1f712, 0xfdd8f38b, 0xffff507b, 0x005f4b4a, 0xffcee183, 0x00092535,
+    0x368fd397, 0xebf1941f, 0x07f67eec, 0xfdc5192d, 0x000af07f, 0x005a8125, 0xffd037e0, 0x0008ec50,
+    0x3591b28b, 0xec0013e8, 0x0809dac3, 0xfdb1befc, 0x00166718, 0x0055bf60, 0xffd18dcc, 0x0008b36e,
+    0x34933b50, 0xec10c12c, 0x081c0b84, 0xfd9ee5e7, 0x0021b355, 0x00510682, 0xffd2e311, 0x00087a9c,
+    0x33947eab, 0xec23934f, 0x082d1239, 0xfd8c8ecc, 0x002cd44d, 0x004c570f, 0xffd4377d, 0x000841e8,
+    0x32958d55, 0xec388194, 0x083cf010, 0xfd7aba74, 0x0037c922, 0x0047b186, 0xffd58ade, 0x0008095d,
+    0x319677fa, 0xec4f8322, 0x084ba654, 0xfd696998, 0x004290fc, 0x00431666, 0xffd6dd02, 0x0007d108,
+    0x30974f3b, 0xec688f02, 0x08593671, 0xfd589cdc, 0x004d2b0e, 0x003e8628, 0xffd82dba, 0x000798f5,
+    0x2f9823a8, 0xec839c22, 0x0865a1f1, 0xfd4854d3, 0x00579691, 0x003a0141, 0xffd97cd6, 0x00076130,
+    0x2e9905c1, 0xeca0a156, 0x0870ea7e, 0xfd3891fd, 0x0061d2ca, 0x00358824, 0xffdaca2a, 0x000729c4,
+    0x2d9a05f4, 0xecbf9558, 0x087b11de, 0xfd2954c8, 0x006bdf05, 0x00311b41, 0xffdc1588, 0x0006f2bb,
+    0x2c9b349e, 0xece06ecb, 0x088419f6, 0xfd1a9d91, 0x0075ba95, 0x002cbb03, 0xffdd5ec6, 0x0006bc21,
+    0x2b9ca203, 0xed032439, 0x088c04c8, 0xfd0c6ca2, 0x007f64da, 0x002867d2, 0xffdea5bb, 0x000685ff,
+    0x2a9e5e57, 0xed27ac16, 0x0892d470, 0xfcfec233, 0x0088dd38, 0x00242213, 0xffdfea3c, 0x0006505f,
+    0x29a079b2, 0xed4dfcc2, 0x08988b2a, 0xfcf19e6b, 0x0092231e, 0x001fea27, 0xffe12c22, 0x00061b4b,
+    0x28a30416, 0xed760c88, 0x089d2b4a, 0xfce50161, 0x009b3605, 0x001bc06b, 0xffe26b48, 0x0005e6cb,
+    0x27a60d6a, 0xed9fd1a2, 0x08a0b740, 0xfcd8eb17, 0x00a4156b, 0x0017a53b, 0xffe3a788, 0x0005b2e8,
+    0x26a9a57b, 0xedcb4237, 0x08a33196, 0xfccd5b82, 0x00acc0da, 0x001398ec, 0xffe4e0bf, 0x00057faa,
+    0x25addbf9, 0xedf8545b, 0x08a49cf0, 0xfcc25285, 0x00b537e1, 0x000f9bd2, 0xffe616c8, 0x00054d1a,
+    0x24b2c075, 0xee26fe17, 0x08a4fc0d, 0xfcb7cff0, 0x00bd7a1c, 0x000bae3c, 0xffe74984, 0x00051b3e,
+    0x23b86263, 0xee573562, 0x08a451c0, 0xfcadd386, 0x00c5872a, 0x0007d075, 0xffe878d3, 0x0004ea1d,
+    0x22bed116, 0xee88f026, 0x08a2a0f8, 0xfca45cf7, 0x00cd5eb7, 0x000402c8, 0xffe9a494, 0x0004b9c0,
+    0x21c61bc0, 0xeebc2444, 0x089fecbb, 0xfc9b6be5, 0x00d50075, 0x00004579, 0xffeaccaa, 0x00048a2b,
+    0x20ce516f, 0xeef0c78d, 0x089c3824, 0xfc92ffe1, 0x00dc6c1e, 0xfffc98c9, 0xffebf0fa, 0x00045b65,
+    0x1fd7810f, 0xef26cfca, 0x08978666, 0xfc8b186d, 0x00e3a175, 0xfff8fcf7, 0xffed1166, 0x00042d74,
+    0x1ee1b965, 0xef5e32bd, 0x0891dac8, 0xfc83b4fc, 0x00eaa045, 0xfff5723d, 0xffee2dd7, 0x0004005e,
+    0x1ded0911, 0xef96e61c, 0x088b38a9, 0xfc7cd4f0, 0x00f16861, 0xfff1f8d2, 0xffef4632, 0x0003d426,
+    0x1cf97e8b, 0xefd0df9a, 0x0883a378, 0xfc76779e, 0x00f7f9a3, 0xffee90eb, 0xfff05a60, 0x0003a8d2,
+    0x1c072823, 0xf00c14e1, 0x087b1ebc, 0xfc709c4d, 0x00fe53ef, 0xffeb3ab8, 0xfff16a4a, 0x00037e65,
+    0x1b1613ff, 0xf0487b98, 0x0871ae0d, 0xfc6b4233, 0x0104772e, 0xffe7f666, 0xfff275db, 0x000354e5,
+    0x1a26501b, 0xf0860962, 0x08675516, 0xfc66687a, 0x010a6353, 0xffe4c41e, 0xfff37d00, 0x00032c54,
+    0x1937ea47, 0xf0c4b3e0, 0x085c1794, 0xfc620e3d, 0x01101858, 0xffe1a408, 0xfff47fa5, 0x000304b7,
+    0x184af025, 0xf10470b0, 0x084ff957, 0xfc5e328c, 0x0115963d, 0xffde9646, 0xfff57db8, 0x0002de0e,
+    0x175f6f2b, 0xf1453571, 0x0842fe3d, 0xfc5ad465, 0x011add0b, 0xffdb9af8, 0xfff67729, 0x0002b85f,
+    0x1675749e, 0xf186f7c0, 0x08352a35, 0xfc57f2be, 0x011fecd3, 0xffd8b23b, 0xfff76be9, 0x000293aa,
+    0x158d0d95, 0xf1c9ad40, 0x0826813e, 0xfc558c7c, 0x0124c5ab, 0xffd5dc28, 0xfff85be8, 0x00026ff2,
+    0x14a646f6, 0xf20d4b92, 0x08170767, 0xfc53a07b, 0x012967b1, 0xffd318d6, 0xfff9471b, 0x00024d39,
+    0x13c12d73, 0xf251c85d, 0x0806c0cb, 0xfc522d88, 0x012dd30a, 0xffd06858, 0xfffa2d74, 0x00022b7f,
+    0x12ddcd8f, 0xf297194d, 0x07f5b193, 0xfc513266, 0x013207e4, 0xffcdcabe, 0xfffb0ee9, 0x00020ac7,
+    0x11fc3395, 0xf2dd3411, 0x07e3ddf7, 0xfc50adcc, 0x01360670, 0xffcb4014, 0xfffbeb70, 0x0001eb10,
+    0x111c6ba0, 0xf3240e61, 0x07d14a38, 0xfc509e64, 0x0139cee9, 0xffc8c866, 0xfffcc300, 0x0001cc5c,
diff --git a/services/audioflinger/AudioStreamOut.cpp b/services/audioflinger/AudioStreamOut.cpp
new file mode 100644
index 0000000..b6d1be7
--- /dev/null
+++ b/services/audioflinger/AudioStreamOut.cpp
@@ -0,0 +1,191 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+AudioStreamOut::AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags)
+        : audioHwDev(dev)
+        , stream(NULL)
+        , flags(flags)
+        , mFramesWritten(0)
+        , mFramesWrittenAtStandby(0)
+        , mRenderPosition(0)
+        , mRateMultiplier(1)
+        , mHalFormatIsLinearPcm(false)
+        , mHalFrameSize(0)
+{
+}
+
+audio_hw_device_t *AudioStreamOut::hwDev() const
+{
+    return audioHwDev->hwDevice();
+}
+
+status_t AudioStreamOut::getRenderPosition(uint64_t *frames)
+{
+    if (stream == NULL) {
+        return NO_INIT;
+    }
+
+    uint32_t halPosition = 0;
+    status_t status = stream->get_render_position(stream, &halPosition);
+    if (status != NO_ERROR) {
+        return status;
+    }
+
+    // Maintain a 64-bit render position using the 32-bit result from the HAL.
+    // This delta calculation relies on the arithmetic overflow behavior
+    // of integers. For example (100 - 0xFFFFFFF0) = 116.
+    uint32_t truncatedPosition = (uint32_t)mRenderPosition;
+    int32_t deltaHalPosition = (int32_t)(halPosition - truncatedPosition);
+    if (deltaHalPosition > 0) {
+        mRenderPosition += deltaHalPosition;
+    }
+    // Scale from HAL sample rate to application rate.
+    *frames = mRenderPosition / mRateMultiplier;
+
+    return status;
+}
+
+// return bottom 32-bits of the render position
+status_t AudioStreamOut::getRenderPosition(uint32_t *frames)
+{
+    uint64_t position64 = 0;
+    status_t status = getRenderPosition(&position64);
+    if (status == NO_ERROR) {
+        *frames = (uint32_t)position64;
+    }
+    return status;
+}
+
+status_t AudioStreamOut::getPresentationPosition(uint64_t *frames, struct timespec *timestamp)
+{
+    if (stream == NULL) {
+        return NO_INIT;
+    }
+
+    uint64_t halPosition = 0;
+    status_t status = stream->get_presentation_position(stream, &halPosition, timestamp);
+    if (status != NO_ERROR) {
+        return status;
+    }
+
+    // Adjust for standby using HAL rate frames.
+    // Only apply this correction if the HAL is getting PCM frames.
+    if (mHalFormatIsLinearPcm) {
+        uint64_t adjustedPosition = (halPosition <= mFramesWrittenAtStandby) ?
+                0 : (halPosition - mFramesWrittenAtStandby);
+        // Scale from HAL sample rate to application rate.
+        *frames = adjustedPosition / mRateMultiplier;
+    } else {
+        // For offloaded MP3 and other compressed formats.
+        *frames = halPosition;
+    }
+
+    return status;
+}
+
+status_t AudioStreamOut::open(
+        audio_io_handle_t handle,
+        audio_devices_t devices,
+        struct audio_config *config,
+        const char *address)
+{
+    audio_stream_out_t *outStream;
+    int status = hwDev()->open_output_stream(
+            hwDev(),
+            handle,
+            devices,
+            flags,
+            config,
+            &outStream,
+            address);
+    ALOGV("AudioStreamOut::open(), HAL open_output_stream returned "
+            " %p, sampleRate %d, Format %#x, "
+            "channelMask %#x, status %d",
+            outStream,
+            config->sample_rate,
+            config->format,
+            config->channel_mask,
+            status);
+
+    if (status == NO_ERROR) {
+        stream = outStream;
+        mHalFormatIsLinearPcm = audio_is_linear_pcm(config->format);
+        ALOGI("AudioStreamOut::open(), mHalFormatIsLinearPcm = %d", (int)mHalFormatIsLinearPcm);
+        mHalFrameSize = audio_stream_out_frame_size(stream);
+    }
+
+    return status;
+}
+
+audio_format_t AudioStreamOut::getFormat() const
+{
+    return stream->common.get_format(&stream->common);
+}
+
+uint32_t AudioStreamOut::getSampleRate() const
+{
+    return stream->common.get_sample_rate(&stream->common);
+}
+
+audio_channel_mask_t AudioStreamOut::getChannelMask() const
+{
+    return stream->common.get_channels(&stream->common);
+}
+
+int AudioStreamOut::flush()
+{
+    ALOG_ASSERT(stream != NULL);
+    mRenderPosition = 0;
+    mFramesWritten = 0;
+    mFramesWrittenAtStandby = 0;
+    if (stream->flush != NULL) {
+        return stream->flush(stream);
+    }
+    return NO_ERROR;
+}
+
+int AudioStreamOut::standby()
+{
+    ALOG_ASSERT(stream != NULL);
+    mRenderPosition = 0;
+    mFramesWrittenAtStandby = mFramesWritten;
+    return stream->common.standby(&stream->common);
+}
+
+ssize_t AudioStreamOut::write(const void *buffer, size_t numBytes)
+{
+    ALOG_ASSERT(stream != NULL);
+    ssize_t bytesWritten = stream->write(stream, buffer, numBytes);
+    if (bytesWritten > 0 && mHalFrameSize > 0) {
+        mFramesWritten += bytesWritten / mHalFrameSize;
+    }
+    return bytesWritten;
+}
+
+} // namespace android
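
The 64-bit render position above is maintained purely through unsigned 32-bit modular arithmetic: subtracting the truncated previous position from the new HAL counter yields the correct forward delta even across a wrap, as the in-code comment notes with (100 - 0xFFFFFFF0) = 116. A minimal standalone sketch of the same technique (the helper and the main() driver are illustrative, not part of the patch):

    #include <cstdint>
    #include <cstdio>

    // Extend a wrapping 32-bit HAL frame counter into a monotonic 64-bit position.
    static uint64_t extendPosition(uint64_t position64, uint32_t halPosition32)
    {
        // Unsigned subtraction wraps modulo 2^32, so the delta is still correct
        // after halPosition32 rolls over past 0xFFFFFFFF.
        const uint32_t truncated = (uint32_t)position64;
        const int32_t delta = (int32_t)(halPosition32 - truncated);
        if (delta > 0) {        // ignore small backward jitter reported by the HAL
            position64 += delta;
        }
        return position64;
    }

    int main()
    {
        uint64_t pos = 0xFFFFFFF0;                      // just before a 32-bit wrap
        pos = extendPosition(pos, 100u);                // HAL counter has wrapped to 100
        std::printf("%llu\n", (unsigned long long)pos); // 4294967396 = 0xFFFFFFF0 + 116
        return 0;
    }

Casting the difference to a signed value is what lets a slightly stale HAL reading be ignored while genuine forward progress, wrapped or not, still accumulates.
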
diff --git a/services/audioflinger/AudioStreamOut.h b/services/audioflinger/AudioStreamOut.h
new file mode 100644
index 0000000..06a2277
--- /dev/null
+++ b/services/audioflinger/AudioStreamOut.h
@@ -0,0 +1,115 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_AUDIO_STREAM_OUT_H
+#define ANDROID_AUDIO_STREAM_OUT_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+
+#include "AudioStreamOut.h"
+
+namespace android {
+
+class AudioHwDevice;
+
+/**
+ * Managed access to a HAL output stream.
+ */
+class AudioStreamOut {
+public:
+// AudioStreamOut is immutable, so its fields are const.
+// For emphasis, we could also make all pointers to them be "const *",
+// but that would clutter the code unnecessarily.
+    AudioHwDevice * const audioHwDev;
+    audio_stream_out_t *stream;
+    const audio_output_flags_t flags;
+
+    audio_hw_device_t *hwDev() const;
+
+    AudioStreamOut(AudioHwDevice *dev, audio_output_flags_t flags);
+
+    virtual status_t open(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            const char *address);
+
+    virtual ~AudioStreamOut() { }
+
+    // Get the bottom 32-bits of the 64-bit render position.
+    status_t getRenderPosition(uint32_t *frames);
+
+    virtual status_t getRenderPosition(uint64_t *frames);
+
+    virtual status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp);
+
+    /**
+     * Write audio buffer to driver. Returns the number of bytes written, or a
+     * negative status_t. If at least one frame was written successfully prior to the error,
+     * it is suggested that the driver return that successful (short) byte count
+     * and then return an error in the subsequent call.
+     *
+     * If set_callback() has previously been called to enable non-blocking mode,
+     * the write() is not allowed to block. It must write only the number of
+     * bytes that currently fit in the driver/hardware buffer and then return
+     * this byte count. If this is less than the requested write size, the
+     * callback function must be called when more space is available in the
+     * driver/hardware buffer.
+     */
+    virtual ssize_t write(const void *buffer, size_t bytes);
+
+    /**
+     * @return frame size from the perspective of the application and the AudioFlinger.
+     */
+    virtual size_t getFrameSize() const { return mHalFrameSize; }
+
+    /**
+     * @return format from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_format_t getFormat() const;
+
+    /**
+     * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
+     * @return sample rate from the perspective of the application and the AudioFlinger.
+     */
+    virtual uint32_t getSampleRate() const;
+
+    /**
+     * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
+     * @return channel mask from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_channel_mask_t getChannelMask() const;
+
+
+    virtual status_t flush();
+    virtual status_t standby();
+
+protected:
+    uint64_t             mFramesWritten; // reset by flush
+    uint64_t             mFramesWrittenAtStandby;
+    uint64_t             mRenderPosition; // reset by flush or standby
+    int                  mRateMultiplier;
+    bool                 mHalFormatIsLinearPcm;
+    size_t               mHalFrameSize;
+};
+
+} // namespace android
+
+#endif // ANDROID_AUDIO_STREAM_OUT_H
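
For orientation, here is a rough sketch of how a caller might drive this wrapper end to end: open the HAL stream, write interleaved PCM, query the presentation position, then enter standby. The function name, configuration values, and error handling are assumptions for illustration; inside AudioFlinger the handle, device, and buffers come from the owning playback thread, and the HAL stream is closed outside this class (no close() appears in this hunk).

    // Hypothetical caller sketch; assumes the usual AudioFlinger/HAL headers are in scope.
    status_t playSomePcm(AudioHwDevice *hwDev, audio_io_handle_t handle,
            const void *pcmBuffer, size_t numBytes)
    {
        AudioStreamOut out(hwDev, AUDIO_OUTPUT_FLAG_NONE);

        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
        config.sample_rate = 48000;
        config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
        config.format = AUDIO_FORMAT_PCM_16_BIT;

        status_t status = out.open(handle, AUDIO_DEVICE_OUT_SPEAKER, &config, "" /*address*/);
        if (status != NO_ERROR) {
            return status;
        }

        ssize_t written = out.write(pcmBuffer, numBytes);   // byte count, not frames
        if (written < 0) {
            return (status_t) written;
        }

        uint64_t frames = 0;
        struct timespec when = {};
        if (out.getPresentationPosition(&frames, &when) == NO_ERROR) {
            // frames is reported at the application rate (HAL position / mRateMultiplier).
        }
        return out.standby();
    }
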
diff --git a/services/audioflinger/BufferProviders.cpp b/services/audioflinger/BufferProviders.cpp
new file mode 100644
index 0000000..a8be206
--- /dev/null
+++ b/services/audioflinger/BufferProviders.cpp
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "BufferProvider"
+//#define LOG_NDEBUG 0
+
+#include <audio_effects/effect_downmix.h>
+#include <audio_utils/primitives.h>
+#include <audio_utils/format.h>
+#include <media/AudioResamplerPublic.h>
+#include <media/EffectsFactoryApi.h>
+
+#include <utils/Log.h>
+
+#include "Configuration.h"
+#include "BufferProviders.h"
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x)/sizeof((x)[0]))
+#endif
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+template <typename T>
+static inline T min(const T& a, const T& b)
+{
+    return a < b ? a : b;
+}
+
+CopyBufferProvider::CopyBufferProvider(size_t inputFrameSize,
+        size_t outputFrameSize, size_t bufferFrameCount) :
+        mInputFrameSize(inputFrameSize),
+        mOutputFrameSize(outputFrameSize),
+        mLocalBufferFrameCount(bufferFrameCount),
+        mLocalBufferData(NULL),
+        mConsumed(0)
+{
+    ALOGV("CopyBufferProvider(%p)(%zu, %zu, %zu)", this,
+            inputFrameSize, outputFrameSize, bufferFrameCount);
+    LOG_ALWAYS_FATAL_IF(inputFrameSize < outputFrameSize && bufferFrameCount == 0,
+            "Requires local buffer if inputFrameSize(%zu) < outputFrameSize(%zu)",
+            inputFrameSize, outputFrameSize);
+    if (mLocalBufferFrameCount) {
+        (void)posix_memalign(&mLocalBufferData, 32, mLocalBufferFrameCount * mOutputFrameSize);
+    }
+    mBuffer.frameCount = 0;
+}
+
+CopyBufferProvider::~CopyBufferProvider()
+{
+    ALOGV("~CopyBufferProvider(%p)", this);
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    free(mLocalBufferData);
+}
+
+status_t CopyBufferProvider::getNextBuffer(AudioBufferProvider::Buffer *pBuffer,
+        int64_t pts)
+{
+    //ALOGV("CopyBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+    //        this, pBuffer, pBuffer->frameCount, pts);
+    if (mLocalBufferFrameCount == 0) {
+        status_t res = mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+        if (res == OK) {
+            copyFrames(pBuffer->raw, pBuffer->raw, pBuffer->frameCount);
+        }
+        return res;
+    }
+    if (mBuffer.frameCount == 0) {
+        mBuffer.frameCount = pBuffer->frameCount;
+        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+        // At one time an upstream buffer provider had
+        // res == OK and mBuffer.frameCount == 0, doesn't seem to happen now 7/18/2014.
+        //
+        // By API spec, if res != OK, then mBuffer.frameCount == 0.
+        // but there may be improper implementations.
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+            pBuffer->raw = NULL;
+            pBuffer->frameCount = 0;
+            return res;
+        }
+        mConsumed = 0;
+    }
+    ALOG_ASSERT(mConsumed < mBuffer.frameCount);
+    size_t count = min(mLocalBufferFrameCount, mBuffer.frameCount - mConsumed);
+    count = min(count, pBuffer->frameCount);
+    pBuffer->raw = mLocalBufferData;
+    pBuffer->frameCount = count;
+    copyFrames(pBuffer->raw, (uint8_t*)mBuffer.raw + mConsumed * mInputFrameSize,
+            pBuffer->frameCount);
+    return OK;
+}
+
+void CopyBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    //ALOGV("CopyBufferProvider(%p)::releaseBuffer(%p(%zu))",
+    //        this, pBuffer, pBuffer->frameCount);
+    if (mLocalBufferFrameCount == 0) {
+        mTrackBufferProvider->releaseBuffer(pBuffer);
+        return;
+    }
+    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+    mConsumed += pBuffer->frameCount; // TODO: update for efficiency to reuse existing content
+    if (mConsumed != 0 && mConsumed >= mBuffer.frameCount) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+        ALOG_ASSERT(mBuffer.frameCount == 0);
+    }
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+void CopyBufferProvider::reset()
+{
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    mConsumed = 0;
+}
+
+DownmixerBufferProvider::DownmixerBufferProvider(
+        audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount) :
+        CopyBufferProvider(
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(inputChannelMask),
+            audio_bytes_per_sample(format) * audio_channel_count_from_out_mask(outputChannelMask),
+            bufferFrameCount)  // set bufferFrameCount to 0 to do in-place
+{
+    ALOGV("DownmixerBufferProvider(%p)(%#x, %#x, %#x %u %d)",
+            this, inputChannelMask, outputChannelMask, format,
+            sampleRate, sessionId);
+    if (!sIsMultichannelCapable
+            || EffectCreate(&sDwnmFxDesc.uuid,
+                    sessionId,
+                    SESSION_ID_INVALID_AND_IGNORED,
+                    &mDownmixHandle) != 0) {
+         ALOGE("DownmixerBufferProvider() error creating downmixer effect");
+         mDownmixHandle = NULL;
+         return;
+     }
+     // channel input configuration will be overridden per-track
+     mDownmixConfig.inputCfg.channels = inputChannelMask;   // FIXME: Should be bits
+     mDownmixConfig.outputCfg.channels = outputChannelMask; // FIXME: should be bits
+     mDownmixConfig.inputCfg.format = format;
+     mDownmixConfig.outputCfg.format = format;
+     mDownmixConfig.inputCfg.samplingRate = sampleRate;
+     mDownmixConfig.outputCfg.samplingRate = sampleRate;
+     mDownmixConfig.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
+     mDownmixConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
+     // input and output buffer provider, and frame count will not be used as the downmix effect
+     // process() function is called directly (see DownmixerBufferProvider::getNextBuffer())
+     mDownmixConfig.inputCfg.mask = EFFECT_CONFIG_SMP_RATE | EFFECT_CONFIG_CHANNELS |
+             EFFECT_CONFIG_FORMAT | EFFECT_CONFIG_ACC_MODE;
+     mDownmixConfig.outputCfg.mask = mDownmixConfig.inputCfg.mask;
+
+     int cmdStatus;
+     uint32_t replySize = sizeof(int);
+
+     // Configure downmixer
+     status_t status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_CONFIG /*cmdCode*/, sizeof(effect_config_t) /*cmdSize*/,
+             &mDownmixConfig /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while configuring downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Enable downmixer
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_ENABLE /*cmdCode*/, 0 /*cmdSize*/, NULL /*pCmdData*/,
+             &replySize, &cmdStatus /*pReplyData*/);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while enabling downmixer",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+
+     // Set downmix type
+     // parameter size rounded for padding on 32bit boundary
+     const int psizePadded = ((sizeof(downmix_params_t) - 1)/sizeof(int) + 1) * sizeof(int);
+     const int downmixParamSize =
+             sizeof(effect_param_t) + psizePadded + sizeof(downmix_type_t);
+     effect_param_t * const param = (effect_param_t *) malloc(downmixParamSize);
+     param->psize = sizeof(downmix_params_t);
+     const downmix_params_t downmixParam = DOWNMIX_PARAM_TYPE;
+     memcpy(param->data, &downmixParam, param->psize);
+     const downmix_type_t downmixType = DOWNMIX_TYPE_FOLD;
+     param->vsize = sizeof(downmix_type_t);
+     memcpy(param->data + psizePadded, &downmixType, param->vsize);
+     replySize = sizeof(int);
+     status = (*mDownmixHandle)->command(mDownmixHandle,
+             EFFECT_CMD_SET_PARAM /* cmdCode */, downmixParamSize /* cmdSize */,
+             param /*pCmdData*/, &replySize, &cmdStatus /*pReplyData*/);
+     free(param);
+     if (status != 0 || cmdStatus != 0) {
+         ALOGE("DownmixerBufferProvider() error %d cmdStatus %d while setting downmix type",
+                 status, cmdStatus);
+         EffectRelease(mDownmixHandle);
+         mDownmixHandle = NULL;
+         return;
+     }
+     ALOGV("DownmixerBufferProvider() downmix type set to %d", (int) downmixType);
+}
+
+DownmixerBufferProvider::~DownmixerBufferProvider()
+{
+    ALOGV("~DownmixerBufferProvider (%p)", this);
+    EffectRelease(mDownmixHandle);
+    mDownmixHandle = NULL;
+}
+
+void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    mDownmixConfig.inputCfg.buffer.frameCount = frames;
+    mDownmixConfig.inputCfg.buffer.raw = const_cast<void *>(src);
+    mDownmixConfig.outputCfg.buffer.frameCount = frames;
+    mDownmixConfig.outputCfg.buffer.raw = dst;
+    // may be in-place if src == dst.
+    status_t res = (*mDownmixHandle)->process(mDownmixHandle,
+            &mDownmixConfig.inputCfg.buffer, &mDownmixConfig.outputCfg.buffer);
+    ALOGE_IF(res != OK, "DownmixBufferProvider error %d", res);
+}
+
+/* call once in a pthread_once handler. */
+/*static*/ status_t DownmixerBufferProvider::init()
+{
+    // find multichannel downmix effect if we have to play multichannel content
+    uint32_t numEffects = 0;
+    int ret = EffectQueryNumberEffects(&numEffects);
+    if (ret != 0) {
+        ALOGE("AudioMixer() error %d querying number of effects", ret);
+        return NO_INIT;
+    }
+    ALOGV("EffectQueryNumberEffects() numEffects=%d", numEffects);
+
+    for (uint32_t i = 0 ; i < numEffects ; i++) {
+        if (EffectQueryEffect(i, &sDwnmFxDesc) == 0) {
+            ALOGV("effect %d is called %s", i, sDwnmFxDesc.name);
+            if (memcmp(&sDwnmFxDesc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+                ALOGI("found effect \"%s\" from %s",
+                        sDwnmFxDesc.name, sDwnmFxDesc.implementor);
+                sIsMultichannelCapable = true;
+                break;
+            }
+        }
+    }
+    ALOGW_IF(!sIsMultichannelCapable, "unable to find downmix effect");
+    return NO_INIT;
+}
+
+/*static*/ bool DownmixerBufferProvider::sIsMultichannelCapable = false;
+/*static*/ effect_descriptor_t DownmixerBufferProvider::sDwnmFxDesc;
+
+RemixBufferProvider::RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+        audio_channel_mask_t outputChannelMask, audio_format_t format,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(inputChannelMask),
+                audio_bytes_per_sample(format)
+                    * audio_channel_count_from_out_mask(outputChannelMask),
+                bufferFrameCount),
+        mFormat(format),
+        mSampleSize(audio_bytes_per_sample(format)),
+        mInputChannels(audio_channel_count_from_out_mask(inputChannelMask)),
+        mOutputChannels(audio_channel_count_from_out_mask(outputChannelMask))
+{
+    ALOGV("RemixBufferProvider(%p)(%#x, %#x, %#x) %zu %zu",
+            this, format, inputChannelMask, outputChannelMask,
+            mInputChannels, mOutputChannels);
+    (void) memcpy_by_index_array_initialization_from_channel_mask(
+            mIdxAry, ARRAY_SIZE(mIdxAry), outputChannelMask, inputChannelMask);
+}
+
+void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_index_array(dst, mOutputChannels,
+            src, mInputChannels, mIdxAry, mSampleSize, frames);
+}
+
+ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
+        audio_format_t inputFormat, audio_format_t outputFormat,
+        size_t bufferFrameCount) :
+        CopyBufferProvider(
+                channelCount * audio_bytes_per_sample(inputFormat),
+                channelCount * audio_bytes_per_sample(outputFormat),
+                bufferFrameCount),
+        mChannelCount(channelCount),
+        mInputFormat(inputFormat),
+        mOutputFormat(outputFormat)
+{
+    ALOGV("ReformatBufferProvider(%p)(%u, %#x, %#x)",
+            this, channelCount, inputFormat, outputFormat);
+}
+
+void ReformatBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+    memcpy_by_audio_format(dst, mOutputFormat, src, mInputFormat, frames * mChannelCount);
+}
+
+TimestretchBufferProvider::TimestretchBufferProvider(int32_t channelCount,
+        audio_format_t format, uint32_t sampleRate, const AudioPlaybackRate &playbackRate) :
+        mChannelCount(channelCount),
+        mFormat(format),
+        mSampleRate(sampleRate),
+        mFrameSize(channelCount * audio_bytes_per_sample(format)),
+        mLocalBufferFrameCount(0),
+        mLocalBufferData(NULL),
+        mRemaining(0),
+        mSonicStream(sonicCreateStream(sampleRate, mChannelCount)),
+        mFallbackFailErrorShown(false),
+        mAudioPlaybackRateValid(false)
+{
+    LOG_ALWAYS_FATAL_IF(mSonicStream == NULL,
+            "TimestretchBufferProvider can't allocate Sonic stream");
+
+    setPlaybackRate(playbackRate);
+    ALOGV("TimestretchBufferProvider(%p)(%u, %#x, %u %f %f %d %d)",
+            this, channelCount, format, sampleRate, playbackRate.mSpeed,
+            playbackRate.mPitch, playbackRate.mStretchMode, playbackRate.mFallbackMode);
+    mBuffer.frameCount = 0;
+}
+
+TimestretchBufferProvider::~TimestretchBufferProvider()
+{
+    ALOGV("~TimestretchBufferProvider(%p)", this);
+    sonicDestroyStream(mSonicStream);
+    if (mBuffer.frameCount != 0) {
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    }
+    free(mLocalBufferData);
+}
+
+status_t TimestretchBufferProvider::getNextBuffer(
+        AudioBufferProvider::Buffer *pBuffer, int64_t pts)
+{
+    ALOGV("TimestretchBufferProvider(%p)::getNextBuffer(%p (%zu), %lld)",
+            this, pBuffer, pBuffer->frameCount, pts);
+
+    // BYPASS
+    //return mTrackBufferProvider->getNextBuffer(pBuffer, pts);
+
+    // check if previously processed data is sufficient.
+    if (pBuffer->frameCount <= mRemaining) {
+        ALOGV("previous sufficient");
+        pBuffer->raw = mLocalBufferData;
+        return OK;
+    }
+
+    // do we need to resize our buffer?
+    if (pBuffer->frameCount > mLocalBufferFrameCount) {
+        void *newmem;
+        if (posix_memalign(&newmem, 32, pBuffer->frameCount * mFrameSize) == OK) {
+            if (mRemaining != 0) {
+                memcpy(newmem, mLocalBufferData, mRemaining * mFrameSize);
+            }
+            free(mLocalBufferData);
+            mLocalBufferData = newmem;
+            mLocalBufferFrameCount = pBuffer->frameCount;
+        }
+    }
+
+    // need to fetch more data
+    const size_t outputDesired = pBuffer->frameCount - mRemaining;
+    size_t dstAvailable;
+    do {
+        mBuffer.frameCount = mPlaybackRate.mSpeed == AUDIO_TIMESTRETCH_SPEED_NORMAL
+                ? outputDesired : outputDesired * mPlaybackRate.mSpeed + 1;
+
+        status_t res = mTrackBufferProvider->getNextBuffer(&mBuffer, pts);
+
+        ALOG_ASSERT(res == OK || mBuffer.frameCount == 0);
+        if (res != OK || mBuffer.frameCount == 0) { // not needed by API spec, but to be safe.
+            ALOGV("upstream provider cannot provide data");
+            if (mRemaining == 0) {
+                pBuffer->raw = NULL;
+                pBuffer->frameCount = 0;
+                return res;
+            } else { // return partial count
+                pBuffer->raw = mLocalBufferData;
+                pBuffer->frameCount = mRemaining;
+                return OK;
+            }
+        }
+
+        // time-stretch the data
+        dstAvailable = min(mLocalBufferFrameCount - mRemaining, outputDesired);
+        size_t srcAvailable = mBuffer.frameCount;
+        processFrames((uint8_t*)mLocalBufferData + mRemaining * mFrameSize, &dstAvailable,
+                mBuffer.raw, &srcAvailable);
+
+        // release all data consumed
+        mBuffer.frameCount = srcAvailable;
+        mTrackBufferProvider->releaseBuffer(&mBuffer);
+    } while (dstAvailable == 0); // try until we get output data or upstream provider fails.
+
+    // update buffer vars with the actual data processed and return with buffer
+    mRemaining += dstAvailable;
+
+    pBuffer->raw = mLocalBufferData;
+    pBuffer->frameCount = mRemaining;
+
+    return OK;
+}
+
+void TimestretchBufferProvider::releaseBuffer(AudioBufferProvider::Buffer *pBuffer)
+{
+    ALOGV("TimestretchBufferProvider(%p)::releaseBuffer(%p (%zu))",
+            this, pBuffer, pBuffer->frameCount);
+
+    // BYPASS
+    //return mTrackBufferProvider->releaseBuffer(pBuffer);
+
+    // LOG_ALWAYS_FATAL_IF(pBuffer->frameCount == 0, "Invalid framecount");
+    if (pBuffer->frameCount < mRemaining) {
+        memcpy(mLocalBufferData,
+                (uint8_t*)mLocalBufferData + pBuffer->frameCount * mFrameSize,
+                (mRemaining - pBuffer->frameCount) * mFrameSize);
+        mRemaining -= pBuffer->frameCount;
+    } else if (pBuffer->frameCount == mRemaining) {
+        mRemaining = 0;
+    } else {
+        LOG_ALWAYS_FATAL("Releasing more frames(%zu) than available(%zu)",
+                pBuffer->frameCount, mRemaining);
+    }
+
+    pBuffer->raw = NULL;
+    pBuffer->frameCount = 0;
+}
+
+void TimestretchBufferProvider::reset()
+{
+    mRemaining = 0;
+}
+
+status_t TimestretchBufferProvider::setPlaybackRate(const AudioPlaybackRate &playbackRate)
+{
+    mPlaybackRate = playbackRate;
+    mFallbackFailErrorShown = false;
+    sonicSetSpeed(mSonicStream, mPlaybackRate.mSpeed);
+    //TODO: pitch is ignored for now
+    //TODO: optimize: if parameters are the same, don't do any extra computation.
+
+    mAudioPlaybackRateValid = isAudioPlaybackRateValid(mPlaybackRate);
+    return OK;
+}
+
+void TimestretchBufferProvider::processFrames(void *dstBuffer, size_t *dstFrames,
+        const void *srcBuffer, size_t *srcFrames)
+{
+    ALOGV("processFrames(%zu %zu)  remaining(%zu)", *dstFrames, *srcFrames, mRemaining);
+    // Note dstFrames is the required number of frames.
+
+    // Ensure consumption from src is as expected.
+    //TODO: add logic to track "very accurate" consumption related to speed, original sampling
+    //rate, actual frames processed.
+    const size_t targetSrc = *dstFrames * mPlaybackRate.mSpeed;
+    if (*srcFrames < targetSrc) { // limit dst frames to that possible
+        *dstFrames = *srcFrames / mPlaybackRate.mSpeed;
+    } else if (*srcFrames > targetSrc + 1) {
+        *srcFrames = targetSrc + 1;
+    }
+
+    if (!mAudioPlaybackRateValid) {
+        //fallback mode
+        if (*dstFrames > 0) {
+            switch (mPlaybackRate.mFallbackMode) {
+            case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
+                if (*dstFrames <= *srcFrames) {
+                    size_t copySize = mFrameSize * *dstFrames;
+                    memcpy(dstBuffer, srcBuffer, copySize);
+                } else {
+                    // cyclically repeat the source.
+                    for (size_t count = 0; count < *dstFrames; count += *srcFrames) {
+                        size_t remaining = min(*srcFrames, *dstFrames - count);
+                        memcpy((uint8_t*)dstBuffer + mFrameSize * count,
+                                srcBuffer, mFrameSize * remaining);
+                    }
+                }
+                break;
+            case AUDIO_TIMESTRETCH_FALLBACK_DEFAULT:
+            case AUDIO_TIMESTRETCH_FALLBACK_MUTE:
+                memset(dstBuffer, 0, mFrameSize * *dstFrames);
+                break;
+            case AUDIO_TIMESTRETCH_FALLBACK_FAIL:
+            default:
+                if (!mFallbackFailErrorShown) {
+                    ALOGE("invalid parameters in TimestretchBufferProvider fallbackMode:%d",
+                            mPlaybackRate.mFallbackMode);
+                    mFallbackFailErrorShown = true;
+                }
+                break;
+            }
+        }
+    } else {
+        switch (mFormat) {
+        case AUDIO_FORMAT_PCM_FLOAT:
+            if (sonicWriteFloatToStream(mSonicStream, (float*)srcBuffer, *srcFrames) != 1) {
+                ALOGE("sonicWriteFloatToStream cannot realloc");
+                *srcFrames = 0; // cannot consume all of srcBuffer
+            }
+            *dstFrames = sonicReadFloatFromStream(mSonicStream, (float*)dstBuffer, *dstFrames);
+            break;
+        case AUDIO_FORMAT_PCM_16_BIT:
+            if (sonicWriteShortToStream(mSonicStream, (short*)srcBuffer, *srcFrames) != 1) {
+                ALOGE("sonicWriteShortToStream cannot realloc");
+                *srcFrames = 0; // cannot consume all of srcBuffer
+            }
+            *dstFrames = sonicReadShortFromStream(mSonicStream, (short*)dstBuffer, *dstFrames);
+            break;
+        default:
+            // could also be caught on construction
+            LOG_ALWAYS_FATAL("invalid format %#x for TimestretchBufferProvider", mFormat);
+        }
+    }
+}
+// ----------------------------------------------------------------------------
+} // namespace android
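
In processFrames() above, source consumption is kept roughly proportional to the playback speed: targetSrc = dstFrames * speed, the destination request is trimmed when the source cannot cover it, and the source is trimmed to targetSrc + 1 otherwise. A small standalone worked example of that clamping (the helper and the numbers are illustrative):

    #include <cstddef>
    #include <cstdio>

    // Mirrors the clamping at the top of TimestretchBufferProvider::processFrames().
    static void clampFrames(size_t *dstFrames, size_t *srcFrames, float speed)
    {
        const size_t targetSrc = *dstFrames * speed;
        if (*srcFrames < targetSrc) {
            *dstFrames = *srcFrames / speed;    // limit output to what the input allows
        } else if (*srcFrames > targetSrc + 1) {
            *srcFrames = targetSrc + 1;         // consume only what is needed, plus one frame of slack
        }
    }

    int main()
    {
        size_t dst = 256, src = 400;
        clampFrames(&dst, &src, 2.0f);              // 2x speed would need 512 source frames
        std::printf("dst=%zu src=%zu\n", dst, src); // prints dst=200 src=400
        return 0;
    }

At 2x speed, 256 output frames would require 512 input frames; with only 400 available, the output request is cut to 200 frames and all 400 input frames stay eligible for consumption.
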
diff --git a/services/audioflinger/BufferProviders.h b/services/audioflinger/BufferProviders.h
new file mode 100644
index 0000000..4bc895c
--- /dev/null
+++ b/services/audioflinger/BufferProviders.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_BUFFER_PROVIDERS_H
+#define ANDROID_BUFFER_PROVIDERS_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <hardware/audio_effect.h>
+#include <media/AudioBufferProvider.h>
+#include <system/audio.h>
+#include <sonic.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+class PassthruBufferProvider : public AudioBufferProvider {
+public:
+    PassthruBufferProvider() : mTrackBufferProvider(NULL) { }
+
+    virtual ~PassthruBufferProvider() { }
+
+    // call this to release the buffer to the upstream provider.
+    // treat it as an audio discontinuity for future samples.
+    virtual void reset() { }
+
+    // set the upstream buffer provider. Consider calling "reset" before this function.
+    virtual void setBufferProvider(AudioBufferProvider *p) {
+        mTrackBufferProvider = p;
+    }
+
+protected:
+    AudioBufferProvider *mTrackBufferProvider;
+};
+
+// Base AudioBufferProvider class used for DownMixerBufferProvider, RemixBufferProvider,
+// and ReformatBufferProvider.
+// It handles a private buffer for use in converting format or channel masks from the
+// input data to a form acceptable by the mixer.
+// TODO: Make a ResamplerBufferProvider when integers are entirely removed from the
+// processing pipeline.
+class CopyBufferProvider : public PassthruBufferProvider {
+public:
+    // Use a private buffer of bufferFrameCount frames (each frame is outputFrameSize bytes).
+    // If bufferFrameCount is 0, no private buffer is created and in-place modification of
+    // the upstream buffer provider's buffers is performed by copyFrames().
+    CopyBufferProvider(size_t inputFrameSize, size_t outputFrameSize,
+            size_t bufferFrameCount);
+    virtual ~CopyBufferProvider();
+
+    // Overrides AudioBufferProvider methods
+    virtual status_t getNextBuffer(Buffer *buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer *buffer);
+
+    // Overrides PassthruBufferProvider
+    virtual void reset();
+
+    // this function should be supplied by the derived class.  It converts
+    // #frames in the *src pointer to the *dst pointer.  It is public because
+    // some providers will allow this to work on arbitrary buffers outside
+    // of the internal buffers.
+    virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
+
+protected:
+    const size_t         mInputFrameSize;
+    const size_t         mOutputFrameSize;
+private:
+    AudioBufferProvider::Buffer mBuffer;
+    const size_t         mLocalBufferFrameCount;
+    void                *mLocalBufferData;
+    size_t               mConsumed;
+};
+
+// DownmixerBufferProvider derives from CopyBufferProvider to provide
+// position dependent downmixing by an Audio Effect.
+class DownmixerBufferProvider : public CopyBufferProvider {
+public:
+    DownmixerBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            uint32_t sampleRate, int32_t sessionId, size_t bufferFrameCount);
+    virtual ~DownmixerBufferProvider();
+    // Overrides
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+    bool isValid() const { return mDownmixHandle != NULL; }
+    static status_t init();
+    static bool isMultichannelCapable() { return sIsMultichannelCapable; }
+
+protected:
+    effect_handle_t    mDownmixHandle;
+    effect_config_t    mDownmixConfig;
+
+    // effect descriptor for the downmixer used by the mixer
+    static effect_descriptor_t sDwnmFxDesc;
+    // indicates whether a downmix effect has been found and is usable by this mixer
+    static bool                sIsMultichannelCapable;
+    // FIXME: should we allow effects outside of the framework?
+    // We need to allow that here: a special ioId that must be <= -2 so it does not map to a session.
+    static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
+};
+
+// RemixBufferProvider derives from CopyBufferProvider to perform an
+// upmix or downmix to the proper channel count and mask.
+class RemixBufferProvider : public CopyBufferProvider {
+public:
+    RemixBufferProvider(audio_channel_mask_t inputChannelMask,
+            audio_channel_mask_t outputChannelMask, audio_format_t format,
+            size_t bufferFrameCount);
+    // Overrides
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const audio_format_t mFormat;
+    const size_t         mSampleSize;
+    const size_t         mInputChannels;
+    const size_t         mOutputChannels;
+    int8_t               mIdxAry[sizeof(uint32_t) * 8]; // 32 bits => channel indices
+};
+
+// ReformatBufferProvider derives from CopyBufferProvider to convert the input data
+// to an acceptable mixer input format type.
+class ReformatBufferProvider : public CopyBufferProvider {
+public:
+    ReformatBufferProvider(int32_t channelCount,
+            audio_format_t inputFormat, audio_format_t outputFormat,
+            size_t bufferFrameCount);
+    virtual void copyFrames(void *dst, const void *src, size_t frames);
+
+protected:
+    const uint32_t       mChannelCount;
+    const audio_format_t mInputFormat;
+    const audio_format_t mOutputFormat;
+};
+
+// TimestretchBufferProvider derives from PassthruBufferProvider for time stretching
+class TimestretchBufferProvider : public PassthruBufferProvider {
+public:
+    TimestretchBufferProvider(int32_t channelCount,
+            audio_format_t format, uint32_t sampleRate,
+            const AudioPlaybackRate &playbackRate);
+    virtual ~TimestretchBufferProvider();
+
+    // Overrides AudioBufferProvider methods
+    virtual status_t getNextBuffer(Buffer* buffer, int64_t pts);
+    virtual void releaseBuffer(Buffer* buffer);
+
+    // Overrides PassthruBufferProvider
+    virtual void reset();
+
+    virtual status_t setPlaybackRate(const AudioPlaybackRate &playbackRate);
+
+    // processes frames
+    // dstBuffer is where to place the data
+    // dstFrames [in/out] is the desired frames (return with actual placed in buffer)
+    // srcBuffer is the source data
+    // srcFrames [in/out] is the available source frames (return with consumed)
+    virtual void processFrames(void *dstBuffer, size_t *dstFrames,
+            const void *srcBuffer, size_t *srcFrames);
+
+protected:
+    const uint32_t       mChannelCount;
+    const audio_format_t mFormat;
+    const uint32_t       mSampleRate; // const for now (TODO change this)
+    const size_t         mFrameSize;
+    AudioPlaybackRate    mPlaybackRate;
+
+private:
+    AudioBufferProvider::Buffer mBuffer;          // for upstream request
+    size_t               mLocalBufferFrameCount;  // size of local buffer
+    void                *mLocalBufferData;        // internally allocated buffer for data returned
+                                                  // to caller
+    size_t               mRemaining;              // remaining data in local buffer
+    sonicStream          mSonicStream;            // handle to sonic timestretch object
+    //FIXME: this dependency should be abstracted out
+    bool                 mFallbackFailErrorShown; // log fallback error only once
+    bool                 mAudioPlaybackRateValid; // flag for current parameters validity
+};
+
+// ----------------------------------------------------------------------------
+} // namespace android
+
+#endif // ANDROID_BUFFER_PROVIDERS_H
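
These providers are designed to be chained: each PassthruBufferProvider wraps an upstream AudioBufferProvider via setBufferProvider(), and the consumer pulls only from the head of the chain. A hypothetical two-stage chain (float-to-16-bit reformat feeding a 5.1-to-stereo remix); the function name, frame counts, and channel masks are illustrative assumptions, not taken from this patch:

    // Hypothetical chain: track -> ReformatBufferProvider -> RemixBufferProvider.
    void pullStereo16(AudioBufferProvider *track)
    {
        ReformatBufferProvider reformat(6 /*channelCount*/,
                AUDIO_FORMAT_PCM_FLOAT, AUDIO_FORMAT_PCM_16_BIT, 512 /*bufferFrameCount*/);
        RemixBufferProvider remix(AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO,
                AUDIO_FORMAT_PCM_16_BIT, 512 /*bufferFrameCount*/);

        reformat.setBufferProvider(track);   // reformat pulls raw float 5.1 frames from the track
        remix.setBufferProvider(&reformat);  // remix pulls already-reformatted 16-bit frames

        AudioBufferProvider::Buffer buf;
        buf.frameCount = 256;                // request up to 256 frames
        if (remix.getNextBuffer(&buf, AudioBufferProvider::kInvalidPTS) == OK) {
            // buf.raw now holds 16-bit stereo frames; consume them, then hand the buffer back.
            remix.releaseBuffer(&buf);
        }
    }

Note that each stage's output frame size must match the next stage's input frame size (here both are 6 channels of 16-bit PCM, 12 bytes per frame); the CopyBufferProvider constructor takes both sizes precisely so each stage can do its conversion through its private buffer.
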
diff --git a/services/audioflinger/Configuration.h b/services/audioflinger/Configuration.h
index 6a8aeb1..845697a 100644
--- a/services/audioflinger/Configuration.h
+++ b/services/audioflinger/Configuration.h
@@ -29,9 +29,8 @@
 // uncomment to display CPU load adjusted for CPU frequency
 //#define CPU_FREQUENCY_STATISTICS
 
-// uncomment to enable fast mixer to take performance samples for later statistical analysis
-#define FAST_MIXER_STATISTICS
-// FIXME rename to FAST_THREAD_STATISTICS
+// uncomment to enable fast threads to take performance samples for later statistical analysis
+#define FAST_THREAD_STATISTICS
 
 // uncomment for debugging timing problems related to StateQueue::push()
 //#define STATE_QUEUE_DUMP
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index bcaf8ae..949c91d 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -335,13 +335,21 @@
 
     // TODO: handle configuration of effects replacing track process
     channelMask = thread->channelMask();
+    mConfig.outputCfg.channels = channelMask;
 
     if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
         mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_MONO;
     } else {
         mConfig.inputCfg.channels = channelMask;
+        // TODO: Update this logic when multichannel effects are implemented.
+        // For offloaded tracks consider mono output as stereo for proper effect initialization
+        if (channelMask == AUDIO_CHANNEL_OUT_MONO) {
+            mConfig.inputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+            mConfig.outputCfg.channels = AUDIO_CHANNEL_OUT_STEREO;
+            ALOGV("Overriding effect input and output as STEREO");
+        }
     }
-    mConfig.outputCfg.channels = channelMask;
+
     mConfig.inputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
     mConfig.outputCfg.format = AUDIO_FORMAT_PCM_16_BIT;
     mConfig.inputCfg.samplingRate = thread->sampleRate();
@@ -1953,4 +1961,4 @@
     }
 }
 
-}; // namespace android
+} // namespace android
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index 0c9b976..79ac12b 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -29,18 +29,18 @@
 
 namespace android {
 
-/*static*/ const FastCaptureState FastCapture::initial;
+/*static*/ const FastCaptureState FastCapture::sInitial;
 
 FastCapture::FastCapture() : FastThread(),
-    inputSource(NULL), inputSourceGen(0), pipeSink(NULL), pipeSinkGen(0),
-    readBuffer(NULL), readBufferState(-1), format(Format_Invalid), sampleRate(0),
-    // dummyDumpState
-    totalNativeFramesRead(0)
+    mInputSource(NULL), mInputSourceGen(0), mPipeSink(NULL), mPipeSinkGen(0),
+    mReadBuffer(NULL), mReadBufferState(-1), mFormat(Format_Invalid), mSampleRate(0),
+    // mDummyDumpState
+    mTotalNativeFramesRead(0)
 {
-    previous = &initial;
-    current = &initial;
+    mPrevious = &sInitial;
+    mCurrent = &sInitial;
 
-    mDummyDumpState = &dummyDumpState;
+    mDummyDumpState = &mDummyFastCaptureDumpState;
 }
 
 FastCapture::~FastCapture()
@@ -63,13 +63,13 @@
 
 void FastCapture::onIdle()
 {
-    preIdle = *(const FastCaptureState *)current;
-    current = &preIdle;
+    mPreIdle = *(const FastCaptureState *)mCurrent;
+    mCurrent = &mPreIdle;
 }
 
 void FastCapture::onExit()
 {
-    delete[] readBuffer;
+    free(mReadBuffer);
 }
 
 bool FastCapture::isSubClassCommand(FastThreadState::Command command)
@@ -86,67 +86,67 @@
 
 void FastCapture::onStateChange()
 {
-    const FastCaptureState * const current = (const FastCaptureState *) this->current;
-    const FastCaptureState * const previous = (const FastCaptureState *) this->previous;
-    FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) this->dumpState;
+    const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
+    const FastCaptureState * const previous = (const FastCaptureState *) mPrevious;
+    FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
     const size_t frameCount = current->mFrameCount;
 
     bool eitherChanged = false;
 
     // check for change in input HAL configuration
-    NBAIO_Format previousFormat = format;
-    if (current->mInputSourceGen != inputSourceGen) {
-        inputSource = current->mInputSource;
-        inputSourceGen = current->mInputSourceGen;
-        if (inputSource == NULL) {
-            format = Format_Invalid;
-            sampleRate = 0;
+    NBAIO_Format previousFormat = mFormat;
+    if (current->mInputSourceGen != mInputSourceGen) {
+        mInputSource = current->mInputSource;
+        mInputSourceGen = current->mInputSourceGen;
+        if (mInputSource == NULL) {
+            mFormat = Format_Invalid;
+            mSampleRate = 0;
         } else {
-            format = inputSource->format();
-            sampleRate = Format_sampleRate(format);
-            unsigned channelCount = Format_channelCount(format);
-            ALOG_ASSERT(channelCount == 1 || channelCount == 2);
+            mFormat = mInputSource->format();
+            mSampleRate = Format_sampleRate(mFormat);
+            unsigned channelCount = Format_channelCount(mFormat);
+            ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
         }
-        dumpState->mSampleRate = sampleRate;
+        dumpState->mSampleRate = mSampleRate;
         eitherChanged = true;
     }
 
     // check for change in pipe
-    if (current->mPipeSinkGen != pipeSinkGen) {
-        pipeSink = current->mPipeSink;
-        pipeSinkGen = current->mPipeSinkGen;
+    if (current->mPipeSinkGen != mPipeSinkGen) {
+        mPipeSink = current->mPipeSink;
+        mPipeSinkGen = current->mPipeSinkGen;
         eitherChanged = true;
     }
 
     // input source and pipe sink must be compatible
-    if (eitherChanged && inputSource != NULL && pipeSink != NULL) {
-        ALOG_ASSERT(Format_isEqual(format, pipeSink->format()));
+    if (eitherChanged && mInputSource != NULL && mPipeSink != NULL) {
+        ALOG_ASSERT(Format_isEqual(mFormat, mPipeSink->format()));
     }
 
-    if ((!Format_isEqual(format, previousFormat)) || (frameCount != previous->mFrameCount)) {
-        // FIXME to avoid priority inversion, don't delete here
-        delete[] readBuffer;
-        readBuffer = NULL;
-        if (frameCount > 0 && sampleRate > 0) {
+    if ((!Format_isEqual(mFormat, previousFormat)) || (frameCount != previous->mFrameCount)) {
+        // FIXME to avoid priority inversion, don't free here
+        free(mReadBuffer);
+        mReadBuffer = NULL;
+        if (frameCount > 0 && mSampleRate > 0) {
             // FIXME new may block for unbounded time at internal mutex of the heap
             //       implementation; it would be better to have normal capture thread allocate for
             //       us to avoid blocking here and to prevent possible priority inversion
-            unsigned channelCount = Format_channelCount(format);
-            // FIXME frameSize
-            readBuffer = new short[frameCount * channelCount];
-            periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
-            underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
-            overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
-            forceNs = (frameCount * 950000000LL) / sampleRate;      // 0.95
-            warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50
+            (void)posix_memalign(&mReadBuffer, 32, frameCount * Format_frameSize(mFormat));
+            mPeriodNs = (frameCount * 1000000000LL) / mSampleRate;      // 1.00
+            mUnderrunNs = (frameCount * 1750000000LL) / mSampleRate;    // 1.75
+            mOverrunNs = (frameCount * 500000000LL) / mSampleRate;      // 0.50
+            mForceNs = (frameCount * 950000000LL) / mSampleRate;        // 0.95
+            mWarmupNsMin = (frameCount * 750000000LL) / mSampleRate;    // 0.75
+            mWarmupNsMax = (frameCount * 1250000000LL) / mSampleRate;   // 1.25
         } else {
-            periodNs = 0;
-            underrunNs = 0;
-            overrunNs = 0;
-            forceNs = 0;
-            warmupNs = 0;
+            mPeriodNs = 0;
+            mUnderrunNs = 0;
+            mOverrunNs = 0;
+            mForceNs = 0;
+            mWarmupNsMin = 0;
+            mWarmupNsMax = LONG_MAX;
         }
-        readBufferState = -1;
+        mReadBufferState = -1;
         dumpState->mFrameCount = frameCount;
     }
 
@@ -154,44 +154,43 @@
 
 void FastCapture::onWork()
 {
-    const FastCaptureState * const current = (const FastCaptureState *) this->current;
-    FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) this->dumpState;
-    const FastCaptureState::Command command = this->command;
+    const FastCaptureState * const current = (const FastCaptureState *) mCurrent;
+    FastCaptureDumpState * const dumpState = (FastCaptureDumpState *) mDumpState;
+    const FastCaptureState::Command command = mCommand;
     const size_t frameCount = current->mFrameCount;
 
     if ((command & FastCaptureState::READ) /*&& isWarm*/) {
-        ALOG_ASSERT(inputSource != NULL);
-        ALOG_ASSERT(readBuffer != NULL);
+        ALOG_ASSERT(mInputSource != NULL);
+        ALOG_ASSERT(mReadBuffer != NULL);
         dumpState->mReadSequence++;
         ATRACE_BEGIN("read");
-        ssize_t framesRead = inputSource->read(readBuffer, frameCount,
+        ssize_t framesRead = mInputSource->read(mReadBuffer, frameCount,
                 AudioBufferProvider::kInvalidPTS);
         ATRACE_END();
         dumpState->mReadSequence++;
         if (framesRead >= 0) {
             LOG_ALWAYS_FATAL_IF((size_t) framesRead > frameCount);
-            totalNativeFramesRead += framesRead;
-            dumpState->mFramesRead = totalNativeFramesRead;
-            readBufferState = framesRead;
+            mTotalNativeFramesRead += framesRead;
+            dumpState->mFramesRead = mTotalNativeFramesRead;
+            mReadBufferState = framesRead;
         } else {
             dumpState->mReadErrors++;
-            readBufferState = 0;
+            mReadBufferState = 0;
         }
         // FIXME rename to attemptedIO
-        attemptedWrite = true;
+        mAttemptedWrite = true;
     }
 
     if (command & FastCaptureState::WRITE) {
-        ALOG_ASSERT(pipeSink != NULL);
-        ALOG_ASSERT(readBuffer != NULL);
-        if (readBufferState < 0) {
-            unsigned channelCount = Format_channelCount(format);
-            // FIXME frameSize
-            memset(readBuffer, 0, frameCount * channelCount * sizeof(short));
-            readBufferState = frameCount;
+        ALOG_ASSERT(mPipeSink != NULL);
+        ALOG_ASSERT(mReadBuffer != NULL);
+        if (mReadBufferState < 0) {
+            unsigned channelCount = Format_channelCount(mFormat);
+            memset(mReadBuffer, 0, frameCount * Format_frameSize(mFormat));
+            mReadBufferState = frameCount;
         }
-        if (readBufferState > 0) {
-            ssize_t framesWritten = pipeSink->write(readBuffer, readBufferState);
+        if (mReadBufferState > 0) {
+            ssize_t framesWritten = mPipeSink->write(mReadBuffer, mReadBufferState);
             // FIXME This supports at most one fast capture client.
             //       To handle multiple clients this could be converted to an array,
             //       or with a lot more work the control block could be shared by all clients.
@@ -210,13 +209,4 @@
     }
 }
 
-FastCaptureDumpState::FastCaptureDumpState() : FastThreadDumpState(),
-    mReadSequence(0), mFramesRead(0), mReadErrors(0), mSampleRate(0), mFrameCount(0)
-{
-}
-
-FastCaptureDumpState::~FastCaptureDumpState()
-{
-}
-
 }   // namespace android
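
All of the fast-thread timing budgets set in onStateChange() above are fixed fractions of one HAL period, frameCount / sampleRate, with the multiplier recorded in each trailing comment. As a worked example (the 192-frame, 48 kHz figures are an illustrative assumption, not values from this patch), the same expressions evaluate to:

    #include <cstdio>

    int main()
    {
        const long long frameCount = 192;       // illustrative fast-capture frame count
        const long long sampleRate = 48000;     // illustrative HAL sample rate

        // Same expressions as FastCapture::onStateChange(); multipliers in comments.
        const long long periodNs    = (frameCount * 1000000000LL) / sampleRate; // 1.00x
        const long long underrunNs  = (frameCount * 1750000000LL) / sampleRate; // 1.75x
        const long long overrunNs   = (frameCount *  500000000LL) / sampleRate; // 0.50x
        const long long forceNs     = (frameCount *  950000000LL) / sampleRate; // 0.95x
        const long long warmupNsMin = (frameCount *  750000000LL) / sampleRate; // 0.75x
        const long long warmupNsMax = (frameCount * 1250000000LL) / sampleRate; // 1.25x

        // Prints: period=4000000 underrun=7000000 overrun=2000000
        //         force=3800000 warmupMin=3000000 warmupMax=5000000   (all in ns)
        std::printf("period=%lld underrun=%lld overrun=%lld force=%lld "
                "warmupMin=%lld warmupMax=%lld\n",
                periodNs, underrunNs, overrunNs, forceNs, warmupNsMin, warmupNsMax);
        return 0;
    }

In other words, a 4 ms period with a 7 ms underrun threshold, a 2 ms overrun threshold, a 3.8 ms forced-cycle budget, and a 3-5 ms warmup window.
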
diff --git a/services/audioflinger/FastCapture.h b/services/audioflinger/FastCapture.h
index e535b9d..e258a4d 100644
--- a/services/audioflinger/FastCapture.h
+++ b/services/audioflinger/FastCapture.h
@@ -20,23 +20,12 @@
 #include "FastThread.h"
 #include "StateQueue.h"
 #include "FastCaptureState.h"
+#include "FastCaptureDumpState.h"
 
 namespace android {
 
 typedef StateQueue<FastCaptureState> FastCaptureStateQueue;
 
-struct FastCaptureDumpState : FastThreadDumpState {
-    FastCaptureDumpState();
-    /*virtual*/ ~FastCaptureDumpState();
-
-    // FIXME by renaming, could pull up many of these to FastThreadDumpState
-    uint32_t mReadSequence;     // incremented before and after each read()
-    uint32_t mFramesRead;       // total number of frames read successfully
-    uint32_t mReadErrors;       // total number of read() errors
-    uint32_t mSampleRate;
-    size_t   mFrameCount;
-};
-
 class FastCapture : public FastThread {
 
 public:
@@ -57,19 +46,21 @@
     virtual void onStateChange();
     virtual void onWork();
 
-    static const FastCaptureState initial;
-    FastCaptureState preIdle; // copy of state before we went into idle
+    static const FastCaptureState sInitial;
+
+    FastCaptureState    mPreIdle;   // copy of state before we went into idle
     // FIXME by renaming, could pull up many of these to FastThread
-    NBAIO_Source *inputSource;
-    int inputSourceGen;
-    NBAIO_Sink *pipeSink;
-    int pipeSinkGen;
-    short *readBuffer;
-    ssize_t readBufferState;    // number of initialized frames in readBuffer, or -1 to clear
-    NBAIO_Format format;
-    unsigned sampleRate;
-    FastCaptureDumpState dummyDumpState;
-    uint32_t totalNativeFramesRead; // copied to dumpState->mFramesRead
+    NBAIO_Source*       mInputSource;
+    int                 mInputSourceGen;
+    NBAIO_Sink*         mPipeSink;
+    int                 mPipeSinkGen;
+    void*               mReadBuffer;
+    ssize_t             mReadBufferState;   // number of initialized frames in readBuffer,
+                                            // or -1 to clear
+    NBAIO_Format        mFormat;
+    unsigned            mSampleRate;
+    FastCaptureDumpState mDummyFastCaptureDumpState;
+    uint32_t            mTotalNativeFramesRead; // copied to dumpState->mFramesRead
 
 };  // class FastCapture
 
diff --git a/services/audioflinger/FastCaptureDumpState.cpp b/services/audioflinger/FastCaptureDumpState.cpp
new file mode 100644
index 0000000..53eeba5
--- /dev/null
+++ b/services/audioflinger/FastCaptureDumpState.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FastCaptureDumpState"
+//#define LOG_NDEBUG 0
+
+#include "Configuration.h"
+#include <utils/Log.h>
+#include "FastCaptureDumpState.h"
+#include "FastCaptureState.h"
+
+namespace android {
+
+FastCaptureDumpState::FastCaptureDumpState() : FastThreadDumpState(),
+    mReadSequence(0), mFramesRead(0), mReadErrors(0), mSampleRate(0), mFrameCount(0)
+{
+}
+
+FastCaptureDumpState::~FastCaptureDumpState()
+{
+}
+
+void FastCaptureDumpState::dump(int fd) const
+{
+    if (mCommand == FastCaptureState::INITIAL) {
+        dprintf(fd, "  FastCapture not initialized\n");
+        return;
+    }
+    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
+            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
+    double periodSec = (double) mFrameCount / mSampleRate;
+    dprintf(fd, "  FastCapture command=%s readSequence=%u framesRead=%u\n"
+                "              readErrors=%u sampleRate=%u frameCount=%zu\n"
+                "              measuredWarmup=%.3g ms, warmupCycles=%u period=%.2f ms\n",
+                FastCaptureState::commandToString(mCommand), mReadSequence, mFramesRead,
+                mReadErrors, mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
+                periodSec * 1e3);
+}
+
+}   // android
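
The dump() above derives two display values from the raw fields: the warmup time in milliseconds from a timespec, and the nominal capture period from frameCount over sampleRate. A minimal sketch of that arithmetic with assumed values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <time.h>

    int main() {
        struct timespec measuredWarmupTs = {0, 45000000};   // 45 ms warmup (assumed)
        uint32_t sampleRate = 48000;                         // assumed HAL rate
        size_t frameCount = 192;                             // assumed capture buffer size

        double measuredWarmupMs = (measuredWarmupTs.tv_sec * 1000.0) +
                (measuredWarmupTs.tv_nsec / 1000000.0);
        double periodSec = (double) frameCount / sampleRate;
        printf("measuredWarmup=%.3g ms period=%.2f ms\n",
                measuredWarmupMs, periodSec * 1e3);
        return 0;
    }
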
diff --git a/services/audioflinger/FastCaptureDumpState.h b/services/audioflinger/FastCaptureDumpState.h
new file mode 100644
index 0000000..6f9c4c3
--- /dev/null
+++ b/services/audioflinger/FastCaptureDumpState.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FAST_CAPTURE_DUMP_STATE_H
+#define ANDROID_AUDIO_FAST_CAPTURE_DUMP_STATE_H
+
+#include <stdint.h>
+#include "Configuration.h"
+#include "FastThreadDumpState.h"
+
+namespace android {
+
+struct FastCaptureDumpState : FastThreadDumpState {
+    FastCaptureDumpState();
+    /*virtual*/ ~FastCaptureDumpState();
+
+    void dump(int fd) const;    // should only be called on a stable copy, not the original
+
+    // FIXME by renaming, could pull up many of these to FastThreadDumpState
+    uint32_t mReadSequence;     // incremented before and after each read()
+    uint32_t mFramesRead;       // total number of frames read successfully
+    uint32_t mReadErrors;       // total number of read() errors
+    uint32_t mSampleRate;
+    size_t   mFrameCount;
+};
+
+}   // android
+
+#endif  // ANDROID_AUDIO_FAST_CAPTURE_DUMP_STATE_H
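
The header notes that dump() should only be called on a stable copy. A minimal sketch of that idiom with a hypothetical stand-in struct: snapshot the lock-free dump state first, then print the snapshot:

    #include <stdint.h>
    #include <stdio.h>

    struct DumpState {                 // hypothetical stand-in for FastCaptureDumpState
        uint32_t mFramesRead = 0;
        uint32_t mReadErrors = 0;
        void dump(int fd) const {
            dprintf(fd, "  framesRead=%u readErrors=%u\n", mFramesRead, mReadErrors);
        }
    };

    int main() {
        DumpState live;                // normally shared with the fast thread, no locks
        live.mFramesRead = 480000;
        DumpState copy = live;         // snapshot; fields may still be mutually inconsistent
        copy.dump(1 /*stdout*/);       // dump the copy, never the live struct
        return 0;
    }
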
diff --git a/services/audioflinger/FastCaptureState.cpp b/services/audioflinger/FastCaptureState.cpp
index 1d029b7..c4d5e45 100644
--- a/services/audioflinger/FastCaptureState.cpp
+++ b/services/audioflinger/FastCaptureState.cpp
@@ -27,4 +27,19 @@
 {
 }
 
+// static
+const char *FastCaptureState::commandToString(Command command)
+{
+    const char *str = FastThreadState::commandToString(command);
+    if (str != NULL) {
+        return str;
+    }
+    switch (command) {
+    case FastCaptureState::READ:        return "READ";
+    case FastCaptureState::WRITE:       return "WRITE";
+    case FastCaptureState::READ_WRITE:  return "READ_WRITE";
+    }
+    LOG_ALWAYS_FATAL("%s", __func__);
+}
+
 }   // android
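
commandToString() above defers to the base class for the shared commands, names only the subclass-specific ones, and treats anything else as fatal. A minimal standalone sketch of the same pattern with hypothetical command values:

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned Command;
    static const Command IDLE = 0x1, READ = 0x8, WRITE = 0x10, READ_WRITE = 0x18;

    static const char *baseCommandToString(Command command) {
        return command == IDLE ? "IDLE" : NULL;   // the base class names its own commands
    }

    static const char *commandToString(Command command) {
        const char *str = baseCommandToString(command);
        if (str != NULL) {
            return str;
        }
        switch (command) {
        case READ:       return "READ";
        case WRITE:      return "WRITE";
        case READ_WRITE: return "READ_WRITE";
        }
        fprintf(stderr, "invalid command %#x\n", command);  // stands in for LOG_ALWAYS_FATAL
        abort();
    }

    int main() {
        printf("%s %s\n", commandToString(READ), commandToString(IDLE));
        return 0;
    }
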
diff --git a/services/audioflinger/FastCaptureState.h b/services/audioflinger/FastCaptureState.h
index 29c865a..9bca2d4 100644
--- a/services/audioflinger/FastCaptureState.h
+++ b/services/audioflinger/FastCaptureState.h
@@ -29,21 +29,23 @@
     /*virtual*/ ~FastCaptureState();
 
     // all pointer fields use raw pointers; objects are owned and ref-counted by RecordThread
-    NBAIO_Source    *mInputSource;      // HAL input device, must already be negotiated
+    NBAIO_Source*   mInputSource;       // HAL input device, must already be negotiated
     // FIXME by renaming, could pull up these fields to FastThreadState
     int             mInputSourceGen;    // increment when mInputSource is assigned
-    NBAIO_Sink      *mPipeSink;         // after reading from input source, write to this pipe sink
+    NBAIO_Sink*     mPipeSink;          // after reading from input source, write to this pipe sink
     int             mPipeSinkGen;       // increment when mPipeSink is assigned
     size_t          mFrameCount;        // number of frames per fast capture buffer
-    audio_track_cblk_t  *mCblk;         // control block for the single fast client, or NULL
+    audio_track_cblk_t* mCblk;          // control block for the single fast client, or NULL
 
     // Extends FastThreadState::Command
     static const Command
         // The following commands also process configuration changes, and can be "or"ed:
-        READ = 0x8,             // read from input source
-        WRITE = 0x10,           // write to pipe sink
-        READ_WRITE = 0x18;      // read from input source and write to pipe sink
+        READ = 0x8,                     // read from input source
+        WRITE = 0x10,                   // write to pipe sink
+        READ_WRITE = 0x18;              // read from input source and write to pipe sink
 
+    // never returns NULL; asserts if command is invalid
+    static const char *commandToString(Command command);
 };  // struct FastCaptureState
 
 }   // namespace android
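
READ_WRITE is simply READ | WRITE, so a combined command satisfies both individual bit tests. A minimal sketch of how such "or"ed command bits are typically consumed (the dispatch below is hypothetical):

    #include <stdio.h>

    int main() {
        const unsigned READ = 0x8, WRITE = 0x10, READ_WRITE = 0x18;  // 0x18 == READ | WRITE
        unsigned command = READ_WRITE;
        if (command & READ) {
            printf("read from input source\n");
        }
        if (command & WRITE) {
            printf("write to pipe sink\n");
        }
        return 0;
    }
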
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 2678cbf..45c68b5 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -27,10 +27,11 @@
 
 #include "Configuration.h"
 #include <time.h>
+#include <utils/Debug.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <system/audio.h>
-#ifdef FAST_MIXER_STATISTICS
+#ifdef FAST_THREAD_STATISTICS
 #include <cpustats/CentralTendencyStatistics.h>
 #ifdef CPU_FREQUENCY_STATISTICS
 #include <cpustats/ThreadCpuUsage.h>
@@ -44,15 +45,15 @@
 
 namespace android {
 
-/*static*/ const FastMixerState FastMixer::initial;
+/*static*/ const FastMixerState FastMixer::sInitial;
 
 FastMixer::FastMixer() : FastThread(),
-    slopNs(0),
-    // fastTrackNames
-    // generations
-    outputSink(NULL),
-    outputSinkGen(0),
-    mixer(NULL),
+    mSlopNs(0),
+    // mFastTrackNames
+    // mGenerations
+    mOutputSink(NULL),
+    mOutputSinkGen(0),
+    mMixer(NULL),
     mSinkBuffer(NULL),
     mSinkBufferSize(0),
     mSinkChannelCount(FCC_2),
@@ -60,30 +61,30 @@
     mMixerBufferSize(0),
     mMixerBufferFormat(AUDIO_FORMAT_PCM_16_BIT),
     mMixerBufferState(UNDEFINED),
-    format(Format_Invalid),
-    sampleRate(0),
-    fastTracksGen(0),
-    totalNativeFramesWritten(0),
+    mFormat(Format_Invalid),
+    mSampleRate(0),
+    mFastTracksGen(0),
+    mTotalNativeFramesWritten(0),
     // timestamp
-    nativeFramesWrittenButNotPresented(0)   // the = 0 is to silence the compiler
+    mNativeFramesWrittenButNotPresented(0)   // the = 0 is to silence the compiler
 {
-    // FIXME pass initial as parameter to base class constructor, and make it static local
-    previous = &initial;
-    current = &initial;
+    // FIXME pass sInitial as parameter to base class constructor, and make it static local
+    mPrevious = &sInitial;
+    mCurrent = &sInitial;
 
-    mDummyDumpState = &dummyDumpState;
+    mDummyDumpState = &mDummyFastMixerDumpState;
     // TODO: Add channel mask to NBAIO_Format.
     // We assume that the channel mask must be a valid positional channel mask.
     mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
 
     unsigned i;
     for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
-        fastTrackNames[i] = -1;
-        generations[i] = 0;
+        mFastTrackNames[i] = -1;
+        mGenerations[i] = 0;
     }
-#ifdef FAST_MIXER_STATISTICS
-    oldLoad.tv_sec = 0;
-    oldLoad.tv_nsec = 0;
+#ifdef FAST_THREAD_STATISTICS
+    mOldLoad.tv_sec = 0;
+    mOldLoad.tv_nsec = 0;
 #endif
 }
 
@@ -103,20 +104,20 @@
 
 void FastMixer::setLog(NBLog::Writer *logWriter)
 {
-    if (mixer != NULL) {
-        mixer->setLog(logWriter);
+    if (mMixer != NULL) {
+        mMixer->setLog(logWriter);
     }
 }
 
 void FastMixer::onIdle()
 {
-    preIdle = *(const FastMixerState *)current;
-    current = &preIdle;
+    mPreIdle = *(const FastMixerState *)mCurrent;
+    mCurrent = &mPreIdle;
 }
 
 void FastMixer::onExit()
 {
-    delete mixer;
+    delete mMixer;
     free(mMixerBuffer);
     free(mSinkBuffer);
 }
@@ -135,82 +136,88 @@
 
 void FastMixer::onStateChange()
 {
-    const FastMixerState * const current = (const FastMixerState *) this->current;
-    const FastMixerState * const previous = (const FastMixerState *) this->previous;
-    FastMixerDumpState * const dumpState = (FastMixerDumpState *) this->dumpState;
+    const FastMixerState * const current = (const FastMixerState *) mCurrent;
+    const FastMixerState * const previous = (const FastMixerState *) mPrevious;
+    FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
     const size_t frameCount = current->mFrameCount;
 
     // handle state change here, but since we want to diff the state,
-    // we're prepared for previous == &initial the first time through
+    // we're prepared for previous == &sInitial the first time through
     unsigned previousTrackMask;
 
     // check for change in output HAL configuration
-    NBAIO_Format previousFormat = format;
-    if (current->mOutputSinkGen != outputSinkGen) {
-        outputSink = current->mOutputSink;
-        outputSinkGen = current->mOutputSinkGen;
-        if (outputSink == NULL) {
-            format = Format_Invalid;
-            sampleRate = 0;
+    NBAIO_Format previousFormat = mFormat;
+    if (current->mOutputSinkGen != mOutputSinkGen) {
+        mOutputSink = current->mOutputSink;
+        mOutputSinkGen = current->mOutputSinkGen;
+        if (mOutputSink == NULL) {
+            mFormat = Format_Invalid;
+            mSampleRate = 0;
             mSinkChannelCount = 0;
             mSinkChannelMask = AUDIO_CHANNEL_NONE;
         } else {
-            format = outputSink->format();
-            sampleRate = Format_sampleRate(format);
-            mSinkChannelCount = Format_channelCount(format);
+            mFormat = mOutputSink->format();
+            mSampleRate = Format_sampleRate(mFormat);
+            mSinkChannelCount = Format_channelCount(mFormat);
             LOG_ALWAYS_FATAL_IF(mSinkChannelCount > AudioMixer::MAX_NUM_CHANNELS);
 
             // TODO: Add channel mask to NBAIO_Format
             // We assume that the channel mask must be a valid positional channel mask.
             mSinkChannelMask = audio_channel_out_mask_from_count(mSinkChannelCount);
         }
-        dumpState->mSampleRate = sampleRate;
+        dumpState->mSampleRate = mSampleRate;
     }
 
-    if ((!Format_isEqual(format, previousFormat)) || (frameCount != previous->mFrameCount)) {
+    if ((!Format_isEqual(mFormat, previousFormat)) || (frameCount != previous->mFrameCount)) {
         // FIXME to avoid priority inversion, don't delete here
-        delete mixer;
-        mixer = NULL;
+        delete mMixer;
+        mMixer = NULL;
         free(mMixerBuffer);
         mMixerBuffer = NULL;
         free(mSinkBuffer);
         mSinkBuffer = NULL;
-        if (frameCount > 0 && sampleRate > 0) {
+        if (frameCount > 0 && mSampleRate > 0) {
+            // The mixer produces either 16 bit PCM or float output, select
+            // float output if the HAL supports higher than 16 bit precision.
+            mMixerBufferFormat = mFormat.mFormat == AUDIO_FORMAT_PCM_16_BIT ?
+                    AUDIO_FORMAT_PCM_16_BIT : AUDIO_FORMAT_PCM_FLOAT;
             // FIXME new may block for unbounded time at internal mutex of the heap
             //       implementation; it would be better to have normal mixer allocate for us
             //       to avoid blocking here and to prevent possible priority inversion
-            mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
+            mMixer = new AudioMixer(frameCount, mSampleRate, FastMixerState::kMaxFastTracks);
             const size_t mixerFrameSize = mSinkChannelCount
                     * audio_bytes_per_sample(mMixerBufferFormat);
             mMixerBufferSize = mixerFrameSize * frameCount;
             (void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
             const size_t sinkFrameSize = mSinkChannelCount
-                    * audio_bytes_per_sample(format.mFormat);
+                    * audio_bytes_per_sample(mFormat.mFormat);
             if (sinkFrameSize > mixerFrameSize) { // need a sink buffer
                 mSinkBufferSize = sinkFrameSize * frameCount;
                 (void)posix_memalign(&mSinkBuffer, 32, mSinkBufferSize);
             }
-            periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00
-            underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75
-            overrunNs = (frameCount * 500000000LL) / sampleRate;    // 0.50
-            forceNs = (frameCount * 950000000LL) / sampleRate;      // 0.95
-            warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50
+            mPeriodNs = (frameCount * 1000000000LL) / mSampleRate;    // 1.00
+            mUnderrunNs = (frameCount * 1750000000LL) / mSampleRate;  // 1.75
+            mOverrunNs = (frameCount * 500000000LL) / mSampleRate;    // 0.50
+            mForceNs = (frameCount * 950000000LL) / mSampleRate;      // 0.95
+            mWarmupNsMin = (frameCount * 750000000LL) / mSampleRate;  // 0.75
+            mWarmupNsMax = (frameCount * 1250000000LL) / mSampleRate; // 1.25
         } else {
-            periodNs = 0;
-            underrunNs = 0;
-            overrunNs = 0;
-            forceNs = 0;
-            warmupNs = 0;
+            mPeriodNs = 0;
+            mUnderrunNs = 0;
+            mOverrunNs = 0;
+            mForceNs = 0;
+            mWarmupNsMin = 0;
+            mWarmupNsMax = LONG_MAX;
         }
         mMixerBufferState = UNDEFINED;
 #if !LOG_NDEBUG
         for (unsigned i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
-            fastTrackNames[i] = -1;
+            mFastTrackNames[i] = -1;
         }
 #endif
         // we need to reconfigure all active tracks
         previousTrackMask = 0;
-        fastTracksGen = current->mFastTracksGen - 1;
+        mFastTracksGen = current->mFastTracksGen - 1;
         dumpState->mFrameCount = frameCount;
     } else {
         previousTrackMask = previous->mTrackMask;
@@ -219,7 +226,7 @@
     // check for change in active track set
     const unsigned currentTrackMask = current->mTrackMask;
     dumpState->mTrackMask = currentTrackMask;
-    if (current->mFastTracksGen != fastTracksGen) {
+    if (current->mFastTracksGen != mFastTracksGen) {
         ALOG_ASSERT(mMixerBuffer != NULL);
         int name;
 
@@ -230,16 +237,16 @@
             removedTracks &= ~(1 << i);
             const FastTrack* fastTrack = &current->mFastTracks[i];
             ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
-            if (mixer != NULL) {
-                name = fastTrackNames[i];
+            if (mMixer != NULL) {
+                name = mFastTrackNames[i];
                 ALOG_ASSERT(name >= 0);
-                mixer->deleteTrackName(name);
+                mMixer->deleteTrackName(name);
             }
 #if !LOG_NDEBUG
-            fastTrackNames[i] = -1;
+            mFastTrackNames[i] = -1;
 #endif
             // don't reset track dump state, since other side is ignoring it
-            generations[i] = fastTrack->mGeneration;
+            mGenerations[i] = fastTrack->mGeneration;
         }
 
         // now process added tracks
@@ -249,29 +256,29 @@
             addedTracks &= ~(1 << i);
             const FastTrack* fastTrack = &current->mFastTracks[i];
             AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
-            ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1);
-            if (mixer != NULL) {
-                name = mixer->getTrackName(fastTrack->mChannelMask,
+            ALOG_ASSERT(bufferProvider != NULL && mFastTrackNames[i] == -1);
+            if (mMixer != NULL) {
+                name = mMixer->getTrackName(fastTrack->mChannelMask,
                         fastTrack->mFormat, AUDIO_SESSION_OUTPUT_MIX);
                 ALOG_ASSERT(name >= 0);
-                fastTrackNames[i] = name;
-                mixer->setBufferProvider(name, bufferProvider);
-                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+                mFastTrackNames[i] = name;
+                mMixer->setBufferProvider(name, bufferProvider);
+                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                         (void *)mMixerBuffer);
                 // newly allocated track names default to full scale volume
-                mixer->setParameter(
+                mMixer->setParameter(
                         name,
                         AudioMixer::TRACK,
                         AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
-                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
+                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
                         (void *)(uintptr_t)fastTrack->mFormat);
-                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                         (void *)(uintptr_t)fastTrack->mChannelMask);
-                mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
                         (void *)(uintptr_t)mSinkChannelMask);
-                mixer->enable(name);
+                mMixer->enable(name);
             }
-            generations[i] = fastTrack->mGeneration;
+            mGenerations[i] = fastTrack->mGeneration;
         }
 
         // finally process (potentially) modified tracks; these use the same slot
@@ -281,38 +288,38 @@
             int i = __builtin_ctz(modifiedTracks);
             modifiedTracks &= ~(1 << i);
             const FastTrack* fastTrack = &current->mFastTracks[i];
-            if (fastTrack->mGeneration != generations[i]) {
+            if (fastTrack->mGeneration != mGenerations[i]) {
                 // this track was actually modified
                 AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
                 ALOG_ASSERT(bufferProvider != NULL);
-                if (mixer != NULL) {
-                    name = fastTrackNames[i];
+                if (mMixer != NULL) {
+                    name = mFastTrackNames[i];
                     ALOG_ASSERT(name >= 0);
-                    mixer->setBufferProvider(name, bufferProvider);
+                    mMixer->setBufferProvider(name, bufferProvider);
                     if (fastTrack->mVolumeProvider == NULL) {
                         float f = AudioMixer::UNITY_GAIN_FLOAT;
-                        mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
-                        mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
+                        mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f);
+                        mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f);
                     }
-                    mixer->setParameter(name, AudioMixer::RESAMPLE,
+                    mMixer->setParameter(name, AudioMixer::RESAMPLE,
                             AudioMixer::REMOVE, NULL);
-                    mixer->setParameter(
+                    mMixer->setParameter(
                             name,
                             AudioMixer::TRACK,
                             AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
-                    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
+                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::FORMAT,
                             (void *)(uintptr_t)fastTrack->mFormat);
-                    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
+                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                             (void *)(uintptr_t)fastTrack->mChannelMask);
-                    mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
+                    mMixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MIXER_CHANNEL_MASK,
                             (void *)(uintptr_t)mSinkChannelMask);
                     // already enabled
                 }
-                generations[i] = fastTrack->mGeneration;
+                mGenerations[i] = fastTrack->mGeneration;
             }
         }
 
-        fastTracksGen = current->mFastTracksGen;
+        mFastTracksGen = current->mFastTracksGen;
 
         dumpState->mNumTracks = popcount(currentTrackMask);
     }
@@ -320,12 +327,12 @@
 
 void FastMixer::onWork()
 {
-    const FastMixerState * const current = (const FastMixerState *) this->current;
-    FastMixerDumpState * const dumpState = (FastMixerDumpState *) this->dumpState;
-    const FastMixerState::Command command = this->command;
+    const FastMixerState * const current = (const FastMixerState *) mCurrent;
+    FastMixerDumpState * const dumpState = (FastMixerDumpState *) mDumpState;
+    const FastMixerState::Command command = mCommand;
     const size_t frameCount = current->mFrameCount;
 
-    if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
+    if ((command & FastMixerState::MIX) && (mMixer != NULL) && mIsWarm) {
         ALOG_ASSERT(mMixerBuffer != NULL);
         // for each track, update volume and check for underrun
         unsigned currentTrackMask = current->mTrackMask;
@@ -335,9 +342,9 @@
             const FastTrack* fastTrack = &current->mFastTracks[i];
 
             // Refresh the per-track timestamp
-            if (timestampStatus == NO_ERROR) {
+            if (mTimestampStatus == NO_ERROR) {
                 uint32_t trackFramesWrittenButNotPresented =
-                    nativeFramesWrittenButNotPresented;
+                    mNativeFramesWrittenButNotPresented;
                 uint32_t trackFramesWritten = fastTrack->mBufferProvider->framesReleased();
                 // Can't provide an AudioTimestamp before first frame presented,
                 // or during the brief 32-bit wraparound window
@@ -345,20 +352,20 @@
                     AudioTimestamp perTrackTimestamp;
                     perTrackTimestamp.mPosition =
                             trackFramesWritten - trackFramesWrittenButNotPresented;
-                    perTrackTimestamp.mTime = timestamp.mTime;
+                    perTrackTimestamp.mTime = mTimestamp.mTime;
                     fastTrack->mBufferProvider->onTimestamp(perTrackTimestamp);
                 }
             }
 
-            int name = fastTrackNames[i];
+            int name = mFastTrackNames[i];
             ALOG_ASSERT(name >= 0);
             if (fastTrack->mVolumeProvider != NULL) {
                 gain_minifloat_packed_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
                 float vlf = float_from_gain(gain_minifloat_unpack_left(vlr));
                 float vrf = float_from_gain(gain_minifloat_unpack_right(vlr));
 
-                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf);
-                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf);
+                mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &vlf);
+                mMixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &vrf);
             }
             // FIXME The current implementation of framesReady() for fast tracks
             // takes a tryLock, which can block
@@ -379,43 +386,44 @@
                 if (framesReady == 0) {
                     underruns.mBitFields.mEmpty++;
                     underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY;
-                    mixer->disable(name);
+                    mMixer->disable(name);
                 } else {
                     // allow mixing partial buffer
                     underruns.mBitFields.mPartial++;
                     underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
-                    mixer->enable(name);
+                    mMixer->enable(name);
                 }
             } else {
                 underruns.mBitFields.mFull++;
                 underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
-                mixer->enable(name);
+                mMixer->enable(name);
             }
             ftDump->mUnderruns = underruns;
             ftDump->mFramesReady = framesReady;
         }
 
         int64_t pts;
-        if (outputSink == NULL || (OK != outputSink->getNextWriteTimestamp(&pts))) {
+        if (mOutputSink == NULL || (OK != mOutputSink->getNextWriteTimestamp(&pts))) {
             pts = AudioBufferProvider::kInvalidPTS;
         }
 
         // process() is CPU-bound
-        mixer->process(pts);
+        mMixer->process(pts);
         mMixerBufferState = MIXED;
     } else if (mMixerBufferState == MIXED) {
         mMixerBufferState = UNDEFINED;
     }
     //bool didFullWrite = false;    // dumpsys could display a count of partial writes
-    if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mMixerBuffer != NULL)) {
+    if ((command & FastMixerState::WRITE) && (mOutputSink != NULL) && (mMixerBuffer != NULL)) {
         if (mMixerBufferState == UNDEFINED) {
             memset(mMixerBuffer, 0, mMixerBufferSize);
             mMixerBufferState = ZEROED;
         }
+        // prepare the buffer used to write to sink
         void *buffer = mSinkBuffer != NULL ? mSinkBuffer : mMixerBuffer;
-        if (format.mFormat != mMixerBufferFormat) { // sink format not the same as mixer format
-            memcpy_by_audio_format(buffer, format.mFormat, mMixerBuffer, mMixerBufferFormat,
-                    frameCount * Format_channelCount(format));
+        if (mFormat.mFormat != mMixerBufferFormat) { // sink format not the same as mixer format
+            memcpy_by_audio_format(buffer, mFormat.mFormat, mMixerBuffer, mMixerBufferFormat,
+                    frameCount * Format_channelCount(mFormat));
         }
         // if non-NULL, then duplicate write() to this non-blocking sink
         NBAIO_Sink* teeSink;
@@ -426,252 +434,34 @@
         //       but this code should be modified to handle both non-blocking and blocking sinks
         dumpState->mWriteSequence++;
         ATRACE_BEGIN("write");
-        ssize_t framesWritten = outputSink->write(buffer, frameCount);
+        ssize_t framesWritten = mOutputSink->write(buffer, frameCount);
         ATRACE_END();
         dumpState->mWriteSequence++;
         if (framesWritten >= 0) {
             ALOG_ASSERT((size_t) framesWritten <= frameCount);
-            totalNativeFramesWritten += framesWritten;
-            dumpState->mFramesWritten = totalNativeFramesWritten;
+            mTotalNativeFramesWritten += framesWritten;
+            dumpState->mFramesWritten = mTotalNativeFramesWritten;
             //if ((size_t) framesWritten == frameCount) {
             //    didFullWrite = true;
             //}
         } else {
             dumpState->mWriteErrors++;
         }
-        attemptedWrite = true;
+        mAttemptedWrite = true;
         // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
 
-        timestampStatus = outputSink->getTimestamp(timestamp);
-        if (timestampStatus == NO_ERROR) {
-            uint32_t totalNativeFramesPresented = timestamp.mPosition;
-            if (totalNativeFramesPresented <= totalNativeFramesWritten) {
-                nativeFramesWrittenButNotPresented =
-                    totalNativeFramesWritten - totalNativeFramesPresented;
+        mTimestampStatus = mOutputSink->getTimestamp(mTimestamp);
+        if (mTimestampStatus == NO_ERROR) {
+            uint32_t totalNativeFramesPresented = mTimestamp.mPosition;
+            if (totalNativeFramesPresented <= mTotalNativeFramesWritten) {
+                mNativeFramesWrittenButNotPresented =
+                    mTotalNativeFramesWritten - totalNativeFramesPresented;
             } else {
                 // HAL reported that more frames were presented than were written
-                timestampStatus = INVALID_OPERATION;
+                mTimestampStatus = INVALID_OPERATION;
             }
         }
     }
 }
 
-FastMixerDumpState::FastMixerDumpState(
-#ifdef FAST_MIXER_STATISTICS
-        uint32_t samplingN
-#endif
-        ) : FastThreadDumpState(),
-    mWriteSequence(0), mFramesWritten(0),
-    mNumTracks(0), mWriteErrors(0),
-    mSampleRate(0), mFrameCount(0),
-    mTrackMask(0)
-{
-#ifdef FAST_MIXER_STATISTICS
-    increaseSamplingN(samplingN);
-#endif
-}
-
-#ifdef FAST_MIXER_STATISTICS
-void FastMixerDumpState::increaseSamplingN(uint32_t samplingN)
-{
-    if (samplingN <= mSamplingN || samplingN > kSamplingN || roundup(samplingN) != samplingN) {
-        return;
-    }
-    uint32_t additional = samplingN - mSamplingN;
-    // sample arrays aren't accessed atomically with respect to the bounds,
-    // so clearing reduces chance for dumpsys to read random uninitialized samples
-    memset(&mMonotonicNs[mSamplingN], 0, sizeof(mMonotonicNs[0]) * additional);
-    memset(&mLoadNs[mSamplingN], 0, sizeof(mLoadNs[0]) * additional);
-#ifdef CPU_FREQUENCY_STATISTICS
-    memset(&mCpukHz[mSamplingN], 0, sizeof(mCpukHz[0]) * additional);
-#endif
-    mSamplingN = samplingN;
-}
-#endif
-
-FastMixerDumpState::~FastMixerDumpState()
-{
-}
-
-// helper function called by qsort()
-static int compare_uint32_t(const void *pa, const void *pb)
-{
-    uint32_t a = *(const uint32_t *)pa;
-    uint32_t b = *(const uint32_t *)pb;
-    if (a < b) {
-        return -1;
-    } else if (a > b) {
-        return 1;
-    } else {
-        return 0;
-    }
-}
-
-void FastMixerDumpState::dump(int fd) const
-{
-    if (mCommand == FastMixerState::INITIAL) {
-        dprintf(fd, "  FastMixer not initialized\n");
-        return;
-    }
-#define COMMAND_MAX 32
-    char string[COMMAND_MAX];
-    switch (mCommand) {
-    case FastMixerState::INITIAL:
-        strcpy(string, "INITIAL");
-        break;
-    case FastMixerState::HOT_IDLE:
-        strcpy(string, "HOT_IDLE");
-        break;
-    case FastMixerState::COLD_IDLE:
-        strcpy(string, "COLD_IDLE");
-        break;
-    case FastMixerState::EXIT:
-        strcpy(string, "EXIT");
-        break;
-    case FastMixerState::MIX:
-        strcpy(string, "MIX");
-        break;
-    case FastMixerState::WRITE:
-        strcpy(string, "WRITE");
-        break;
-    case FastMixerState::MIX_WRITE:
-        strcpy(string, "MIX_WRITE");
-        break;
-    default:
-        snprintf(string, COMMAND_MAX, "%d", mCommand);
-        break;
-    }
-    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
-            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
-    double mixPeriodSec = (double) mFrameCount / (double) mSampleRate;
-    dprintf(fd, "  FastMixer command=%s writeSequence=%u framesWritten=%u\n"
-                "            numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
-                "            sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
-                "            mixPeriod=%.2f ms\n",
-                 string, mWriteSequence, mFramesWritten,
-                 mNumTracks, mWriteErrors, mUnderruns, mOverruns,
-                 mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
-                 mixPeriodSec * 1e3);
-#ifdef FAST_MIXER_STATISTICS
-    // find the interval of valid samples
-    uint32_t bounds = mBounds;
-    uint32_t newestOpen = bounds & 0xFFFF;
-    uint32_t oldestClosed = bounds >> 16;
-    uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
-    if (n > mSamplingN) {
-        ALOGE("too many samples %u", n);
-        n = mSamplingN;
-    }
-    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
-    // and adjusted CPU load in MHz normalized for CPU clock frequency
-    CentralTendencyStatistics wall, loadNs;
-#ifdef CPU_FREQUENCY_STATISTICS
-    CentralTendencyStatistics kHz, loadMHz;
-    uint32_t previousCpukHz = 0;
-#endif
-    // Assuming a normal distribution for cycle times, three standard deviations on either side of
-    // the mean account for 99.73% of the population.  So if we take each tail to be 1/1000 of the
-    // sample set, we get 99.8% combined, or close to three standard deviations.
-    static const uint32_t kTailDenominator = 1000;
-    uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
-    // loop over all the samples
-    for (uint32_t j = 0; j < n; ++j) {
-        size_t i = oldestClosed++ & (mSamplingN - 1);
-        uint32_t wallNs = mMonotonicNs[i];
-        if (tail != NULL) {
-            tail[j] = wallNs;
-        }
-        wall.sample(wallNs);
-        uint32_t sampleLoadNs = mLoadNs[i];
-        loadNs.sample(sampleLoadNs);
-#ifdef CPU_FREQUENCY_STATISTICS
-        uint32_t sampleCpukHz = mCpukHz[i];
-        // skip bad kHz samples
-        if ((sampleCpukHz & ~0xF) != 0) {
-            kHz.sample(sampleCpukHz >> 4);
-            if (sampleCpukHz == previousCpukHz) {
-                double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
-                double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
-                loadMHz.sample(adjMHz);
-            }
-        }
-        previousCpukHz = sampleCpukHz;
-#endif
-    }
-    if (n) {
-        dprintf(fd, "  Simple moving statistics over last %.1f seconds:\n",
-                    wall.n() * mixPeriodSec);
-        dprintf(fd, "    wall clock time in ms per mix cycle:\n"
-                    "      mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                    wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
-                    wall.stddev()*1e-6);
-        dprintf(fd, "    raw CPU load in us per mix cycle:\n"
-                    "      mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                    loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
-                    loadNs.stddev()*1e-3);
-    } else {
-        dprintf(fd, "  No FastMixer statistics available currently\n");
-    }
-#ifdef CPU_FREQUENCY_STATISTICS
-    dprintf(fd, "  CPU clock frequency in MHz:\n"
-                "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
-                kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
-    dprintf(fd, "  adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
-                "    mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
-                loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
-#endif
-    if (tail != NULL) {
-        qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
-        // assume same number of tail samples on each side, left and right
-        uint32_t count = n / kTailDenominator;
-        CentralTendencyStatistics left, right;
-        for (uint32_t i = 0; i < count; ++i) {
-            left.sample(tail[i]);
-            right.sample(tail[n - (i + 1)]);
-        }
-        dprintf(fd, "  Distribution of mix cycle times in ms for the tails (> ~3 stddev outliers):\n"
-                    "    left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
-                    "    right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
-                    left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
-                    right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
-                    right.stddev()*1e-6);
-        delete[] tail;
-    }
-#endif
-    // The active track mask and track states are updated non-atomically.
-    // So if we relied on isActive to decide whether to display,
-    // then we might display an obsolete track or omit an active track.
-    // Instead we always display all tracks, with an indication
-    // of whether we think the track is active.
-    uint32_t trackMask = mTrackMask;
-    dprintf(fd, "  Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
-            FastMixerState::kMaxFastTracks, trackMask);
-    dprintf(fd, "  Index Active Full Partial Empty  Recent Ready\n");
-    for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) {
-        bool isActive = trackMask & 1;
-        const FastTrackDump *ftDump = &mTracks[i];
-        const FastTrackUnderruns& underruns = ftDump->mUnderruns;
-        const char *mostRecent;
-        switch (underruns.mBitFields.mMostRecent) {
-        case UNDERRUN_FULL:
-            mostRecent = "full";
-            break;
-        case UNDERRUN_PARTIAL:
-            mostRecent = "partial";
-            break;
-        case UNDERRUN_EMPTY:
-            mostRecent = "empty";
-            break;
-        default:
-            mostRecent = "?";
-            break;
-        }
-        dprintf(fd, "  %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no",
-                (underruns.mBitFields.mFull) & UNDERRUN_MASK,
-                (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
-                (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
-                mostRecent, ftDump->mFramesReady);
-    }
-}
-
 }   // namespace android
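
onStateChange() above derives all of its timing thresholds from a single nominal period of frameCount / sampleRate: underrun at 1.75x, overrun at 0.50x, force at 0.95x, and a warmup acceptance window of 0.75x to 1.25x. A minimal sketch of that computation with assumed values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const size_t frameCount = 256;      // assumed fast mixer buffer size
        const unsigned sampleRate = 48000;  // assumed HAL rate

        int64_t periodNs    = (frameCount * 1000000000LL) / sampleRate;  // 1.00 period
        int64_t underrunNs  = (frameCount * 1750000000LL) / sampleRate;  // 1.75 period
        int64_t overrunNs   = (frameCount *  500000000LL) / sampleRate;  // 0.50 period
        int64_t forceNs     = (frameCount *  950000000LL) / sampleRate;  // 0.95 period
        int64_t warmupNsMin = (frameCount *  750000000LL) / sampleRate;  // 0.75 period
        int64_t warmupNsMax = (frameCount * 1250000000LL) / sampleRate;  // 1.25 period

        printf("period=%lld ns underrun=%lld overrun=%lld force=%lld warmup=[%lld, %lld]\n",
                (long long) periodNs, (long long) underrunNs, (long long) overrunNs,
                (long long) forceNs, (long long) warmupNsMin, (long long) warmupNsMax);
        return 0;
    }
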
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index fde8c2b..06a68fb 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -17,11 +17,7 @@
 #ifndef ANDROID_AUDIO_FAST_MIXER_H
 #define ANDROID_AUDIO_FAST_MIXER_H
 
-#include <linux/futex.h>
-#include <sys/syscall.h>
-#include <utils/Debug.h>
 #include "FastThread.h"
-#include <utils/Thread.h>
 #include "StateQueue.h"
 #include "FastMixerState.h"
 #include "FastMixerDumpState.h"
@@ -52,36 +48,39 @@
     virtual void onStateChange();
     virtual void onWork();
 
-    // FIXME these former local variables need comments and to be renamed to have "m" prefix
-    static const FastMixerState initial;
-    FastMixerState preIdle; // copy of state before we went into idle
-    long slopNs;        // accumulated time we've woken up too early (> 0) or too late (< 0)
-    int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks
-    int generations[FastMixerState::kMaxFastTracks];    // last observed mFastTracks[i].mGeneration
-    NBAIO_Sink *outputSink;
-    int outputSinkGen;
-    AudioMixer* mixer;
+    // FIXME these former local variables need comments
+    static const FastMixerState sInitial;
+
+    FastMixerState  mPreIdle;   // copy of state before we went into idle
+    long            mSlopNs;    // accumulated time we've woken up too early (> 0) or too late (< 0)
+    int             mFastTrackNames[FastMixerState::kMaxFastTracks];
+                                // handles used by mixer to identify tracks
+    int             mGenerations[FastMixerState::kMaxFastTracks];
+                                // last observed mFastTracks[i].mGeneration
+    NBAIO_Sink*     mOutputSink;
+    int             mOutputSinkGen;
+    AudioMixer*     mMixer;
 
     // mSinkBuffer audio format is stored in format.mFormat.
-    void* mSinkBuffer;                  // used for mixer output format translation
+    void*           mSinkBuffer;        // used for mixer output format translation
                                         // if sink format is different than mixer output.
-    size_t mSinkBufferSize;
-    uint32_t mSinkChannelCount;
+    size_t          mSinkBufferSize;
+    uint32_t        mSinkChannelCount;
     audio_channel_mask_t mSinkChannelMask;
-    void* mMixerBuffer;                 // mixer output buffer.
-    size_t mMixerBufferSize;
-    audio_format_t mMixerBufferFormat;  // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
+    void*           mMixerBuffer;       // mixer output buffer.
+    size_t          mMixerBufferSize;
+    audio_format_t  mMixerBufferFormat; // mixer output format: AUDIO_FORMAT_PCM_(16_BIT|FLOAT).
 
     enum {UNDEFINED, MIXED, ZEROED} mMixerBufferState;
-    NBAIO_Format format;
-    unsigned sampleRate;
-    int fastTracksGen;
-    FastMixerDumpState dummyDumpState;
-    uint32_t totalNativeFramesWritten;  // copied to dumpState->mFramesWritten
+    NBAIO_Format    mFormat;
+    unsigned        mSampleRate;
+    int             mFastTracksGen;
+    FastMixerDumpState mDummyFastMixerDumpState;
+    uint32_t        mTotalNativeFramesWritten;  // copied to dumpState->mFramesWritten
 
     // next 2 fields are valid only when timestampStatus == NO_ERROR
-    AudioTimestamp timestamp;
-    uint32_t nativeFramesWrittenButNotPresented;
+    AudioTimestamp  mTimestamp;
+    uint32_t        mNativeFramesWrittenButNotPresented;
 
 };  // class FastMixer
 
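mSinkBuffer exists only for format translation: it is allocated when the sink frame is wider than the mixer frame, and writes otherwise go straight from mMixerBuffer. A minimal sketch of that size comparison with assumed per-sample sizes:

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
        const size_t frameCount = 256;          // assumed
        const size_t channelCount = 2;          // assumed
        const size_t mixerBytesPerSample = 2;   // bytes per sample in the mixer buffer (assumed)
        const size_t sinkBytesPerSample = 4;    // bytes per sample expected by the sink (assumed)

        const size_t mixerFrameSize = channelCount * mixerBytesPerSample;
        const size_t sinkFrameSize = channelCount * sinkBytesPerSample;

        void *mixerBuffer = NULL;
        void *sinkBuffer = NULL;
        (void) posix_memalign(&mixerBuffer, 32, mixerFrameSize * frameCount);
        if (sinkFrameSize > mixerFrameSize) {   // need a sink buffer for format conversion
            (void) posix_memalign(&sinkBuffer, 32, sinkFrameSize * frameCount);
        }
        // the write path then uses the sink buffer if it exists, otherwise the mixer buffer
        void *buffer = sinkBuffer != NULL ? sinkBuffer : mixerBuffer;
        printf("writing from %s buffer\n", buffer == sinkBuffer ? "sink" : "mixer");
        free(sinkBuffer);
        free(mixerBuffer);
        return 0;
    }
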
diff --git a/services/audioflinger/FastMixerDumpState.cpp b/services/audioflinger/FastMixerDumpState.cpp
new file mode 100644
index 0000000..b10942b
--- /dev/null
+++ b/services/audioflinger/FastMixerDumpState.cpp
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FastMixerDumpState"
+//#define LOG_NDEBUG 0
+
+#include "Configuration.h"
+#ifdef FAST_THREAD_STATISTICS
+#include <cpustats/CentralTendencyStatistics.h>
+#ifdef CPU_FREQUENCY_STATISTICS
+#include <cpustats/ThreadCpuUsage.h>
+#endif
+#endif
+#include <utils/Debug.h>
+#include <utils/Log.h>
+#include "FastMixerDumpState.h"
+
+namespace android {
+
+FastMixerDumpState::FastMixerDumpState() : FastThreadDumpState(),
+    mWriteSequence(0), mFramesWritten(0),
+    mNumTracks(0), mWriteErrors(0),
+    mSampleRate(0), mFrameCount(0),
+    mTrackMask(0)
+{
+}
+
+FastMixerDumpState::~FastMixerDumpState()
+{
+}
+
+// helper function called by qsort()
+static int compare_uint32_t(const void *pa, const void *pb)
+{
+    uint32_t a = *(const uint32_t *)pa;
+    uint32_t b = *(const uint32_t *)pb;
+    if (a < b) {
+        return -1;
+    } else if (a > b) {
+        return 1;
+    } else {
+        return 0;
+    }
+}
+
+void FastMixerDumpState::dump(int fd) const
+{
+    if (mCommand == FastMixerState::INITIAL) {
+        dprintf(fd, "  FastMixer not initialized\n");
+        return;
+    }
+    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
+            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
+    double mixPeriodSec = (double) mFrameCount / mSampleRate;
+    dprintf(fd, "  FastMixer command=%s writeSequence=%u framesWritten=%u\n"
+                "            numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
+                "            sampleRate=%u frameCount=%zu measuredWarmup=%.3g ms, warmupCycles=%u\n"
+                "            mixPeriod=%.2f ms\n",
+                FastMixerState::commandToString(mCommand), mWriteSequence, mFramesWritten,
+                mNumTracks, mWriteErrors, mUnderruns, mOverruns,
+                mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
+                mixPeriodSec * 1e3);
+#ifdef FAST_THREAD_STATISTICS
+    // find the interval of valid samples
+    uint32_t bounds = mBounds;
+    uint32_t newestOpen = bounds & 0xFFFF;
+    uint32_t oldestClosed = bounds >> 16;
+    uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
+    if (n > mSamplingN) {
+        ALOGE("too many samples %u", n);
+        n = mSamplingN;
+    }
+    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock frequency,
+    // and adjusted CPU load in MHz normalized for CPU clock frequency
+    CentralTendencyStatistics wall, loadNs;
+#ifdef CPU_FREQUENCY_STATISTICS
+    CentralTendencyStatistics kHz, loadMHz;
+    uint32_t previousCpukHz = 0;
+#endif
+    // Assuming a normal distribution for cycle times, three standard deviations on either side of
+    // the mean account for 99.73% of the population.  So if we take each tail to be 1/1000 of the
+    // sample set, we get 99.8% combined, or close to three standard deviations.
+    static const uint32_t kTailDenominator = 1000;
+    uint32_t *tail = n >= kTailDenominator ? new uint32_t[n] : NULL;
+    // loop over all the samples
+    for (uint32_t j = 0; j < n; ++j) {
+        size_t i = oldestClosed++ & (mSamplingN - 1);
+        uint32_t wallNs = mMonotonicNs[i];
+        if (tail != NULL) {
+            tail[j] = wallNs;
+        }
+        wall.sample(wallNs);
+        uint32_t sampleLoadNs = mLoadNs[i];
+        loadNs.sample(sampleLoadNs);
+#ifdef CPU_FREQUENCY_STATISTICS
+        uint32_t sampleCpukHz = mCpukHz[i];
+        // skip bad kHz samples
+        if ((sampleCpukHz & ~0xF) != 0) {
+            kHz.sample(sampleCpukHz >> 4);
+            if (sampleCpukHz == previousCpukHz) {
+                double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
+                double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
+                loadMHz.sample(adjMHz);
+            }
+        }
+        previousCpukHz = sampleCpukHz;
+#endif
+    }
+    if (n) {
+        dprintf(fd, "  Simple moving statistics over last %.1f seconds:\n",
+                    wall.n() * mixPeriodSec);
+        dprintf(fd, "    wall clock time in ms per mix cycle:\n"
+                    "      mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
+                    wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6,
+                    wall.stddev()*1e-6);
+        dprintf(fd, "    raw CPU load in us per mix cycle:\n"
+                    "      mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
+                    loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
+                    loadNs.stddev()*1e-3);
+    } else {
+        dprintf(fd, "  No FastMixer statistics available currently\n");
+    }
+#ifdef CPU_FREQUENCY_STATISTICS
+    dprintf(fd, "  CPU clock frequency in MHz:\n"
+                "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
+                kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
+    dprintf(fd, "  adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
+                "    mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
+                loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
+#endif
+    if (tail != NULL) {
+        qsort(tail, n, sizeof(uint32_t), compare_uint32_t);
+        // assume same number of tail samples on each side, left and right
+        uint32_t count = n / kTailDenominator;
+        CentralTendencyStatistics left, right;
+        for (uint32_t i = 0; i < count; ++i) {
+            left.sample(tail[i]);
+            right.sample(tail[n - (i + 1)]);
+        }
+        dprintf(fd, "  Distribution of mix cycle times in ms for the tails "
+                    "(> ~3 stddev outliers):\n"
+                    "    left tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n"
+                    "    right tail: mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
+                    left.mean()*1e-6, left.minimum()*1e-6, left.maximum()*1e-6, left.stddev()*1e-6,
+                    right.mean()*1e-6, right.minimum()*1e-6, right.maximum()*1e-6,
+                    right.stddev()*1e-6);
+        delete[] tail;
+    }
+#endif
+    // The active track mask and track states are updated non-atomically.
+    // So if we relied on isActive to decide whether to display,
+    // then we might display an obsolete track or omit an active track.
+    // Instead we always display all tracks, with an indication
+    // of whether we think the track is active.
+    uint32_t trackMask = mTrackMask;
+    dprintf(fd, "  Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
+            FastMixerState::kMaxFastTracks, trackMask);
+    dprintf(fd, "  Index Active Full Partial Empty  Recent Ready\n");
+    for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) {
+        bool isActive = trackMask & 1;
+        const FastTrackDump *ftDump = &mTracks[i];
+        const FastTrackUnderruns& underruns = ftDump->mUnderruns;
+        const char *mostRecent;
+        switch (underruns.mBitFields.mMostRecent) {
+        case UNDERRUN_FULL:
+            mostRecent = "full";
+            break;
+        case UNDERRUN_PARTIAL:
+            mostRecent = "partial";
+            break;
+        case UNDERRUN_EMPTY:
+            mostRecent = "empty";
+            break;
+        default:
+            mostRecent = "?";
+            break;
+        }
+        dprintf(fd, "  %5u %6s %4u %7u %5u %7s %5zu\n", i, isActive ? "yes" : "no",
+                (underruns.mBitFields.mFull) & UNDERRUN_MASK,
+                (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
+                (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
+                mostRecent, ftDump->mFramesReady);
+    }
+}
+
+}   // android
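
The statistics section of dump() above unpacks a single 32-bit bounds word (oldestClosed in the high half, newestOpen in the low half) and walks a power-of-two ring buffer of samples. A minimal sketch of that traversal with assumed values:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
        const uint32_t samplingN = 0x400;                 // must be a power of 2
        uint32_t monotonicNs[0x400] = {};                 // per-cycle wall-clock samples (empty here)

        uint32_t bounds = (3u << 16) | 9u;                // oldestClosed=3, newestOpen=9 (assumed)
        uint32_t newestOpen = bounds & 0xFFFF;
        uint32_t oldestClosed = bounds >> 16;
        uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
        if (n > samplingN) {
            n = samplingN;                                // clamp, as the dump code does
        }
        uint64_t sumNs = 0;
        for (uint32_t j = 0; j < n; ++j) {
            size_t i = oldestClosed++ & (samplingN - 1);  // power-of-two modulo
            sumNs += monotonicNs[i];
        }
        printf("averaged %u samples, mean=%.2f ms\n", n,
                n ? (sumNs / (double) n) * 1e-6 : 0.0);
        return 0;
    }
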
diff --git a/services/audioflinger/FastMixerDumpState.h b/services/audioflinger/FastMixerDumpState.h
index 6a1e4649..ac15e7c 100644
--- a/services/audioflinger/FastMixerDumpState.h
+++ b/services/audioflinger/FastMixerDumpState.h
@@ -17,7 +17,10 @@
 #ifndef ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
 #define ANDROID_AUDIO_FAST_MIXER_DUMP_STATE_H
 
+#include <stdint.h>
 #include "Configuration.h"
+#include "FastThreadDumpState.h"
+#include "FastMixerState.h"
 
 namespace android {
 
@@ -52,22 +55,12 @@
 struct FastTrackDump {
     FastTrackDump() : mFramesReady(0) { }
     /*virtual*/ ~FastTrackDump() { }
-    FastTrackUnderruns mUnderruns;
-    size_t mFramesReady;        // most recent value only; no long-term statistics kept
+    FastTrackUnderruns  mUnderruns;
+    size_t              mFramesReady;        // most recent value only; no long-term statistics kept
 };
 
-// The FastMixerDumpState keeps a cache of FastMixer statistics that can be logged by dumpsys.
-// Each individual native word-sized field is accessed atomically.  But the
-// overall structure is non-atomic, that is there may be an inconsistency between fields.
-// No barriers or locks are used for either writing or reading.
-// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
-// It has a different lifetime than the FastMixer, and so it can't be a member of FastMixer.
 struct FastMixerDumpState : FastThreadDumpState {
-    FastMixerDumpState(
-#ifdef FAST_MIXER_STATISTICS
-            uint32_t samplingN = kSamplingNforLowRamDevice
-#endif
-            );
+    FastMixerDumpState();
     /*virtual*/ ~FastMixerDumpState();
 
     void dump(int fd) const;    // should only be called on a stable copy, not the original
@@ -80,14 +73,6 @@
     size_t   mFrameCount;
     uint32_t mTrackMask;        // mask of active tracks
     FastTrackDump   mTracks[FastMixerState::kMaxFastTracks];
-
-#ifdef FAST_MIXER_STATISTICS
-    // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN.
-    // This value was chosen such that each array uses 1 small page (4 Kbytes).
-    static const uint32_t kSamplingNforLowRamDevice = 0x400;
-    // Increase sampling window after construction, must be a power of 2 <= kSamplingN
-    void    increaseSamplingN(uint32_t samplingN);
-#endif
 };
 
 }   // android
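
The removed comment block said the dump-state fields are plain POD words that may be mutually inconsistent, so readers should range-check them before use. A minimal sketch of such a defensive reader, with assumed sanity limits:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
        uint32_t sampleRate = 48000;   // as copied from a dump-state snapshot (assumed)
        size_t frameCount = 256;       // as copied from a dump-state snapshot (assumed)

        // limits below are illustrative, not values taken from the AOSP code
        if (sampleRate < 8000 || sampleRate > 192000 || frameCount == 0 || frameCount > 8192) {
            printf("dump state looks inconsistent, skipping period computation\n");
            return 0;
        }
        double periodMs = (double) frameCount / sampleRate * 1e3;
        printf("period=%.2f ms\n", periodMs);
        return 0;
    }
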
diff --git a/services/audioflinger/FastMixerState.cpp b/services/audioflinger/FastMixerState.cpp
index 3aa8dad..a8c2634 100644
--- a/services/audioflinger/FastMixerState.cpp
+++ b/services/audioflinger/FastMixerState.cpp
@@ -39,4 +39,19 @@
 {
 }
 
+// static
+const char *FastMixerState::commandToString(Command command)
+{
+    const char *str = FastThreadState::commandToString(command);
+    if (str != NULL) {
+        return str;
+    }
+    switch (command) {
+    case FastMixerState::MIX:       return "MIX";
+    case FastMixerState::WRITE:     return "WRITE";
+    case FastMixerState::MIX_WRITE: return "MIX_WRITE";
+    }
+    LOG_ALWAYS_FATAL("%s", __func__);
+}
+
 }   // namespace android
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 661c9ca..916514f 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -73,6 +73,9 @@
 
     // This might be a one-time configuration rather than per-state
     NBAIO_Sink* mTeeSink;       // if non-NULL, then duplicate write()s to this non-blocking sink
+
+    // never returns NULL; asserts if command is invalid
+    static const char *commandToString(Command command);
 };  // struct FastMixerState
 
 }   // namespace android
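
mTeeSink, when non-NULL, receives a duplicate of every write() for debugging. A minimal sketch of that tee pattern with a hypothetical Sink interface (not the real NBAIO one):

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/types.h>

    struct Sink {                      // hypothetical stand-in for NBAIO_Sink
        virtual ~Sink() {}
        virtual ssize_t write(const void *buffer, size_t count) = 0;
    };

    struct PrintSink : Sink {
        const char *mName;
        explicit PrintSink(const char *name) : mName(name) {}
        ssize_t write(const void *, size_t count) override {
            printf("%s: wrote %zu frames\n", mName, count);
            return (ssize_t) count;
        }
    };

    static void writeWithTee(Sink &outputSink, Sink *teeSink, const void *buf, size_t frames) {
        if (teeSink != NULL) {
            (void) teeSink->write(buf, frames);   // best-effort duplicate; result ignored
        }
        (void) outputSink.write(buf, frames);
    }

    int main() {
        PrintSink output("output"), tee("tee");
        short buffer[256 * 2] = {};
        writeWithTee(output, &tee, buffer, 256);
        writeWithTee(output, NULL, buffer, 256);  // no tee sink configured
        return 0;
    }
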
diff --git a/services/audioflinger/FastThread.cpp b/services/audioflinger/FastThread.cpp
index 216dace..5ca579b 100644
--- a/services/audioflinger/FastThread.cpp
+++ b/services/audioflinger/FastThread.cpp
@@ -25,54 +25,58 @@
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include "FastThread.h"
+#include "FastThreadDumpState.h"
 
 #define FAST_DEFAULT_NS    999999999L   // ~1 sec: default time to sleep
 #define FAST_HOT_IDLE_NS     1000000L   // 1 ms: time to sleep while hot idling
-#define MIN_WARMUP_CYCLES          2    // minimum number of loop cycles to wait for warmup
+#define MIN_WARMUP_CYCLES          2    // minimum number of consecutive in-range loop cycles
+                                        // to wait for warmup
 #define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup
 
 namespace android {
 
 FastThread::FastThread() : Thread(false /*canCallJava*/),
-    // re-initialized to &initial by subclass constructor
-     previous(NULL), current(NULL),
-    /* oldTs({0, 0}), */
-    oldTsValid(false),
-    sleepNs(-1),
-    periodNs(0),
-    underrunNs(0),
-    overrunNs(0),
-    forceNs(0),
-    warmupNs(0),
-    // re-initialized to &dummyDumpState by subclass constructor
+    // re-initialized to &sInitial by subclass constructor
+    mPrevious(NULL), mCurrent(NULL),
+    /* mOldTs({0, 0}), */
+    mOldTsValid(false),
+    mSleepNs(-1),
+    mPeriodNs(0),
+    mUnderrunNs(0),
+    mOverrunNs(0),
+    mForceNs(0),
+    mWarmupNsMin(0),
+    mWarmupNsMax(LONG_MAX),
+    // re-initialized to &mDummySubclassDumpState by subclass constructor
     mDummyDumpState(NULL),
-    dumpState(NULL),
-    ignoreNextOverrun(true),
-#ifdef FAST_MIXER_STATISTICS
-    // oldLoad
-    oldLoadValid(false),
-    bounds(0),
-    full(false),
-    // tcu
+    mDumpState(NULL),
+    mIgnoreNextOverrun(true),
+#ifdef FAST_THREAD_STATISTICS
+    // mOldLoad
+    mOldLoadValid(false),
+    mBounds(0),
+    mFull(false),
+    // mTcu
 #endif
-    coldGen(0),
-    isWarm(false),
-    /* measuredWarmupTs({0, 0}), */
-    warmupCycles(0),
-    // dummyLogWriter
-    logWriter(&dummyLogWriter),
-    timestampStatus(INVALID_OPERATION),
+    mColdGen(0),
+    mIsWarm(false),
+    /* mMeasuredWarmupTs({0, 0}), */
+    mWarmupCycles(0),
+    mWarmupConsecutiveInRangeCycles(0),
+    // mDummyLogWriter
+    mLogWriter(&mDummyLogWriter),
+    mTimestampStatus(INVALID_OPERATION),
 
-    command(FastThreadState::INITIAL),
+    mCommand(FastThreadState::INITIAL),
 #if 0
     frameCount(0),
 #endif
-    attemptedWrite(false)
+    mAttemptedWrite(false)
 {
-    oldTs.tv_sec = 0;
-    oldTs.tv_nsec = 0;
-    measuredWarmupTs.tv_sec = 0;
-    measuredWarmupTs.tv_nsec = 0;
+    mOldTs.tv_sec = 0;
+    mOldTs.tv_nsec = 0;
+    mMeasuredWarmupTs.tv_sec = 0;
+    mMeasuredWarmupTs.tv_nsec = 0;
 }
 
 FastThread::~FastThread()
@@ -84,34 +88,34 @@
     for (;;) {
 
         // either nanosleep, sched_yield, or busy wait
-        if (sleepNs >= 0) {
-            if (sleepNs > 0) {
-                ALOG_ASSERT(sleepNs < 1000000000);
-                const struct timespec req = {0, sleepNs};
+        if (mSleepNs >= 0) {
+            if (mSleepNs > 0) {
+                ALOG_ASSERT(mSleepNs < 1000000000);
+                const struct timespec req = {0, mSleepNs};
                 nanosleep(&req, NULL);
             } else {
                 sched_yield();
             }
         }
         // default to long sleep for next cycle
-        sleepNs = FAST_DEFAULT_NS;
+        mSleepNs = FAST_DEFAULT_NS;
 
         // poll for state change
         const FastThreadState *next = poll();
         if (next == NULL) {
             // continue to use the default initial state until a real state is available
-            // FIXME &initial not available, should save address earlier
-            //ALOG_ASSERT(current == &initial && previous == &initial);
-            next = current;
+            // FIXME &sInitial not available, should save address earlier
+            //ALOG_ASSERT(mCurrent == &sInitial && mPrevious == &sInitial);
+            next = mCurrent;
         }
 
-        command = next->mCommand;
-        if (next != current) {
+        mCommand = next->mCommand;
+        if (next != mCurrent) {
 
             // As soon as possible of learning of a new dump area, start using it
-            dumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
-            logWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &dummyLogWriter;
-            setLog(logWriter);
+            mDumpState = next->mDumpState != NULL ? next->mDumpState : mDummyDumpState;
+            mLogWriter = next->mNBLogWriter != NULL ? next->mNBLogWriter : &mDummyLogWriter;
+            setLog(mLogWriter);
 
             // We want to always have a valid reference to the previous (non-idle) state.
             // However, the state queue only guarantees access to current and previous states.
@@ -122,37 +126,38 @@
             //  non-idle -> idle        update previous from copy of current
             //  idle     -> idle        don't update previous
             //  idle     -> non-idle    don't update previous
-            if (!(current->mCommand & FastThreadState::IDLE)) {
-                if (command & FastThreadState::IDLE) {
+            if (!(mCurrent->mCommand & FastThreadState::IDLE)) {
+                if (mCommand & FastThreadState::IDLE) {
                     onIdle();
-                    oldTsValid = false;
-#ifdef FAST_MIXER_STATISTICS
-                    oldLoadValid = false;
+                    mOldTsValid = false;
+#ifdef FAST_THREAD_STATISTICS
+                    mOldLoadValid = false;
 #endif
-                    ignoreNextOverrun = true;
+                    mIgnoreNextOverrun = true;
                 }
-                previous = current;
+                mPrevious = mCurrent;
             }
-            current = next;
+            mCurrent = next;
         }
 #if !LOG_NDEBUG
         next = NULL;    // not referenced again
 #endif
 
-        dumpState->mCommand = command;
+        mDumpState->mCommand = mCommand;
 
+        // FIXME what does this comment mean?
         // << current, previous, command, dumpState >>
 
-        switch (command) {
+        switch (mCommand) {
         case FastThreadState::INITIAL:
         case FastThreadState::HOT_IDLE:
-            sleepNs = FAST_HOT_IDLE_NS;
+            mSleepNs = FAST_HOT_IDLE_NS;
             continue;
         case FastThreadState::COLD_IDLE:
             // only perform a cold idle command once
             // FIXME consider checking previous state and only perform if previous != COLD_IDLE
-            if (current->mColdGen != coldGen) {
-                int32_t *coldFutexAddr = current->mColdFutexAddr;
+            if (mCurrent->mColdGen != mColdGen) {
+                int32_t *coldFutexAddr = mCurrent->mColdFutexAddr;
                 ALOG_ASSERT(coldFutexAddr != NULL);
                 int32_t old = android_atomic_dec(coldFutexAddr);
                 if (old <= 0) {
@@ -164,41 +169,42 @@
                 }
                 // This may be overly conservative; there could be times that the normal mixer
                 // requests such a brief cold idle that it doesn't require resetting this flag.
-                isWarm = false;
-                measuredWarmupTs.tv_sec = 0;
-                measuredWarmupTs.tv_nsec = 0;
-                warmupCycles = 0;
-                sleepNs = -1;
-                coldGen = current->mColdGen;
-#ifdef FAST_MIXER_STATISTICS
-                bounds = 0;
-                full = false;
+                mIsWarm = false;
+                mMeasuredWarmupTs.tv_sec = 0;
+                mMeasuredWarmupTs.tv_nsec = 0;
+                mWarmupCycles = 0;
+                mWarmupConsecutiveInRangeCycles = 0;
+                mSleepNs = -1;
+                mColdGen = mCurrent->mColdGen;
+#ifdef FAST_THREAD_STATISTICS
+                mBounds = 0;
+                mFull = false;
 #endif
-                oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
-                timestampStatus = INVALID_OPERATION;
+                mOldTsValid = !clock_gettime(CLOCK_MONOTONIC, &mOldTs);
+                mTimestampStatus = INVALID_OPERATION;
             } else {
-                sleepNs = FAST_HOT_IDLE_NS;
+                mSleepNs = FAST_HOT_IDLE_NS;
             }
             continue;
         case FastThreadState::EXIT:
             onExit();
             return false;
         default:
-            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(command));
+            LOG_ALWAYS_FATAL_IF(!isSubClassCommand(mCommand));
             break;
         }
 
         // there is a non-idle state available to us; did the state change?
-        if (current != previous) {
+        if (mCurrent != mPrevious) {
             onStateChange();
 #if 1   // FIXME shouldn't need this
             // only process state change once
-            previous = current;
+            mPrevious = mCurrent;
 #endif
         }
 
         // do work using current state here
-        attemptedWrite = false;
+        mAttemptedWrite = false;
         onWork();
 
         // To be exactly periodic, compute the next sleep time based on current time.
@@ -207,13 +213,13 @@
         struct timespec newTs;
         int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
         if (rc == 0) {
-            //logWriter->logTimestamp(newTs);
-            if (oldTsValid) {
-                time_t sec = newTs.tv_sec - oldTs.tv_sec;
-                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
+            //mLogWriter->logTimestamp(newTs);
+            if (mOldTsValid) {
+                time_t sec = newTs.tv_sec - mOldTs.tv_sec;
+                long nsec = newTs.tv_nsec - mOldTs.tv_nsec;
                 ALOGE_IF(sec < 0 || (sec == 0 && nsec < 0),
                         "clock_gettime(CLOCK_MONOTONIC) failed: was %ld.%09ld but now %ld.%09ld",
-                        oldTs.tv_sec, oldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
+                        mOldTs.tv_sec, mOldTs.tv_nsec, newTs.tv_sec, newTs.tv_nsec);
                 if (nsec < 0) {
                     --sec;
                     nsec += 1000000000;
@@ -221,62 +227,70 @@
                 // To avoid an initial underrun on fast tracks after exiting standby,
                 // do not start pulling data from tracks and mixing until warmup is complete.
                 // Warmup is considered complete after the earlier of:
-                //      MIN_WARMUP_CYCLES write() attempts and last one blocks for at least warmupNs
+                //      MIN_WARMUP_CYCLES consecutive in-range write() attempts,
+                //          where "in-range" means mWarmupNsMin <= cycle time <= mWarmupNsMax
                 //      MAX_WARMUP_CYCLES write() attempts.
                 // This is overly conservative, but to get better accuracy requires a new HAL API.
-                if (!isWarm && attemptedWrite) {
-                    measuredWarmupTs.tv_sec += sec;
-                    measuredWarmupTs.tv_nsec += nsec;
-                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
-                        measuredWarmupTs.tv_sec++;
-                        measuredWarmupTs.tv_nsec -= 1000000000;
+                if (!mIsWarm && mAttemptedWrite) {
+                    mMeasuredWarmupTs.tv_sec += sec;
+                    mMeasuredWarmupTs.tv_nsec += nsec;
+                    if (mMeasuredWarmupTs.tv_nsec >= 1000000000) {
+                        mMeasuredWarmupTs.tv_sec++;
+                        mMeasuredWarmupTs.tv_nsec -= 1000000000;
                     }
-                    ++warmupCycles;
-                    if ((nsec > warmupNs && warmupCycles >= MIN_WARMUP_CYCLES) ||
-                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
-                        isWarm = true;
-                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
-                        dumpState->mWarmupCycles = warmupCycles;
+                    ++mWarmupCycles;
+                    if (mWarmupNsMin <= nsec && nsec <= mWarmupNsMax) {
+                        ALOGV("warmup cycle %d in range: %.03f ms", mWarmupCycles, nsec * 1e-9);
+                        ++mWarmupConsecutiveInRangeCycles;
+                    } else {
+                        ALOGV("warmup cycle %d out of range: %.03f ms", mWarmupCycles, nsec * 1e-9);
+                        mWarmupConsecutiveInRangeCycles = 0;
+                    }
+                    if ((mWarmupConsecutiveInRangeCycles >= MIN_WARMUP_CYCLES) ||
+                            (mWarmupCycles >= MAX_WARMUP_CYCLES)) {
+                        mIsWarm = true;
+                        mDumpState->mMeasuredWarmupTs = mMeasuredWarmupTs;
+                        mDumpState->mWarmupCycles = mWarmupCycles;
                     }
                 }
-                sleepNs = -1;
-                if (isWarm) {
-                    if (sec > 0 || nsec > underrunNs) {
+                mSleepNs = -1;
+                if (mIsWarm) {
+                    if (sec > 0 || nsec > mUnderrunNs) {
                         ATRACE_NAME("underrun");
                         // FIXME only log occasionally
                         ALOGV("underrun: time since last cycle %d.%03ld sec",
                                 (int) sec, nsec / 1000000L);
-                        dumpState->mUnderruns++;
-                        ignoreNextOverrun = true;
-                    } else if (nsec < overrunNs) {
-                        if (ignoreNextOverrun) {
-                            ignoreNextOverrun = false;
+                        mDumpState->mUnderruns++;
+                        mIgnoreNextOverrun = true;
+                    } else if (nsec < mOverrunNs) {
+                        if (mIgnoreNextOverrun) {
+                            mIgnoreNextOverrun = false;
                         } else {
                             // FIXME only log occasionally
                             ALOGV("overrun: time since last cycle %d.%03ld sec",
                                     (int) sec, nsec / 1000000L);
-                            dumpState->mOverruns++;
+                            mDumpState->mOverruns++;
                         }
                         // This forces a minimum cycle time. It:
                         //  - compensates for an audio HAL with jitter due to sample rate conversion
                         //  - works with a variable buffer depth audio HAL that never pulls at a
-                        //    rate < than overrunNs per buffer.
+                        //    rate less than mOverrunNs per buffer.
                         //  - recovers from overrun immediately after underrun
                         // It doesn't work with a non-blocking audio HAL.
-                        sleepNs = forceNs - nsec;
+                        mSleepNs = mForceNs - nsec;
                     } else {
-                        ignoreNextOverrun = false;
+                        mIgnoreNextOverrun = false;
                     }
                 }
-#ifdef FAST_MIXER_STATISTICS
-                if (isWarm) {
+#ifdef FAST_THREAD_STATISTICS
+                if (mIsWarm) {
                     // advance the FIFO queue bounds
-                    size_t i = bounds & (dumpState->mSamplingN - 1);
-                    bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
-                    if (full) {
-                        bounds += 0x10000;
-                    } else if (!(bounds & (dumpState->mSamplingN - 1))) {
-                        full = true;
+                    size_t i = mBounds & (mDumpState->mSamplingN - 1);
+                    mBounds = (mBounds & 0xFFFF0000) | ((mBounds + 1) & 0xFFFF);
+                    if (mFull) {
+                        mBounds += 0x10000;
+                    } else if (!(mBounds & (mDumpState->mSamplingN - 1))) {
+                        mFull = true;
                     }
                     // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                     uint32_t monotonicNs = nsec;
@@ -288,9 +302,9 @@
                     struct timespec newLoad;
                     rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                     if (rc == 0) {
-                        if (oldLoadValid) {
-                            sec = newLoad.tv_sec - oldLoad.tv_sec;
-                            nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
+                        if (mOldLoadValid) {
+                            sec = newLoad.tv_sec - mOldLoad.tv_sec;
+                            nsec = newLoad.tv_nsec - mOldLoad.tv_nsec;
                             if (nsec < 0) {
                                 --sec;
                                 nsec += 1000000000;
@@ -301,42 +315,42 @@
                             }
                         } else {
                             // first time through the loop
-                            oldLoadValid = true;
+                            mOldLoadValid = true;
                         }
-                        oldLoad = newLoad;
+                        mOldLoad = newLoad;
                     }
 #ifdef CPU_FREQUENCY_STATISTICS
                     // get the absolute value of CPU clock frequency in kHz
                     int cpuNum = sched_getcpu();
-                    uint32_t kHz = tcu.getCpukHz(cpuNum);
+                    uint32_t kHz = mTcu.getCpukHz(cpuNum);
                     kHz = (kHz << 4) | (cpuNum & 0xF);
 #endif
                     // save values in FIFO queues for dumpsys
                     // these stores #1, #2, #3 are not atomic with respect to each other,
                     // or with respect to store #4 below
-                    dumpState->mMonotonicNs[i] = monotonicNs;
-                    dumpState->mLoadNs[i] = loadNs;
+                    mDumpState->mMonotonicNs[i] = monotonicNs;
+                    mDumpState->mLoadNs[i] = loadNs;
 #ifdef CPU_FREQUENCY_STATISTICS
-                    dumpState->mCpukHz[i] = kHz;
+                    mDumpState->mCpukHz[i] = kHz;
 #endif
                     // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                     // the newest open & oldest closed halves are atomic with respect to each other
-                    dumpState->mBounds = bounds;
+                    mDumpState->mBounds = mBounds;
                     ATRACE_INT("cycle_ms", monotonicNs / 1000000);
                     ATRACE_INT("load_us", loadNs / 1000);
                 }
 #endif
             } else {
                 // first time through the loop
-                oldTsValid = true;
-                sleepNs = periodNs;
-                ignoreNextOverrun = true;
+                mOldTsValid = true;
+                mSleepNs = mPeriodNs;
+                mIgnoreNextOverrun = true;
             }
-            oldTs = newTs;
+            mOldTs = newTs;
         } else {
             // monotonic clock is broken
-            oldTsValid = false;
-            sleepNs = periodNs;
+            mOldTsValid = false;
+            mSleepNs = mPeriodNs;
         }
 
     }   // for (;;)
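
The warmup rework in the FastThread.cpp hunk above replaces the single warmupNs threshold with an [mWarmupNsMin, mWarmupNsMax] window: a write() cycle counts toward warmup only if its measured duration falls inside the window, one out-of-range cycle resets the streak, and MAX_WARMUP_CYCLES still caps the total wait. A minimal standalone sketch of that rule, with assumed thresholds and illustrative names rather than the real FastThread members:

    #include <cstdint>

    // Sketch of the consecutive-in-range warmup rule; names and thresholds are illustrative.
    struct WarmupTracker {
        long     warmupNsMin = 10000000;    // assumed 10 ms lower bound (HAL-dependent in reality)
        long     warmupNsMax = 30000000;    // assumed 30 ms upper bound
        uint32_t cycles = 0;                // total write() attempts seen during warmup
        uint32_t consecutiveInRange = 0;    // current streak of in-range attempts
        bool     warm = false;

        static const uint32_t kMinWarmupCycles = 2;    // mirrors MIN_WARMUP_CYCLES
        static const uint32_t kMaxWarmupCycles = 10;   // mirrors MAX_WARMUP_CYCLES

        // Feed one measured cycle time in nanoseconds; returns true once warmup is complete.
        bool onCycle(long nsec) {
            if (warm) {
                return true;
            }
            ++cycles;
            if (warmupNsMin <= nsec && nsec <= warmupNsMax) {
                ++consecutiveInRange;       // another plausible cycle time
            } else {
                consecutiveInRange = 0;     // one outlier restarts the streak
            }
            if (consecutiveInRange >= kMinWarmupCycles || cycles >= kMaxWarmupCycles) {
                warm = true;                // enough good cycles, or give up waiting
            }
            return warm;
        }
    };
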
diff --git a/services/audioflinger/FastThread.h b/services/audioflinger/FastThread.h
index 1330334..2efb6de 100644
--- a/services/audioflinger/FastThread.h
+++ b/services/audioflinger/FastThread.h
@@ -48,42 +48,45 @@
     virtual void onStateChange() = 0;
     virtual void onWork() = 0;
 
-    // FIXME these former local variables need comments and to be renamed to have an "m" prefix
-    const FastThreadState *previous;
-    const FastThreadState *current;
-    struct timespec oldTs;
-    bool oldTsValid;
-    long sleepNs;   // -1: busy wait, 0: sched_yield, > 0: nanosleep
-    long periodNs;      // expected period; the time required to render one mix buffer
-    long underrunNs;    // underrun likely when write cycle is greater than this value
-    long overrunNs;     // overrun likely when write cycle is less than this value
-    long forceNs;       // if overrun detected, force the write cycle to take this much time
-    long warmupNs;      // warmup complete when write cycle is greater than to this value
-    FastThreadDumpState *mDummyDumpState;
-    FastThreadDumpState *dumpState;
-    bool ignoreNextOverrun;  // used to ignore initial overrun and first after an underrun
-#ifdef FAST_MIXER_STATISTICS
-    struct timespec oldLoad;    // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
-    bool oldLoadValid;  // whether oldLoad is valid
-    uint32_t bounds;
-    bool full;          // whether we have collected at least mSamplingN samples
+    // FIXME these former local variables need comments
+    const FastThreadState*  mPrevious;
+    const FastThreadState*  mCurrent;
+    struct timespec mOldTs;
+    bool            mOldTsValid;
+    long            mSleepNs;       // -1: busy wait, 0: sched_yield, > 0: nanosleep
+    long            mPeriodNs;      // expected period; the time required to render one mix buffer
+    long            mUnderrunNs;    // underrun likely when write cycle is greater than this value
+    long            mOverrunNs;     // overrun likely when write cycle is less than this value
+    long            mForceNs;       // if overrun detected,
+                                    // force the write cycle to take this much time
+    long            mWarmupNsMin;   // warmup complete when write cycle is greater than or equal to
+                                    // this value
+    long            mWarmupNsMax;   // and less than or equal to this value
+    FastThreadDumpState* mDummyDumpState;
+    FastThreadDumpState* mDumpState;
+    bool            mIgnoreNextOverrun;     // used to ignore initial overrun and first after an
+                                            // underrun
+#ifdef FAST_THREAD_STATISTICS
+    struct timespec mOldLoad;       // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
+    bool            mOldLoadValid;  // whether mOldLoad is valid
+    uint32_t        mBounds;
+    bool            mFull;          // whether we have collected at least mSamplingN samples
 #ifdef CPU_FREQUENCY_STATISTICS
-    ThreadCpuUsage tcu;     // for reading the current CPU clock frequency in kHz
+    ThreadCpuUsage  mTcu;           // for reading the current CPU clock frequency in kHz
 #endif
 #endif
-    unsigned coldGen;   // last observed mColdGen
-    bool isWarm;        // true means ready to mix, false means wait for warmup before mixing
-    struct timespec measuredWarmupTs;  // how long did it take for warmup to complete
-    uint32_t warmupCycles;  // counter of number of loop cycles required to warmup
-    NBLog::Writer dummyLogWriter;
-    NBLog::Writer *logWriter;
-    status_t timestampStatus;
+    unsigned        mColdGen;       // last observed mColdGen
+    bool            mIsWarm;        // true means ready to mix,
+                                    // false means wait for warmup before mixing
+    struct timespec mMeasuredWarmupTs;  // how long did it take for warmup to complete
+    uint32_t        mWarmupCycles;  // counter of number of loop cycles during warmup phase
+    uint32_t        mWarmupConsecutiveInRangeCycles;    // number of consecutive cycles in range
+    NBLog::Writer   mDummyLogWriter;
+    NBLog::Writer*  mLogWriter;
+    status_t        mTimestampStatus;
 
-    FastThreadState::Command command;
-#if 0
-    size_t frameCount;
-#endif
-    bool attemptedWrite;
+    FastThreadState::Command mCommand;
+    bool            mAttemptedWrite;
 
 };  // class FastThread
 
diff --git a/services/audioflinger/FastThreadDumpState.cpp b/services/audioflinger/FastThreadDumpState.cpp
new file mode 100644
index 0000000..9df5c4c
--- /dev/null
+++ b/services/audioflinger/FastThreadDumpState.cpp
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "FastThreadDumpState.h"
+
+namespace android {
+
+FastThreadDumpState::FastThreadDumpState() :
+    mCommand(FastThreadState::INITIAL), mUnderruns(0), mOverruns(0),
+    /* mMeasuredWarmupTs({0, 0}), */
+    mWarmupCycles(0)
+#ifdef FAST_THREAD_STATISTICS
+    , mSamplingN(0), mBounds(0)
+#endif
+{
+    mMeasuredWarmupTs.tv_sec = 0;
+    mMeasuredWarmupTs.tv_nsec = 0;
+#ifdef FAST_THREAD_STATISTICS
+    increaseSamplingN(1);
+#endif
+}
+
+FastThreadDumpState::~FastThreadDumpState()
+{
+}
+
+#ifdef FAST_THREAD_STATISTICS
+void FastThreadDumpState::increaseSamplingN(uint32_t samplingN)
+{
+    if (samplingN <= mSamplingN || samplingN > kSamplingN || roundup(samplingN) != samplingN) {
+        return;
+    }
+    uint32_t additional = samplingN - mSamplingN;
+    // sample arrays aren't accessed atomically with respect to the bounds,
+    // so clearing reduces chance for dumpsys to read random uninitialized samples
+    memset(&mMonotonicNs[mSamplingN], 0, sizeof(mMonotonicNs[0]) * additional);
+    memset(&mLoadNs[mSamplingN], 0, sizeof(mLoadNs[0]) * additional);
+#ifdef CPU_FREQUENCY_STATISTICS
+    memset(&mCpukHz[mSamplingN], 0, sizeof(mCpukHz[0]) * additional);
+#endif
+    mSamplingN = samplingN;
+}
+#endif
+
+}   // android
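
increaseSamplingN() above only ever grows the sampling window, only to a power of two no larger than the compile-time cap, and it zeroes just the newly exposed tail so a concurrent dumpsys reader never sees uninitialized samples. A hedged sketch of the same guard, with an assumed next-power-of-two helper standing in for the roundup() used by the real code, and a deliberately small cap:

    #include <cstdint>
    #include <cstring>

    // Stand-in for the real roundup(): smallest power of two >= v (assumed helper).
    static uint32_t roundUpPow2(uint32_t v) {
        uint32_t p = 1;
        while (p < v) {
            p <<= 1;
        }
        return p;
    }

    struct SamplingWindow {
        static const uint32_t kSamplingN = 0x400;    // small cap for the sketch (power of 2)
        uint32_t samplingN = 1;                      // current runtime window size
        uint32_t monotonicNs[kSamplingN] = {};       // one of the per-cycle sample arrays

        void increaseSamplingN(uint32_t newN) {
            // Reject shrinking, exceeding the compile-time cap, and non-powers of two.
            if (newN <= samplingN || newN > kSamplingN || roundUpPow2(newN) != newN) {
                return;
            }
            // Clear only the newly exposed tail; existing samples stay valid.
            memset(&monotonicNs[samplingN], 0, sizeof(monotonicNs[0]) * (newN - samplingN));
            samplingN = newN;
        }
    };
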
diff --git a/services/audioflinger/FastThreadDumpState.h b/services/audioflinger/FastThreadDumpState.h
new file mode 100644
index 0000000..1ce0914
--- /dev/null
+++ b/services/audioflinger/FastThreadDumpState.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_AUDIO_FAST_THREAD_DUMP_STATE_H
+#define ANDROID_AUDIO_FAST_THREAD_DUMP_STATE_H
+
+#include "Configuration.h"
+#include "FastThreadState.h"
+
+namespace android {
+
+// The FastThreadDumpState keeps a cache of FastThread statistics that can be logged by dumpsys.
+// Each individual native word-sized field is accessed atomically.  But the
+// overall structure is non-atomic, that is, there may be an inconsistency between fields.
+// No barriers or locks are used for either writing or reading.
+// Only POD types are permitted, and the contents shouldn't be trusted (i.e. do range checks).
+// It has a different lifetime than the FastThread, and so it can't be a member of FastThread.
+struct FastThreadDumpState {
+    FastThreadDumpState();
+    /*virtual*/ ~FastThreadDumpState();
+
+    FastThreadState::Command mCommand;   // current command
+    uint32_t mUnderruns;        // total number of underruns
+    uint32_t mOverruns;         // total number of overruns
+    struct timespec mMeasuredWarmupTs;  // measured warmup time
+    uint32_t mWarmupCycles;     // number of loop cycles required to warmup
+
+#ifdef FAST_THREAD_STATISTICS
+    // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency.
+    // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000.
+    // The sample arrays are virtually allocated based on this compile-time constant,
+    // but are only initialized and used based on the runtime parameter mSamplingN.
+    static const uint32_t kSamplingN = 0x8000;
+    // Compile-time constant for a "low RAM device", must be a power of 2 <= kSamplingN.
+    // This value was chosen such that each array uses 1 small page (4 Kbytes).
+    static const uint32_t kSamplingNforLowRamDevice = 0x400;
+    // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN.
+    uint32_t mSamplingN;
+    // The bounds define the interval of valid samples, and are represented as follows:
+    //      newest open (excluded) endpoint   = lower 16 bits of bounds, modulo N
+    //      oldest closed (included) endpoint = upper 16 bits of bounds, modulo N
+    // Number of valid samples is newest - oldest.
+    uint32_t mBounds;                   // bounds for mMonotonicNs, mThreadCpuNs, and mCpukHz
+    // The elements in the *Ns arrays are in units of nanoseconds <= 3999999999.
+    uint32_t mMonotonicNs[kSamplingN];  // delta monotonic (wall clock) time
+    uint32_t mLoadNs[kSamplingN];       // delta CPU load in time
+#ifdef CPU_FREQUENCY_STATISTICS
+    uint32_t mCpukHz[kSamplingN];       // absolute CPU clock frequency in kHz, bits 0-3 are CPU#
+#endif
+
+    // Increase sampling window after construction, must be a power of 2 <= kSamplingN
+    void    increaseSamplingN(uint32_t samplingN);
+#endif
+
+};  // struct FastThreadDumpState
+
+}   // android
+
+#endif  // ANDROID_AUDIO_FAST_THREAD_DUMP_STATE_H
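
The mBounds word documented above packs both endpoints into a single 32-bit value so a reader can snapshot them together without locks. A hedged sketch of how a dumpsys-style consumer might decode it; the struct and the clamping policy are illustrative, not the actual dump code:

    #include <cstdint>
    #include <cstdio>

    struct BoundsView {
        uint32_t oldestClosed;    // index of the oldest valid sample (included)
        uint32_t newestOpen;      // index one past the newest valid sample (excluded)
        uint32_t count;           // number of valid samples
    };

    static BoundsView decodeBounds(uint32_t bounds, uint32_t samplingN) {
        BoundsView v;
        v.oldestClosed = (bounds >> 16) & 0xFFFF;                   // upper 16 bits
        v.newestOpen   = bounds & 0xFFFF;                           // lower 16 bits
        v.count        = (v.newestOpen - v.oldestClosed) & 0xFFFF;  // modulo 2^16
        if (v.count > samplingN) {
            v.count = samplingN;            // a defensive reader would clamp to the window
        }
        v.oldestClosed &= (samplingN - 1);  // indices are used modulo N
        v.newestOpen   &= (samplingN - 1);
        return v;
    }

    int main() {
        BoundsView v = decodeBounds(0x00010005, 0x400);   // oldest = 1, newest = 5
        printf("oldest=%u newest=%u count=%u\n", v.oldestClosed, v.newestOpen, v.count);
        return 0;                                         // prints oldest=1 newest=5 count=4
    }
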
diff --git a/services/audioflinger/FastThreadState.cpp b/services/audioflinger/FastThreadState.cpp
index 6994872..ad5f31f 100644
--- a/services/audioflinger/FastThreadState.cpp
+++ b/services/audioflinger/FastThreadState.cpp
@@ -29,21 +29,16 @@
 {
 }
 
-
-FastThreadDumpState::FastThreadDumpState() :
-    mCommand(FastThreadState::INITIAL), mUnderruns(0), mOverruns(0),
-    /* mMeasuredWarmupTs({0, 0}), */
-    mWarmupCycles(0)
-#ifdef FAST_MIXER_STATISTICS
-    , mSamplingN(1), mBounds(0)
-#endif
+// static
+const char *FastThreadState::commandToString(FastThreadState::Command command)
 {
-    mMeasuredWarmupTs.tv_sec = 0;
-    mMeasuredWarmupTs.tv_nsec = 0;
-}
-
-FastThreadDumpState::~FastThreadDumpState()
-{
+    switch (command) {
+    case FastThreadState::INITIAL:      return "INITIAL";
+    case FastThreadState::HOT_IDLE:     return "HOT_IDLE";
+    case FastThreadState::COLD_IDLE:    return "COLD_IDLE";
+    case FastThreadState::EXIT:         return "EXIT";
+    }
+    return NULL;
 }
 
 }   // namespace android
diff --git a/services/audioflinger/FastThreadState.h b/services/audioflinger/FastThreadState.h
index 1ab8a0a..f18f846 100644
--- a/services/audioflinger/FastThreadState.h
+++ b/services/audioflinger/FastThreadState.h
@@ -46,43 +46,10 @@
     FastThreadDumpState* mDumpState; // if non-NULL, then update dump state periodically
     NBLog::Writer* mNBLogWriter; // non-blocking logger
 
+    // returns NULL if command belongs to a subclass
+    static const char *commandToString(Command command);
 };  // struct FastThreadState
 
-
-// FIXME extract common part of comment at FastMixerDumpState
-struct FastThreadDumpState {
-    FastThreadDumpState();
-    /*virtual*/ ~FastThreadDumpState();
-
-    FastThreadState::Command mCommand;   // current command
-    uint32_t mUnderruns;        // total number of underruns
-    uint32_t mOverruns;         // total number of overruns
-    struct timespec mMeasuredWarmupTs;  // measured warmup time
-    uint32_t mWarmupCycles;     // number of loop cycles required to warmup
-
-#ifdef FAST_MIXER_STATISTICS
-    // Recently collected samples of per-cycle monotonic time, thread CPU time, and CPU frequency.
-    // kSamplingN is max size of sampling frame (statistics), and must be a power of 2 <= 0x8000.
-    // The sample arrays are virtually allocated based on this compile-time constant,
-    // but are only initialized and used based on the runtime parameter mSamplingN.
-    static const uint32_t kSamplingN = 0x8000;
-    // Corresponding runtime maximum size of sample arrays, must be a power of 2 <= kSamplingN.
-    uint32_t mSamplingN;
-    // The bounds define the interval of valid samples, and are represented as follows:
-    //      newest open (excluded) endpoint   = lower 16 bits of bounds, modulo N
-    //      oldest closed (included) endpoint = upper 16 bits of bounds, modulo N
-    // Number of valid samples is newest - oldest.
-    uint32_t mBounds;                   // bounds for mMonotonicNs, mThreadCpuNs, and mCpukHz
-    // The elements in the *Ns arrays are in units of nanoseconds <= 3999999999.
-    uint32_t mMonotonicNs[kSamplingN];  // delta monotonic (wall clock) time
-    uint32_t mLoadNs[kSamplingN];       // delta CPU load in time
-#ifdef CPU_FREQUENCY_STATISTICS
-    uint32_t mCpukHz[kSamplingN];       // absolute CPU clock frequency in kHz, bits 0-3 are CPU#
-#endif
-#endif
-
-};  // struct FastThreadDumpState
-
 }   // android
 
 #endif  // ANDROID_AUDIO_FAST_THREAD_STATE_H
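
commandToString() above deliberately returns NULL for any command it does not own, so a subclass state (FastMixerState, FastCaptureState) can consult the base table first and then name its own commands. A minimal sketch of that fallback chain; the enum values and the mixer-style subclass function are illustrative stand-ins, not the real tables:

    #include <cstdio>

    typedef int Command;
    enum { INITIAL = 1, HOT_IDLE = 2, COLD_IDLE = 4, EXIT = 8, MIX = 0x18 };  // illustrative values

    // Stand-in for FastThreadState::commandToString(): knows only the base commands.
    static const char *baseCommandToString(Command c) {
        switch (c) {
        case INITIAL:   return "INITIAL";
        case HOT_IDLE:  return "HOT_IDLE";
        case COLD_IDLE: return "COLD_IDLE";
        case EXIT:      return "EXIT";
        }
        return nullptr;                     // not a base command: let a subclass name it
    }

    // Stand-in for a subclass commandToString(): try the base table, then its own.
    static const char *mixerCommandToString(Command c) {
        if (const char *name = baseCommandToString(c)) {
            return name;
        }
        switch (c) {
        case MIX:       return "MIX";
        }
        return nullptr;
    }

    int main() {
        printf("%s %s\n", mixerCommandToString(EXIT), mixerCommandToString(MIX));  // "EXIT MIX"
        return 0;
    }
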
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 4f0c6b1..f6078a2 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -200,26 +200,17 @@
                     status = BAD_VALUE;
                     goto exit;
                 }
-                // limit to connections between devices and input streams for HAL before 3.0
-                if (patch->sinks[i].ext.mix.hw_module == srcModule &&
-                        (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) &&
-                        (patch->sinks[i].type != AUDIO_PORT_TYPE_MIX)) {
-                    ALOGW("createAudioPatch() invalid sink type %d for device source",
-                          patch->sinks[i].type);
-                    status = BAD_VALUE;
-                    goto exit;
-                }
             }
 
-            if (patch->sinks[0].ext.device.hw_module != srcModule) {
-                // limit to device to device connection if not on same hw module
-                if (patch->sinks[0].type != AUDIO_PORT_TYPE_DEVICE) {
-                    ALOGW("createAudioPatch() invalid sink type for cross hw module");
-                    status = INVALID_OPERATION;
-                    goto exit;
-                }
-                // special case num sources == 2 -=> reuse an exiting output mix to connect to the
-                // sink
+            // manage patches requiring a software bridge
+            // - Device to device AND
+            //    - source HW module != destination HW module OR
+            //    - audio HAL version < 3.0 OR
+            //    - special patch request with 2 sources (reuse one existing output mix)
+            if ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
+                    ((patch->sinks[0].ext.device.hw_module != srcModule) ||
+                    (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) ||
+                    (patch->num_sources == 2))) {
                 if (patch->num_sources == 2) {
                     if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
                             patch->sinks[0].ext.device.hw_module !=
@@ -283,52 +274,29 @@
                     goto exit;
                 }
             } else {
-                if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                    if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                        sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                  patch->sinks[0].ext.mix.handle);
-                        if (thread == 0) {
-                            ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                            status = BAD_VALUE;
-                            goto exit;
-                        }
-                        status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
-                    } else {
-                        audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                        status = hwDevice->create_audio_patch(hwDevice,
-                                                               patch->num_sources,
-                                                               patch->sources,
-                                                               patch->num_sinks,
-                                                               patch->sinks,
-                                                               &halHandle);
-                    }
-                } else {
+                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                     sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
+                                                              patch->sinks[0].ext.mix.handle);
                     if (thread == 0) {
                         ALOGW("createAudioPatch() bad capture I/O handle %d",
-                                                                    patch->sinks[0].ext.mix.handle);
+                                                              patch->sinks[0].ext.mix.handle);
                         status = BAD_VALUE;
                         goto exit;
                     }
-                    char *address;
-                    if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
-                        address = audio_device_address_to_parameter(
-                                                            patch->sources[0].ext.device.type,
-                                                            patch->sources[0].ext.device.address);
-                    } else {
-                        address = (char *)calloc(1, 1);
+                    status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
+                } else {
+                    if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
+                        status = INVALID_OPERATION;
+                        goto exit;
                     }
-                    AudioParameter param = AudioParameter(String8(address));
-                    free(address);
-                    param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
-                                 (int)patch->sources[0].ext.device.type);
-                    param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
-                                                     (int)patch->sinks[0].ext.mix.usecase.source);
-                    ALOGV("createAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
-                                                                      param.toString().string());
-                    status = thread->setParameters(param.toString());
+
+                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
+                    status = hwDevice->create_audio_patch(hwDevice,
+                                                           patch->num_sources,
+                                                           patch->sources,
+                                                           patch->num_sinks,
+                                                           patch->sinks,
+                                                           &halHandle);
                 }
             }
         } break;
@@ -341,6 +309,7 @@
                 goto exit;
             }
             // limit to connections between devices and output streams
+            audio_devices_t type = AUDIO_DEVICE_NONE;
             for (unsigned int i = 0; i < patch->num_sinks; i++) {
                 if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
                     ALOGW("createAudioPatch() invalid sink type %d for mix source",
@@ -353,8 +322,8 @@
                     status = BAD_VALUE;
                     goto exit;
                 }
+                type |= patch->sinks[i].ext.device.type;
             }
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
             sp<ThreadBase> thread =
                             audioflinger->checkPlaybackThread_l(patch->sources[0].ext.mix.handle);
             if (thread == 0) {
@@ -363,28 +332,14 @@
                 status = BAD_VALUE;
                 goto exit;
             }
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
-            } else {
-                audio_devices_t type = AUDIO_DEVICE_NONE;
-                for (unsigned int i = 0; i < patch->num_sinks; i++) {
-                    type |= patch->sinks[i].ext.device.type;
-                }
-                char *address;
-                if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
-                    //FIXME: we only support address on first sink with HAL version < 3.0
-                    address = audio_device_address_to_parameter(
-                                                                patch->sinks[0].ext.device.type,
-                                                                patch->sinks[0].ext.device.address);
-                } else {
-                    address = (char *)calloc(1, 1);
-                }
-                AudioParameter param = AudioParameter(String8(address));
-                free(address);
+            if (thread == audioflinger->primaryPlaybackThread_l()) {
+                AudioParameter param = AudioParameter();
                 param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
-                status = thread->setParameters(param.toString());
+
+                audioflinger->broacastParametersToRecordThreads_l(param.toString());
             }
 
+            status = thread->sendCreateAudioPatchConfigEvent(patch, &halHandle);
         } break;
         default:
             status = BAD_VALUE;
@@ -472,6 +427,7 @@
     // this track is given the same buffer as the PatchRecord buffer
     patch->mPatchTrack = new PlaybackThread::PatchTrack(
                                            patch->mPlaybackThread.get(),
+                                           audioPatch->sources[1].ext.mix.usecase.stream,
                                            sampleRate,
                                            outChannelMask,
                                            format,
@@ -525,22 +481,31 @@
     if (patch->mRecordThread != 0) {
         if (patch->mPatchRecord != 0) {
             patch->mRecordThread->deletePatchRecord(patch->mPatchRecord);
-            patch->mPatchRecord.clear();
         }
         audioflinger->closeInputInternal_l(patch->mRecordThread);
-        patch->mRecordThread.clear();
     }
     if (patch->mPlaybackThread != 0) {
         if (patch->mPatchTrack != 0) {
             patch->mPlaybackThread->deletePatchTrack(patch->mPatchTrack);
-            patch->mPatchTrack.clear();
         }
         // if num sources == 2 we are reusing an existing playback thread so we do not close it
         if (patch->mAudioPatch.num_sources != 2) {
             audioflinger->closeOutputInternal_l(patch->mPlaybackThread);
         }
+    }
+    if (patch->mRecordThread != 0) {
+        if (patch->mPatchRecord != 0) {
+            patch->mPatchRecord.clear();
+        }
+        patch->mRecordThread.clear();
+    }
+    if (patch->mPlaybackThread != 0) {
+        if (patch->mPatchTrack != 0) {
+            patch->mPatchTrack.clear();
+        }
         patch->mPlaybackThread.clear();
     }
+
 }
 
 /* Disconnect a patch */
@@ -578,42 +543,30 @@
                 break;
             }
 
-            if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE &&
-                    patch->sinks[0].ext.device.hw_module != srcModule) {
+            if (removedPatch->mRecordPatchHandle != AUDIO_PATCH_HANDLE_NONE ||
+                    removedPatch->mPlaybackPatchHandle != AUDIO_PATCH_HANDLE_NONE) {
                 clearPatchConnections(removedPatch);
                 break;
             }
 
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-                    sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
-                    if (thread == 0) {
-                        ALOGW("releaseAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
-                        status = BAD_VALUE;
-                        break;
-                    }
-                    status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
-                } else {
-                    audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
-                    status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
-                }
-            } else {
+            if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
                 sp<ThreadBase> thread = audioflinger->checkRecordThread_l(
-                                                                    patch->sinks[0].ext.mix.handle);
+                                                                patch->sinks[0].ext.mix.handle);
                 if (thread == 0) {
                     ALOGW("releaseAudioPatch() bad capture I/O handle %d",
-                                                                  patch->sinks[0].ext.mix.handle);
+                                                              patch->sinks[0].ext.mix.handle);
                     status = BAD_VALUE;
                     break;
                 }
-                AudioParameter param;
-                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
-                ALOGV("releaseAudioPatch() AUDIO_PORT_TYPE_DEVICE setParameters %s",
-                                                                      param.toString().string());
-                status = thread->setParameters(param.toString());
+                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
+            } else {
+                AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
+                if (audioHwDevice->version() < AUDIO_DEVICE_API_VERSION_3_0) {
+                    status = INVALID_OPERATION;
+                    break;
+                }
+                audio_hw_device_t *hwDevice = audioHwDevice->hwDevice();
+                status = hwDevice->release_audio_patch(hwDevice, removedPatch->mHalHandle);
             }
         } break;
         case AUDIO_PORT_TYPE_MIX: {
@@ -632,14 +585,7 @@
                 status = BAD_VALUE;
                 break;
             }
-            AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
-            if (audioHwDevice->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-                status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
-            } else {
-                AudioParameter param;
-                param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
-                status = thread->setParameters(param.toString());
-            }
+            status = thread->sendReleaseAudioPatchConfigEvent(removedPatch->mHalHandle);
         } break;
         default:
             status = BAD_VALUE;
@@ -693,5 +639,4 @@
     return NO_ERROR;
 }
 
-
-}; // namespace android
+} // namespace android
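
The createAudioPatch() rewrite above collapses the old HAL-version special cases into one predicate: take the software-bridge path (a PatchRecord feeding a PatchTrack) whenever the sink is a device and either the sink lives on a different HW module, the HAL predates the 3.0 device API, or the request carries two sources to reuse an existing output mix. A simplified restatement of that condition; the struct fields mirror the audio_patch fields used above, but the types and the HAL version encoding are assumptions:

    #include <cstdint>

    enum PortType { PORT_TYPE_DEVICE, PORT_TYPE_MIX };

    struct PatchRequest {
        PortType sinkType;        // patch->sinks[0].type
        uint32_t sinkHwModule;    // patch->sinks[0].ext.device.hw_module
        uint32_t srcHwModule;     // module owning patch->sources[0]
        uint32_t halVersion;      // audioHwDevice->version()
        unsigned numSources;      // patch->num_sources
    };

    static const uint32_t kHalDeviceApi3_0 = 0x0300;  // assumed encoding of AUDIO_DEVICE_API_VERSION_3_0

    // True when the patch must be carried by the software bridge rather than the HAL.
    static bool needsSoftwareBridge(const PatchRequest& p) {
        return p.sinkType == PORT_TYPE_DEVICE &&
                (p.sinkHwModule != p.srcHwModule ||    // crossing HW modules
                 p.halVersion < kHalDeviceApi3_0 ||    // HAL cannot patch devices itself
                 p.numSources == 2);                   // reuse of an existing output mix requested
    }
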
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index ee48276..7bc6f0c 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -156,10 +156,6 @@
     bool                mResumeToStopping; // track was paused in stopping state.
     bool                mFlushHwPending; // track requests for thread flush
 
-    // for last call to getTimestamp
-    bool                mPreviousValid;
-    uint32_t            mPreviousFramesWritten;
-    AudioTimestamp      mPreviousTimestamp;
 };  // end of Track
 
 class TimedTrack : public Track {
@@ -255,7 +251,7 @@
 
     class Buffer : public AudioBufferProvider::Buffer {
     public:
-        int16_t *mBuffer;
+        void *mBuffer;
     };
 
                         OutputTrack(PlaybackThread *thread,
@@ -271,7 +267,7 @@
                                     AudioSystem::SYNC_EVENT_NONE,
                              int triggerSession = 0);
     virtual void        stop();
-            bool        write(int16_t* data, uint32_t frames);
+            bool        write(void* data, uint32_t frames);
             bool        bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
             bool        isActive() const { return mActive; }
     const wp<ThreadBase>& thread() const { return mThread; }
@@ -297,6 +293,7 @@
 public:
 
                         PatchTrack(PlaybackThread *playbackThread,
+                                   audio_stream_type_t streamType,
                                    uint32_t sampleRate,
                                    audio_channel_mask_t channelMask,
                                    audio_format_t format,
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 204a9d6..25d6d95 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -34,6 +34,7 @@
                                 IAudioFlinger::track_flags_t flags,
                                 track_type type);
     virtual             ~RecordTrack();
+    virtual status_t    initCheck() const;
 
     virtual status_t    start(AudioSystem::sync_event_t event, int triggerSession);
     virtual void        stop();
@@ -66,21 +67,6 @@
 
     bool                mOverflow;  // overflow on most recent attempt to fill client buffer
 
-           // updated by RecordThread::readInputParameters_l()
-            AudioResampler                      *mResampler;
-
-            // interleaved stereo pairs of fixed-point Q4.27
-            int32_t                             *mRsmpOutBuffer;
-            // current allocated frame count for the above, which may be larger than needed
-            size_t                              mRsmpOutFrameCount;
-
-            size_t                              mRsmpInUnrel;   // unreleased frames remaining from
-                                                                // most recent getNextBuffer
-                                                                // for debug only
-
-            // rolling counter that is never cleared
-            int32_t                             mRsmpInFront;   // next available frame
-
             AudioBufferProvider::Buffer mSink;  // references client's buffer sink in shared memory
 
             // sync event triggering actual audio capture. Frames read before this event will
@@ -93,7 +79,10 @@
             ssize_t                             mFramesToDrop;
 
             // used by resampler to find source frames
-            ResamplerBufferProvider *mResamplerBufferProvider;
+            ResamplerBufferProvider            *mResamplerBufferProvider;
+
+            // used by the record thread to convert frames to proper destination format
+            RecordBufferConverter              *mRecordBufferConverter;
 };
 
 // playback track, used by PatchPanel
diff --git a/services/audioflinger/ServiceUtilities.cpp b/services/audioflinger/ServiceUtilities.cpp
index fae19a1..2e68dad 100644
--- a/services/audioflinger/ServiceUtilities.cpp
+++ b/services/audioflinger/ServiceUtilities.cpp
@@ -14,61 +14,119 @@
  * limitations under the License.
  */
 
+#include <binder/AppOpsManager.h>
 #include <binder/IPCThreadState.h>
 #include <binder/IServiceManager.h>
 #include <binder/PermissionCache.h>
+#include <private/android_filesystem_config.h>
 #include "ServiceUtilities.h"
 
+/* When performing permission checks we do not use permission cache for
+ * runtime permissions (protection level dangerous) as they may change at
+ * runtime. All other permissions (protection level normal and signature)
+ * can be cached as they never change. Of course all permissions checked
+ * here are platform defined.
+ */
+
 namespace android {
 
 // Not valid until initialized by AudioFlinger constructor.  It would have to be
 // re-initialized if the process containing AudioFlinger service forks (which it doesn't).
 pid_t getpid_cached;
 
-bool recordingAllowed() {
+bool recordingAllowed(const String16& opPackageName) {
+    // Note: We are getting the UID from the calling IPC thread state because all
+    // clients that perform recording create AudioRecord in their own processes
+    // and the system does not create AudioRecord objects on behalf of apps. This
+    // differs from playback where in some situations the system recreates AudioTrack
+    // instances associated with a client's MediaPlayer on behalf of this client.
+    // In the latter case we have to store the client UID and pass in along for
+    // security checks.
+
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sRecordAudio("android.permission.RECORD_AUDIO");
-    // don't use PermissionCache; this is not a system permission
-    bool ok = checkCallingPermission(sRecordAudio);
-    if (!ok) ALOGE("Request requires android.permission.RECORD_AUDIO");
-    return ok;
+
+    // IMPORTANT: Don't use PermissionCache - this is a runtime permission and may change.
+    const bool ok = checkCallingPermission(sRecordAudio);
+    if (!ok) {
+        ALOGE("Request requires android.permission.RECORD_AUDIO");
+        return false;
+    }
+
+    const uid_t uid = IPCThreadState::self()->getCallingUid();
+
+    // To permit command-line native tests
+    if (uid == AID_ROOT) return true;
+
+    String16 checkedOpPackageName = opPackageName;
+
+    // In some cases the calling code has no access to the package it runs under.
+    // For example, code using the wilhelm framework's OpenSL-ES APIs. In this
+    // case we will get the packages for the calling UID and pick the first one
+    // for attributing the app op. This will work correctly for runtime permissions
+    // as for legacy apps we will toggle the app op for all packages in the UID.
+    // The caveat is that the operation may be attributed to the wrong package and
+    // stats based on app ops may be slightly off.
+    if (checkedOpPackageName.size() <= 0) {
+        sp<IServiceManager> sm = defaultServiceManager();
+        sp<IBinder> binder = sm->getService(String16("permission"));
+        if (binder == 0) {
+            ALOGE("Cannot get permission service");
+            return false;
+        }
+
+        sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
+        Vector<String16> packages;
+
+        permCtrl->getPackagesForUid(uid, packages);
+
+        if (packages.isEmpty()) {
+            ALOGE("No packages for calling UID");
+            return false;
+        }
+        checkedOpPackageName = packages[0];
+    }
+
+    AppOpsManager appOps;
+    if (appOps.noteOp(AppOpsManager::OP_RECORD_AUDIO, uid, checkedOpPackageName)
+            != AppOpsManager::MODE_ALLOWED) {
+        ALOGE("Request denied by app op OP_RECORD_AUDIO");
+        return false;
+    }
+
+    return true;
 }
 
 bool captureAudioOutputAllowed() {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sCaptureAudioOutput("android.permission.CAPTURE_AUDIO_OUTPUT");
-    // don't use PermissionCache; this is not a system permission
-    bool ok = checkCallingPermission(sCaptureAudioOutput);
+    // IMPORTANT: Use PermissionCache - not a runtime permission, so it cannot change.
+    bool ok = PermissionCache::checkCallingPermission(sCaptureAudioOutput);
     if (!ok) ALOGE("Request requires android.permission.CAPTURE_AUDIO_OUTPUT");
     return ok;
 }
 
 bool captureHotwordAllowed() {
     static const String16 sCaptureHotwordAllowed("android.permission.CAPTURE_AUDIO_HOTWORD");
-    bool ok = checkCallingPermission(sCaptureHotwordAllowed);
+    // IMPORTANT: Use PermissionCache - not a runtime permission, so it cannot change.
+    bool ok = PermissionCache::checkCallingPermission(sCaptureHotwordAllowed);
     if (!ok) ALOGE("android.permission.CAPTURE_AUDIO_HOTWORD");
     return ok;
 }
 
-bool captureFmTunerAllowed() {
-    static const String16 sCaptureFmTunerAllowed("android.permission.ACCESS_FM_RADIO");
-    bool ok = checkCallingPermission(sCaptureFmTunerAllowed);
-    if (!ok) ALOGE("android.permission.ACCESS_FM_RADIO");
-    return ok;
-}
-
 bool settingsAllowed() {
     if (getpid_cached == IPCThreadState::self()->getCallingPid()) return true;
     static const String16 sAudioSettings("android.permission.MODIFY_AUDIO_SETTINGS");
-    // don't use PermissionCache; this is not a system permission
-    bool ok = checkCallingPermission(sAudioSettings);
+    // IMPORTANT: Use PermissionCache - not a runtime permission, so it cannot change.
+    bool ok = PermissionCache::checkCallingPermission(sAudioSettings);
     if (!ok) ALOGE("Request requires android.permission.MODIFY_AUDIO_SETTINGS");
     return ok;
 }
 
 bool modifyAudioRoutingAllowed() {
     static const String16 sModifyAudioRoutingAllowed("android.permission.MODIFY_AUDIO_ROUTING");
-    bool ok = checkCallingPermission(sModifyAudioRoutingAllowed);
+    // IMPORTANT: Use PermissionCache - not a runtime permission, so it cannot change.
+    bool ok = PermissionCache::checkCallingPermission(sModifyAudioRoutingAllowed);
     if (!ok) ALOGE("android.permission.MODIFY_AUDIO_ROUTING");
     return ok;
 }
@@ -76,7 +134,7 @@
 bool dumpAllowed() {
     // don't optimize for same pid, since mediaserver never dumps itself
     static const String16 sDump("android.permission.DUMP");
-    // OK to use PermissionCache; this is a system permission
+    // IMPORTANT: Use PermissionCache - not a runtime permission, so it cannot change.
     bool ok = PermissionCache::checkCallingPermission(sDump);
     // convention is for caller to dump an error message to fd instead of logging here
     //if (!ok) ALOGE("Request requires android.permission.DUMP");
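
recordingAllowed() above layers its checks in a fixed order: the RECORD_AUDIO permission (checked uncached because it is a runtime permission), a root bypass for native command-line tests, package resolution from the calling UID when the caller supplied no op package, and finally the OP_RECORD_AUDIO app op. A condensed, framework-free sketch of that decision order; the std::function hooks are assumptions standing in for checkCallingPermission(), the permission service lookup, and AppOpsManager::noteOp():

    #include <functional>
    #include <string>
    #include <vector>

    struct RecordingPolicy {
        std::function<bool()> hasRecordAudioPermission;               // runtime permission, never cached
        std::function<std::vector<std::string>(int)> packagesForUid;  // fallback package lookup
        std::function<bool(int, const std::string&)> noteRecordOp;    // app-op gate

        bool recordingAllowed(int callingUid, std::string opPackageName) const {
            if (!hasRecordAudioPermission()) {
                return false;                       // 1. permission check first
            }
            if (callingUid == 0) {
                return true;                        // 2. AID_ROOT bypass for native tests
            }
            if (opPackageName.empty()) {            // 3. resolve a package to attribute the op to
                std::vector<std::string> pkgs = packagesForUid(callingUid);
                if (pkgs.empty()) {
                    return false;
                }
                opPackageName = pkgs.front();       // may attribute the op to the "wrong" package
            }
            return noteRecordOp(callingUid, opPackageName);  // 4. finally note OP_RECORD_AUDIO
        }
    };
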
diff --git a/services/audioflinger/ServiceUtilities.h b/services/audioflinger/ServiceUtilities.h
index ce18a90..fba6dce 100644
--- a/services/audioflinger/ServiceUtilities.h
+++ b/services/audioflinger/ServiceUtilities.h
@@ -20,12 +20,10 @@
 
 extern pid_t getpid_cached;
 
-bool recordingAllowed();
+bool recordingAllowed(const String16& opPackageName);
 bool captureAudioOutputAllowed();
 bool captureHotwordAllowed();
-bool captureFmTunerAllowed();
 bool settingsAllowed();
 bool modifyAudioRoutingAllowed();
 bool dumpAllowed();
-
 }
diff --git a/services/audioflinger/SpdifStreamOut.cpp b/services/audioflinger/SpdifStreamOut.cpp
new file mode 100644
index 0000000..004a068
--- /dev/null
+++ b/services/audioflinger/SpdifStreamOut.cpp
@@ -0,0 +1,128 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#define LOG_TAG "AudioFlinger"
+//#define LOG_NDEBUG 0
+#include <hardware/audio.h>
+#include <utils/Log.h>
+
+#include <audio_utils/spdif/SPDIFEncoder.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+
+namespace android {
+
+/**
+ * If the AudioFlinger is processing encoded data and the HAL expects
+ * PCM then we need to wrap the data in an SPDIF wrapper.
+ */
+SpdifStreamOut::SpdifStreamOut(AudioHwDevice *dev,
+            audio_output_flags_t flags,
+            audio_format_t format)
+        // Tell the HAL that the data will be compressed audio wrapped in a data burst.
+        : AudioStreamOut(dev, (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO))
+        , mSpdifEncoder(this, format)
+        , mApplicationFormat(AUDIO_FORMAT_DEFAULT)
+        , mApplicationSampleRate(0)
+        , mApplicationChannelMask(0)
+{
+}
+
+status_t SpdifStreamOut::open(
+                              audio_io_handle_t handle,
+                              audio_devices_t devices,
+                              struct audio_config *config,
+                              const char *address)
+{
+    struct audio_config customConfig = *config;
+
+    mApplicationFormat = config->format;
+    mApplicationSampleRate = config->sample_rate;
+    mApplicationChannelMask = config->channel_mask;
+
+    // Some data bursts run at a higher sample rate.
+    // TODO Move this into the audio_utils as a static method.
+    switch(config->format) {
+        case AUDIO_FORMAT_E_AC3:
+            mRateMultiplier = 4;
+            break;
+        case AUDIO_FORMAT_AC3:
+        case AUDIO_FORMAT_DTS:
+        case AUDIO_FORMAT_DTS_HD:
+            mRateMultiplier = 1;
+            break;
+        default:
+            ALOGE("ERROR SpdifStreamOut::open() unrecognized format 0x%08X\n",
+                config->format);
+            return BAD_VALUE;
+    }
+    customConfig.sample_rate = config->sample_rate * mRateMultiplier;
+
+    customConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+    customConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+
+    // Always print this because otherwise it could be very confusing if the
+    // HAL and AudioFlinger are using different formats.
+    // Print before open() because HAL may modify customConfig.
+    ALOGI("SpdifStreamOut::open() AudioFlinger requested"
+            " sampleRate %d, format %#x, channelMask %#x",
+            config->sample_rate,
+            config->format,
+            config->channel_mask);
+    ALOGI("SpdifStreamOut::open() HAL configured for"
+            " sampleRate %d, format %#x, channelMask %#x",
+            customConfig.sample_rate,
+            customConfig.format,
+            customConfig.channel_mask);
+
+    status_t status = AudioStreamOut::open(
+            handle,
+            devices,
+            &customConfig,
+            address);
+
+    ALOGI("SpdifStreamOut::open() status = %d", status);
+
+    return status;
+}
+
+status_t SpdifStreamOut::flush()
+{
+    mSpdifEncoder.reset();
+    return AudioStreamOut::flush();
+}
+
+status_t SpdifStreamOut::standby()
+{
+    mSpdifEncoder.reset();
+    return AudioStreamOut::standby();
+}
+
+ssize_t SpdifStreamOut::writeDataBurst(const void* buffer, size_t bytes)
+{
+    return AudioStreamOut::write(buffer, bytes);
+}
+
+ssize_t SpdifStreamOut::write(const void* buffer, size_t numBytes)
+{
+    // Write to SPDIF wrapper. It will call back to writeDataBurst().
+    return mSpdifEncoder.write(buffer, numBytes);
+}
+
+} // namespace android
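
A minimal standalone sketch (not part of the patch; illustrative names only) of the delegation pattern used above: SpdifStreamOut::write() hands the bytes to the encoder, and the encoder calls back into the stream through writeDataBurst() to emit the wrapped output. The real SPDIFEncoder lives in libaudioutils and performs the actual IEC 61937 framing; here it is stubbed out.

#include <cstdio>
#include <sys/types.h>

// Stand-in for SPDIFEncoder: frames the payload, then forwards it through
// a virtual callback supplied by the owning stream.
struct Encoder {
    virtual ~Encoder() {}
    ssize_t write(const void* buffer, size_t bytes) {
        // A real encoder would wrap 'buffer' in an IEC 61937 data burst here.
        return writeOutput(buffer, bytes);
    }
    virtual ssize_t writeOutput(const void* buffer, size_t bytes) = 0;
};

// Stand-in for SpdifStreamOut: owns an encoder that calls back into it.
struct Stream {
    struct MyEncoder : public Encoder {
        explicit MyEncoder(Stream* s) : mStream(s) {}
        ssize_t writeOutput(const void* buffer, size_t bytes) override {
            return mStream->writeDataBurst(buffer, bytes);
        }
        Stream* const mStream;
    };

    Stream() : mEncoder(this) {}

    ssize_t write(const void* buffer, size_t bytes) {
        return mEncoder.write(buffer, bytes);   // same shape as SpdifStreamOut::write()
    }
    ssize_t writeDataBurst(const void* /*buffer*/, size_t bytes) {
        std::printf("data burst of %zu bytes to the HAL\n", bytes);
        return (ssize_t) bytes;
    }

    MyEncoder mEncoder;
};

int main() {
    Stream s;
    char frame[64] = {};
    s.write(frame, sizeof(frame));
    return 0;
}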
diff --git a/services/audioflinger/SpdifStreamOut.h b/services/audioflinger/SpdifStreamOut.h
new file mode 100644
index 0000000..c870250
--- /dev/null
+++ b/services/audioflinger/SpdifStreamOut.h
@@ -0,0 +1,123 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_SPDIF_STREAM_OUT_H
+#define ANDROID_SPDIF_STREAM_OUT_H
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <system/audio.h>
+
+#include "AudioHwDevice.h"
+#include "AudioStreamOut.h"
+#include "SpdifStreamOut.h"
+
+#include <audio_utils/spdif/SPDIFEncoder.h>
+
+namespace android {
+
+/**
+ * Stream that is a PCM data burst in the HAL but looks like an encoded stream
+ * to the AudioFlinger. Wraps encoded data in an SPDIF wrapper per IEC 61937-3.
+ */
+class SpdifStreamOut : public AudioStreamOut {
+public:
+
+    SpdifStreamOut(AudioHwDevice *dev, audio_output_flags_t flags,
+            audio_format_t format);
+
+    virtual ~SpdifStreamOut() { }
+
+    virtual status_t open(
+            audio_io_handle_t handle,
+            audio_devices_t devices,
+            struct audio_config *config,
+            const char *address);
+
+    /**
+    * Write audio buffer to driver. Returns number of bytes written, or a
+    * negative status_t. If at least one frame was written successfully prior to the error,
+    * it is suggested that the driver return that successful (short) byte count
+    * and then return an error in the subsequent call.
+    *
+    * If set_callback() has previously been called to enable non-blocking mode
+    * the write() is not allowed to block. It must write only the number of
+    * bytes that currently fit in the driver/hardware buffer and then return
+    * this byte count. If this is less than the requested write size the
+    * callback function must be called when more space is available in the
+    * driver/hardware buffer.
+    */
+    virtual ssize_t write(const void* buffer, size_t bytes);
+
+    /**
+     * @return frame size from the perspective of the application and the AudioFlinger.
+     */
+    virtual size_t getFrameSize() const { return sizeof(int8_t); }
+
+    /**
+     * @return format from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_format_t getFormat() const { return mApplicationFormat; }
+
+    /**
+     * The HAL may be running at a higher sample rate if, for example, playing wrapped EAC3.
+     * @return sample rate from the perspective of the application and the AudioFlinger.
+     */
+    virtual uint32_t getSampleRate() const { return mApplicationSampleRate; }
+
+    /**
+     * The HAL is in stereo mode when playing multi-channel compressed audio over HDMI.
+     * @return channel mask from the perspective of the application and the AudioFlinger.
+     */
+    virtual audio_channel_mask_t getChannelMask() const { return mApplicationChannelMask; }
+
+    virtual status_t flush();
+    virtual status_t standby();
+
+private:
+
+    class MySPDIFEncoder : public SPDIFEncoder
+    {
+    public:
+        MySPDIFEncoder(SpdifStreamOut *spdifStreamOut, audio_format_t format)
+          :  SPDIFEncoder(format)
+          , mSpdifStreamOut(spdifStreamOut)
+        {
+        }
+
+        virtual ssize_t writeOutput(const void* buffer, size_t bytes)
+        {
+            return mSpdifStreamOut->writeDataBurst(buffer, bytes);
+        }
+    protected:
+        SpdifStreamOut * const mSpdifStreamOut;
+    };
+
+    MySPDIFEncoder       mSpdifEncoder;
+    audio_format_t       mApplicationFormat;
+    uint32_t             mApplicationSampleRate;
+    audio_channel_mask_t mApplicationChannelMask;
+
+    ssize_t  writeDataBurst(const void* data, size_t bytes);
+    ssize_t  writeInternal(const void* buffer, size_t bytes);
+
+};
+
+} // namespace android
+
+#endif // ANDROID_SPDIF_STREAM_OUT_H
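
For concreteness, a small sketch (not part of the patch; assumes an Android build where <system/audio.h> is available) of the application-to-HAL configuration mapping performed by SpdifStreamOut::open(): compressed formats are presented to the HAL as 16-bit stereo PCM, with E-AC3 data bursts running at four times the nominal sample rate.

#include <system/audio.h>
#include <cstdio>

// Rate multiplier per format, mirroring the switch in SpdifStreamOut::open().
static int rateMultiplierFor(audio_format_t format) {
    switch (format) {
    case AUDIO_FORMAT_E_AC3:  return 4;
    case AUDIO_FORMAT_AC3:
    case AUDIO_FORMAT_DTS:
    case AUDIO_FORMAT_DTS_HD: return 1;
    default:                  return -1;   // not wrappable
    }
}

int main() {
    // What the application/AudioFlinger side asks for.
    audio_config_t app = {};
    app.format = AUDIO_FORMAT_E_AC3;
    app.sample_rate = 48000;
    app.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;

    const int mult = rateMultiplierFor(app.format);
    if (mult < 0) return 1;

    // What the HAL is actually opened with.
    audio_config_t hal = app;
    hal.format = AUDIO_FORMAT_PCM_16_BIT;
    hal.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    hal.sample_rate = app.sample_rate * mult;   // 192000 Hz for E-AC3 at 48 kHz

    std::printf("HAL rate %u Hz\n", hal.sample_rate);
    return 0;
}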
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e5b6661..0a7d4a2 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -23,7 +23,9 @@
 #include "Configuration.h"
 #include <math.h>
 #include <fcntl.h>
+#include <linux/futex.h>
 #include <sys/stat.h>
+#include <sys/syscall.h>
 #include <cutils/properties.h>
 #include <media/AudioParameter.h>
 #include <media/AudioResamplerPublic.h>
@@ -54,6 +56,7 @@
 
 #include "AudioFlinger.h"
 #include "AudioMixer.h"
+#include "BufferProviders.h"
 #include "FastMixer.h"
 #include "FastCapture.h"
 #include "ServiceUtilities.h"
@@ -84,7 +87,17 @@
 #define ALOGVV(a...) do { } while(0)
 #endif
 
+// TODO: Move these macro/inlines to a header file.
 #define max(a, b) ((a) > (b) ? (a) : (b))
+template <typename T>
+static inline T min(const T& a, const T& b)
+{
+    return a < b ? a : b;
+}
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof(a[0]))
+#endif
 
 namespace android {
 
@@ -112,10 +125,15 @@
 static const uint32_t kMaxThreadSleepTimeShift = 2;
 
 // minimum normal sink buffer size, expressed in milliseconds rather than frames
+// FIXME This should be based on experimentally observed scheduling jitter
 static const uint32_t kMinNormalSinkBufferSizeMs = 20;
 // maximum normal sink buffer size
 static const uint32_t kMaxNormalSinkBufferSizeMs = 24;
 
+// minimum capture buffer size in milliseconds to _not_ need a fast capture thread
+// FIXME This should be based on experimentally observed scheduling jitter
+static const uint32_t kMinNormalCaptureBufferSizeMs = 12;
+
 // Offloaded output thread standby delay: allows track transition without going to standby
 static const nsecs_t kOffloadStandbyDelayNs = seconds(1);
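
As a rough illustration (assuming a 48 kHz HAL rate, which the patch does not state), the millisecond constants above translate to frame counts as follows:

#include <cstdio>
#include <cstdint>

// Sketch: frame counts implied by the ms constants above at a 48 kHz HAL rate.
static uint32_t msToFrames(uint32_t ms, uint32_t sampleRate) {
    return ms * sampleRate / 1000;
}

int main() {
    const uint32_t rate = 48000;
    std::printf("min normal sink:    %u frames\n", msToFrames(20, rate));  // 960
    std::printf("max normal sink:    %u frames\n", msToFrames(24, rate));  // 1152
    std::printf("min normal capture: %u frames\n", msToFrames(12, rate));  // 576
    return 0;
}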
 
@@ -314,8 +332,203 @@
 //      ThreadBase
 // ----------------------------------------------------------------------------
 
+// static
+const char *AudioFlinger::ThreadBase::threadTypeToString(AudioFlinger::ThreadBase::type_t type)
+{
+    switch (type) {
+    case MIXER:
+        return "MIXER";
+    case DIRECT:
+        return "DIRECT";
+    case DUPLICATING:
+        return "DUPLICATING";
+    case RECORD:
+        return "RECORD";
+    case OFFLOAD:
+        return "OFFLOAD";
+    default:
+        return "unknown";
+    }
+}
+
+String8 devicesToString(audio_devices_t devices)
+{
+    static const struct mapping {
+        audio_devices_t mDevices;
+        const char *    mString;
+    } mappingsOut[] = {
+        AUDIO_DEVICE_OUT_EARPIECE,          "EARPIECE",
+        AUDIO_DEVICE_OUT_SPEAKER,           "SPEAKER",
+        AUDIO_DEVICE_OUT_WIRED_HEADSET,     "WIRED_HEADSET",
+        AUDIO_DEVICE_OUT_WIRED_HEADPHONE,   "WIRED_HEADPHONE",
+        AUDIO_DEVICE_OUT_BLUETOOTH_SCO,     "BLUETOOTH_SCO",
+        AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,     "BLUETOOTH_SCO_HEADSET",
+        AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT,      "BLUETOOTH_SCO_CARKIT",
+        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,            "BLUETOOTH_A2DP",
+        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, "BLUETOOTH_A2DP_HEADPHONES",
+        AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER,    "BLUETOOTH_A2DP_SPEAKER",
+        AUDIO_DEVICE_OUT_AUX_DIGITAL,       "AUX_DIGITAL",
+        AUDIO_DEVICE_OUT_HDMI,              "HDMI",
+        AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET, "ANLG_DOCK_HEADSET",
+        AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, "DGTL_DOCK_HEADSET",
+        AUDIO_DEVICE_OUT_USB_ACCESSORY,     "USB_ACCESSORY",
+        AUDIO_DEVICE_OUT_USB_DEVICE,        "USB_DEVICE",
+        AUDIO_DEVICE_OUT_TELEPHONY_TX,      "TELEPHONY_TX",
+        AUDIO_DEVICE_OUT_LINE,              "LINE",
+        AUDIO_DEVICE_OUT_HDMI_ARC,          "HDMI_ARC",
+        AUDIO_DEVICE_OUT_SPDIF,             "SPDIF",
+        AUDIO_DEVICE_OUT_FM,                "FM",
+        AUDIO_DEVICE_OUT_AUX_LINE,          "AUX_LINE",
+        AUDIO_DEVICE_OUT_SPEAKER_SAFE,      "SPEAKER_SAFE",
+        AUDIO_DEVICE_OUT_IP,                "IP",
+        AUDIO_DEVICE_NONE,                  "NONE",         // must be last
+    }, mappingsIn[] = {
+        AUDIO_DEVICE_IN_COMMUNICATION,      "COMMUNICATION",
+        AUDIO_DEVICE_IN_AMBIENT,            "AMBIENT",
+        AUDIO_DEVICE_IN_BUILTIN_MIC,        "BUILTIN_MIC",
+        AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET,  "BLUETOOTH_SCO_HEADSET",
+        AUDIO_DEVICE_IN_WIRED_HEADSET,      "WIRED_HEADSET",
+        AUDIO_DEVICE_IN_AUX_DIGITAL,        "AUX_DIGITAL",
+        AUDIO_DEVICE_IN_VOICE_CALL,         "VOICE_CALL",
+        AUDIO_DEVICE_IN_TELEPHONY_RX,       "TELEPHONY_RX",
+        AUDIO_DEVICE_IN_BACK_MIC,           "BACK_MIC",
+        AUDIO_DEVICE_IN_REMOTE_SUBMIX,      "REMOTE_SUBMIX",
+        AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET,  "ANLG_DOCK_HEADSET",
+        AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET,  "DGTL_DOCK_HEADSET",
+        AUDIO_DEVICE_IN_USB_ACCESSORY,      "USB_ACCESSORY",
+        AUDIO_DEVICE_IN_USB_DEVICE,         "USB_DEVICE",
+        AUDIO_DEVICE_IN_FM_TUNER,           "FM_TUNER",
+        AUDIO_DEVICE_IN_TV_TUNER,           "TV_TUNER",
+        AUDIO_DEVICE_IN_LINE,               "LINE",
+        AUDIO_DEVICE_IN_SPDIF,              "SPDIF",
+        AUDIO_DEVICE_IN_BLUETOOTH_A2DP,     "BLUETOOTH_A2DP",
+        AUDIO_DEVICE_IN_LOOPBACK,           "LOOPBACK",
+        AUDIO_DEVICE_IN_IP,                 "IP",
+        AUDIO_DEVICE_NONE,                  "NONE",         // must be last
+    };
+    String8 result;
+    audio_devices_t allDevices = AUDIO_DEVICE_NONE;
+    const mapping *entry;
+    if (devices & AUDIO_DEVICE_BIT_IN) {
+        devices &= ~AUDIO_DEVICE_BIT_IN;
+        entry = mappingsIn;
+    } else {
+        entry = mappingsOut;
+    }
+    for ( ; entry->mDevices != AUDIO_DEVICE_NONE; entry++) {
+        allDevices = (audio_devices_t) (allDevices | entry->mDevices);
+        if (devices & entry->mDevices) {
+            if (!result.isEmpty()) {
+                result.append("|");
+            }
+            result.append(entry->mString);
+        }
+    }
+    if (devices & ~allDevices) {
+        if (!result.isEmpty()) {
+            result.append("|");
+        }
+        result.appendFormat("0x%X", devices & ~allDevices);
+    }
+    if (result.isEmpty()) {
+        result.append(entry->mString);
+    }
+    return result;
+}
+
+String8 inputFlagsToString(audio_input_flags_t flags)
+{
+    static const struct mapping {
+        audio_input_flags_t     mFlag;
+        const char *            mString;
+    } mappings[] = {
+        AUDIO_INPUT_FLAG_FAST,              "FAST",
+        AUDIO_INPUT_FLAG_HW_HOTWORD,        "HW_HOTWORD",
+        AUDIO_INPUT_FLAG_NONE,              "NONE",         // must be last
+    };
+    String8 result;
+    audio_input_flags_t allFlags = AUDIO_INPUT_FLAG_NONE;
+    const mapping *entry;
+    for (entry = mappings; entry->mFlag != AUDIO_INPUT_FLAG_NONE; entry++) {
+        allFlags = (audio_input_flags_t) (allFlags | entry->mFlag);
+        if (flags & entry->mFlag) {
+            if (!result.isEmpty()) {
+                result.append("|");
+            }
+            result.append(entry->mString);
+        }
+    }
+    if (flags & ~allFlags) {
+        if (!result.isEmpty()) {
+            result.append("|");
+        }
+        result.appendFormat("0x%X", flags & ~allFlags);
+    }
+    if (result.isEmpty()) {
+        result.append(entry->mString);
+    }
+    return result;
+}
+
+String8 outputFlagsToString(audio_output_flags_t flags)
+{
+    static const struct mapping {
+        audio_output_flags_t    mFlag;
+        const char *            mString;
+    } mappings[] = {
+        AUDIO_OUTPUT_FLAG_DIRECT,           "DIRECT",
+        AUDIO_OUTPUT_FLAG_PRIMARY,          "PRIMARY",
+        AUDIO_OUTPUT_FLAG_FAST,             "FAST",
+        AUDIO_OUTPUT_FLAG_DEEP_BUFFER,      "DEEP_BUFFER",
+        AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD, "COMPRESS_OFFLOAD",
+        AUDIO_OUTPUT_FLAG_NON_BLOCKING,     "NON_BLOCKING",
+        AUDIO_OUTPUT_FLAG_HW_AV_SYNC,       "HW_AV_SYNC",
+        AUDIO_OUTPUT_FLAG_NONE,             "NONE",         // must be last
+    };
+    String8 result;
+    audio_output_flags_t allFlags = AUDIO_OUTPUT_FLAG_NONE;
+    const mapping *entry;
+    for (entry = mappings; entry->mFlag != AUDIO_OUTPUT_FLAG_NONE; entry++) {
+        allFlags = (audio_output_flags_t) (allFlags | entry->mFlag);
+        if (flags & entry->mFlag) {
+            if (!result.isEmpty()) {
+                result.append("|");
+            }
+            result.append(entry->mString);
+        }
+    }
+    if (flags & ~allFlags) {
+        if (!result.isEmpty()) {
+            result.append("|");
+        }
+        result.appendFormat("0x%X", flags & ~allFlags);
+    }
+    if (result.isEmpty()) {
+        result.append(entry->mString);
+    }
+    return result;
+}
+
+const char *sourceToString(audio_source_t source)
+{
+    switch (source) {
+    case AUDIO_SOURCE_DEFAULT:              return "default";
+    case AUDIO_SOURCE_MIC:                  return "mic";
+    case AUDIO_SOURCE_VOICE_UPLINK:         return "voice uplink";
+    case AUDIO_SOURCE_VOICE_DOWNLINK:       return "voice downlink";
+    case AUDIO_SOURCE_VOICE_CALL:           return "voice call";
+    case AUDIO_SOURCE_CAMCORDER:            return "camcorder";
+    case AUDIO_SOURCE_VOICE_RECOGNITION:    return "voice recognition";
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:  return "voice communication";
+    case AUDIO_SOURCE_REMOTE_SUBMIX:        return "remote submix";
+    case AUDIO_SOURCE_FM_TUNER:             return "FM tuner";
+    case AUDIO_SOURCE_HOTWORD:              return "hotword";
+    default:                                return "unknown";
+    }
+}
+
 AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
-        audio_devices_t outDevice, audio_devices_t inDevice, type_t type)
+        audio_devices_t outDevice, audio_devices_t inDevice, type_t type, bool systemReady)
     :   Thread(false /*canCallJava*/),
         mType(type),
         mAudioFlinger(audioFlinger),
@@ -324,10 +537,13 @@
         // RecordThread::readInputParameters_l()
         //FIXME: mStandby should be true here. Is this some kind of hack?
         mStandby(false), mOutDevice(outDevice), mInDevice(inDevice),
+        mPrevOutDevice(AUDIO_DEVICE_NONE), mPrevInDevice(AUDIO_DEVICE_NONE),
         mAudioSource(AUDIO_SOURCE_DEFAULT), mId(id),
         // mName will be set by concrete (non-virtual) subclass
-        mDeathRecipient(new PMDeathRecipient(this))
+        mDeathRecipient(new PMDeathRecipient(this)),
+        mSystemReady(systemReady)
 {
+    memset(&mPatch, 0, sizeof(struct audio_patch));
 }
 
 AudioFlinger::ThreadBase::~ThreadBase()
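
The devicesToString()/inputFlagsToString()/outputFlagsToString() helpers added above all follow the same table-driven pattern: walk a table terminated by a zero-valued sentinel, OR together the known bits, join the matched names with '|', print any leftover unknown bits in hex, and fall back to the sentinel's name ("NONE") when nothing matched. A generic, self-contained sketch of that pattern (illustrative only, using std::string rather than String8):

#include <cstdint>
#include <cstdio>
#include <string>

struct BitName { uint32_t bit; const char* name; };

// Decode a bitmask against a table terminated by a {0, "NONE"} sentinel.
static std::string bitsToString(uint32_t value, const BitName* table) {
    std::string out;
    uint32_t known = 0;
    const BitName* entry = table;
    for (; entry->bit != 0; ++entry) {
        known |= entry->bit;
        if (value & entry->bit) {
            if (!out.empty()) out += "|";
            out += entry->name;
        }
    }
    if (value & ~known) {                         // bits not in the table
        char hex[16];
        std::snprintf(hex, sizeof(hex), "0x%X", value & ~known);
        if (!out.empty()) out += "|";
        out += hex;
    }
    if (out.empty()) out = entry->name;           // sentinel name, e.g. "NONE"
    return out;
}

int main() {
    static const BitName kFlags[] = {
        { 0x1, "DIRECT" }, { 0x2, "PRIMARY" }, { 0x4, "FAST" }, { 0, "NONE" },
    };
    std::printf("%s\n", bitsToString(0x5, kFlags).c_str());   // DIRECT|FAST
    std::printf("%s\n", bitsToString(0x0, kFlags).c_str());   // NONE
    std::printf("%s\n", bitsToString(0x9, kFlags).c_str());   // DIRECT|0x8
    return 0;
}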
@@ -394,6 +610,11 @@
 {
     status_t status = NO_ERROR;
 
+    if (event->mRequiresSystemReady && !mSystemReady) {
+        event->mWaitStatus = false;
+        mPendingConfigEvents.add(event);
+        return status;
+    }
     mConfigEvents.add(event);
     ALOGV("sendConfigEvent_l() num events %d event %d", mConfigEvents.size(), event->mType);
     mWaitWorkCV.signal();
@@ -412,19 +633,25 @@
     return status;
 }
 
-void AudioFlinger::ThreadBase::sendIoConfigEvent(int event, int param)
+void AudioFlinger::ThreadBase::sendIoConfigEvent(audio_io_config_event event, pid_t pid)
 {
     Mutex::Autolock _l(mLock);
-    sendIoConfigEvent_l(event, param);
+    sendIoConfigEvent_l(event, pid);
 }
 
 // sendIoConfigEvent_l() must be called with ThreadBase::mLock held
-void AudioFlinger::ThreadBase::sendIoConfigEvent_l(int event, int param)
+void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid)
 {
-    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, param);
+    sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid);
     sendConfigEvent_l(configEvent);
 }
 
+void AudioFlinger::ThreadBase::sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio)
+{
+    Mutex::Autolock _l(mLock);
+    sendPrioConfigEvent_l(pid, tid, prio);
+}
+
 // sendPrioConfigEvent_l() must be called with ThreadBase::mLock held
 void AudioFlinger::ThreadBase::sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio)
 {
@@ -485,7 +712,7 @@
         } break;
         case CFG_EVENT_IO: {
             IoConfigEventData *data = (IoConfigEventData *)event->mData.get();
-            audioConfigChanged(data->mEvent, data->mParam);
+            ioConfigChanged(data->mEvent, data->mPid);
         } break;
         case CFG_EVENT_SET_PARAMETER: {
             SetParameterConfigEventData *data = (SetParameterConfigEventData *)event->mData.get();
@@ -524,49 +751,63 @@
 
 String8 channelMaskToString(audio_channel_mask_t mask, bool output) {
     String8 s;
-    if (output) {
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
-        if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center ,");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
-        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
-        if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
-        if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
-    } else {
-        if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
-        if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
-        if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
-        if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
-        if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
-        if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
-        if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
-        if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
-        if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
-        if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
-        if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
-        if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown,  ");
+    const audio_channel_representation_t representation =
+            audio_channel_mask_get_representation(mask);
+
+    switch (representation) {
+    case AUDIO_CHANNEL_REPRESENTATION_POSITION: {
+        if (output) {
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT) s.append("front-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT) s.append("front-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) s.append("front-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY) s.append("low freq, ");
+            if (mask & AUDIO_CHANNEL_OUT_BACK_LEFT) s.append("back-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_BACK_RIGHT) s.append("back-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) s.append("front-left-of-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) s.append("front-right-of-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_BACK_CENTER) s.append("back-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_SIDE_LEFT) s.append("side-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_SIDE_RIGHT) s.append("side-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_CENTER) s.append("top-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT) s.append("top-front-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER) s.append("top-front-center, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT) s.append("top-front-right, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_LEFT) s.append("top-back-left, ");
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_CENTER) s.append("top-back-center, " );
+            if (mask & AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT) s.append("top-back-right, " );
+            if (mask & ~AUDIO_CHANNEL_OUT_ALL) s.append("unknown,  ");
+        } else {
+            if (mask & AUDIO_CHANNEL_IN_LEFT) s.append("left, ");
+            if (mask & AUDIO_CHANNEL_IN_RIGHT) s.append("right, ");
+            if (mask & AUDIO_CHANNEL_IN_FRONT) s.append("front, ");
+            if (mask & AUDIO_CHANNEL_IN_BACK) s.append("back, ");
+            if (mask & AUDIO_CHANNEL_IN_LEFT_PROCESSED) s.append("left-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_RIGHT_PROCESSED) s.append("right-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_FRONT_PROCESSED) s.append("front-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_BACK_PROCESSED) s.append("back-processed, ");
+            if (mask & AUDIO_CHANNEL_IN_PRESSURE) s.append("pressure, ");
+            if (mask & AUDIO_CHANNEL_IN_X_AXIS) s.append("X, ");
+            if (mask & AUDIO_CHANNEL_IN_Y_AXIS) s.append("Y, ");
+            if (mask & AUDIO_CHANNEL_IN_Z_AXIS) s.append("Z, ");
+            if (mask & AUDIO_CHANNEL_IN_VOICE_UPLINK) s.append("voice-uplink, ");
+            if (mask & AUDIO_CHANNEL_IN_VOICE_DNLINK) s.append("voice-dnlink, ");
+            if (mask & ~AUDIO_CHANNEL_IN_ALL) s.append("unknown,  ");
+        }
+        const int len = s.length();
+        if (len > 2) {
+            char *str = s.lockBuffer(len); // needed?
+            s.unlockBuffer(len - 2);       // remove trailing ", "
+        }
+        return s;
     }
-    int len = s.length();
-    if (s.length() > 2) {
-        char *str = s.lockBuffer(len);
-        s.unlockBuffer(len - 2);
+    case AUDIO_CHANNEL_REPRESENTATION_INDEX:
+        s.appendFormat("index mask, bits:%#x", audio_channel_mask_get_bits(mask));
+        return s;
+    default:
+        s.appendFormat("unknown mask, representation:%d  bits:%#x",
+                representation, audio_channel_mask_get_bits(mask));
+        return s;
     }
-    return s;
 }
 
 void AudioFlinger::ThreadBase::dumpBase(int fd, const Vector<String16>& args __unused)
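
A small sketch (not part of the patch; assumes the audio_channel_mask_* helpers from <system/audio.h>, including audio_channel_mask_from_representation_and_bits) of the two representations the rewritten channelMaskToString() now distinguishes: a positional mask names speaker positions, while an index mask only says which channel indices are present.

#include <system/audio.h>
#include <cstdio>

int main() {
    // Positional: left/right speaker positions are assigned.
    audio_channel_mask_t positional = AUDIO_CHANNEL_OUT_STEREO;
    // Index: "channels 0 and 1", with no position semantics.
    audio_channel_mask_t indexed = audio_channel_mask_from_representation_and_bits(
            AUDIO_CHANNEL_REPRESENTATION_INDEX, 0x3);

    std::printf("positional: representation=%d bits=%#x\n",
            audio_channel_mask_get_representation(positional),
            audio_channel_mask_get_bits(positional));
    std::printf("indexed:    representation=%d bits=%#x\n",
            audio_channel_mask_get_representation(indexed),
            audio_channel_mask_get_bits(indexed));
    return 0;
}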
@@ -577,20 +818,22 @@
 
     bool locked = AudioFlinger::dumpTryLock(mLock);
     if (!locked) {
-        dprintf(fd, "thread %p maybe dead locked\n", this);
+        dprintf(fd, "thread %p may be deadlocked\n", this);
     }
 
+    dprintf(fd, "  Thread name: %s\n", mThreadName);
     dprintf(fd, "  I/O handle: %d\n", mId);
     dprintf(fd, "  TID: %d\n", getTid());
     dprintf(fd, "  Standby: %s\n", mStandby ? "yes" : "no");
-    dprintf(fd, "  Sample rate: %u\n", mSampleRate);
+    dprintf(fd, "  Sample rate: %u Hz\n", mSampleRate);
     dprintf(fd, "  HAL frame count: %zu\n", mFrameCount);
+    dprintf(fd, "  HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
     dprintf(fd, "  HAL buffer size: %u bytes\n", mBufferSize);
-    dprintf(fd, "  Channel Count: %u\n", mChannelCount);
-    dprintf(fd, "  Channel Mask: 0x%08x (%s)\n", mChannelMask,
+    dprintf(fd, "  Channel count: %u\n", mChannelCount);
+    dprintf(fd, "  Channel mask: 0x%08x (%s)\n", mChannelMask,
             channelMaskToString(mChannelMask, mType != RECORD).string());
-    dprintf(fd, "  Format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
-    dprintf(fd, "  Frame size: %zu\n", mFrameSize);
+    dprintf(fd, "  Format: 0x%x (%s)\n", mFormat, formatToString(mFormat));
+    dprintf(fd, "  Frame size: %zu bytes\n", mFrameSize);
     dprintf(fd, "  Pending config events:");
     size_t numConfig = mConfigEvents.size();
     if (numConfig) {
@@ -602,6 +845,9 @@
     } else {
         dprintf(fd, " none\n");
     }
+    dprintf(fd, "  Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).string());
+    dprintf(fd, "  Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).string());
+    dprintf(fd, "  Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
 
     if (locked) {
         mLock.unlock();
@@ -635,19 +881,19 @@
 String16 AudioFlinger::ThreadBase::getWakeLockTag()
 {
     switch (mType) {
-        case MIXER:
-            return String16("AudioMix");
-        case DIRECT:
-            return String16("AudioDirectOut");
-        case DUPLICATING:
-            return String16("AudioDup");
-        case RECORD:
-            return String16("AudioIn");
-        case OFFLOAD:
-            return String16("AudioOffload");
-        default:
-            ALOG_ASSERT(false);
-            return String16("AudioUnknown");
+    case MIXER:
+        return String16("AudioMix");
+    case DIRECT:
+        return String16("AudioDirectOut");
+    case DUPLICATING:
+        return String16("AudioDup");
+    case RECORD:
+        return String16("AudioIn");
+    case OFFLOAD:
+        return String16("AudioOffload");
+    default:
+        ALOG_ASSERT(false);
+        return String16("AudioUnknown");
     }
 }
 
@@ -674,7 +920,7 @@
         if (status == NO_ERROR) {
             mWakeLockToken = binder;
         }
-        ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
     }
 }
 
@@ -687,7 +933,7 @@
 void AudioFlinger::ThreadBase::releaseWakeLock_l()
 {
     if (mWakeLockToken != 0) {
-        ALOGV("releaseWakeLock_l() %s", mName);
+        ALOGV("releaseWakeLock_l() %s", mThreadName);
         if (mPowerManager != 0) {
             mPowerManager->releaseWakeLock(mWakeLockToken, 0,
                     true /* FIXME force oneway contrary to .aidl */);
@@ -702,13 +948,12 @@
 }
 
 void AudioFlinger::ThreadBase::getPowerManager_l() {
-
-    if (mPowerManager == 0) {
+    if (mSystemReady && mPowerManager == 0) {
         // use checkService() to avoid blocking if power service is not up yet
         sp<IBinder> binder =
             defaultServiceManager()->checkService(String16("power"));
         if (binder == 0) {
-            ALOGW("Thread %s cannot connect to the power manager service", mName);
+            ALOGW("Thread %s cannot connect to the power manager service", mThreadName);
         } else {
             mPowerManager = interface_cast<IPowerManager>(binder);
             binder->linkToDeath(mDeathRecipient);
@@ -717,7 +962,6 @@
 }
 
 void AudioFlinger::ThreadBase::updateWakeLockUids_l(const SortedVector<int> &uids) {
-
     getPowerManager_l();
     if (mWakeLockToken == NULL) {
         ALOGE("no wake lock to update!");
@@ -728,7 +972,7 @@
         status_t status;
         status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
                     true /* FIXME force oneway contrary to .aidl */);
-        ALOGV("acquireWakeLock_l() %s status %d", mName, status);
+        ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
     }
 }
 
@@ -912,7 +1156,7 @@
     // mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
     if (mType == DIRECT) {
         ALOGW("createEffect_l() Cannot add effect %s on Direct output type thread %s",
-                desc->name, mName);
+                desc->name, mThreadName);
         lStatus = BAD_VALUE;
         goto Exit;
     }
@@ -936,7 +1180,8 @@
         case DUPLICATING:
         case RECORD:
         default:
-            ALOGW("createEffect_l() Cannot add global effect %s on thread %s", desc->name, mName);
+            ALOGW("createEffect_l() Cannot add global effect %s on thread %s",
+                    desc->name, mThreadName);
             lStatus = BAD_VALUE;
             goto Exit;
         }
@@ -1158,6 +1403,20 @@
                             AUDIO_PORT_CONFIG_FORMAT;
 }
 
+void AudioFlinger::ThreadBase::systemReady()
+{
+    Mutex::Autolock _l(mLock);
+    if (mSystemReady) {
+        return;
+    }
+    mSystemReady = true;
+
+    for (size_t i = 0; i < mPendingConfigEvents.size(); i++) {
+        sendConfigEvent_l(mPendingConfigEvents.editItemAt(i));
+    }
+    mPendingConfigEvents.clear();
+}
+
 
 // ----------------------------------------------------------------------------
 //      Playback
@@ -1167,8 +1426,9 @@
                                              AudioStreamOut* output,
                                              audio_io_handle_t id,
                                              audio_devices_t device,
-                                             type_t type)
-    :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type),
+                                             type_t type,
+                                             bool systemReady)
+    :   ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type, systemReady),
         mNormalFrameCount(0), mSinkBuffer(NULL),
         mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
         mMixerBuffer(NULL),
@@ -1187,7 +1447,7 @@
         mLastWriteTime(0), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
         mMixerStatus(MIXER_IDLE),
         mMixerStatusIgnoringFastTracks(MIXER_IDLE),
-        standbyDelay(AudioFlinger::mStandbyTimeInNsecs),
+        mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
         mBytesRemaining(0),
         mCurrentWriteLength(0),
         mUseAsyncWrite(false),
@@ -1201,8 +1461,8 @@
         // mLatchD, mLatchQ,
         mLatchDValid(false), mLatchQValid(false)
 {
-    snprintf(mName, kNameLength, "AudioOut_%X", id);
-    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
+    snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
+    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
 
     // Assumes constructor is called by AudioFlinger with its mLock held, but
     // it would be safer to explicitly pass initial masterVolume/masterMute as
@@ -1315,7 +1575,10 @@
 
 void AudioFlinger::PlaybackThread::dumpInternals(int fd, const Vector<String16>& args)
 {
-    dprintf(fd, "\nOutput thread %p:\n", this);
+    dprintf(fd, "\nOutput thread %p type %d (%s):\n", this, type(), threadTypeToString(type()));
+
+    dumpBase(fd, args);
+
     dprintf(fd, "  Normal frame count: %zu\n", mNormalFrameCount);
     dprintf(fd, "  Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
     dprintf(fd, "  Total writes: %d\n", mNumWrites);
@@ -1326,15 +1589,17 @@
     dprintf(fd, "  Mixer buffer: %p\n", mMixerBuffer);
     dprintf(fd, "  Effect buffer: %p\n", mEffectBuffer);
     dprintf(fd, "  Fast track availMask=%#x\n", mFastTrackAvailMask);
-
-    dumpBase(fd, args);
+    AudioStreamOut *output = mOutput;
+    audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
+    String8 flagsAsString = outputFlagsToString(flags);
+    dprintf(fd, "  AudioStreamOut: %p flags %#x (%s)\n", output, flags, flagsAsString.string());
 }
 
 // Thread virtuals
 
 void AudioFlinger::PlaybackThread::onFirstRef()
 {
-    run(mName, ANDROID_PRIORITY_URGENT_AUDIO);
+    run(mThreadName, ANDROID_PRIORITY_URGENT_AUDIO);
 }
 
 // ThreadBase virtuals
@@ -1378,19 +1643,22 @@
               (
                 (sharedBuffer != 0)
               ) ||
-              // use case 2: callback handler and frame count is default or at least as large as HAL
+              // use case 2: frame count is default or at least as large as HAL
               (
-                (tid != -1) &&
+                // we formerly checked for a callback handler (non-0 tid),
+                // but that is no longer required for TRANSFER_OBTAIN mode
                 ((frameCount == 0) ||
                 (frameCount >= mFrameCount))
               )
             ) &&
             // PCM data
             audio_is_linear_pcm(format) &&
-            // identical channel mask to sink, or mono in and stereo sink
+            // TODO: extract as a data library function that checks that a computationally
+            // expensive downmixer is not required: isFastOutputChannelConversion()
             (channelMask == mChannelMask ||
-                    (channelMask == AUDIO_CHANNEL_OUT_MONO &&
-                            mChannelMask == AUDIO_CHANNEL_OUT_STEREO)) &&
+                    mChannelMask != AUDIO_CHANNEL_OUT_STEREO ||
+                    (channelMask == AUDIO_CHANNEL_OUT_MONO
+                            /* && mChannelMask == AUDIO_CHANNEL_OUT_STEREO */)) &&
             // hardware sample rate
             (sampleRate == mSampleRate) &&
             // normal mixer has an associated fast mixer
@@ -1420,20 +1688,31 @@
                 audio_is_linear_pcm(format),
                 channelMask, sampleRate, mSampleRate, hasFastMixer(), tid, mFastTrackAvailMask);
         *flags &= ~IAudioFlinger::TRACK_FAST;
-        // For compatibility with AudioTrack calculation, buffer depth is forced
-        // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
-        // This is probably too conservative, but legacy application code may depend on it.
-        // If you change this calculation, also review the start threshold which is related.
+      }
+    }
+    // For normal PCM streaming tracks, update minimum frame count.
+    // For compatibility with AudioTrack calculation, buffer depth is forced
+    // to be at least 2 x the normal mixer frame count and cover audio hardware latency.
+    // This is probably too conservative, but legacy application code may depend on it.
+    // If you change this calculation, also review the start threshold which is related.
+    if (!(*flags & IAudioFlinger::TRACK_FAST)
+            && audio_is_linear_pcm(format) && sharedBuffer == 0) {
+        // this must match AudioTrack.cpp calculateMinFrameCount().
+        // TODO: Move to a common library
         uint32_t latencyMs = mOutput->stream->get_latency(mOutput->stream);
         uint32_t minBufCount = latencyMs / ((1000 * mNormalFrameCount) / mSampleRate);
         if (minBufCount < 2) {
             minBufCount = 2;
         }
-        size_t minFrameCount = mNormalFrameCount * minBufCount;
-        if (frameCount < minFrameCount) {
+        // For normal mixing tracks, if speed is > 1.0f (normal), AudioTrack
+        // or the client should compute and pass in a larger buffer request.
+        size_t minFrameCount =
+                minBufCount * sourceFramesNeededWithTimestretch(
+                        sampleRate, mNormalFrameCount,
+                        mSampleRate, AUDIO_TIMESTRETCH_SPEED_NORMAL /*speed*/);
+        if (frameCount < minFrameCount) { // including frameCount == 0
             frameCount = minFrameCount;
         }
-      }
     }
     *pFrameCount = frameCount;
 
@@ -1726,32 +2005,29 @@
     return out_s8;
 }
 
-void AudioFlinger::PlaybackThread::audioConfigChanged(int event, int param) {
-    AudioSystem::OutputDescriptor desc;
-    void *param2 = NULL;
+void AudioFlinger::PlaybackThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
+    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
+    ALOGV("PlaybackThread::ioConfigChanged, thread %p, event %d", this, event);
 
-    ALOGV("PlaybackThread::audioConfigChanged, thread %p, event %d, param %d", this, event,
-            param);
+    desc->mIoHandle = mId;
 
     switch (event) {
-    case AudioSystem::OUTPUT_OPENED:
-    case AudioSystem::OUTPUT_CONFIG_CHANGED:
-        desc.channelMask = mChannelMask;
-        desc.samplingRate = mSampleRate;
-        desc.format = mFormat;
-        desc.frameCount = mNormalFrameCount; // FIXME see
+    case AUDIO_OUTPUT_OPENED:
+    case AUDIO_OUTPUT_CONFIG_CHANGED:
+        desc->mPatch = mPatch;
+        desc->mChannelMask = mChannelMask;
+        desc->mSamplingRate = mSampleRate;
+        desc->mFormat = mFormat;
+        desc->mFrameCount = mNormalFrameCount; // FIXME see
                                              // AudioFlinger::frameCount(audio_io_handle_t)
-        desc.latency = latency_l();
-        param2 = &desc;
+        desc->mLatency = latency_l();
         break;
 
-    case AudioSystem::STREAM_CONFIG_CHANGED:
-        param2 = &param;
-    case AudioSystem::OUTPUT_CLOSED:
+    case AUDIO_OUTPUT_CLOSED:
     default:
         break;
     }
-    mAudioFlinger->audioConfigChanged(event, mId, param2);
+    mAudioFlinger->ioConfigChanged(event, desc, pid);
 }
 
 void AudioFlinger::PlaybackThread::writeCallback()
@@ -1810,8 +2086,8 @@
 void AudioFlinger::PlaybackThread::readOutputParameters_l()
 {
     // unfortunately we have no way of recovering from errors here, hence the LOG_ALWAYS_FATAL
-    mSampleRate = mOutput->stream->common.get_sample_rate(&mOutput->stream->common);
-    mChannelMask = mOutput->stream->common.get_channels(&mOutput->stream->common);
+    mSampleRate = mOutput->getSampleRate();
+    mChannelMask = mOutput->getChannelMask();
     if (!audio_is_output_channel(mChannelMask)) {
         LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
     }
@@ -1821,8 +2097,12 @@
                 mChannelMask);
     }
     mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
+
+    // Get actual HAL format.
     mHALFormat = mOutput->stream->common.get_format(&mOutput->stream->common);
-    mFormat = mHALFormat;
+    // Get format from the shim, which will be different than the HAL format
+    // if playing compressed audio over HDMI passthrough.
+    mFormat = mOutput->getFormat();
     if (!audio_is_valid_format(mFormat)) {
         LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
     }
@@ -1831,7 +2111,7 @@
         LOG_FATAL("HAL format %#x not supported for mixed output",
                 mFormat);
     }
-    mFrameSize = audio_stream_out_frame_size(mOutput->stream);
+    mFrameSize = mOutput->getFrameSize();
     mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
     mFrameCount = mBufferSize / mFrameSize;
     if (mFrameCount & 15) {
@@ -1860,6 +2140,25 @@
             ALOGW("direct output implements resume but not pause");
         }
     }
+    if (!mHwSupportsPause && mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) {
+        LOG_ALWAYS_FATAL("HW_AV_SYNC requested but HAL does not implement pause and resume");
+    }
+
+    if (mType == DUPLICATING && mMixerBufferEnabled && mEffectBufferEnabled) {
+        // For best precision, we use float instead of the associated output
+        // device format (typically PCM 16 bit).
+
+        mFormat = AUDIO_FORMAT_PCM_FLOAT;
+        mFrameSize = mChannelCount * audio_bytes_per_sample(mFormat);
+        mBufferSize = mFrameSize * mFrameCount;
+
+        // TODO: We currently use the associated output device channel mask and sample rate.
+        // (1) Perhaps use the ORed channel mask of all downstream MixerThreads
+        // (if a valid mask) to avoid premature downmix.
+        // (2) Perhaps use the maximum sample rate of all downstream MixerThreads
+        // instead of the output device sample rate to avoid loss of high frequency information.
+        // This may need to be updated as MixerThread/OutputTracks are added and not here.
+    }
 
     // Calculate size of normal sink buffer relative to the HAL output buffer size
     double multiplier = 1.0;
@@ -1905,6 +2204,12 @@
     ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount,
             mNormalFrameCount);
 
+    // Check if we want to throttle the processing to no more than 2x normal rate
+    mThreadThrottle = property_get_bool("af.thread.throttle", true /* default_value */);
+    mThreadThrottleTimeMs = 0;
+    mThreadThrottleEndMs = 0;
+    mHalfBufferMs = mNormalFrameCount * 1000 / (2 * mSampleRate);
+
     // mSinkBuffer is the sink buffer.  Size is always multiple-of-16 frames.
     // Originally this was int16_t[] array, need to remove legacy implications.
     free(mSinkBuffer);
@@ -1966,7 +2271,7 @@
     } else {
         status_t status;
         uint32_t frames;
-        status = mOutput->stream->get_render_position(mOutput->stream, &frames);
+        status = mOutput->getRenderPosition(&frames);
         *dspFrames = (size_t)frames;
         return status;
     }
@@ -2008,13 +2313,13 @@
 }
 
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
+AudioStreamOut* AudioFlinger::PlaybackThread::getOutput() const
 {
     Mutex::Autolock _l(mLock);
     return mOutput;
 }
 
-AudioFlinger::AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
+AudioStreamOut* AudioFlinger::PlaybackThread::clearOutput()
 {
     Mutex::Autolock _l(mLock);
     AudioStreamOut *output = mOutput;
@@ -2137,6 +2442,7 @@
         } else {
             bytesWritten = framesWritten;
         }
+        mLatchDValid = false;
         status_t status = mNormalSink->getTimestamp(mLatchD.mTimestamp);
         if (status == NO_ERROR) {
             size_t totalFramesWritten = mNormalSink->framesWritten();
@@ -2159,8 +2465,7 @@
         }
         // FIXME We should have an implementation of timestamps for direct output threads.
         // They are used e.g for multichannel PCM playback over HDMI.
-        bytesWritten = mOutput->stream->write(mOutput->stream,
-                                                   (char *)mSinkBuffer + offset, mBytesRemaining);
+        bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
         if (mUseAsyncWrite &&
                 ((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
             // do not wait for async callback in case of error of full write
@@ -2206,9 +2511,9 @@
 /*
 The derived values that are cached:
  - mSinkBufferSize from frame count * frame size
- - activeSleepTime from activeSleepTimeUs()
- - idleSleepTime from idleSleepTimeUs()
- - standbyDelay from mActiveSleepTimeUs (DIRECT only)
+ - mActiveSleepTimeUs from activeSleepTimeUs()
+ - mIdleSleepTimeUs from idleSleepTimeUs()
+ - mStandbyDelayNs from mActiveSleepTimeUs (DIRECT only)
  - maxPeriod from frame count and sample rate (MIXER only)
 
 The parameters that affect these derived values are:
@@ -2225,8 +2530,8 @@
 void AudioFlinger::PlaybackThread::cacheParameters_l()
 {
     mSinkBufferSize = mNormalFrameCount * mFrameSize;
-    activeSleepTime = activeSleepTimeUs();
-    idleSleepTime = idleSleepTimeUs();
+    mActiveSleepTimeUs = activeSleepTimeUs();
+    mIdleSleepTimeUs = idleSleepTimeUs();
 }
 
 void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
@@ -2393,7 +2698,7 @@
 {
     Vector< sp<Track> > tracksToRemove;
 
-    standbyTime = systemTime();
+    mStandbyTimeNs = systemTime();
 
     // MIXER
     nsecs_t lastWarning = 0;
@@ -2405,7 +2710,7 @@
     int lastGeneration = 0;
 
     cacheParameters_l();
-    sleepTime = idleSleepTime;
+    mSleepTimeUs = mIdleSleepTimeUs;
 
     if (mType == MIXER) {
         sleepTimeShift = 0;
@@ -2467,19 +2772,29 @@
                 if (exitPending()) {
                     break;
                 }
-                releaseWakeLock_l();
+                bool released = false;
+                // The following works around a bug in the offload driver. Ideally we would release
+                // the wake lock every time, but that causes the last offload buffer(s) to be
+                // dropped while the device is on battery, so we need to hold a wake lock during
+                // the drain phase.
+                if (mBytesRemaining && !(mDrainSequence & 1)) {
+                    releaseWakeLock_l();
+                    released = true;
+                }
                 mWakeLockUids.clear();
                 mActiveTracksGeneration++;
                 ALOGV("wait async completion");
                 mWaitWorkCV.wait(mLock);
                 ALOGV("async completion/wake");
-                acquireWakeLock_l();
-                standbyTime = systemTime() + standbyDelay;
-                sleepTime = 0;
+                if (released) {
+                    acquireWakeLock_l();
+                }
+                mStandbyTimeNs = systemTime() + mStandbyDelayNs;
+                mSleepTimeUs = 0;
 
                 continue;
             }
-            if ((!mActiveTracks.size() && systemTime() > standbyTime) ||
+            if ((!mActiveTracks.size() && systemTime() > mStandbyTimeNs) ||
                                    isSuspended()) {
                 // put audio hardware into standby after short delay
                 if (shouldStandby_l()) {
@@ -2514,8 +2829,8 @@
                     mBytesRemaining = 0;
                     checkSilentMode_l();
 
-                    standbyTime = systemTime() + standbyDelay;
-                    sleepTime = idleSleepTime;
+                    mStandbyTimeNs = systemTime() + mStandbyDelayNs;
+                    mSleepTimeUs = mIdleSleepTimeUs;
                     if (mType == MIXER) {
                         sleepTimeShift = 0;
                     }
@@ -2546,15 +2861,15 @@
                 threadLoop_mix();
             } else if ((mMixerStatus != MIXER_DRAIN_TRACK)
                         && (mMixerStatus != MIXER_DRAIN_ALL)) {
-                // threadLoop_sleepTime sets sleepTime to 0 if data
+                // threadLoop_sleepTime sets mSleepTimeUs to 0 if data
                 // must be written to HAL
                 threadLoop_sleepTime();
-                if (sleepTime == 0) {
+                if (mSleepTimeUs == 0) {
                     mCurrentWriteLength = mSinkBufferSize;
                 }
             }
             // Either threadLoop_mix() or threadLoop_sleepTime() should have set
-            // mMixerBuffer with data if mMixerBufferValid is true and sleepTime == 0.
+            // mMixerBuffer with data if mMixerBufferValid is true and mSleepTimeUs == 0.
             // Merge mMixerBuffer data into mEffectBuffer (if any effects are valid)
             // or mSinkBuffer (if there are no effects).
             //
@@ -2562,7 +2877,7 @@
             // support higher precision, this needs to move.
             //
             // mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
-            // TODO use sleepTime == 0 as an additional condition.
+            // TODO use mSleepTimeUs == 0 as an additional condition.
             if (mMixerBufferValid) {
                 void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
                 audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
@@ -2573,14 +2888,14 @@
 
             mBytesRemaining = mCurrentWriteLength;
             if (isSuspended()) {
-                sleepTime = suspendSleepTimeUs();
+                mSleepTimeUs = suspendSleepTimeUs();
                 // simulate write to HAL when suspended
                 mBytesWritten += mSinkBufferSize;
                 mBytesRemaining = 0;
             }
 
             // only process effects if we're going to write
-            if (sleepTime == 0 && mType != OFFLOAD) {
+            if (mSleepTimeUs == 0 && mType != OFFLOAD) {
                 for (size_t i = 0; i < effectChains.size(); i ++) {
                     effectChains[i]->process_l();
                 }
@@ -2599,7 +2914,7 @@
         // Only if the Effects buffer is enabled and there is data in the
         // Effects buffer (buffer valid), we need to
         // copy into the sink buffer.
-        // TODO use sleepTime == 0 as an additional condition.
+        // TODO use mSleepTimeUs == 0 as an additional condition.
         if (mEffectBufferValid) {
             //ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
             memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
@@ -2610,10 +2925,11 @@
         unlockEffectChains(effectChains);
 
         if (!waitingAsyncCallback()) {
-            // sleepTime == 0 means we must write to audio hardware
-            if (sleepTime == 0) {
+            // mSleepTimeUs == 0 means we must write to audio hardware
+            if (mSleepTimeUs == 0) {
+                ssize_t ret = 0;
                 if (mBytesRemaining) {
-                    ssize_t ret = threadLoop_write();
+                    ret = threadLoop_write();
                     if (ret < 0) {
                         mBytesRemaining = 0;
                     } else {
@@ -2624,11 +2940,11 @@
                         (mMixerStatus == MIXER_DRAIN_ALL)) {
                     threadLoop_drain();
                 }
-                if (mType == MIXER) {
+                if (mType == MIXER && !mStandby) {
                     // write blocked detection
                     nsecs_t now = systemTime();
                     nsecs_t delta = now - mLastWriteTime;
-                    if (!mStandby && delta > maxPeriod) {
+                    if (delta > maxPeriod) {
                         mNumDelayedWrites++;
                         if ((now - lastWarning) > kWarningThrottleNs) {
                             ATRACE_NAME("underrun");
@@ -2637,10 +2953,48 @@
                             lastWarning = now;
                         }
                     }
+
+                    if (mThreadThrottle
+                            && mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
+                            && ret > 0) {                         // we wrote something
+                        // Limit MixerThread data processing to no more than twice the
+                        // expected processing rate.
+                        //
+                        // This helps prevent underruns with NuPlayer and other applications
+                        // which may set up buffers that are close to the minimum size, or use
+                        // deep buffers, and rely on a double-buffering sleep strategy to fill.
+                        //
+                        // The throttle smooths out sudden large data drains from the device,
+                        // e.g. when it comes out of standby, which often causes problems with
+                        // (1) mixer threads without a fast mixer (which has its own warm-up)
+                        // (2) minimum buffer sized tracks (even if the track is full,
+                        //     the app won't fill fast enough to handle the sudden draw).
+
+                        const int32_t deltaMs = delta / 1000000;
+                        const int32_t throttleMs = mHalfBufferMs - deltaMs;
+                        if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
+                            usleep(throttleMs * 1000);
+                            // notify of throttle start on verbose log
+                            ALOGV_IF(mThreadThrottleEndMs == mThreadThrottleTimeMs,
+                                    "mixer(%p) throttle begin:"
+                                    " ret(%zd) deltaMs(%d) requires sleep %d ms",
+                                    this, ret, deltaMs, throttleMs);
+                            mThreadThrottleTimeMs += throttleMs;
+                        } else {
+                            uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
+                            if (diff > 0) {
+                                // notify of throttle end on debug log
+                                ALOGD("mixer(%p) throttle end: throttle time(%u)", this, diff);
+                                mThreadThrottleEndMs = mThreadThrottleTimeMs;
+                            }
+                        }
+                    }
                 }
 
             } else {
-                usleep(sleepTime);
+                ATRACE_BEGIN("sleep");
+                usleep(mSleepTimeUs);
+                ATRACE_END();
             }
         }
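
A back-of-the-envelope illustration of the throttle above, with assumed values (a 960-frame normal sink at 48 kHz, so mHalfBufferMs = 960 * 1000 / (2 * 48000) = 10 ms): if one mix-and-write pass completes in deltaMs = 3 ms, then throttleMs = 10 - 3 = 7, the guard (10 >= 7 && 7 > 0) passes, and the thread sleeps 7 ms. Over time this keeps the mixer from consuming client data much faster than twice the normal buffer cadence, which is the sudden-drain behavior the comment block describes.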
 
@@ -2711,8 +3065,7 @@
     if ((mType == OFFLOAD || mType == DIRECT)
             && mOutput != NULL && mOutput->stream->get_presentation_position) {
         uint64_t position64;
-        int ret = mOutput->stream->get_presentation_position(
-                                                mOutput->stream, &position64, &timestamp.mTime);
+        int ret = mOutput->getPresentationPosition(&position64, &timestamp.mTime);
         if (ret == 0) {
             timestamp.mPosition = (uint32_t)position64;
             return NO_ERROR;
@@ -2721,21 +3074,83 @@
     return INVALID_OPERATION;
 }
 
+status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
+                                                          audio_patch_handle_t *handle)
+{
+    // if the FastMixer is not IDLE, holds its command to restore after the patch is processed
+    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
+    if (mFastMixer != 0) {
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        if (!(state->mCommand & FastMixerState::IDLE)) {
+            previousCommand = state->mCommand;
+            state->mCommand = FastMixerState::HOT_IDLE;
+            sq->end();
+            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
+        } else {
+            sq->end(false /*didModify*/);
+        }
+    }
+    status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
+
+    if (!(previousCommand & FastMixerState::IDLE)) {
+        ALOG_ASSERT(mFastMixer != 0);
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
+        state->mCommand = previousCommand;
+        sq->end();
+        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+    }
+
+    return status;
+}
+
 status_t AudioFlinger::PlaybackThread::createAudioPatch_l(const struct audio_patch *patch,
                                                           audio_patch_handle_t *handle)
 {
     status_t status = NO_ERROR;
-    if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        // store new device and send to effects
-        audio_devices_t type = AUDIO_DEVICE_NONE;
-        for (unsigned int i = 0; i < patch->num_sinks; i++) {
-            type |= patch->sinks[i].ext.device.type;
-        }
-        mOutDevice = type;
-        for (size_t i = 0; i < mEffectChains.size(); i++) {
-            mEffectChains[i]->setDevice_l(mOutDevice);
+
+    // store new device and send to effects
+    audio_devices_t type = AUDIO_DEVICE_NONE;
+    for (unsigned int i = 0; i < patch->num_sinks; i++) {
+        type |= patch->sinks[i].ext.device.type;
+    }
+
+#ifdef ADD_BATTERY_DATA
+    // when changing the audio output device, call addBatteryData to notify
+    // the change
+    if (mOutDevice != type) {
+        uint32_t params = 0;
+        // check whether speaker is on
+        if (type & AUDIO_DEVICE_OUT_SPEAKER) {
+            params |= IMediaPlayerService::kBatteryDataSpeakerOn;
         }
 
+        audio_devices_t deviceWithoutSpeaker
+            = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
+        // check if any other device (except speaker) is on
+        if (type & deviceWithoutSpeaker) {
+            params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
+        }
+
+        if (params != 0) {
+            addBatteryData(params);
+        }
+    }
+#endif
+
+    for (size_t i = 0; i < mEffectChains.size(); i++) {
+        mEffectChains[i]->setDevice_l(type);
+    }
+
+    // mPrevOutDevice is the latest device set by createAudioPatch_l(). It is not set when
+    // the thread is created so that the first patch creation triggers an ioConfigChanged callback
+    bool configChanged = mPrevOutDevice != type;
+    mOutDevice = type;
+    mPatch = *patch;
+
+    if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
         status = hwDevice->create_audio_patch(hwDevice,
                                                patch->num_sources,
@@ -2744,19 +3159,75 @@
                                                patch->sinks,
                                                handle);
     } else {
-        ALOG_ASSERT(false, "createAudioPatch_l() called on a pre 3.0 HAL");
+        char *address;
+        if (strcmp(patch->sinks[0].ext.device.address, "") != 0) {
+            //FIXME: we only support address on first sink with HAL version < 3.0
+            address = audio_device_address_to_parameter(
+                                                        patch->sinks[0].ext.device.type,
+                                                        patch->sinks[0].ext.device.address);
+        } else {
+            address = (char *)calloc(1, 1);
+        }
+        AudioParameter param = AudioParameter(String8(address));
+        free(address);
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), (int)type);
+        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+                param.toString().string());
+        *handle = AUDIO_PATCH_HANDLE_NONE;
     }
+    if (configChanged) {
+        mPrevOutDevice = type;
+        sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
+    }
+    return status;
+}
+
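For the pre-3.0 HAL fallback above, routing is conveyed as a key/value pair through set_parameters() rather than as an audio patch; a minimal sketch of the resulting parameter string (AUDIO_PARAMETER_STREAM_ROUTING is the key "routing"):

#include <string>

// Minimal sketch, not part of the patch: the legacy routing update amounts to
// sending "routing=<audio_devices_t mask in decimal>" to the output stream.
static std::string legacyRoutingParameter(int devices /* audio_devices_t */)
{
    return "routing=" + std::to_string(devices);
}
// e.g. AUDIO_DEVICE_OUT_SPEAKER (0x2) -> "routing=2"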
+status_t AudioFlinger::MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
+{
+    // if the FastMixer is not IDLE, holds its command to restore after the patch is released
+    FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
+    if (mFastMixer != 0) {
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        if (!(state->mCommand & FastMixerState::IDLE)) {
+            previousCommand = state->mCommand;
+            state->mCommand = FastMixerState::HOT_IDLE;
+            sq->end();
+            sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
+        } else {
+            sq->end(false /*didModify*/);
+        }
+    }
+
+    status_t status = PlaybackThread::releaseAudioPatch_l(handle);
+
+    if (!(previousCommand & FastMixerState::IDLE)) {
+        ALOG_ASSERT(mFastMixer != 0);
+        FastMixerStateQueue *sq = mFastMixer->sq();
+        FastMixerState *state = sq->begin();
+        ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
+        state->mCommand = previousCommand;
+        sq->end();
+        sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
+    }
+
     return status;
 }
 
 status_t AudioFlinger::PlaybackThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
 {
     status_t status = NO_ERROR;
+
+    mOutDevice = AUDIO_DEVICE_NONE;
+
     if (mOutput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mOutput->audioHwDev->hwDevice();
         status = hwDevice->release_audio_patch(hwDevice, handle);
     } else {
-        ALOG_ASSERT(false, "releaseAudioPatch_l() called on a pre 3.0 HAL");
+        AudioParameter param;
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
+        status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
+                param.toString().string());
     }
     return status;
 }
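Both MixerThread overrides above wrap the PlaybackThread call in the same park-and-restore idiom around the FastMixer state queue; the sketch below factors that idiom out, using stand-in types rather than the real FastMixerStateQueue API:

// Stand-in types only; a sketch of the park-and-restore idiom used by
// MixerThread::createAudioPatch_l() and releaseAudioPatch_l() above.
enum Command { HOT_IDLE = 1, COLD_IDLE = 2, IDLE = HOT_IDLE | COLD_IDLE };
enum BlockMode { BLOCK_UNTIL_ACKED, BLOCK_UNTIL_PUSHED };
struct State { int command = HOT_IDLE; };
struct StateQueue {
    State  s;
    State* begin() { return &s; }            // open the state for modification
    void   end(bool /*didModify*/ = true) {} // close it (publish if modified)
    void   push(BlockMode) {}                // in the real queue: hand the state
                                             // to the fast mixer, optionally block
};

template <typename HalOp>
int withFastMixerParked(StateQueue& sq, HalOp halOp)
{
    int previous = HOT_IDLE;
    State* state = sq.begin();
    if (!(state->command & IDLE)) {          // actively mixing: park it first
        previous = state->command;
        state->command = HOT_IDLE;           // idle, but keep the thread warm
        sq.end();
        sq.push(BLOCK_UNTIL_ACKED);          // wait until the fast mixer stops pulling
    } else {
        sq.end(false /*didModify*/);
    }

    const int status = halOp();              // the HAL can be reconfigured safely here

    if (!(previous & IDLE)) {                // restore the interrupted command
        state = sq.begin();
        state->command = previous;
        sq.end();
        sq.push(BLOCK_UNTIL_PUSHED);
    }
    return status;
}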
@@ -2784,8 +3255,8 @@
 // ----------------------------------------------------------------------------
 
 AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-        audio_io_handle_t id, audio_devices_t device, type_t type)
-    :   PlaybackThread(audioFlinger, output, id, device, type),
+        audio_io_handle_t id, audio_devices_t device, bool systemReady, type_t type)
+    :   PlaybackThread(audioFlinger, output, id, device, type, systemReady),
         // mAudioMixer below
         // mFastMixer below
         mFastMixerFutex(0)
@@ -2800,6 +3271,12 @@
             mNormalFrameCount);
     mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
 
+    if (type == DUPLICATING) {
+        // The Duplicating thread uses the AudioMixer and delivers data to OutputTracks
+        // (downstream MixerThreads) in DuplicatingThread::threadLoop_write().
+        // Do not create or use mFastMixer, mOutputSink, mPipeSink, or mNormalSink.
+        return;
+    }
     // create an NBAIO sink for the HAL output stream, and negotiate
     mOutputSink = new AudioStreamOutSink(output->stream);
     size_t numCounterOffers = 0;
@@ -2841,6 +3318,7 @@
         NBAIO_Format format = mOutputSink->format();
         NBAIO_Format origformat = format;
         // adjust format to match that of the Fast Mixer
+        ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
         format.mFormat = fastMixerFormat;
         format.mFrameSize = audio_bytes_per_sample(format.mFormat) * format.mChannelCount;
 
@@ -2911,11 +3389,7 @@
         // start the fast mixer
         mFastMixer->run("FastMixer", PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastMixer->getTid();
-        int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
-        if (err != 0) {
-            ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
-                    kPriorityFastMixer, getpid_cached, tid, err);
-        }
+        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
 
 #ifdef AUDIO_WATCHDOG
         // create and start the watchdog
@@ -2923,11 +3397,7 @@
         mAudioWatchdog->setDump(&mAudioWatchdogDump);
         mAudioWatchdog->run("AudioWatchdog", PRIORITY_URGENT_AUDIO);
         tid = mAudioWatchdog->getTid();
-        err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
-        if (err != 0) {
-            ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
-                    kPriorityFastMixer, getpid_cached, tid, err);
-        }
+        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
 #endif
 
     }
@@ -3020,8 +3490,10 @@
 #endif
             }
             state->mCommand = FastMixerState::MIX_WRITE;
+#ifdef FAST_THREAD_STATISTICS
             mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
-                    FastMixerDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
+                FastThreadDumpState::kSamplingNforLowRamDevice : FastThreadDumpState::kSamplingN);
+#endif
             sq->end();
             sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
             if (kUseFastMixer == FastMixer_Dynamic) {
@@ -3083,7 +3555,7 @@
 void AudioFlinger::PlaybackThread::threadLoop_standby()
 {
     ALOGV("Audio hardware entering standby, mixer %p, suspend count %d", this, mSuspended);
-    mOutput->stream->common.standby(&mOutput->stream->common);
+    mOutput->standby();
     if (mUseAsyncWrite != 0) {
         // discard any pending drain or write ack by incrementing sequence
         mWriteAckSequence = (mWriteAckSequence + 2) & ~1;
@@ -3124,11 +3596,11 @@
     // Only increase sleep time if the mixer is ready for two consecutive times to avoid
     // that a steady state of alternating ready/not ready conditions keeps the sleep time
     // such that we would underrun the audio HAL.
-    if ((sleepTime == 0) && (sleepTimeShift > 0)) {
+    if ((mSleepTimeUs == 0) && (sleepTimeShift > 0)) {
         sleepTimeShift--;
     }
-    sleepTime = 0;
-    standbyTime = systemTime() + standbyDelay;
+    mSleepTimeUs = 0;
+    mStandbyTimeNs = systemTime() + mStandbyDelayNs;
     //TODO: delay standby when effects have a tail
 
 }
@@ -3137,11 +3609,11 @@
 {
     // If no tracks are ready, sleep once for the duration of an output
     // buffer size, then write 0s to the output
-    if (sleepTime == 0) {
+    if (mSleepTimeUs == 0) {
         if (mMixerStatus == MIXER_TRACKS_ENABLED) {
-            sleepTime = activeSleepTime >> sleepTimeShift;
-            if (sleepTime < kMinThreadSleepTimeUs) {
-                sleepTime = kMinThreadSleepTimeUs;
+            mSleepTimeUs = mActiveSleepTimeUs >> sleepTimeShift;
+            if (mSleepTimeUs < kMinThreadSleepTimeUs) {
+                mSleepTimeUs = kMinThreadSleepTimeUs;
             }
             // reduce sleep time in case of consecutive application underruns to avoid
             // starving the audio HAL. As activeSleepTimeUs() is larger than a buffer
@@ -3151,7 +3623,7 @@
                 sleepTimeShift++;
             }
         } else {
-            sleepTime = idleSleepTime;
+            mSleepTimeUs = mIdleSleepTimeUs;
         }
     } else if (mBytesWritten != 0 || (mMixerStatus == MIXER_TRACKS_ENABLED)) {
         // clear out mMixerBuffer or mSinkBuffer, to ensure buffers are cleared
@@ -3161,7 +3633,7 @@
         } else {
             memset(mSinkBuffer, 0, mSinkBufferSize);
         }
-        sleepTime = 0;
+        mSleepTimeUs = 0;
         ALOGV_IF(mBytesWritten == 0 && (mMixerStatus == MIXER_TRACKS_ENABLED),
                 "anticipated start");
     }
@@ -3382,22 +3854,16 @@
         // hence the test on (mMixerStatus == MIXER_TRACKS_READY) meaning the track was mixed
         // during last round
         size_t desiredFrames;
-        uint32_t sr = track->sampleRate();
-        if (sr == mSampleRate) {
-            desiredFrames = mNormalFrameCount;
-        } else {
-            // +1 for rounding and +1 for additional sample needed for interpolation
-            desiredFrames = (mNormalFrameCount * sr) / mSampleRate + 1 + 1;
-            // add frames already consumed but not yet released by the resampler
-            // because mAudioTrackServerProxy->framesReady() will include these frames
-            desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
-#if 0
-            // the minimum track buffer size is normally twice the number of frames necessary
-            // to fill one buffer and the resampler should not leave more than one buffer worth
-            // of unreleased frames after each pass, but just in case...
-            ALOG_ASSERT(desiredFrames <= cblk->frameCount_);
-#endif
-        }
+        const uint32_t sampleRate = track->mAudioTrackServerProxy->getSampleRate();
+        AudioPlaybackRate playbackRate = track->mAudioTrackServerProxy->getPlaybackRate();
+
+        desiredFrames = sourceFramesNeededWithTimestretch(
+                sampleRate, mNormalFrameCount, mSampleRate, playbackRate.mSpeed);
+        // TODO: ONLY USED FOR LEGACY RESAMPLERS, remove when they are removed.
+        // add frames already consumed but not yet released by the resampler
+        // because mAudioTrackServerProxy->framesReady() will include these frames
+        desiredFrames += mAudioMixer->getUnreleasedFrames(track->name());
+
         uint32_t minFrames = 1;
         if ((track->sharedBuffer() == 0) && !track->isStopped() && !track->isPausing() &&
                 (mMixerStatusIgnoringFastTracks == MIXER_TRACKS_READY)) {
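A hedged approximation of the helper used above (the real sourceFramesNeededWithTimestretch() lives in AudioResamplerPublic.h; the +2 margins below are illustrative stand-ins for its rounding/interpolation allowance):

#include <cstddef>
#include <cstdint>

// Roughly: source frames needed to produce dstFrames output frames at the given
// resampling ratio and playback speed.
static size_t sourceFramesNeededSketch(uint32_t srcRate, size_t dstFrames,
                                       uint32_t dstRate, float speed)
{
    const size_t forResampler = (srcRate == dstRate)
            ? dstFrames
            : static_cast<size_t>((uint64_t)dstFrames * srcRate / dstRate) + 2;
    // the time stretcher consumes roughly 'speed' times as many input frames
    return static_cast<size_t>(forResampler * static_cast<double>(speed)) + 2;
}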
@@ -3405,6 +3871,23 @@
         }
 
         size_t framesReady = track->framesReady();
+        if (ATRACE_ENABLED()) {
+            // I wish we had formatted trace names
+            char traceName[16];
+            strcpy(traceName, "nRdy");
+            int name = track->name();
+            if (AudioMixer::TRACK0 <= name &&
+                    name < (int) (AudioMixer::TRACK0 + AudioMixer::MAX_NUM_TRACKS)) {
+                name -= AudioMixer::TRACK0;
+                traceName[4] = (name / 10) + '0';
+                traceName[5] = (name % 10) + '0';
+            } else {
+                traceName[4] = '?';
+                traceName[5] = '?';
+            }
+            traceName[6] = '\0';
+            ATRACE_INT(traceName, framesReady);
+        }
         if ((framesReady >= minFrames) && track->isReady() &&
                 !track->isPaused() && !track->isTerminated())
         {
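The digit math above is equivalent to the snprintf() call below, shown only for clarity; the hand-rolled version presumably avoids snprintf() on the mixer's hot path:

#include <cstdio>

// Equivalent formatting of the "nRdyNN" trace name built manually above.
static void trackTraceName(char out[16], int name, int track0, int maxTracks)
{
    if (track0 <= name && name < track0 + maxTracks) {
        snprintf(out, 16, "nRdy%02d", name - track0);   // e.g. "nRdy07"
    } else {
        snprintf(out, 16, "nRdy??");
    }
}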
@@ -3543,6 +4026,14 @@
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
                 (void *)(uintptr_t)reqSampleRate);
+
+            AudioPlaybackRate playbackRate = track->mAudioTrackServerProxy->getPlaybackRate();
+            mAudioMixer->setParameter(
+                name,
+                AudioMixer::TIMESTRETCH,
+                AudioMixer::PLAYBACK_RATE,
+                &playbackRate);
+
             /*
              * Select the appropriate output buffer for the track.
              *
@@ -3595,6 +4086,8 @@
             }
         } else {
             if (framesReady < desiredFrames && !track->isStopped() && !track->isPaused()) {
+                ALOGV("track(%p) underrun, framesReady(%zu) < framesDesired(%zu)",
+                        track, framesReady, desiredFrames);
                 track->mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
             }
             // clear effect chain input buffer if an active track underruns to avoid sending
@@ -3812,7 +4305,7 @@
             audio_devices_t deviceWithoutSpeaker
                 = AUDIO_DEVICE_OUT_ALL & ~AUDIO_DEVICE_OUT_SPEAKER;
             // check if any other device (except speaker) is on
-            if (value & deviceWithoutSpeaker ) {
+            if (value & deviceWithoutSpeaker) {
                 params |= IMediaPlayerService::kBatteryDataOtherAudioDeviceOn;
             }
 
@@ -3836,7 +4329,7 @@
         status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                 keyValuePair.string());
         if (!mStandby && status == INVALID_OPERATION) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
+            mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
@@ -3854,7 +4347,7 @@
                 }
                 mTracks[i]->mName = name;
             }
-            sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
+            sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
         }
     }
 
@@ -3879,7 +4372,7 @@
     String8 result;
 
     PlaybackThread::dumpInternals(fd, args);
-
+    dprintf(fd, "  Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
     dprintf(fd, "  AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
 
     // Make a non-atomic copy of fast mixer dump state so it won't change underneath us
@@ -3932,16 +4425,16 @@
 // ----------------------------------------------------------------------------
 
 AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
-        AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device)
-    :   PlaybackThread(audioFlinger, output, id, device, DIRECT)
+        AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady)
+    :   PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady)
         // mLeftVolFloat, mRightVolFloat
 {
 }
 
 AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
         AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
-        ThreadBase::type_t type)
-    :   PlaybackThread(audioFlinger, output, id, device, type)
+        ThreadBase::type_t type, bool systemReady)
+    :   PlaybackThread(audioFlinger, output, id, device, type, systemReady)
         // mLeftVolFloat, mRightVolFloat
 {
 }
@@ -3998,6 +4491,24 @@
     }
 }
 
+void AudioFlinger::DirectOutputThread::onAddNewTrack_l()
+{
+    sp<Track> previousTrack = mPreviousTrack.promote();
+    sp<Track> latestTrack = mLatestActiveTrack.promote();
+
+    if (previousTrack != 0 && latestTrack != 0) {
+        if (mType == DIRECT) {
+            if (previousTrack.get() != latestTrack.get()) {
+                mFlushPending = true;
+            }
+        } else /* mType == OFFLOAD */ {
+            if (previousTrack->sessionId() != latestTrack->sessionId()) {
+                mFlushPending = true;
+            }
+        }
+    }
+    PlaybackThread::onAddNewTrack_l();
+}
 
 AudioFlinger::PlaybackThread::mixer_state AudioFlinger::DirectOutputThread::prepareTracks_l(
     Vector< sp<Track> > *tracksToRemove
@@ -4007,7 +4518,6 @@
     mixer_state mixerStatus = MIXER_IDLE;
     bool doHwPause = false;
     bool doHwResume = false;
-    bool flushPending = false;
 
     // find out which tracks need to be processed
     for (size_t i = 0; i < count; i++) {
@@ -4017,6 +4527,12 @@
             continue;
         }
 
+        if (t->isInvalid()) {
+            ALOGW("An invalidated track shouldn't be in the active list");
+            tracksToRemove->add(t);
+            continue;
+        }
+
         Track* const track = t.get();
         audio_track_cblk_t* cblk = track->cblk();
         // Only consider last track started for volume and mixer state control.
@@ -4026,9 +4542,9 @@
         sp<Track> l = mLatestActiveTrack.promote();
         bool last = l.get() == track;
 
-        if (mHwSupportsPause && track->isPausing()) {
+        if (track->isPausing()) {
             track->setPaused();
-            if (last && !mHwPaused) {
+            if (mHwSupportsPause && last && !mHwPaused) {
                 doHwPause = true;
                 mHwPaused = true;
             }
@@ -4036,15 +4552,13 @@
         } else if (track->isFlushPending()) {
             track->flushAck();
             if (last) {
-                flushPending = true;
+                mFlushPending = true;
             }
-        } else if (mHwSupportsPause && track->isResumePending()){
+        } else if (track->isResumePending()) {
             track->resumeAck();
-            if (last) {
-                if (mHwPaused) {
-                    doHwResume = true;
-                    mHwPaused = false;
-                }
+            if (last && mHwPaused) {
+                doHwResume = true;
+                mHwPaused = false;
             }
         }
 
@@ -4054,9 +4568,10 @@
         // app does not call stop() and relies on underrun to stop:
         // hence the test on (track->mRetryCount > 1).
         // If retryCount<=1 then track is about to underrun and be removed.
+        // Do not use a high threshold for compressed audio.
         uint32_t minFrames;
         if ((track->sharedBuffer() == 0) && !track->isStopping_1() && !track->isPausing()
-            && (track->mRetryCount > 1)) {
+            && (track->mRetryCount > 1) && audio_is_linear_pcm(mFormat)) {
             minFrames = mNormalFrameCount;
         } else {
             minFrames = 1;
@@ -4079,11 +4594,22 @@
             // compute volume for this track
             processVolume_l(track, last);
             if (last) {
+                sp<Track> previousTrack = mPreviousTrack.promote();
+                if (previousTrack != 0) {
+                    if (track != previousTrack.get()) {
+                        // Flush any data still being written from last track
+                        mBytesRemaining = 0;
+                        // Invalidate previous track to force a seek when resuming.
+                        previousTrack->invalidate();
+                    }
+                }
+                mPreviousTrack = track;
+
                 // reset retry count
                 track->mRetryCount = kMaxTrackRetriesDirect;
                 mActiveTrack = t;
                 mixerStatus = MIXER_TRACKS_READY;
-                if (usesHwAvSync() && mHwPaused) {
+                if (mHwPaused) {
                     doHwResume = true;
                     mHwPaused = false;
                 }
@@ -4096,6 +4622,10 @@
             }
             if (track->isStopping_1()) {
                 track->mState = TrackBase::STOPPING_2;
+                if (last && mHwPaused) {
+                    doHwResume = true;
+                    mHwPaused = false;
+                }
             }
             if ((track->sharedBuffer() != 0) || track->isStopped() ||
                     track->isStopping_2() || track->isPaused()) {
@@ -4130,8 +4660,11 @@
                     // it will then automatically call start() when data is available
                     android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
                 } else if (last) {
+                    ALOGW("pause because of UNDERRUN, framesReady = %zu, "
+                            "minFrames = %u, mFormat = %#x",
+                            track->framesReady(), minFrames, mFormat);
                     mixerStatus = MIXER_TRACKS_ENABLED;
-                    if (usesHwAvSync() && !mHwPaused && !mStandby) {
+                    if (mHwSupportsPause && !mHwPaused && !mStandby) {
                         doHwPause = true;
                         mHwPaused = true;
                     }
@@ -4141,11 +4674,11 @@
     }
 
     // if an active track did not command a flush, check for pending flush on stopped tracks
-    if (!flushPending) {
+    if (!mFlushPending) {
         for (size_t i = 0; i < mTracks.size(); i++) {
             if (mTracks[i]->isFlushPending()) {
                 mTracks[i]->flushAck();
-                flushPending = true;
+                mFlushPending = true;
             }
         }
     }
@@ -4155,10 +4688,10 @@
     // before flush and then resume HW. This can happen in case of pause/flush/resume
     // if resume is received before pause is executed.
     if (mHwSupportsPause && !mStandby &&
-            (doHwPause || (flushPending && !mHwPaused && (count != 0)))) {
+            (doHwPause || (mFlushPending && !mHwPaused && (count != 0)))) {
         mOutput->stream->pause(mOutput->stream);
     }
-    if (flushPending) {
+    if (mFlushPending) {
         flushHw_l();
     }
     if (mHwSupportsPause && !mStandby && doHwResume) {
@@ -4178,8 +4711,8 @@
     while (frameCount) {
         AudioBufferProvider::Buffer buffer;
         buffer.frameCount = frameCount;
-        mActiveTrack->getNextBuffer(&buffer);
-        if (buffer.raw == NULL) {
+        status_t status = mActiveTrack->getNextBuffer(&buffer);
+        if (status != NO_ERROR || buffer.raw == NULL) {
             memset(curBuf, 0, frameCount * mFrameSize);
             break;
         }
@@ -4189,8 +4722,8 @@
         mActiveTrack->releaseBuffer(&buffer);
     }
     mCurrentWriteLength = curBuf - (int8_t *)mSinkBuffer;
-    sleepTime = 0;
-    standbyTime = systemTime() + standbyDelay;
+    mSleepTimeUs = 0;
+    mStandbyTimeNs = systemTime() + mStandbyDelayNs;
     mActiveTrack.clear();
 }
 
@@ -4198,18 +4731,18 @@
 {
     // do not write to HAL when paused
     if (mHwPaused || (usesHwAvSync() && mStandby)) {
-        sleepTime = idleSleepTime;
+        mSleepTimeUs = mIdleSleepTimeUs;
         return;
     }
-    if (sleepTime == 0) {
+    if (mSleepTimeUs == 0) {
         if (mMixerStatus == MIXER_TRACKS_ENABLED) {
-            sleepTime = activeSleepTime;
+            mSleepTimeUs = mActiveSleepTimeUs;
         } else {
-            sleepTime = idleSleepTime;
+            mSleepTimeUs = mIdleSleepTimeUs;
         }
     } else if (mBytesWritten != 0 && audio_is_linear_pcm(mFormat)) {
         memset(mSinkBuffer, 0, mFrameCount * mFrameSize);
-        sleepTime = 0;
+        mSleepTimeUs = 0;
     }
 }
 
@@ -4217,14 +4750,13 @@
 {
     {
         Mutex::Autolock _l(mLock);
-        bool flushPending = false;
         for (size_t i = 0; i < mTracks.size(); i++) {
             if (mTracks[i]->isFlushPending()) {
                 mTracks[i]->flushAck();
-                flushPending = true;
+                mFlushPending = true;
             }
         }
-        if (flushPending) {
+        if (mFlushPending) {
             flushHw_l();
         }
     }
@@ -4235,14 +4767,17 @@
 bool AudioFlinger::DirectOutputThread::shouldStandby_l()
 {
     bool trackPaused = false;
+    bool trackStopped = false;
 
     // do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
     // after a timeout and we will enter standby then.
     if (mTracks.size() > 0) {
         trackPaused = mTracks[mTracks.size() - 1]->isPaused();
+        trackStopped = mTracks[mTracks.size() - 1]->isStopped() ||
+                           mTracks[mTracks.size() - 1]->mState == TrackBase::IDLE;
     }
 
-    return !mStandby && !(trackPaused || (usesHwAvSync() && mHwPaused));
+    return !mStandby && !(trackPaused || (mHwPaused && !trackStopped));
 }
 
 // getTrackName_l() must be called with ThreadBase::mLock held
@@ -4291,7 +4826,7 @@
         status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
                                                 keyValuePair.string());
         if (!mStandby && status == INVALID_OPERATION) {
-            mOutput->stream->common.standby(&mOutput->stream->common);
+            mOutput->standby();
             mStandby = true;
             mBytesWritten = 0;
             status = mOutput->stream->common.set_parameters(&mOutput->stream->common,
@@ -4299,7 +4834,7 @@
         }
         if (status == NO_ERROR && reconfig) {
             readOutputParameters_l();
-            sendIoConfigEvent_l(AudioSystem::OUTPUT_CONFIG_CHANGED);
+            sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
         }
     }
 
@@ -4345,19 +4880,21 @@
 
     // use shorter standby delay as on normal output to release
     // hardware resources as soon as possible
-    if (audio_is_linear_pcm(mFormat)) {
-        standbyDelay = microseconds(activeSleepTime*2);
+    // no delay on outputs with HW A/V sync
+    if (usesHwAvSync()) {
+        mStandbyDelayNs = 0;
+    } else if ((mType == OFFLOAD) && !audio_is_linear_pcm(mFormat)) {
+        mStandbyDelayNs = kOffloadStandbyDelayNs;
     } else {
-        standbyDelay = kOffloadStandbyDelayNs;
+        mStandbyDelayNs = microseconds(mActiveSleepTimeUs*2);
     }
 }
 
 void AudioFlinger::DirectOutputThread::flushHw_l()
 {
-    if (mOutput->stream->flush != NULL) {
-        mOutput->stream->flush(mOutput->stream);
-    }
+    mOutput->flush();
     mHwPaused = false;
+    mFlushPending = false;
 }
 
 // ----------------------------------------------------------------------------
@@ -4464,8 +5001,8 @@
 
 // ----------------------------------------------------------------------------
 AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
-        AudioStreamOut* output, audio_io_handle_t id, uint32_t device)
-    :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD),
+        AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady)
+    :   DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady),
         mPausedBytesRemaining(0)
 {
     //FIXME: mStandby should be set to true by ThreadBase constructor
@@ -4530,7 +5067,7 @@
         if (track->isPausing()) {
             track->setPaused();
             if (last) {
-                if (!mHwPaused) {
+                if (mHwSupportsPause && !mHwPaused) {
                     doHwPause = true;
                     mHwPaused = true;
                 }
@@ -4566,7 +5103,7 @@
                     // resume an interrupted write
                 }
                 // enable write to audio HAL
-                sleepTime = 0;
+                mSleepTimeUs = 0;
 
                 // Do not handle new data in this iteration even if track->framesReady()
                 mixerStatus = MIXER_TRACKS_ENABLED;
@@ -4626,8 +5163,8 @@
                         // do not modify drain sequence if we are already draining. This happens
                         // when resuming from pause after drain.
                         if ((mDrainSequence & 1) == 0) {
-                            sleepTime = 0;
-                            standbyTime = systemTime() + standbyDelay;
+                            mSleepTimeUs = 0;
+                            mStandbyTimeNs = systemTime() + mStandbyDelayNs;
                             mixerStatus = MIXER_DRAIN_TRACK;
                             mDrainSequence += 2;
                         }
@@ -4646,7 +5183,7 @@
                     size_t audioHALFrames =
                             (mOutput->stream->get_latency(mOutput->stream)*mSampleRate) / 1000;
                     size_t framesWritten =
-                            mBytesWritten / audio_stream_out_frame_size(mOutput->stream);
+                            mBytesWritten / mOutput->getFrameSize();
                     track->presentationComplete(framesWritten, audioHALFrames);
                     track->reset();
                     tracksToRemove->add(track);
@@ -4679,7 +5216,6 @@
     }
     if (mFlushPending) {
         flushHw_l();
-        mFlushPending = false;
     }
     if (!mStandby && doHwResume) {
         mOutput->stream->resume(mOutput->stream);
@@ -4727,24 +5263,12 @@
     }
 }
 
-void AudioFlinger::OffloadThread::onAddNewTrack_l()
-{
-    sp<Track> previousTrack = mPreviousTrack.promote();
-    sp<Track> latestTrack = mLatestActiveTrack.promote();
-
-    if (previousTrack != 0 && latestTrack != 0 &&
-        (previousTrack->sessionId() != latestTrack->sessionId())) {
-        mFlushPending = true;
-    }
-    PlaybackThread::onAddNewTrack_l();
-}
-
 // ----------------------------------------------------------------------------
 
 AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
-        AudioFlinger::MixerThread* mainThread, audio_io_handle_t id)
+        AudioFlinger::MixerThread* mainThread, audio_io_handle_t id, bool systemReady)
     :   MixerThread(audioFlinger, mainThread->getOutput(), id, mainThread->outDevice(),
-                DUPLICATING),
+                    systemReady, DUPLICATING),
         mWaitTimeMs(UINT_MAX)
 {
     addOutputTrack(mainThread);
@@ -4769,19 +5293,19 @@
             memset(mSinkBuffer, 0, mSinkBufferSize);
         }
     }
-    sleepTime = 0;
+    mSleepTimeUs = 0;
     writeFrames = mNormalFrameCount;
     mCurrentWriteLength = mSinkBufferSize;
-    standbyTime = systemTime() + standbyDelay;
+    mStandbyTimeNs = systemTime() + mStandbyDelayNs;
 }
 
 void AudioFlinger::DuplicatingThread::threadLoop_sleepTime()
 {
-    if (sleepTime == 0) {
+    if (mSleepTimeUs == 0) {
         if (mMixerStatus == MIXER_TRACKS_ENABLED) {
-            sleepTime = activeSleepTime;
+            mSleepTimeUs = mActiveSleepTimeUs;
         } else {
-            sleepTime = idleSleepTime;
+            mSleepTimeUs = mIdleSleepTimeUs;
         }
     } else if (mBytesWritten != 0) {
         if (mMixerStatus == MIXER_TRACKS_ENABLED) {
@@ -4791,22 +5315,14 @@
             // flush remaining overflow buffers in output tracks
             writeFrames = 0;
         }
-        sleepTime = 0;
+        mSleepTimeUs = 0;
     }
 }
 
 ssize_t AudioFlinger::DuplicatingThread::threadLoop_write()
 {
-    // We convert the duplicating thread format to AUDIO_FORMAT_PCM_16_BIT
-    // for delivery downstream as needed. This in-place conversion is safe as
-    // AUDIO_FORMAT_PCM_16_BIT is smaller than any other supported format
-    // (AUDIO_FORMAT_PCM_8_BIT is not allowed here).
-    if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
-        memcpy_by_audio_format(mSinkBuffer, AUDIO_FORMAT_PCM_16_BIT,
-                               mSinkBuffer, mFormat, writeFrames * mChannelCount);
-    }
     for (size_t i = 0; i < outputTracks.size(); i++) {
-        outputTracks[i]->write(reinterpret_cast<int16_t*>(mSinkBuffer), writeFrames);
+        outputTracks[i]->write(mSinkBuffer, writeFrames);
     }
     mStandby = false;
     return (ssize_t)mSinkBufferSize;
@@ -4833,25 +5349,26 @@
 void AudioFlinger::DuplicatingThread::addOutputTrack(MixerThread *thread)
 {
     Mutex::Autolock _l(mLock);
-    // FIXME explain this formula
-    size_t frameCount = (3 * mNormalFrameCount * mSampleRate) / thread->sampleRate();
-    // OutputTrack is forced to AUDIO_FORMAT_PCM_16_BIT regardless of mFormat
-    // due to current usage case and restrictions on the AudioBufferProvider.
-    // Actual buffer conversion is done in threadLoop_write().
-    //
-    // TODO: This may change in the future, depending on multichannel
-    // (and non int16_t*) support on AF::PlaybackThread::OutputTrack
-    OutputTrack *outputTrack = new OutputTrack(thread,
+    // The downstream MixerThread consumes thread->frameCount() frames per mix pass.
+    // Adjust for thread->sampleRate() to determine minimum buffer frame count.
+    // Then triple buffer because Threads do not run synchronously and may not be clock locked.
+    const size_t frameCount =
+            3 * sourceFramesNeeded(mSampleRate, thread->frameCount(), thread->sampleRate());
+    // TODO: Consider asynchronous sample rate conversion to handle clock disparity
+    // from different OutputTracks and their associated MixerThreads (e.g. one may be
+    // nearly empty and the other may be dropping data).
+
+    sp<OutputTrack> outputTrack = new OutputTrack(thread,
                                             this,
                                             mSampleRate,
-                                            AUDIO_FORMAT_PCM_16_BIT,
+                                            mFormat,
                                             mChannelMask,
                                             frameCount,
                                             IPCThreadState::self()->getCallingUid());
     if (outputTrack->cblk() != NULL) {
         thread->setStreamVolume(AUDIO_STREAM_PATCH, 1.0f);
         mOutputTracks.add(outputTrack);
-        ALOGV("addOutputTrack() track %p, on thread %p", outputTrack, thread);
+        ALOGV("addOutputTrack() track %p, on thread %p", outputTrack.get(), thread);
         updateWaitTime_l();
     }
 }
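A worked example of the frameCount formula above, with illustrative rates (a 48 kHz duplicating thread feeding a 44.1 kHz MixerThread that consumes 1024 frames per pass); sourceFramesNeeded() is approximated here with a +2 frame margin:

#include <cstddef>
#include <cstdint>

// frameCount = 3 * sourceFramesNeeded(mSampleRate, thread->frameCount(),
//                                     thread->sampleRate()), approximated.
static size_t duplicatingBufferFrames(uint32_t srcRate, size_t dstFrames,
                                      uint32_t dstRate)
{
    const size_t perPass = (srcRate == dstRate)
            ? dstFrames
            : static_cast<size_t>((uint64_t)dstFrames * srcRate / dstRate) + 2;
    return 3 * perPass;   // triple buffer: the threads are not clock locked
}
// duplicatingBufferFrames(48000, 1024, 44100) == 3 * (1114 + 2) == 3348 frames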
@@ -4931,12 +5448,13 @@
                                          AudioStreamIn *input,
                                          audio_io_handle_t id,
                                          audio_devices_t outDevice,
-                                         audio_devices_t inDevice
+                                         audio_devices_t inDevice,
+                                         bool systemReady
 #ifdef TEE_SINK
                                          , const sp<NBAIO_Sink>& teeSink
 #endif
                                          ) :
-    ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD),
+    ThreadBase(audioFlinger, id, outDevice, inDevice, RECORD, systemReady),
     mInput(input), mActiveTracksGen(0), mRsmpInBuffer(NULL),
     // mRsmpInFrames and mRsmpInFramesP2 are set by readInputParameters_l()
     mRsmpInRear(0)
@@ -4955,8 +5473,8 @@
     // mFastCaptureNBLogWriter
     , mFastTrackAvail(false)
 {
-    snprintf(mName, kNameLength, "AudioIn_%X", id);
-    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mName);
+    snprintf(mThreadName, kThreadNameLength, "AudioIn_%X", id);
+    mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
 
     readInputParameters_l();
 
@@ -4977,26 +5495,13 @@
         initFastCapture = true;
         break;
     case FastCapture_Static:
-        uint32_t primaryOutputSampleRate;
-        {
-            AutoMutex _l(audioFlinger->mHardwareLock);
-            primaryOutputSampleRate = audioFlinger->mPrimaryOutputSampleRate;
-        }
-        initFastCapture =
-                // either capture sample rate is same as (a reasonable) primary output sample rate
-                (((primaryOutputSampleRate == 44100 || primaryOutputSampleRate == 48000) &&
-                    (mSampleRate == primaryOutputSampleRate)) ||
-                // or primary output sample rate is unknown, and capture sample rate is reasonable
-                ((primaryOutputSampleRate == 0) &&
-                    ((mSampleRate == 44100 || mSampleRate == 48000)))) &&
-                // and the buffer size is < 12 ms
-                (mFrameCount * 1000) / mSampleRate < 12;
+        initFastCapture = (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
         break;
     // case FastCapture_Dynamic:
     }
 
     if (initFastCapture) {
-        // create a Pipe for FastMixer to write to, and for us and fast tracks to read from
+        // create a Pipe for FastCapture to write to, and for us and fast tracks to read from
         NBAIO_Format format = mInputSource->format();
         size_t pipeFramesP2 = roundup(mSampleRate / 25);    // double-buffering of 20 ms each
         size_t pipeSize = pipeFramesP2 * Format_frameSize(format);
@@ -5055,12 +5560,7 @@
         // start the fast capture
         mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
         pid_t tid = mFastCapture->getTid();
-        int err = requestPriority(getpid_cached, tid, kPriorityFastMixer);
-        if (err != 0) {
-            ALOGW("Policy SCHED_FIFO priority %d is unavailable for pid %d tid %d; error %d",
-                    kPriorityFastCapture, getpid_cached, tid, err);
-        }
-
+        sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
 #ifdef AUDIO_WATCHDOG
         // FIXME
 #endif
@@ -5072,7 +5572,6 @@
     // FIXME mNormalSource
 }
 
-
 AudioFlinger::RecordThread::~RecordThread()
 {
     if (mFastCapture != 0) {
@@ -5092,12 +5591,12 @@
     }
     mAudioFlinger->unregisterWriter(mFastCaptureNBLogWriter);
     mAudioFlinger->unregisterWriter(mNBLogWriter);
-    delete[] mRsmpInBuffer;
+    free(mRsmpInBuffer);
 }
 
 void AudioFlinger::RecordThread::onFirstRef()
 {
-    run(mName, PRIORITY_URGENT_AUDIO);
+    run(mThreadName, PRIORITY_URGENT_AUDIO);
 }
 
 bool AudioFlinger::RecordThread::threadLoop()
@@ -5138,7 +5637,9 @@
 
         // sleep with mutex unlocked
         if (sleepUs > 0) {
+            ATRACE_BEGIN("sleep");
             usleep(sleepUs);
+            ATRACE_END();
             sleepUs = 0;
         }
 
@@ -5282,7 +5783,8 @@
                 state->mCommand = FastCaptureState::READ_WRITE;
 #if 0   // FIXME
                 mFastCaptureDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
-                        FastCaptureDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
+                        FastThreadDumpState::kSamplingNforLowRamDevice :
+                        FastThreadDumpState::kSamplingN);
 #endif
                 didModify = true;
             }
@@ -5322,7 +5824,7 @@
         // If an NBAIO source is present, use it to read the normal capture's data
         if (mPipeSource != 0) {
             size_t framesToRead = mBufferSize / mFrameSize;
-            framesRead = mPipeSource->read(&mRsmpInBuffer[rear * mChannelCount],
+            framesRead = mPipeSource->read((uint8_t*)mRsmpInBuffer + rear * mFrameSize,
                     framesToRead, AudioBufferProvider::kInvalidPTS);
             if (framesRead == 0) {
                 // since pipe is non-blocking, simulate blocking input
@@ -5331,7 +5833,7 @@
         // otherwise use the HAL / AudioStreamIn directly
         } else {
             ssize_t bytesRead = mInput->stream->read(mInput->stream,
-                    &mRsmpInBuffer[rear * mChannelCount], mBufferSize);
+                    (uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
             if (bytesRead < 0) {
                 framesRead = bytesRead;
             } else {
@@ -5351,13 +5853,13 @@
         ALOG_ASSERT(framesRead > 0);
 
         if (mTeeSink != 0) {
-            (void) mTeeSink->write(&mRsmpInBuffer[rear * mChannelCount], framesRead);
+            (void) mTeeSink->write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
         }
         // If destination is non-contiguous, we now correct for reading past end of buffer.
         {
             size_t part1 = mRsmpInFramesP2 - rear;
             if ((size_t) framesRead > part1) {
-                memcpy(mRsmpInBuffer, &mRsmpInBuffer[mRsmpInFramesP2 * mChannelCount],
+                memcpy(mRsmpInBuffer, (uint8_t*)mRsmpInBuffer + mRsmpInFramesP2 * mFrameSize,
                         (framesRead - part1) * mFrameSize);
             }
         }
@@ -5373,6 +5875,9 @@
                 continue;
             }
 
+            // TODO: This code probably should be moved to RecordTrack.
+            // TODO: Update the activeTrack buffer converter in case of reconfigure.
+
             enum {
                 OVERRUN_UNKNOWN,
                 OVERRUN_TRUE,
@@ -5387,131 +5892,28 @@
                 size_t framesOut = activeTrack->mSink.frameCount;
                 LOG_ALWAYS_FATAL_IF((status == OK) != (framesOut > 0));
 
-                int32_t front = activeTrack->mRsmpInFront;
-                ssize_t filled = rear - front;
+                // check available frames and handle overrun conditions
+                // if the record track isn't draining fast enough.
+                bool hasOverrun;
                 size_t framesIn;
-
-                if (filled < 0) {
-                    // should not happen, but treat like a massive overrun and re-sync
-                    framesIn = 0;
-                    activeTrack->mRsmpInFront = rear;
-                    overrun = OVERRUN_TRUE;
-                } else if ((size_t) filled <= mRsmpInFrames) {
-                    framesIn = (size_t) filled;
-                } else {
-                    // client is not keeping up with server, but give it latest data
-                    framesIn = mRsmpInFrames;
-                    activeTrack->mRsmpInFront = front = rear - framesIn;
+                activeTrack->mResamplerBufferProvider->sync(&framesIn, &hasOverrun);
+                if (hasOverrun) {
                     overrun = OVERRUN_TRUE;
                 }
-
                 if (framesOut == 0 || framesIn == 0) {
                     break;
                 }
 
-                if (activeTrack->mResampler == NULL) {
-                    // no resampling
-                    if (framesIn > framesOut) {
-                        framesIn = framesOut;
-                    } else {
-                        framesOut = framesIn;
-                    }
-                    int8_t *dst = activeTrack->mSink.i8;
-                    while (framesIn > 0) {
-                        front &= mRsmpInFramesP2 - 1;
-                        size_t part1 = mRsmpInFramesP2 - front;
-                        if (part1 > framesIn) {
-                            part1 = framesIn;
-                        }
-                        int8_t *src = (int8_t *)mRsmpInBuffer + (front * mFrameSize);
-                        if (mChannelCount == activeTrack->mChannelCount) {
-                            memcpy(dst, src, part1 * mFrameSize);
-                        } else if (mChannelCount == 1) {
-                            upmix_to_stereo_i16_from_mono_i16((int16_t *)dst, (const int16_t *)src,
-                                    part1);
-                        } else {
-                            downmix_to_mono_i16_from_stereo_i16((int16_t *)dst, (const int16_t *)src,
-                                    part1);
-                        }
-                        dst += part1 * activeTrack->mFrameSize;
-                        front += part1;
-                        framesIn -= part1;
-                    }
-                    activeTrack->mRsmpInFront += framesOut;
-
-                } else {
-                    // resampling
-                    // FIXME framesInNeeded should really be part of resampler API, and should
-                    //       depend on the SRC ratio
-                    //       to keep mRsmpInBuffer full so resampler always has sufficient input
-                    size_t framesInNeeded;
-                    // FIXME only re-calculate when it changes, and optimize for common ratios
-                    // Do not precompute in/out because floating point is not associative
-                    // e.g. a*b/c != a*(b/c).
-                    const double in(mSampleRate);
-                    const double out(activeTrack->mSampleRate);
-                    framesInNeeded = ceil(framesOut * in / out) + 1;
-                    ALOGV("need %u frames in to produce %u out given in/out ratio of %.4g",
-                                framesInNeeded, framesOut, in / out);
-                    // Although we theoretically have framesIn in circular buffer, some of those are
-                    // unreleased frames, and thus must be discounted for purpose of budgeting.
-                    size_t unreleased = activeTrack->mRsmpInUnrel;
-                    framesIn = framesIn > unreleased ? framesIn - unreleased : 0;
-                    if (framesIn < framesInNeeded) {
-                        ALOGV("not enough to resample: have %u frames in but need %u in to "
-                                "produce %u out given in/out ratio of %.4g",
-                                framesIn, framesInNeeded, framesOut, in / out);
-                        size_t newFramesOut = framesIn > 0 ? floor((framesIn - 1) * out / in) : 0;
-                        LOG_ALWAYS_FATAL_IF(newFramesOut >= framesOut);
-                        if (newFramesOut == 0) {
-                            break;
-                        }
-                        framesInNeeded = ceil(newFramesOut * in / out) + 1;
-                        ALOGV("now need %u frames in to produce %u out given out/in ratio of %.4g",
-                                framesInNeeded, newFramesOut, out / in);
-                        LOG_ALWAYS_FATAL_IF(framesIn < framesInNeeded);
-                        ALOGV("success 2: have %u frames in and need %u in to produce %u out "
-                              "given in/out ratio of %.4g",
-                              framesIn, framesInNeeded, newFramesOut, in / out);
-                        framesOut = newFramesOut;
-                    } else {
-                        ALOGV("success 1: have %u in and need %u in to produce %u out "
-                            "given in/out ratio of %.4g",
-                            framesIn, framesInNeeded, framesOut, in / out);
-                    }
-
-                    // reallocate mRsmpOutBuffer as needed; we will grow but never shrink
-                    if (activeTrack->mRsmpOutFrameCount < framesOut) {
-                        // FIXME why does each track need it's own mRsmpOutBuffer? can't they share?
-                        delete[] activeTrack->mRsmpOutBuffer;
-                        // resampler always outputs stereo
-                        activeTrack->mRsmpOutBuffer = new int32_t[framesOut * FCC_2];
-                        activeTrack->mRsmpOutFrameCount = framesOut;
-                    }
-
-                    // resampler accumulates, but we only have one source track
-                    memset(activeTrack->mRsmpOutBuffer, 0, framesOut * FCC_2 * sizeof(int32_t));
-                    activeTrack->mResampler->resample(activeTrack->mRsmpOutBuffer, framesOut,
-                            // FIXME how about having activeTrack implement this interface itself?
-                            activeTrack->mResamplerBufferProvider
-                            /*this*/ /* AudioBufferProvider* */);
-                    // ditherAndClamp() works as long as all buffers returned by
-                    // activeTrack->getNextBuffer() are 32 bit aligned which should be always true.
-                    if (activeTrack->mChannelCount == 1) {
-                        // temporarily type pun mRsmpOutBuffer from Q4.27 to int16_t
-                        ditherAndClamp(activeTrack->mRsmpOutBuffer, activeTrack->mRsmpOutBuffer,
-                                framesOut);
-                        // the resampler always outputs stereo samples:
-                        // do post stereo to mono conversion
-                        downmix_to_mono_i16_from_stereo_i16(activeTrack->mSink.i16,
-                                (const int16_t *)activeTrack->mRsmpOutBuffer, framesOut);
-                    } else {
-                        ditherAndClamp((int32_t *)activeTrack->mSink.raw,
-                                activeTrack->mRsmpOutBuffer, framesOut);
-                    }
-                    // now done with mRsmpOutBuffer
-
-                }
+                // Don't allow framesOut to be larger than what is possible with resampling
+                // from framesIn.
+                // This isn't strictly necessary but helps limit buffer resizing in
+                // RecordBufferConverter.  TODO: remove when no longer needed.
+                framesOut = min(framesOut,
+                        destinationFramesPossible(
+                                framesIn, mSampleRate, activeTrack->mSampleRate));
+                // process frames from the RecordThread buffer provider to the RecordTrack buffer
+                framesOut = activeTrack->mRecordBufferConverter->convert(
+                        activeTrack->mSink.raw, activeTrack->mResamplerBufferProvider, framesOut);
 
                 if (framesOut > 0 && (overrun == OVERRUN_UNKNOWN)) {
                     overrun = OVERRUN_FALSE;
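A hedged sketch of the clamp applied above (the real destinationFramesPossible() is in AudioResamplerPublic.h; the margin below is illustrative): the most output frames a resampler can deliver from framesIn input frames.

#include <cstddef>
#include <cstdint>

// Upper bound on output frames obtainable from srcFrames input frames at the
// given rates, keeping a small margin for resampler state.
static size_t destinationFramesPossibleSketch(size_t srcFrames, uint32_t srcRate,
                                              uint32_t dstRate)
{
    if (srcRate == dstRate) {
        return srcFrames;
    }
    const uint64_t dstFrames = (uint64_t)srcFrames * dstRate / srcRate;
    return dstFrames > 2 ? static_cast<size_t>(dstFrames) - 2 : 0;
}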
@@ -5652,8 +6054,9 @@
     // client expresses a preference for FAST, but we get the final say
     if (*flags & IAudioFlinger::TRACK_FAST) {
       if (
-            // use case: callback handler
-            (tid != -1) &&
+            // we formerly checked for a callback handler (non-0 tid),
+            // but that is no longer required for TRANSFER_OBTAIN mode
+            //
             // frame count is not specified, or is exactly the pipe depth
             ((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
             // PCM data
@@ -5819,12 +6222,9 @@
         // was initialized to some value closer to the thread's mRsmpInFront, then the track could
         // see previously buffered data before it called start(), but with greater risk of overrun.
 
-        recordTrack->mRsmpInFront = mRsmpInRear;
-        recordTrack->mRsmpInUnrel = 0;
-        // FIXME why reset?
-        if (recordTrack->mResampler != NULL) {
-            recordTrack->mResampler->reset();
-        }
+        recordTrack->mResamplerBufferProvider->reset();
+        // clear any converter state as new data will be discontinuous
+        recordTrack->mRecordBufferConverter->reset();
         recordTrack->mState = TrackBase::STARTING_2;
         // signal thread to start
         mWaitWorkCV.broadcast();
@@ -5942,15 +6342,17 @@
 {
     dprintf(fd, "\nInput thread %p:\n", this);
 
-    if (mActiveTracks.size() > 0) {
-        dprintf(fd, "  Buffer size: %zu bytes\n", mBufferSize);
-    } else {
+    dumpBase(fd, args);
+
+    if (mActiveTracks.size() == 0) {
         dprintf(fd, "  No active record clients\n");
     }
     dprintf(fd, "  Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
     dprintf(fd, "  Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
 
-    dumpBase(fd, args);
+    //  Make a non-atomic copy of fast capture dump state so it won't change underneath us
+    const FastCaptureDumpState copy(mFastCaptureDumpState);
+    copy.dump(fd);
 }
 
 void AudioFlinger::RecordThread::dumpTracks(int fd, const Vector<String16>& args __unused)
@@ -5998,12 +6400,52 @@
     write(fd, result.string(), result.size());
 }
 
+
+void AudioFlinger::RecordThread::ResamplerBufferProvider::reset()
+{
+    sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
+    RecordThread *recordThread = (RecordThread *) threadBase.get();
+    mRsmpInFront = recordThread->mRsmpInRear;
+    mRsmpInUnrel = 0;
+}
+
+void AudioFlinger::RecordThread::ResamplerBufferProvider::sync(
+        size_t *framesAvailable, bool *hasOverrun)
+{
+    sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
+    RecordThread *recordThread = (RecordThread *) threadBase.get();
+    const int32_t rear = recordThread->mRsmpInRear;
+    const int32_t front = mRsmpInFront;
+    const ssize_t filled = rear - front;
+
+    size_t framesIn;
+    bool overrun = false;
+    if (filled < 0) {
+        // should not happen, but treat like a massive overrun and re-sync
+        framesIn = 0;
+        mRsmpInFront = rear;
+        overrun = true;
+    } else if ((size_t) filled <= recordThread->mRsmpInFrames) {
+        framesIn = (size_t) filled;
+    } else {
+        // client is not keeping up with server, but give it latest data
+        framesIn = recordThread->mRsmpInFrames;
+        mRsmpInFront = /* front = */ rear - framesIn;
+        overrun = true;
+    }
+    if (framesAvailable != NULL) {
+        *framesAvailable = framesIn;
+    }
+    if (hasOverrun != NULL) {
+        *hasOverrun = overrun;
+    }
+}
+
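sync() above relies on free-running frame counters whose signed difference remains meaningful across wraparound; a small self-contained illustration (unsigned arithmetic is used so the standalone example stays well-defined):

#include <cassert>
#include <cstdint>

int main()
{
    uint32_t front = 0xFFFFFFF0u;                        // consumer index, about to wrap
    uint32_t rear  = front + 100;                        // producer wrote 100 more frames
    int32_t filled = static_cast<int32_t>(rear - front);
    assert(filled == 100);                               // still correct across the wrap
    (void)filled;

    // A negative difference would mean the indices are out of sync; sync()
    // treats that as a massive overrun and re-bases front to rear.
    return 0;
}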
 // AudioBufferProvider interface
 status_t AudioFlinger::RecordThread::ResamplerBufferProvider::getNextBuffer(
         AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
 {
-    RecordTrack *activeTrack = mRecordTrack;
-    sp<ThreadBase> threadBase = activeTrack->mThread.promote();
+    sp<ThreadBase> threadBase = mRecordTrack->mThread.promote();
     if (threadBase == 0) {
         buffer->frameCount = 0;
         buffer->raw = NULL;
@@ -6011,7 +6453,7 @@
     }
     RecordThread *recordThread = (RecordThread *) threadBase.get();
     int32_t rear = recordThread->mRsmpInRear;
-    int32_t front = activeTrack->mRsmpInFront;
+    int32_t front = mRsmpInFront;
     ssize_t filled = rear - front;
     // FIXME should not be P2 (don't want to increase latency)
     // FIXME if client not keeping up, discard
@@ -6028,17 +6470,16 @@
         part1 = ask;
     }
     if (part1 == 0) {
-        // Higher-level should keep mRsmpInBuffer full, and not call resampler if empty
-        LOG_ALWAYS_FATAL("RecordThread::getNextBuffer() starved");
+        // running out of data is fine, since the resampler will return a short count.
         buffer->raw = NULL;
         buffer->frameCount = 0;
-        activeTrack->mRsmpInUnrel = 0;
+        mRsmpInUnrel = 0;
         return NOT_ENOUGH_DATA;
     }
 
-    buffer->raw = recordThread->mRsmpInBuffer + front * recordThread->mChannelCount;
+    buffer->raw = (uint8_t*)recordThread->mRsmpInBuffer + front * recordThread->mFrameSize;
     buffer->frameCount = part1;
-    activeTrack->mRsmpInUnrel = part1;
+    mRsmpInUnrel = part1;
     return NO_ERROR;
 }
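
getNextBuffer() hands the resampler one contiguous region at a time, so the most it can return is the run of frames between the current read offset and the end of the power-of-two sized buffer. A standalone sketch of that computation (assuming, as the code above does, a power-of-two buffer size):

    #include <stddef.h>
    #include <stdint.h>

    // Largest contiguous run readable from a power-of-two ring buffer, starting at a
    // free-running front counter and limited to the number of filled frames.
    static size_t contiguousFrames(int32_t front, size_t filled, size_t sizeP2) {
        const size_t offset = (size_t) front & (sizeP2 - 1); // position inside the buffer
        const size_t untilWrap = sizeP2 - offset;            // frames before wrap-around
        return filled < untilWrap ? filled : untilWrap;
    }
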
 
@@ -6046,18 +6487,264 @@
 void AudioFlinger::RecordThread::ResamplerBufferProvider::releaseBuffer(
         AudioBufferProvider::Buffer* buffer)
 {
-    RecordTrack *activeTrack = mRecordTrack;
     size_t stepCount = buffer->frameCount;
     if (stepCount == 0) {
         return;
     }
-    ALOG_ASSERT(stepCount <= activeTrack->mRsmpInUnrel);
-    activeTrack->mRsmpInUnrel -= stepCount;
-    activeTrack->mRsmpInFront += stepCount;
+    ALOG_ASSERT(stepCount <= mRsmpInUnrel);
+    mRsmpInUnrel -= stepCount;
+    mRsmpInFront += stepCount;
     buffer->raw = NULL;
     buffer->frameCount = 0;
 }
 
+AudioFlinger::RecordThread::RecordBufferConverter::RecordBufferConverter(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate) :
+            mSrcChannelMask(AUDIO_CHANNEL_INVALID), // updateParameters will set following vars
+            // mSrcFormat
+            // mSrcSampleRate
+            // mDstChannelMask
+            // mDstFormat
+            // mDstSampleRate
+            // mSrcChannelCount
+            // mDstChannelCount
+            // mDstFrameSize
+            mBuf(NULL), mBufFrames(0), mBufFrameSize(0),
+            mResampler(NULL),
+            mIsLegacyDownmix(false),
+            mIsLegacyUpmix(false),
+            mRequiresFloat(false),
+            mInputConverterProvider(NULL)
+{
+    (void)updateParameters(srcChannelMask, srcFormat, srcSampleRate,
+            dstChannelMask, dstFormat, dstSampleRate);
+}
+
+AudioFlinger::RecordThread::RecordBufferConverter::~RecordBufferConverter() {
+    free(mBuf);
+    delete mResampler;
+    delete mInputConverterProvider;
+}
+
+size_t AudioFlinger::RecordThread::RecordBufferConverter::convert(void *dst,
+        AudioBufferProvider *provider, size_t frames)
+{
+    if (mInputConverterProvider != NULL) {
+        mInputConverterProvider->setBufferProvider(provider);
+        provider = mInputConverterProvider;
+    }
+
+    if (mResampler == NULL) {
+        ALOGVV("NO RESAMPLING sampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                mSrcSampleRate, mSrcFormat, mDstFormat);
+
+        AudioBufferProvider::Buffer buffer;
+        for (size_t i = frames; i > 0; ) {
+            buffer.frameCount = i;
+            status_t status = provider->getNextBuffer(&buffer, 0);
+            if (status != OK || buffer.frameCount == 0) {
+                frames -= i; // cannot fill request.
+                break;
+            }
+            // format convert to destination buffer
+            convertNoResampler(dst, buffer.raw, buffer.frameCount);
+
+            dst = (int8_t*)dst + buffer.frameCount * mDstFrameSize;
+            i -= buffer.frameCount;
+            provider->releaseBuffer(&buffer);
+        }
+    } else {
+        ALOGVV("RESAMPLING mSrcSampleRate:%u mDstSampleRate:%u mSrcFormat:%#x mDstFormat:%#x",
+                mSrcSampleRate, mDstSampleRate, mSrcFormat, mDstFormat);
+
+        // reallocate buffer if needed
+        if (mBufFrameSize != 0 && mBufFrames < frames) {
+            free(mBuf);
+            mBufFrames = frames;
+            (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+        }
+        // resampler accumulates, but we only have one source track
+        memset(mBuf, 0, frames * mBufFrameSize);
+        frames = mResampler->resample((int32_t*)mBuf, frames, provider);
+        // format convert to destination buffer
+        convertResampler(dst, mBuf, frames);
+    }
+    return frames;
+}
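
When no resampler is needed, convert() above simply drains the provider chunk by chunk and format-converts each chunk directly into the destination, returning however many frames it actually obtained. A self-contained sketch of that drain pattern with a trivial stand-in source (ChunkSource and the int16-to-float conversion are illustrative, not AudioFlinger types):

    #include <stddef.h>
    #include <stdint.h>

    // Trivial mono int16 source standing in for an AudioBufferProvider.
    struct ChunkSource {
        const int16_t *data;
        size_t framesLeft;
    };

    static size_t drainAndConvert(float *dst, ChunkSource *src, size_t frames) {
        size_t remaining = frames;
        while (remaining > 0 && src->framesLeft > 0) {
            size_t got = remaining < src->framesLeft ? remaining : src->framesLeft;
            for (size_t i = 0; i < got; i++) {
                dst[i] = src->data[i] / 32768.f;   // per-chunk int16 -> float conversion
            }
            dst += got;
            src->data += got;
            src->framesLeft -= got;
            remaining -= got;
        }
        return frames - remaining;                 // may be a short count, as in convert()
    }
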
+
+status_t AudioFlinger::RecordThread::RecordBufferConverter::updateParameters(
+        audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+        uint32_t srcSampleRate,
+        audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+        uint32_t dstSampleRate)
+{
+    // quick evaluation if there is any change.
+    if (mSrcFormat == srcFormat
+            && mSrcChannelMask == srcChannelMask
+            && mSrcSampleRate == srcSampleRate
+            && mDstFormat == dstFormat
+            && mDstChannelMask == dstChannelMask
+            && mDstSampleRate == dstSampleRate) {
+        return NO_ERROR;
+    }
+
+    ALOGV("RecordBufferConverter updateParameters srcMask:%#x dstMask:%#x"
+            "  srcFormat:%#x dstFormat:%#x  srcRate:%u dstRate:%u",
+            srcChannelMask, dstChannelMask, srcFormat, dstFormat, srcSampleRate, dstSampleRate);
+    const bool valid =
+            audio_is_input_channel(srcChannelMask)
+            && audio_is_input_channel(dstChannelMask)
+            && audio_is_valid_format(srcFormat) && audio_is_linear_pcm(srcFormat)
+            && audio_is_valid_format(dstFormat) && audio_is_linear_pcm(dstFormat)
+            && (srcSampleRate <= dstSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX)
+            ; // no upsampling checks for now
+    if (!valid) {
+        return BAD_VALUE;
+    }
+
+    mSrcFormat = srcFormat;
+    mSrcChannelMask = srcChannelMask;
+    mSrcSampleRate = srcSampleRate;
+    mDstFormat = dstFormat;
+    mDstChannelMask = dstChannelMask;
+    mDstSampleRate = dstSampleRate;
+
+    // compute derived parameters
+    mSrcChannelCount = audio_channel_count_from_in_mask(srcChannelMask);
+    mDstChannelCount = audio_channel_count_from_in_mask(dstChannelMask);
+    mDstFrameSize = mDstChannelCount * audio_bytes_per_sample(mDstFormat);
+
+    // do we need to resample?
+    delete mResampler;
+    mResampler = NULL;
+    if (mSrcSampleRate != mDstSampleRate) {
+        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_FLOAT,
+                mSrcChannelCount, mDstSampleRate);
+        mResampler->setSampleRate(mSrcSampleRate);
+        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
+    }
+
+    // are we running legacy channel conversion modes?
+    mIsLegacyDownmix = (mSrcChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mSrcChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK)
+                   && mDstChannelMask == AUDIO_CHANNEL_IN_MONO;
+    mIsLegacyUpmix = mSrcChannelMask == AUDIO_CHANNEL_IN_MONO
+                   && (mDstChannelMask == AUDIO_CHANNEL_IN_STEREO
+                            || mDstChannelMask == AUDIO_CHANNEL_IN_FRONT_BACK);
+
+    // do we need to process in float?
+    mRequiresFloat = mResampler != NULL || mIsLegacyDownmix || mIsLegacyUpmix;
+
+    // do we need a staging buffer to convert to the destination (we can still optimize this)?
+    // mBufFrameSize > 0 indicates both the staging frame size and that a staging buffer is needed
+    if (mResampler != NULL) {
+        mBufFrameSize = max(mSrcChannelCount, FCC_2)
+                * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mIsLegacyUpmix || mIsLegacyDownmix) { // legacy modes always float
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(AUDIO_FORMAT_PCM_FLOAT);
+    } else if (mSrcChannelMask != mDstChannelMask && mDstFormat != mSrcFormat) {
+        mBufFrameSize = mDstChannelCount * audio_bytes_per_sample(mSrcFormat);
+    } else {
+        mBufFrameSize = 0;
+    }
+    mBufFrames = 0; // force the buffer to be resized.
+
+    // do we need an input converter buffer provider to give us float?
+    delete mInputConverterProvider;
+    mInputConverterProvider = NULL;
+    if (mRequiresFloat && mSrcFormat != AUDIO_FORMAT_PCM_FLOAT) {
+        mInputConverterProvider = new ReformatBufferProvider(
+                audio_channel_count_from_in_mask(mSrcChannelMask),
+                mSrcFormat,
+                AUDIO_FORMAT_PCM_FLOAT,
+                256 /* provider buffer frame count */);
+    }
+
+    // do we need a remixer to do channel mask conversion?
+    if (!mIsLegacyDownmix && !mIsLegacyUpmix && mSrcChannelMask != mDstChannelMask) {
+        (void) memcpy_by_index_array_initialization_from_channel_mask(
+                mIdxAry, ARRAY_SIZE(mIdxAry), mDstChannelMask, mSrcChannelMask);
+    }
+    return NO_ERROR;
+}
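
The staging-buffer rule above can be summarized as a small pure function, where a zero result means "convert directly into the destination". This is only a restatement of the branches in updateParameters(), with illustrative parameter names:

    #include <stddef.h>
    #include <stdint.h>

    static size_t stagingFrameSize(bool hasResampler, bool legacyMonoStereoMix,
                                   bool channelMaskDiffers, bool formatDiffers,
                                   uint32_t srcChannels, uint32_t dstChannels,
                                   size_t srcSampleBytes, size_t floatBytes) {
        if (hasResampler) {                         // resampler path works in float, >= 2 channels
            return (srcChannels < 2 ? 2 : srcChannels) * floatBytes;
        }
        if (legacyMonoStereoMix) {                  // legacy up/downmix always processes float
            return dstChannels * floatBytes;
        }
        if (channelMaskDiffers && formatDiffers) {  // remix first, then format-convert to dst
            return dstChannels * srcSampleBytes;
        }
        return 0;                                   // no staging buffer required
    }
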
+
+void AudioFlinger::RecordThread::RecordBufferConverter::convertNoResampler(
+        void *dst, const void *src, size_t frames)
+{
+    // src is native type unless there is legacy upmix or downmix, whereupon it is float.
+    if (mBufFrameSize != 0 && mBufFrames < frames) {
+        free(mBuf);
+        mBufFrames = frames;
+        (void)posix_memalign(&mBuf, 32, mBufFrames * mBufFrameSize);
+    }
+    // do we need to do legacy upmix and downmix?
+    if (mIsLegacyUpmix || mIsLegacyDownmix) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        if (mIsLegacyUpmix) {
+            upmix_to_stereo_float_from_mono_float((float *)dstBuf,
+                    (const float *)src, frames);
+        } else /*mIsLegacyDownmix */ {
+            downmix_to_mono_float_from_stereo_float((float *)dstBuf,
+                    (const float *)src, frames);
+        }
+        if (mBuf != NULL) {
+            memcpy_by_audio_format(dst, mDstFormat, mBuf, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mDstChannelCount);
+        }
+        return;
+    }
+    // do we need to do channel mask conversion?
+    if (mSrcChannelMask != mDstChannelMask) {
+        void *dstBuf = mBuf != NULL ? mBuf : dst;
+        memcpy_by_index_array(dstBuf, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mSrcFormat), frames);
+        if (dstBuf == dst) {
+            return; // format is the same
+        }
+    }
+    // convert to destination buffer
+    const void *convertBuf = mBuf != NULL ? mBuf : src;
+    memcpy_by_audio_format(dst, mDstFormat, convertBuf, mSrcFormat,
+            frames * mDstChannelCount);
+}
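
The channel-mask conversion above relies on libaudioutils' memcpy_by_index_array() with the index array prepared in updateParameters(). A standalone float-only analogue, under the assumption that each destination channel's index entry names its source channel and a negative entry means silence (an illustration of the idea, not the library's exact contract):

    #include <stddef.h>
    #include <stdint.h>

    static void remixFloatByIndexArray(float *dst, size_t dstChannels,
                                       const float *src, size_t srcChannels,
                                       const int8_t *idxAry, size_t frames) {
        for (size_t f = 0; f < frames; f++) {
            for (size_t ch = 0; ch < dstChannels; ch++) {
                dst[ch] = idxAry[ch] >= 0 ? src[idxAry[ch]] : 0.f;
            }
            dst += dstChannels;
            src += srcChannels;
        }
    }
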
+
+void AudioFlinger::RecordThread::RecordBufferConverter::convertResampler(
+        void *dst, /*not-a-const*/ void *src, size_t frames)
+{
+    // src buffer format is ALWAYS float when entering this routine
+    if (mIsLegacyUpmix) {
+        ; // mono to stereo already handled by resampler
+    } else if (mIsLegacyDownmix
+            || (mSrcChannelMask == mDstChannelMask && mSrcChannelCount == 1)) {
+        // the resampler outputs stereo for mono input channel (a feature?)
+        // must convert to mono
+        downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+    } else if (mSrcChannelMask != mDstChannelMask) {
+        // the resampler upmixed a mono source to stereo; fold it back to mono before the
+        // channel mask conversion (could be skipped with further optimization).
+        if (mSrcChannelCount == 1) {
+            downmix_to_mono_float_from_stereo_float((float *)src,
+                (const float *)src, frames);
+        }
+        // convert to destination format (in place, OK as float is larger than other types)
+        if (mDstFormat != AUDIO_FORMAT_PCM_FLOAT) {
+            memcpy_by_audio_format(src, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+                    frames * mSrcChannelCount);
+        }
+        // channel convert and save to dst
+        memcpy_by_index_array(dst, mDstChannelCount,
+                src, mSrcChannelCount, mIdxAry, audio_bytes_per_sample(mDstFormat), frames);
+        return;
+    }
+    // convert to destination format and save to dst
+    memcpy_by_audio_format(dst, mDstFormat, src, AUDIO_FORMAT_PCM_FLOAT,
+            frames * mDstChannelCount);
+}
+
 bool AudioFlinger::RecordThread::checkForNewParameter_l(const String8& keyValuePair,
                                                         status_t& status)
 {
@@ -6067,6 +6754,7 @@
 
     audio_format_t reqFormat = mFormat;
     uint32_t samplingRate = mSampleRate;
+    // TODO this may change if we want to support capture of multi-channel HDMI PCM (e.g. on TVs).
     audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(mChannelCount);
 
     AudioParameter param = AudioParameter(keyValuePair);
@@ -6079,7 +6767,7 @@
         reconfig = true;
     }
     if (param.getInt(String8(AudioParameter::keyFormat), value) == NO_ERROR) {
-        if ((audio_format_t) value != AUDIO_FORMAT_PCM_16_BIT) {
+        if (!audio_is_linear_pcm((audio_format_t) value)) {
             status = BAD_VALUE;
         } else {
             reqFormat = (audio_format_t) value;
@@ -6088,7 +6776,8 @@
     }
     if (param.getInt(String8(AudioParameter::keyChannels), value) == NO_ERROR) {
         audio_channel_mask_t mask = (audio_channel_mask_t) value;
-        if (mask != AUDIO_CHANNEL_IN_MONO && mask != AUDIO_CHANNEL_IN_STEREO) {
+        if (!audio_is_input_channel(mask) ||
+                audio_channel_count_from_in_mask(mask) > FCC_8) {
             status = BAD_VALUE;
         } else {
             channelMask = mask;
@@ -6120,6 +6809,9 @@
             status = BAD_VALUE;
         } else {
             mInDevice = value;
+            if (value != AUDIO_DEVICE_NONE) {
+                mPrevInDevice = value;
+            }
             // disable AEC and NS if the device is a BT SCO headset supporting those
             // pre processings
             if (mTracks.size() > 0) {
@@ -6153,19 +6845,17 @@
         }
         if (reconfig) {
             if (status == BAD_VALUE &&
-                reqFormat == mInput->stream->common.get_format(&mInput->stream->common) &&
-                reqFormat == AUDIO_FORMAT_PCM_16_BIT &&
+                audio_is_linear_pcm(mInput->stream->common.get_format(&mInput->stream->common)) &&
+                audio_is_linear_pcm(reqFormat) &&
                 (mInput->stream->common.get_sample_rate(&mInput->stream->common)
-                        <= (2 * samplingRate)) &&
+                        <= (AUDIO_RESAMPLER_DOWN_RATIO_MAX * samplingRate)) &&
                 audio_channel_count_from_in_mask(
-                        mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_2 &&
-                (channelMask == AUDIO_CHANNEL_IN_MONO ||
-                        channelMask == AUDIO_CHANNEL_IN_STEREO)) {
+                        mInput->stream->common.get_channels(&mInput->stream->common)) <= FCC_8) {
                 status = NO_ERROR;
             }
             if (status == NO_ERROR) {
                 readInputParameters_l();
-                sendIoConfigEvent_l(AudioSystem::INPUT_CONFIG_CHANGED);
+                sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
             }
         }
     }
@@ -6186,26 +6876,27 @@
     return out_s8;
 }
 
-void AudioFlinger::RecordThread::audioConfigChanged(int event, int param __unused) {
-    AudioSystem::OutputDescriptor desc;
-    const void *param2 = NULL;
+void AudioFlinger::RecordThread::ioConfigChanged(audio_io_config_event event, pid_t pid) {
+    sp<AudioIoDescriptor> desc = new AudioIoDescriptor();
+
+    desc->mIoHandle = mId;
 
     switch (event) {
-    case AudioSystem::INPUT_OPENED:
-    case AudioSystem::INPUT_CONFIG_CHANGED:
-        desc.channelMask = mChannelMask;
-        desc.samplingRate = mSampleRate;
-        desc.format = mFormat;
-        desc.frameCount = mFrameCount;
-        desc.latency = 0;
-        param2 = &desc;
+    case AUDIO_INPUT_OPENED:
+    case AUDIO_INPUT_CONFIG_CHANGED:
+        desc->mPatch = mPatch;
+        desc->mChannelMask = mChannelMask;
+        desc->mSamplingRate = mSampleRate;
+        desc->mFormat = mFormat;
+        desc->mFrameCount = mFrameCount;
+        desc->mLatency = 0;
         break;
 
-    case AudioSystem::INPUT_CLOSED:
+    case AUDIO_INPUT_CLOSED:
     default:
         break;
     }
-    mAudioFlinger->audioConfigChanged(event, mId, param2);
+    mAudioFlinger->ioConfigChanged(event, desc, pid);
 }
 
 void AudioFlinger::RecordThread::readInputParameters_l()
@@ -6213,10 +6904,13 @@
     mSampleRate = mInput->stream->common.get_sample_rate(&mInput->stream->common);
     mChannelMask = mInput->stream->common.get_channels(&mInput->stream->common);
     mChannelCount = audio_channel_count_from_in_mask(mChannelMask);
+    if (mChannelCount > FCC_8) {
+        ALOGE("HAL channel count %d > %d", mChannelCount, FCC_8);
+    }
     mHALFormat = mInput->stream->common.get_format(&mInput->stream->common);
     mFormat = mHALFormat;
-    if (mFormat != AUDIO_FORMAT_PCM_16_BIT) {
-        ALOGE("HAL format %#x not supported; must be AUDIO_FORMAT_PCM_16_BIT", mFormat);
+    if (!audio_is_linear_pcm(mFormat)) {
+        ALOGE("HAL format %#x is not linear pcm", mFormat);
     }
     mFrameSize = audio_stream_in_frame_size(mInput->stream);
     mBufferSize = mInput->stream->common.get_buffer_size(&mInput->stream->common);
@@ -6227,9 +6921,11 @@
     // The value is somewhat arbitrary, and could probably be even larger.
     // A larger value should allow more old data to be read after a track calls start(),
     // without increasing latency.
+    //
+    // Note this is independent of the maximum downsampling ratio permitted for capture.
     mRsmpInFrames = mFrameCount * 7;
     mRsmpInFramesP2 = roundup(mRsmpInFrames);
-    delete[] mRsmpInBuffer;
+    free(mRsmpInBuffer);
 
     // TODO optimize audio capture buffer sizes ...
     // Here we calculate the size of the sliding buffer used as a source
@@ -6239,7 +6935,7 @@
     // The current value is higher than necessary.  However it should not add to latency.
 
     // Over-allocate beyond mRsmpInFramesP2 to permit a HAL read past end of buffer
-    mRsmpInBuffer = new int16_t[(mRsmpInFramesP2 + mFrameCount - 1) * mChannelCount];
+    (void)posix_memalign(&mRsmpInBuffer, 32, (mRsmpInFramesP2 + mFrameCount - 1) * mFrameSize);
 
     // AudioRecord mSampleRate and mChannelCount are constant due to AudioRecord API constraints.
     // But if thread's mSampleRate or mChannelCount changes, how will that affect active tracks?
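
Put together, the sizing above is: roughly seven HAL buffers of history, rounded up to a power of two for cheap index masking, plus almost one extra HAL buffer so a read from the HAL may run past the logical end. A standalone sketch of that calculation (roundUpToPowerOf2 stands in for the roundup() utility used above):

    #include <stddef.h>
    #include <stdlib.h>

    static size_t roundUpToPowerOf2(size_t v) {
        size_t p = 1;
        while (p < v) {
            p <<= 1;
        }
        return p;
    }

    // Returns a 32-byte aligned buffer of (framesP2 + halFrameCount - 1) frames, or NULL.
    static void *allocRsmpInBuffer(size_t halFrameCount, size_t frameSize,
                                   size_t *framesP2Out) {
        const size_t framesP2 = roundUpToPowerOf2(halFrameCount * 7);
        *framesP2Out = framesP2;
        void *buf = NULL;
        if (posix_memalign(&buf, 32, (framesP2 + halFrameCount - 1) * frameSize) != 0) {
            return NULL;
        }
        return buf;
    }
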
@@ -6343,33 +7039,35 @@
                                                           audio_patch_handle_t *handle)
 {
     status_t status = NO_ERROR;
-    if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
-        // store new device and send to effects
-        mInDevice = patch->sources[0].ext.device.type;
+
+    // store new device and send to effects
+    mInDevice = patch->sources[0].ext.device.type;
+    mPatch = *patch;
+    for (size_t i = 0; i < mEffectChains.size(); i++) {
+        mEffectChains[i]->setDevice_l(mInDevice);
+    }
+
+    // disable AEC and NS if the device is a BT SCO headset supporting those
+    // pre processings
+    if (mTracks.size() > 0) {
+        bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
+                            mAudioFlinger->btNrecIsOff();
+        for (size_t i = 0; i < mTracks.size(); i++) {
+            sp<RecordTrack> track = mTracks[i];
+            setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
+            setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId());
+        }
+    }
+
+    // store new source and send to effects
+    if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
+        mAudioSource = patch->sinks[0].ext.mix.usecase.source;
         for (size_t i = 0; i < mEffectChains.size(); i++) {
-            mEffectChains[i]->setDevice_l(mInDevice);
+            mEffectChains[i]->setAudioSource_l(mAudioSource);
         }
+    }
 
-        // disable AEC and NS if the device is a BT SCO headset supporting those
-        // pre processings
-        if (mTracks.size() > 0) {
-            bool suspend = audio_is_bluetooth_sco_device(mInDevice) &&
-                                mAudioFlinger->btNrecIsOff();
-            for (size_t i = 0; i < mTracks.size(); i++) {
-                sp<RecordTrack> track = mTracks[i];
-                setEffectSuspended_l(FX_IID_AEC, suspend, track->sessionId());
-                setEffectSuspended_l(FX_IID_NS, suspend, track->sessionId());
-            }
-        }
-
-        // store new source and send to effects
-        if (mAudioSource != patch->sinks[0].ext.mix.usecase.source) {
-            mAudioSource = patch->sinks[0].ext.mix.usecase.source;
-            for (size_t i = 0; i < mEffectChains.size(); i++) {
-                mEffectChains[i]->setAudioSource_l(mAudioSource);
-            }
-        }
-
+    if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
         status = hwDevice->create_audio_patch(hwDevice,
                                                patch->num_sources,
@@ -6378,19 +7076,47 @@
                                                patch->sinks,
                                                handle);
     } else {
-        ALOG_ASSERT(false, "createAudioPatch_l() called on a pre 3.0 HAL");
+        char *address;
+        if (strcmp(patch->sources[0].ext.device.address, "") != 0) {
+            address = audio_device_address_to_parameter(
+                                                patch->sources[0].ext.device.type,
+                                                patch->sources[0].ext.device.address);
+        } else {
+            address = (char *)calloc(1, 1);
+        }
+        AudioParameter param = AudioParameter(String8(address));
+        free(address);
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING),
+                     (int)patch->sources[0].ext.device.type);
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_INPUT_SOURCE),
+                                         (int)patch->sinks[0].ext.mix.usecase.source);
+        status = mInput->stream->common.set_parameters(&mInput->stream->common,
+                param.toString().string());
+        *handle = AUDIO_PATCH_HANDLE_NONE;
     }
+
+    if (mInDevice != mPrevInDevice) {
+        sendIoConfigEvent_l(AUDIO_INPUT_CONFIG_CHANGED);
+        mPrevInDevice = mInDevice;
+    }
+
     return status;
 }
 
 status_t AudioFlinger::RecordThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
 {
     status_t status = NO_ERROR;
+
+    mInDevice = AUDIO_DEVICE_NONE;
+
     if (mInput->audioHwDev->version() >= AUDIO_DEVICE_API_VERSION_3_0) {
         audio_hw_device_t *hwDevice = mInput->audioHwDev->hwDevice();
         status = hwDevice->release_audio_patch(hwDevice, handle);
     } else {
-        ALOG_ASSERT(false, "releaseAudioPatch_l() called on a pre 3.0 HAL");
+        AudioParameter param;
+        param.addInt(String8(AUDIO_PARAMETER_STREAM_ROUTING), 0);
+        status = mInput->stream->common.set_parameters(&mInput->stream->common,
+                param.toString().string());
     }
     return status;
 }
@@ -6415,4 +7141,4 @@
     config->ext.mix.usecase.source = mAudioSource;
 }
 
-}; // namespace android
+} // namespace android
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 56a42a8..46ac300 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,8 +32,11 @@
         OFFLOAD             // Thread class is OffloadThread
     };
 
+    static const char *threadTypeToString(type_t type);
+
     ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
-                audio_devices_t outDevice, audio_devices_t inDevice, type_t type);
+                audio_devices_t outDevice, audio_devices_t inDevice, type_t type,
+                bool systemReady);
     virtual             ~ThreadBase();
 
     virtual status_t    readyToRun();
@@ -90,30 +93,33 @@
         Condition mCond; // condition for status return
         status_t mStatus; // status communicated to sender
         bool mWaitStatus; // true if sender is waiting for status
+        bool mRequiresSystemReady; // true if must wait for system ready to enter event queue
         sp<ConfigEventData> mData;     // event specific parameter data
 
     protected:
-        ConfigEvent(int type) : mType(type), mStatus(NO_ERROR), mWaitStatus(false), mData(NULL) {}
+        ConfigEvent(int type, bool requiresSystemReady = false) :
+            mType(type), mStatus(NO_ERROR), mWaitStatus(false),
+            mRequiresSystemReady(requiresSystemReady), mData(NULL) {}
     };
 
     class IoConfigEventData : public ConfigEventData {
     public:
-        IoConfigEventData(int event, int param) :
-            mEvent(event), mParam(param) {}
+        IoConfigEventData(audio_io_config_event event, pid_t pid) :
+            mEvent(event), mPid(pid) {}
 
         virtual  void dump(char *buffer, size_t size) {
-            snprintf(buffer, size, "IO event: event %d, param %d\n", mEvent, mParam);
+            snprintf(buffer, size, "IO event: event %d\n", mEvent);
         }
 
-        const int mEvent;
-        const int mParam;
+        const audio_io_config_event mEvent;
+        const pid_t                 mPid;
     };
 
     class IoConfigEvent : public ConfigEvent {
     public:
-        IoConfigEvent(int event, int param) :
+        IoConfigEvent(audio_io_config_event event, pid_t pid) :
             ConfigEvent(CFG_EVENT_IO) {
-            mData = new IoConfigEventData(event, param);
+            mData = new IoConfigEventData(event, pid);
         }
         virtual ~IoConfigEvent() {}
     };
@@ -135,7 +141,7 @@
     class PrioConfigEvent : public ConfigEvent {
     public:
         PrioConfigEvent(pid_t pid, pid_t tid, int32_t prio) :
-            ConfigEvent(CFG_EVENT_PRIO) {
+            ConfigEvent(CFG_EVENT_PRIO, true) {
             mData = new PrioConfigEventData(pid, tid, prio);
         }
         virtual ~PrioConfigEvent() {}
@@ -250,13 +256,14 @@
                                                     status_t& status) = 0;
     virtual     status_t    setParameters(const String8& keyValuePairs);
     virtual     String8     getParameters(const String8& keys) = 0;
-    virtual     void        audioConfigChanged(int event, int param = 0) = 0;
+    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0) = 0;
                 // sendConfigEvent_l() must be called with ThreadBase::mLock held
                 // Can temporarily release the lock if waiting for a reply from
                 // processConfigEvents_l().
                 status_t    sendConfigEvent_l(sp<ConfigEvent>& event);
-                void        sendIoConfigEvent(int event, int param = 0);
-                void        sendIoConfigEvent_l(int event, int param = 0);
+                void        sendIoConfigEvent(audio_io_config_event event, pid_t pid = 0);
+                void        sendIoConfigEvent_l(audio_io_config_event event, pid_t pid = 0);
+                void        sendPrioConfigEvent(pid_t pid, pid_t tid, int32_t prio);
                 void        sendPrioConfigEvent_l(pid_t pid, pid_t tid, int32_t prio);
                 status_t    sendSetParameterConfigEvent_l(const String8& keyValuePair);
                 status_t    sendCreateAudioPatchConfigEvent(const struct audio_patch *patch,
@@ -358,6 +365,8 @@
 
                 virtual sp<IMemory> pipeMemory() const { return 0; }
 
+                        void systemReady();
+
     mutable     Mutex                   mLock;
 
 protected:
@@ -408,6 +417,7 @@
                 audio_channel_mask_t    mChannelMask;
                 uint32_t                mChannelCount;
                 size_t                  mFrameSize;
+                // not the HAL frame size; this is for the output sink (to pipe to the fast mixer)
                 audio_format_t          mFormat;           // Source format for Recording and
                                                            // Sink format for Playback.
                                                            // Sink format may be different than
@@ -416,6 +426,7 @@
                 size_t                  mBufferSize;       // HAL buffer size for read() or write()
 
                 Vector< sp<ConfigEvent> >     mConfigEvents;
+                Vector< sp<ConfigEvent> >     mPendingConfigEvents; // events awaiting system ready
 
                 // These fields are written and read by thread itself without lock or barrier,
                 // and read by other threads without lock or barrier via standby(), outDevice()
@@ -426,13 +437,16 @@
                 bool                    mStandby;     // Whether thread is currently in standby.
                 audio_devices_t         mOutDevice;   // output device
                 audio_devices_t         mInDevice;    // input device
-                audio_source_t          mAudioSource; // (see audio.h, audio_source_t)
+                audio_devices_t         mPrevOutDevice;   // previous output device
+                audio_devices_t         mPrevInDevice;    // previous input device
+                struct audio_patch      mPatch;
+                audio_source_t          mAudioSource;
 
                 const audio_io_handle_t mId;
                 Vector< sp<EffectChain> > mEffectChains;
 
-                static const int        kNameLength = 16;   // prctl(PR_SET_NAME) limit
-                char                    mName[kNameLength];
+                static const int        kThreadNameLength = 16; // prctl(PR_SET_NAME) limit
+                char                    mThreadName[kThreadNameLength]; // guaranteed NUL-terminated
                 sp<IPowerManager>       mPowerManager;
                 sp<IBinder>             mWakeLockToken;
                 const sp<PMDeathRecipient> mDeathRecipient;
@@ -442,6 +456,7 @@
                                         mSuspendedSessions;
                 static const size_t     kLogSize = 4 * 1024;
                 sp<NBLog::Writer>       mNBLogWriter;
+                bool                    mSystemReady;
 };
 
 // --- PlaybackThread ---
@@ -467,7 +482,7 @@
     static const int8_t kMaxTrackRetriesOffload = 20;
 
     PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                   audio_io_handle_t id, audio_devices_t device, type_t type);
+                   audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
     virtual             ~PlaybackThread();
 
                 void        dump(int fd, const Vector<String16>& args);
@@ -559,7 +574,7 @@
                                 { return android_atomic_acquire_load(&mSuspended) > 0; }
 
     virtual     String8     getParameters(const String8& keys);
-    virtual     void        audioConfigChanged(int event, int param = 0);
+    virtual     void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0);
                 status_t    getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames);
                 // FIXME rename mixBuffer() to sinkBuffer() and remove int16_t* dependency.
                 // Consider also removing and passing an explicit mMainBuffer initialization
@@ -601,6 +616,11 @@
     // updated by readOutputParameters_l()
     size_t                          mNormalFrameCount;  // normal mixer and effects
 
+    bool                            mThreadThrottle;     // throttle the thread processing
+    uint32_t                        mThreadThrottleTimeMs; // throttle time for MIXER threads
+    uint32_t                        mThreadThrottleEndMs;  // notify once per throttling
+    uint32_t                        mHalfBufferMs;       // half the buffer size in milliseconds
+
     void*                           mSinkBuffer;         // frame size aligned sink buffer
 
     // TODO:
@@ -712,8 +732,9 @@
                                    audio_patch_handle_t *handle);
     virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
 
-                bool        usesHwAvSync() const { return (mType == DIRECT) && (mOutput != NULL) &&
-                                                (mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC); }
+                bool        usesHwAvSync() const { return (mType == DIRECT) && (mOutput != NULL)
+                                    && mHwSupportsPause
+                                    && (mOutput->flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC); }
 
 private:
 
@@ -742,14 +763,14 @@
     bool                            mInWrite;
 
     // FIXME rename these former local variables of threadLoop to standard "m" names
-    nsecs_t                         standbyTime;
+    nsecs_t                         mStandbyTimeNs;
     size_t                          mSinkBufferSize;
 
     // cached copies of activeSleepTimeUs() and idleSleepTimeUs() made by cacheParameters_l()
-    uint32_t                        activeSleepTime;
-    uint32_t                        idleSleepTime;
+    uint32_t                        mActiveSleepTimeUs;
+    uint32_t                        mIdleSleepTimeUs;
 
-    uint32_t                        sleepTime;
+    uint32_t                        mSleepTimeUs;
 
     // mixer status returned by prepareTracks_l()
     mixer_state                     mMixerStatus; // current cycle
@@ -762,7 +783,7 @@
     uint32_t                        sleepTimeShift;
 
     // same as AudioFlinger::mStandbyTimeInNsecs except for DIRECT which uses a shorter value
-    nsecs_t                         standbyDelay;
+    nsecs_t                         mStandbyDelayNs;
 
     // MIXER only
     nsecs_t                         maxPeriod;
@@ -838,6 +859,7 @@
                 AudioStreamOut* output,
                 audio_io_handle_t id,
                 audio_devices_t device,
+                bool systemReady,
                 type_t type = MIXER);
     virtual             ~MixerThread();
 
@@ -864,6 +886,10 @@
     virtual     void        threadLoop_removeTracks(const Vector< sp<Track> >& tracksToRemove);
     virtual     uint32_t    correctLatency_l(uint32_t latency) const;
 
+    virtual     status_t    createAudioPatch_l(const struct audio_patch *patch,
+                                   audio_patch_handle_t *handle);
+    virtual     status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
+
                 AudioMixer* mAudioMixer;    // normal mixer
 private:
                 // one-time initialization, no locks required
@@ -895,7 +921,7 @@
 public:
 
     DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                       audio_io_handle_t id, audio_devices_t device);
+                       audio_io_handle_t id, audio_devices_t device, bool systemReady);
     virtual                 ~DirectOutputThread();
 
     // Thread virtuals
@@ -920,16 +946,22 @@
     virtual     void        threadLoop_exit();
     virtual     bool        shouldStandby_l();
 
+    virtual     void        onAddNewTrack_l();
+
     // volumes last sent to audio HAL with stream->set_volume()
     float mLeftVolFloat;
     float mRightVolFloat;
 
     DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                        audio_io_handle_t id, uint32_t device, ThreadBase::type_t type);
+                        audio_io_handle_t id, uint32_t device, ThreadBase::type_t type,
+                        bool systemReady);
     void processVolume_l(Track *track, bool lastTrack);
 
     // prepareTracks_l() tells threadLoop_mix() the name of the single active track
     sp<Track>               mActiveTrack;
+
+    wp<Track>               mPreviousTrack;         // used to detect track switch
+
 public:
     virtual     bool        hasFastMixer() const { return false; }
 };
@@ -938,7 +970,7 @@
 public:
 
     OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
-                        audio_io_handle_t id, uint32_t device);
+                        audio_io_handle_t id, uint32_t device, bool systemReady);
     virtual                 ~OffloadThread() {};
     virtual     void        flushHw_l();
 
@@ -949,12 +981,10 @@
 
     virtual     bool        waitingAsyncCallback();
     virtual     bool        waitingAsyncCallback_l();
-    virtual     void        onAddNewTrack_l();
 
 private:
     size_t      mPausedWriteLength;     // length in bytes of write interrupted by pause
     size_t      mPausedBytesRemaining;  // bytes still waiting in mixbuffer after resume
-    wp<Track>   mPreviousTrack;         // used to detect track switch
 };
 
 class AsyncCallbackThread : public Thread {
@@ -993,7 +1023,7 @@
 class DuplicatingThread : public MixerThread {
 public:
     DuplicatingThread(const sp<AudioFlinger>& audioFlinger, MixerThread* mainThread,
-                      audio_io_handle_t id);
+                      audio_io_handle_t id, bool systemReady);
     virtual                 ~DuplicatingThread();
 
     // Thread virtuals
@@ -1035,17 +1065,132 @@
 public:
 
     class RecordTrack;
+
+    /* The ResamplerBufferProvider is used to retrieve recorded input data from the
+     * RecordThread.  It maintains local state for the RecordTrack's read position
+     * relative to the RecordThread's buffer.
+     */
     class ResamplerBufferProvider : public AudioBufferProvider
-                        // derives from AudioBufferProvider interface for use by resampler
     {
     public:
-        ResamplerBufferProvider(RecordTrack* recordTrack) : mRecordTrack(recordTrack) { }
+        ResamplerBufferProvider(RecordTrack* recordTrack) :
+            mRecordTrack(recordTrack),
+            mRsmpInUnrel(0), mRsmpInFront(0) { }
         virtual ~ResamplerBufferProvider() { }
+
+        // called to set the ResamplerBufferProvider to the head of the RecordThread data buffer,
+        // skipping any previous data read from the HAL.
+        virtual void reset();
+
+        /* Synchronizes RecordTrack position with the RecordThread.
+         * Calculates available frames and handles overruns if the RecordThread
+         * has advanced faster than the ResamplerBufferProvider has retrieved data.
+         * TODO: why not do this for every getNextBuffer?
+         *
+         * Parameters
+         * framesAvailable:  optional pointer to a size_t that receives the number of
+         *                   record track frames available.
+         *      hasOverrun:  optional pointer to a bool, set to true if the track has overrun.
+         */
+
+        virtual void sync(size_t *framesAvailable = NULL, bool *hasOverrun = NULL);
+
         // AudioBufferProvider interface
         virtual status_t    getNextBuffer(AudioBufferProvider::Buffer* buffer, int64_t pts);
         virtual void        releaseBuffer(AudioBufferProvider::Buffer* buffer);
     private:
         RecordTrack * const mRecordTrack;
+        size_t              mRsmpInUnrel;   // unreleased frames remaining from
+                                            // most recent getNextBuffer
+                                            // for debug only
+        int32_t             mRsmpInFront;   // next available frame
+                                            // rolling counter that is never cleared
+    };
+
+    /* The RecordBufferConverter is used for format, channel, and sample rate
+     * conversion for a RecordTrack.
+     *
+     * TODO: Self-contained, so move it to a separate file later.
+     *
+     * RecordBufferConverter uses the convert() method rather than exposing a
+     * buffer provider interface; this is to save a memory copy.
+     */
+    class RecordBufferConverter
+    {
+    public:
+        RecordBufferConverter(
+                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+                uint32_t srcSampleRate,
+                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+                uint32_t dstSampleRate);
+
+        ~RecordBufferConverter();
+
+        /* Converts input data from an AudioBufferProvider by format, channelMask,
+         * and sampleRate to a destination buffer.
+         *
+         * Parameters
+         *      dst:  buffer to receive the converted data.
+         * provider:  buffer provider supplying the source data.
+         *   frames:  number of frames to convert.
+         *
+         * Returns the number of frames converted.
+         */
+        size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
+
+        // returns NO_ERROR if constructor was successful
+        status_t initCheck() const {
+            // mSrcChannelMask set on successful updateParameters
+            return mSrcChannelMask != AUDIO_CHANNEL_INVALID ? NO_ERROR : NO_INIT;
+        }
+
+        // allows dynamic reconfigure of all parameters
+        status_t updateParameters(
+                audio_channel_mask_t srcChannelMask, audio_format_t srcFormat,
+                uint32_t srcSampleRate,
+                audio_channel_mask_t dstChannelMask, audio_format_t dstFormat,
+                uint32_t dstSampleRate);
+
+        // called to reset resampler buffers on record track discontinuity
+        void reset() {
+            if (mResampler != NULL) {
+                mResampler->reset();
+            }
+        }
+
+    private:
+        // format conversion when not using resampler
+        void convertNoResampler(void *dst, const void *src, size_t frames);
+
+        // format conversion when using resampler; modifies src in-place
+        void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
+
+        // user provided information
+        audio_channel_mask_t mSrcChannelMask;
+        audio_format_t       mSrcFormat;
+        uint32_t             mSrcSampleRate;
+        audio_channel_mask_t mDstChannelMask;
+        audio_format_t       mDstFormat;
+        uint32_t             mDstSampleRate;
+
+        // derived information
+        uint32_t             mSrcChannelCount;
+        uint32_t             mDstChannelCount;
+        size_t               mDstFrameSize;
+
+        // format conversion buffer
+        void                *mBuf;
+        size_t               mBufFrames;
+        size_t               mBufFrameSize;
+
+        // resampler info
+        AudioResampler      *mResampler;
+
+        bool                 mIsLegacyDownmix;  // legacy stereo to mono conversion needed
+        bool                 mIsLegacyUpmix;    // legacy mono to stereo conversion needed
+        bool                 mRequiresFloat;    // data processing requires float (e.g. resampler)
+        PassthruBufferProvider *mInputConverterProvider;    // converts input to float
+        int8_t               mIdxAry[sizeof(uint32_t) * 8]; // used for channel mask conversion
     };
 
 #include "RecordTracks.h"
@@ -1054,7 +1199,8 @@
                     AudioStreamIn *input,
                     audio_io_handle_t id,
                     audio_devices_t outDevice,
-                    audio_devices_t inDevice
+                    audio_devices_t inDevice,
+                    bool systemReady
 #ifdef TEE_SINK
                     , const sp<NBAIO_Sink>& teeSink
 #endif
@@ -1110,7 +1256,7 @@
                                                status_t& status);
     virtual void        cacheParameters_l() {}
     virtual String8     getParameters(const String8& keys);
-    virtual void        audioConfigChanged(int event, int param = 0);
+    virtual void        ioConfigChanged(audio_io_config_event event, pid_t pid = 0);
     virtual status_t    createAudioPatch_l(const struct audio_patch *patch,
                                            audio_patch_handle_t *handle);
     virtual status_t    releaseAudioPatch_l(const audio_patch_handle_t handle);
@@ -1156,7 +1302,7 @@
             Condition                           mStartStopCond;
 
             // resampler converts input at HAL Hz to output at AudioRecord client Hz
-            int16_t                             *mRsmpInBuffer; // see new[] for details on the size
+            void                               *mRsmpInBuffer; //
             size_t                              mRsmpInFrames;  // size of resampler input in frames
             size_t                              mRsmpInFramesP2;// size rounded up to a power-of-2
 
@@ -1169,7 +1315,9 @@
             const sp<MemoryDealer>              mReadOnlyHeap;
 
             // one-time initialization, no locks required
-            sp<FastCapture>                     mFastCapture;   // non-0 if there is also a fast capture
+            sp<FastCapture>                     mFastCapture;   // non-0 if there is also
+                                                                // a fast capture
+
             // FIXME audio watchdog thread
 
             // contents are not guaranteed to be consistent, no locks required
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e970036..f7da209 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -20,6 +20,7 @@
 //#define LOG_NDEBUG 0
 
 #include "Configuration.h"
+#include <linux/futex.h>
 #include <math.h>
 #include <sys/syscall.h>
 #include <utils/Log.h>
@@ -403,10 +404,7 @@
     mIsInvalid(false),
     mAudioTrackServerProxy(NULL),
     mResumeToStopping(false),
-    mFlushHwPending(false),
-    mPreviousValid(false),
-    mPreviousFramesWritten(0)
-    // mPreviousTimestamp
+    mFlushHwPending(false)
 {
     // client == 0 implies sharedBuffer == 0
     ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
@@ -443,8 +441,6 @@
         //       this means we are potentially denying other more important fast tracks from
         //       being created.  It would be better to allocate the index dynamically.
         mFastIndex = i;
-        // Read the initial underruns because this field is never cleared by the fast mixer
-        mObservedUnderruns = thread->getFastTrackUnderruns(i);
         thread->mFastTrackAvailMask &= ~(1 << i);
     }
 }
@@ -693,6 +689,12 @@
         }
 
         PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+        if (isFastTrack()) {
+            // refresh fast track underruns on start because that field is never cleared
+            // by the fast mixer; furthermore, the same track can be recycled, i.e. start
+            // after stop.
+            mObservedUnderruns = playbackThread->getFastTrackUnderruns(mFastIndex);
+        }
         status = playbackThread->addTrack_l(this);
         if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
             triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
@@ -742,6 +744,7 @@
                 // move to STOPPING_2 when drain completes and then STOPPED
                 mState = STOPPING_1;
             }
+            playbackThread->broadcast_l();
             ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                     playbackThread);
         }
@@ -880,24 +883,27 @@
 {
     // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
     if (isFastTrack()) {
-        // FIXME no lock held to set mPreviousValid = false
         return INVALID_OPERATION;
     }
     sp<ThreadBase> thread = mThread.promote();
     if (thread == 0) {
-        // FIXME no lock held to set mPreviousValid = false
         return INVALID_OPERATION;
     }
+
     Mutex::Autolock _l(thread->mLock);
     PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+
+    status_t result = INVALID_OPERATION;
     if (!isOffloaded() && !isDirect()) {
         if (!playbackThread->mLatchQValid) {
-            mPreviousValid = false;
             return INVALID_OPERATION;
         }
-        uint32_t unpresentedFrames =
-                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
-                playbackThread->mSampleRate;
+        // FIXME Not accurate under dynamic changes of sample rate and speed.
+        // Do not use track's mSampleRate as it is not current for mixer tracks.
+        uint32_t sampleRate = mAudioTrackServerProxy->getSampleRate();
+        AudioPlaybackRate playbackRate = mAudioTrackServerProxy->getPlaybackRate();
+        uint32_t unpresentedFrames = ((double) playbackThread->mLatchQ.mUnpresentedFrames *
+                sampleRate * playbackRate.mSpeed) / playbackThread->mSampleRate;
         // FIXME Since we're using a raw pointer as the key, it is theoretically possible
         //       for a brand new track to share the same address as a recently destroyed
         //       track, and thus for us to get the frames released of the wrong track.
@@ -908,36 +914,16 @@
         uint32_t framesWritten = i >= 0 ?
                 playbackThread->mLatchQ.mFramesReleased[i] :
                 mAudioTrackServerProxy->framesReleased();
-        bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten;
-        if (framesWritten < unpresentedFrames) {
-            mPreviousValid = false;
-            return INVALID_OPERATION;
+        if (framesWritten >= unpresentedFrames) {
+            timestamp.mPosition = framesWritten - unpresentedFrames;
+            timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
+            result = NO_ERROR;
         }
-        mPreviousFramesWritten = framesWritten;
-        uint32_t position = framesWritten - unpresentedFrames;
-        struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime;
-        if (checkPreviousTimestamp) {
-            if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec ||
-                    (time.tv_sec == mPreviousTimestamp.mTime.tv_sec &&
-                    time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) {
-                ALOGW("Time is going backwards");
-            }
-            // position can bobble slightly as an artifact; this hides the bobble
-            static const uint32_t MINIMUM_POSITION_DELTA = 8u;
-            if ((position <= mPreviousTimestamp.mPosition) ||
-                    (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) {
-                position = mPreviousTimestamp.mPosition;
-                time = mPreviousTimestamp.mTime;
-            }
-        }
-        timestamp.mPosition = position;
-        timestamp.mTime = time;
-        mPreviousTimestamp = timestamp;
-        mPreviousValid = true;
-        return NO_ERROR;
+    } else { // offloaded or direct
+        result = playbackThread->getTimestamp_l(timestamp);
     }
 
-    return playbackThread->getTimestamp_l(timestamp);
+    return result;
 }
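
The scaling added above converts the mixer's unpresented frame count (measured at the thread's rate) into track frames, factoring in the track's current sample rate and playback speed. A standalone restatement with a worked example in the comment (names are illustrative):

    #include <stdint.h>

    static uint32_t unpresentedTrackFrames(uint32_t mixerUnpresented,
                                           uint32_t trackSampleRate, float speed,
                                           uint32_t threadSampleRate) {
        // e.g. 960 mixer frames at 48000 Hz for a 44100 Hz track at 1.0x speed -> 882 frames
        return (uint32_t)(((double) mixerUnpresented * trackSampleRate * speed)
                / threadSampleRate);
    }
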
 
 status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
@@ -1709,36 +1695,18 @@
     mActive = false;
 }
 
-bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
+bool AudioFlinger::PlaybackThread::OutputTrack::write(void* data, uint32_t frames)
 {
     Buffer *pInBuffer;
     Buffer inBuffer;
-    uint32_t channelCount = mChannelCount;
     bool outputBufferFull = false;
     inBuffer.frameCount = frames;
-    inBuffer.i16 = data;
+    inBuffer.raw = data;
 
     uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
 
     if (!mActive && frames != 0) {
-        start();
-        sp<ThreadBase> thread = mThread.promote();
-        if (thread != 0) {
-            MixerThread *mixerThread = (MixerThread *)thread.get();
-            if (mFrameCount > frames) {
-                if (mBufferQueue.size() < kMaxOverFlowBuffers) {
-                    uint32_t startFrames = (mFrameCount - frames);
-                    pInBuffer = new Buffer;
-                    pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
-                    pInBuffer->frameCount = startFrames;
-                    pInBuffer->i16 = pInBuffer->mBuffer;
-                    memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
-                    mBufferQueue.add(pInBuffer);
-                } else {
-                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
-                }
-            }
-        }
+        (void) start();
     }
 
     while (waitTimeLeftMs) {
@@ -1773,20 +1741,20 @@
 
         uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
                 pInBuffer->frameCount;
-        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
+        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * mFrameSize);
         Proxy::Buffer buf;
         buf.mFrameCount = outFrames;
         buf.mRaw = NULL;
         mClientProxy->releaseBuffer(&buf);
         pInBuffer->frameCount -= outFrames;
-        pInBuffer->i16 += outFrames * channelCount;
+        pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
         mOutBuffer.frameCount -= outFrames;
-        mOutBuffer.i16 += outFrames * channelCount;
+        mOutBuffer.raw = (int8_t *)mOutBuffer.raw + outFrames * mFrameSize;
 
         if (pInBuffer->frameCount == 0) {
             if (mBufferQueue.size()) {
                 mBufferQueue.removeAt(0);
-                delete [] pInBuffer->mBuffer;
+                free(pInBuffer->mBuffer);
                 delete pInBuffer;
                 ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
                         mThread.unsafe_get(), mBufferQueue.size());
@@ -1802,11 +1770,10 @@
         if (thread != 0 && !thread->standby()) {
             if (mBufferQueue.size() < kMaxOverFlowBuffers) {
                 pInBuffer = new Buffer;
-                pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
+                pInBuffer->mBuffer = malloc(inBuffer.frameCount * mFrameSize);
                 pInBuffer->frameCount = inBuffer.frameCount;
-                pInBuffer->i16 = pInBuffer->mBuffer;
-                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
-                        sizeof(int16_t));
+                pInBuffer->raw = pInBuffer->mBuffer;
+                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
                 mBufferQueue.add(pInBuffer);
                 ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
                         mThread.unsafe_get(), mBufferQueue.size());
@@ -1817,23 +1784,10 @@
         }
     }
 
-    // Calling write() with a 0 length buffer, means that no more data will be written:
-    // If no more buffers are pending, fill output track buffer to make sure it is started
-    // by output mixer.
-    if (frames == 0 && mBufferQueue.size() == 0) {
-        // FIXME borken, replace by getting framesReady() from proxy
-        size_t user = 0;    // was mCblk->user
-        if (user < mFrameCount) {
-            frames = mFrameCount - user;
-            pInBuffer = new Buffer;
-            pInBuffer->mBuffer = new int16_t[frames * channelCount];
-            pInBuffer->frameCount = frames;
-            pInBuffer->i16 = pInBuffer->mBuffer;
-            memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
-            mBufferQueue.add(pInBuffer);
-        } else if (mActive) {
-            stop();
-        }
+    // Calling write() with a 0 length buffer means that no more data will be written:
+    // We rely on stop() to set the appropriate flags to allow the remaining frames to play out.
+    if (frames == 0 && mBufferQueue.size() == 0 && mActive) {
+        stop();
     }
 
     return outputBufferFull;
@@ -1859,7 +1813,7 @@
 
     for (size_t i = 0; i < size; i++) {
         Buffer *pBuffer = mBufferQueue.itemAt(i);
-        delete [] pBuffer->mBuffer;
+        free(pBuffer->mBuffer);
         delete pBuffer;
     }
     mBufferQueue.clear();
@@ -1867,13 +1821,14 @@
 
 
 AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
+                                                     audio_stream_type_t streamType,
                                                      uint32_t sampleRate,
                                                      audio_channel_mask_t channelMask,
                                                      audio_format_t format,
                                                      size_t frameCount,
                                                      void *buffer,
                                                      IAudioFlinger::track_flags_t flags)
-    :   Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
+    :   Track(playbackThread, NULL, streamType,
               sampleRate, format, channelMask, frameCount,
               buffer, 0, 0, getuid(), flags, TYPE_PATCH),
               mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
@@ -1995,29 +1950,32 @@
                           ((flags & IAudioFlinger::TRACK_FAST) ? ALLOC_PIPE : ALLOC_CBLK) :
                           ((buffer == NULL) ? ALLOC_LOCAL : ALLOC_NONE),
                   type),
-        mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
-        // See real initialization of mRsmpInFront at RecordThread::start()
-        mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
+        mOverflow(false),
+        mFramesToDrop(0),
+        mResamplerBufferProvider(NULL), // initialize in case of early constructor exit
+        mRecordBufferConverter(NULL)
 {
     if (mCblk == NULL) {
         return;
     }
 
+    mRecordBufferConverter = new RecordBufferConverter(
+            thread->mChannelMask, thread->mFormat, thread->mSampleRate,
+            channelMask, format, sampleRate);
+    // Check if the RecordBufferConverter construction was successful.
+    // If not, don't continue with construction.
+    //
+    // NOTE: It would be extremely rare that the record track cannot be created
+    // for the current device, but a pending or future device change would make
+    // the record track configuration valid.
+    if (mRecordBufferConverter->initCheck() != NO_ERROR) {
+        ALOGE("RecordTrack unable to create record buffer converter");
+        return;
+    }
+
     mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
                                               mFrameSize, !isExternalTrack());
-
-    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
-    // FIXME I don't understand either of the channel count checks
-    if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 &&
-            channelCount <= FCC_2) {
-        // sink SR
-        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT,
-                thread->mChannelCount, sampleRate);
-        // source SR
-        mResampler->setSampleRate(thread->mSampleRate);
-        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
-        mResamplerBufferProvider = new ResamplerBufferProvider(this);
-    }
+    mResamplerBufferProvider = new ResamplerBufferProvider(this);
 
     if (flags & IAudioFlinger::TRACK_FAST) {
         ALOG_ASSERT(thread->mFastTrackAvail);
@@ -2028,11 +1986,19 @@
 AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
 {
     ALOGV("%s", __func__);
-    delete mResampler;
-    delete[] mRsmpOutBuffer;
+    delete mRecordBufferConverter;
     delete mResamplerBufferProvider;
 }
 
+status_t AudioFlinger::RecordThread::RecordTrack::initCheck() const
+{
+    status_t status = TrackBase::initCheck();
+    if (status == NO_ERROR && mServerProxy == 0) {
+        status = BAD_VALUE;
+    }
+    return status;
+}
+
 // AudioBufferProvider interface
 status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
         int64_t pts __unused)
@@ -2212,4 +2178,4 @@
     mProxy->releaseBuffer(buffer);
 }
 
-}; // namespace android
+} // namespace android
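
The Tracks.cpp hunks above replace 16-bit-only overflow buffers (new int16_t[frames * channelCount] / delete[]) with format-agnostic ones sized in bytes (malloc(frames * mFrameSize) / free()). A minimal, self-contained sketch of that pattern follows; OverflowBuffer, queueOverflow and clearOverflow are illustrative names, not AudioFlinger symbols.

    // Sketch only: overflow buffers sized from a per-frame byte size, so the same
    // queue works for 16-bit, float or multichannel PCM.
    #include <cstdlib>
    #include <cstring>
    #include <vector>

    struct OverflowBuffer {
        void*  raw;         // format-agnostic payload
        size_t frameCount;  // number of frames stored
    };

    static bool queueOverflow(std::vector<OverflowBuffer>& queue, size_t maxBuffers,
                              const void* src, size_t frameCount, size_t frameSize) {
        if (queue.size() >= maxBuffers) {
            return false;                        // caller reports an overflow
        }
        OverflowBuffer b;
        b.frameCount = frameCount;
        b.raw = malloc(frameCount * frameSize);  // bytes, not int16_t elements
        memcpy(b.raw, src, frameCount * frameSize);
        queue.push_back(b);
        return true;
    }

    static void clearOverflow(std::vector<OverflowBuffer>& queue) {
        for (OverflowBuffer& b : queue) {
            free(b.raw);                         // matches the malloc() above
        }
        queue.clear();
    }

    int main() {
        std::vector<OverflowBuffer> q;
        const size_t kFrameSize = 2 /*channels*/ * sizeof(float);
        float src[2 * 4] = {};                   // 4 stereo float frames of silence
        queueOverflow(q, 10 /*maxBuffers*/, src, 4, kFrameSize);
        clearOverflow(q);
        return 0;
    }
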
diff --git a/services/audioflinger/test-resample.cpp b/services/audioflinger/test-resample.cpp
index 84a655a..7893778 100644
--- a/services/audioflinger/test-resample.cpp
+++ b/services/audioflinger/test-resample.cpp
@@ -427,6 +427,14 @@
         printf("quality: %d  channels: %d  msec: %" PRId64 "  Mfrms/s: %.2lf\n",
                 quality, channels, time/1000000, output_frames * looplimit / (time / 1e9) / 1e6);
         resampler->reset();
+
+        // TODO fix legacy bug: reset does not clear buffers.
+        // delete and recreate resampler here.
+        delete resampler;
+        resampler = AudioResampler::create(format, channels,
+                    output_freq, quality);
+        resampler->setSampleRate(input_freq);
+        resampler->setVolume(AudioResampler::UNITY_GAIN_FLOAT, AudioResampler::UNITY_GAIN_FLOAT);
     }
 
     memset(output_vaddr, 0, output_size);
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 8604ef5..e152468 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -30,20 +30,16 @@
 #
 include $(CLEAR_VARS)
 
-# Clang++ aborts on AudioMixer.cpp,
-# b/18373866, "do not know how to split this operator."
-ifeq ($(filter $(TARGET_ARCH),arm arm64),$(TARGET_ARCH))
-    LOCAL_CLANG := false
-endif
-
 LOCAL_SRC_FILES:= \
 	test-mixer.cpp \
 	../AudioMixer.cpp.arm \
+	../BufferProviders.cpp
 
 LOCAL_C_INCLUDES := \
 	$(call include-path-for, audio-effects) \
 	$(call include-path-for, audio-utils) \
-	frameworks/av/services/audioflinger
+	frameworks/av/services/audioflinger \
+	external/sonic
 
 LOCAL_STATIC_LIBRARIES := \
 	libsndfile
@@ -57,7 +53,8 @@
 	libdl \
 	libcutils \
 	libutils \
-	liblog
+	liblog \
+	libsonic
 
 LOCAL_MODULE:= test-mixer
 
diff --git a/services/audioflinger/tests/build_and_run_all_unit_tests.sh b/services/audioflinger/tests/build_and_run_all_unit_tests.sh
index 2c453b0..7f4d456 100755
--- a/services/audioflinger/tests/build_and_run_all_unit_tests.sh
+++ b/services/audioflinger/tests/build_and_run_all_unit_tests.sh
@@ -15,7 +15,7 @@
 echo "waiting for device"
 adb root && adb wait-for-device remount
 adb push $OUT/system/lib/libaudioresampler.so /system/lib
-adb push $OUT/system/bin/resampler_tests /system/bin
+adb push $OUT/data/nativetest/resampler_tests /system/bin
 
 sh $ANDROID_BUILD_TOP/frameworks/av/services/audioflinger/tests/run_all_unit_tests.sh
 
diff --git a/services/audioflinger/tests/mixer_to_wav_tests.sh b/services/audioflinger/tests/mixer_to_wav_tests.sh
index 9b39e77..d0482a1 100755
--- a/services/audioflinger/tests/mixer_to_wav_tests.sh
+++ b/services/audioflinger/tests/mixer_to_wav_tests.sh
@@ -60,11 +60,21 @@
     fi
 
 # Test:
+# process__genericResampling with mixed integer and float track input
+# track__Resample / track__genericResample
+    adb shell test-mixer $1 -s 48000 \
+        -o /sdcard/tm48000grif.wav \
+        sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+        sine:f,6,6000,19000  chirp:i,4,30000
+    adb pull /sdcard/tm48000grif.wav $2
+
+# Test:
 # process__genericResampling
 # track__Resample / track__genericResample
     adb shell test-mixer $1 -s 48000 \
         -o /sdcard/tm48000gr.wav \
-        sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000
+        sine:2,4000,7520 chirp:2,9200 sine:1,3000,18000 \
+        sine:6,6000,19000
     adb pull /sdcard/tm48000gr.wav $2
 
 # Test:
diff --git a/services/audioflinger/tests/resampler_tests.cpp b/services/audioflinger/tests/resampler_tests.cpp
index d6217ba..9e375db 100644
--- a/services/audioflinger/tests/resampler_tests.cpp
+++ b/services/audioflinger/tests/resampler_tests.cpp
@@ -48,7 +48,10 @@
         if (thisFrames == 0 || thisFrames > outputFrames - i) {
             thisFrames = outputFrames - i;
         }
-        resampler->resample((int32_t*) output + channels*i, thisFrames, provider);
+        size_t framesResampled = resampler->resample(
+                (int32_t*) output + channels*i, thisFrames, provider);
+        // we should have enough buffer space, so there is no short count.
+        ASSERT_EQ(thisFrames, framesResampled);
         i += thisFrames;
     }
 }
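
The resampler_tests.cpp hunk above starts checking the frame count returned by resample() so that a short count fails the test. A simplified sketch of that loop-and-assert shape follows, using a generic produce callback in place of AudioResampler::resample(); the chunk size is a constant here rather than the CSV-driven values the real test uses.

    // Sketch only: every request must be fully satisfied because the output buffer
    // was sized for the whole run up front.
    #include <cassert>
    #include <cstddef>
    #include <functional>

    static void pumpAll(size_t outputFrames, size_t chunkFrames,
                        const std::function<size_t(size_t offsetFrames, size_t frames)>& produce) {
        for (size_t i = 0; i < outputFrames; ) {
            size_t thisFrames = chunkFrames;
            if (thisFrames == 0 || thisFrames > outputFrames - i) {
                thisFrames = outputFrames - i;   // clamp the final chunk
            }
            size_t got = produce(i, thisFrames);
            assert(got == thisFrames);           // no short count expected
            i += thisFrames;
        }
    }

    int main() {
        pumpAll(1000, 96, [](size_t, size_t frames) { return frames; });  // always a full count
        return 0;
    }
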
diff --git a/services/audioflinger/tests/test-mixer.cpp b/services/audioflinger/tests/test-mixer.cpp
index 9a4fad6..8da6245 100644
--- a/services/audioflinger/tests/test-mixer.cpp
+++ b/services/audioflinger/tests/test-mixer.cpp
@@ -39,7 +39,7 @@
     fprintf(stderr, "Usage: %s [-f] [-m] [-c channels]"
                     " [-s sample-rate] [-o <output-file>] [-a <aux-buffer-file>] [-P csv]"
                     " (<input-file> | <command>)+\n", name);
-    fprintf(stderr, "    -f    enable floating point input track\n");
+    fprintf(stderr, "    -f    enable floating point input track by default\n");
     fprintf(stderr, "    -m    enable floating point mixer output\n");
     fprintf(stderr, "    -c    number of mixer output channels\n");
     fprintf(stderr, "    -s    mixer sample-rate\n");
@@ -47,8 +47,8 @@
     fprintf(stderr, "    -a    <aux-buffer-file>\n");
     fprintf(stderr, "    -P    # frames provided per call to resample() in CSV format\n");
     fprintf(stderr, "    <input-file> is a WAV file\n");
-    fprintf(stderr, "    <command> can be 'sine:<channels>,<frequency>,<samplerate>'\n");
-    fprintf(stderr, "                     'chirp:<channels>,<samplerate>'\n");
+    fprintf(stderr, "    <command> can be 'sine:[(i|f),]<channels>,<frequency>,<samplerate>'\n");
+    fprintf(stderr, "                     'chirp:[(i|f),]<channels>,<samplerate>'\n");
 }
 
 static int writeFile(const char *filename, const void *buffer,
@@ -78,6 +78,18 @@
     return EXIT_SUCCESS;
 }
 
+const char *parseFormat(const char *s, bool *useFloat) {
+    if (!strncmp(s, "f,", 2)) {
+        *useFloat = true;
+        return s + 2;
+    }
+    if (!strncmp(s, "i,", 2)) {
+        *useFloat = false;
+        return s + 2;
+    }
+    return s;
+}
+
 int main(int argc, char* argv[]) {
     const char* const progname = argv[0];
     bool useInputFloat = false;
@@ -88,8 +100,9 @@
     std::vector<int> Pvalues;
     const char* outputFilename = NULL;
     const char* auxFilename = NULL;
-    std::vector<int32_t> Names;
-    std::vector<SignalProvider> Providers;
+    std::vector<int32_t> names;
+    std::vector<SignalProvider> providers;
+    std::vector<audio_format_t> formats;
 
     for (int ch; (ch = getopt(argc, argv, "fmc:s:o:a:P:")) != -1;) {
         switch (ch) {
@@ -138,54 +151,65 @@
     size_t outputFrames = 0;
 
     // create providers for each track
-    Providers.resize(argc);
+    names.resize(argc);
+    providers.resize(argc);
+    formats.resize(argc);
     for (int i = 0; i < argc; ++i) {
         static const char chirp[] = "chirp:";
         static const char sine[] = "sine:";
         static const double kSeconds = 1;
+        bool useFloat = useInputFloat;
 
         if (!strncmp(argv[i], chirp, strlen(chirp))) {
             std::vector<int> v;
+            const char *s = parseFormat(argv[i] + strlen(chirp), &useFloat);
 
-            parseCSV(argv[i] + strlen(chirp), v);
+            parseCSV(s, v);
             if (v.size() == 2) {
                 printf("creating chirp(%d %d)\n", v[0], v[1]);
-                if (useInputFloat) {
-                    Providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+                if (useFloat) {
+                    providers[i].setChirp<float>(v[0], 0, v[1]/2, v[1], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_FLOAT;
                 } else {
-                    Providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+                    providers[i].setChirp<int16_t>(v[0], 0, v[1]/2, v[1], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_16_BIT;
                 }
-                Providers[i].setIncr(Pvalues);
+                providers[i].setIncr(Pvalues);
             } else {
                 fprintf(stderr, "malformed input '%s'\n", argv[i]);
             }
         } else if (!strncmp(argv[i], sine, strlen(sine))) {
             std::vector<int> v;
+            const char *s = parseFormat(argv[i] + strlen(sine), &useFloat);
 
-            parseCSV(argv[i] + strlen(sine), v);
+            parseCSV(s, v);
             if (v.size() == 3) {
                 printf("creating sine(%d %d %d)\n", v[0], v[1], v[2]);
-                if (useInputFloat) {
-                    Providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+                if (useFloat) {
+                    providers[i].setSine<float>(v[0], v[1], v[2], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_FLOAT;
                 } else {
-                    Providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+                    providers[i].setSine<int16_t>(v[0], v[1], v[2], kSeconds);
+                    formats[i] = AUDIO_FORMAT_PCM_16_BIT;
                 }
-                Providers[i].setIncr(Pvalues);
+                providers[i].setIncr(Pvalues);
             } else {
                 fprintf(stderr, "malformed input '%s'\n", argv[i]);
             }
         } else {
             printf("creating filename(%s)\n", argv[i]);
             if (useInputFloat) {
-                Providers[i].setFile<float>(argv[i]);
+                providers[i].setFile<float>(argv[i]);
+                formats[i] = AUDIO_FORMAT_PCM_FLOAT;
             } else {
-                Providers[i].setFile<short>(argv[i]);
+                providers[i].setFile<short>(argv[i]);
+                formats[i] = AUDIO_FORMAT_PCM_16_BIT;
             }
-            Providers[i].setIncr(Pvalues);
+            providers[i].setIncr(Pvalues);
         }
         // calculate the number of output frames
-        size_t nframes = (int64_t) Providers[i].getNumFrames() * outputSampleRate
-                / Providers[i].getSampleRate();
+        size_t nframes = (int64_t) providers[i].getNumFrames() * outputSampleRate
+                / providers[i].getSampleRate();
         if (i == 0 || outputFrames > nframes) { // choose minimum for outputFrames
             outputFrames = nframes;
         }
@@ -213,22 +237,20 @@
     // create the mixer.
     const size_t mixerFrameCount = 320; // typical numbers may range from 240 to 960
     AudioMixer *mixer = new AudioMixer(mixerFrameCount, outputSampleRate);
-    audio_format_t inputFormat = useInputFloat
-            ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
     audio_format_t mixerFormat = useMixerFloat
             ? AUDIO_FORMAT_PCM_FLOAT : AUDIO_FORMAT_PCM_16_BIT;
-    float f = AudioMixer::UNITY_GAIN_FLOAT / Providers.size(); // normalize volume by # tracks
+    float f = AudioMixer::UNITY_GAIN_FLOAT / providers.size(); // normalize volume by # tracks
     static float f0; // zero
 
     // set up the tracks.
-    for (size_t i = 0; i < Providers.size(); ++i) {
-        //printf("track %d out of %d\n", i, Providers.size());
-        uint32_t channelMask = audio_channel_out_mask_from_count(Providers[i].getNumChannels());
+    for (size_t i = 0; i < providers.size(); ++i) {
+        //printf("track %d out of %d\n", i, providers.size());
+        uint32_t channelMask = audio_channel_out_mask_from_count(providers[i].getNumChannels());
         int32_t name = mixer->getTrackName(channelMask,
-                inputFormat, AUDIO_SESSION_OUTPUT_MIX);
+                formats[i], AUDIO_SESSION_OUTPUT_MIX);
         ALOG_ASSERT(name >= 0);
-        Names.push_back(name);
-        mixer->setBufferProvider(name, &Providers[i]);
+        names[i] = name;
+        mixer->setBufferProvider(name, &providers[i]);
         mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                 (void *)outputAddr);
         mixer->setParameter(
@@ -240,7 +262,7 @@
                 name,
                 AudioMixer::TRACK,
                 AudioMixer::FORMAT,
-                (void *)(uintptr_t)inputFormat);
+                (void *)(uintptr_t)formats[i]);
         mixer->setParameter(
                 name,
                 AudioMixer::TRACK,
@@ -255,7 +277,7 @@
                 name,
                 AudioMixer::RESAMPLE,
                 AudioMixer::SAMPLE_RATE,
-                (void *)(uintptr_t)Providers[i].getSampleRate());
+                (void *)(uintptr_t)providers[i].getSampleRate());
         if (useRamp) {
             mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0, &f0);
             mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1, &f0);
@@ -277,11 +299,11 @@
     // pump the mixer to process data.
     size_t i;
     for (i = 0; i < outputFrames - mixerFrameCount; i += mixerFrameCount) {
-        for (size_t j = 0; j < Names.size(); ++j) {
-            mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
+        for (size_t j = 0; j < names.size(); ++j) {
+            mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                     (char *) outputAddr + i * outputFrameSize);
             if (auxFilename) {
-                mixer->setParameter(Names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
+                mixer->setParameter(names[j], AudioMixer::TRACK, AudioMixer::AUX_BUFFER,
                         (char *) auxAddr + i * auxFrameSize);
             }
         }
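
test-mixer.cpp now accepts an optional per-track format prefix ('i,' or 'f,') in sine:/chirp: specs, parsed by the parseFormat() helper added above. The sketch below reuses that helper verbatim and pairs it with a simplified stand-in for parseCSV() to show how a spec such as sine:f,6,6000,19000 splits into a format flag plus channels, frequency and sample rate.

    // parseFormat() is copied from the hunk above; parseCSV() and main() are
    // simplified stand-ins for the real tool's parsing and option handling.
    #include <cstdio>
    #include <cstdlib>
    #include <cstring>
    #include <vector>

    static const char *parseFormat(const char *s, bool *useFloat) {
        if (!strncmp(s, "f,", 2)) { *useFloat = true;  return s + 2; }
        if (!strncmp(s, "i,", 2)) { *useFloat = false; return s + 2; }
        return s;  // no prefix: keep the command-line default
    }

    static void parseCSV(const char *s, std::vector<int> &v) {
        for (const char *p = s; *p != '\0'; ) {
            v.push_back(atoi(p));
            const char *c = strchr(p, ',');
            if (c == nullptr) break;
            p = c + 1;
        }
    }

    int main() {
        bool useFloat = false;                 // would come from the -f option
        const char *spec = "f,6,6000,19000";   // payload of "sine:f,6,6000,19000"
        std::vector<int> v;
        parseCSV(parseFormat(spec, &useFloat), v);
        printf("float=%d channels=%d freq=%d rate=%d\n", useFloat, v[0], v[1], v[2]);
        return 0;
    }
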
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 188fc89..5b38e1c 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -3,25 +3,27 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-    AudioPolicyService.cpp \
-    AudioPolicyEffects.cpp
+    service/AudioPolicyService.cpp \
+    service/AudioPolicyEffects.cpp
 
 ifeq ($(USE_LEGACY_AUDIO_POLICY), 1)
 LOCAL_SRC_FILES += \
-    AudioPolicyInterfaceImplLegacy.cpp \
-    AudioPolicyClientImplLegacy.cpp
+    service/AudioPolicyInterfaceImplLegacy.cpp \
+    service/AudioPolicyClientImplLegacy.cpp
 
     LOCAL_CFLAGS += -DUSE_LEGACY_AUDIO_POLICY
 else
 LOCAL_SRC_FILES += \
-    AudioPolicyInterfaceImpl.cpp \
-    AudioPolicyClientImpl.cpp
+    service/AudioPolicyInterfaceImpl.cpp \
+    service/AudioPolicyClientImpl.cpp
 endif
 
 LOCAL_C_INCLUDES := \
     $(TOPDIR)frameworks/av/services/audioflinger \
     $(call include-path-for, audio-effects) \
-    $(call include-path-for, audio-utils)
+    $(call include-path-for, audio-utils) \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -39,7 +41,8 @@
 endif
 
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper
+    libmedia_helper \
+    libaudiopolicycomponents
 
 LOCAL_MODULE:= libaudiopolicyservice
 
@@ -53,7 +56,7 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-    AudioPolicyManager.cpp
+    managerdefault/AudioPolicyManager.cpp \
 
 LOCAL_SHARED_LIBRARIES := \
     libcutils \
@@ -61,8 +64,30 @@
     liblog \
     libsoundtrigger
 
+ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
+
+LOCAL_REQUIRED_MODULES := \
+    parameter-framework.policy \
+    audio_policy_criteria.conf \
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
+
+LOCAL_SHARED_LIBRARIES += libaudiopolicyengineconfigurable
+
+else
+
+LOCAL_SHARED_LIBRARIES += libaudiopolicyenginedefault
+
+endif
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+
 LOCAL_STATIC_LIBRARIES := \
-    libmedia_helper
+    libmedia_helper \
+    libaudiopolicycomponents
 
 LOCAL_MODULE:= libaudiopolicymanagerdefault
 
@@ -73,14 +98,26 @@
 include $(CLEAR_VARS)
 
 LOCAL_SRC_FILES:= \
-    AudioPolicyFactory.cpp
+    manager/AudioPolicyFactory.cpp
 
 LOCAL_SHARED_LIBRARIES := \
     libaudiopolicymanagerdefault
 
+LOCAL_STATIC_LIBRARIES := \
+    libaudiopolicycomponents
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface \
+
 LOCAL_MODULE:= libaudiopolicymanager
 
 include $(BUILD_SHARED_LIBRARY)
 
 endif
 endif
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 4508fa7..c1e7bc0 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -64,6 +64,7 @@
         API_INPUT_MIX_EXT_POLICY_REROUTE,// used for platform audio rerouting, where mixes are
                                          // handled by external and dynamically installed
                                          // policies which reroute audio mixes
+        API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
     } input_type_t;
 
 public:
@@ -75,7 +76,8 @@
     // indicate a change in device connection status
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
-                                          const char *device_address) = 0;
+                                              const char *device_address,
+                                              const char *device_name) = 0;
     // retrieve a device connection status
     virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
                                                                           const char *device_address) = 0;
@@ -105,10 +107,12 @@
                                         audio_io_handle_t *output,
                                         audio_session_t session,
                                         audio_stream_type_t *stream,
+                                        uid_t uid,
                                         uint32_t samplingRate,
                                         audio_format_t format,
                                         audio_channel_mask_t channelMask,
                                         audio_output_flags_t flags,
+                                        int selectedDeviceId,
                                         const audio_offload_info_t *offloadInfo) = 0;
     // indicates to the audio policy manager that the output starts being used by corresponding stream.
     virtual status_t startOutput(audio_io_handle_t output,
@@ -127,10 +131,12 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
                                      audio_input_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId,
                                      input_type_t *inputType) = 0;
     // indicates to the audio policy manager that the input starts being used.
     virtual status_t startInput(audio_io_handle_t input,
@@ -206,7 +212,7 @@
                                       struct audio_patch *patches,
                                       unsigned int *generation) = 0;
     virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
-    virtual void clearAudioPatches(uid_t uid) = 0;
+    virtual void releaseResourcesForUid(uid_t uid) = 0;
 
     virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                            audio_io_handle_t *ioHandle,
@@ -216,6 +222,11 @@
 
     virtual status_t registerPolicyMixes(Vector<AudioMix> mixes) = 0;
     virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes) = 0;
+
+    virtual status_t startAudioSource(const struct audio_port_config *source,
+                                      const audio_attributes_t *attributes,
+                                      audio_io_handle_t *handle) = 0;
+    virtual status_t stopAudioSource(audio_io_handle_t handle) = 0;
 };
 
 
@@ -318,6 +329,8 @@
     virtual void onAudioPatchListUpdate() = 0;
 
     virtual audio_unique_id_t newAudioUniqueId() = 0;
+
+    virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
 };
 
 extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/AudioPolicyManager.cpp b/services/audiopolicy/AudioPolicyManager.cpp
deleted file mode 100644
index 6ebd0ed..0000000
--- a/services/audiopolicy/AudioPolicyManager.cpp
+++ /dev/null
@@ -1,8098 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "AudioPolicyManager"
-//#define LOG_NDEBUG 0
-
-//#define VERY_VERBOSE_LOGGING
-#ifdef VERY_VERBOSE_LOGGING
-#define ALOGVV ALOGV
-#else
-#define ALOGVV(a...) do { } while(0)
-#endif
-
-// A device mask for all audio input devices that are considered "virtual" when evaluating
-// active inputs in getActiveInput()
-#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
-// A device mask for all audio output devices that are considered "remote" when evaluating
-// active output devices in isStreamActiveRemotely()
-#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
-// A device mask for all audio input and output devices where matching inputs/outputs on device
-// type alone is not enough: the address must match too
-#define APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX | \
-                                            AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
-
-#include <inttypes.h>
-#include <math.h>
-
-#include <cutils/properties.h>
-#include <utils/Log.h>
-#include <hardware/audio.h>
-#include <hardware/audio_effect.h>
-#include <media/AudioParameter.h>
-#include <media/AudioPolicyHelper.h>
-#include <soundtrigger/SoundTrigger.h>
-#include "AudioPolicyManager.h"
-#include "audio_policy_conf.h"
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-// Definitions for audio_policy.conf file parsing
-// ----------------------------------------------------------------------------
-
-struct StringToEnum {
-    const char *name;
-    uint32_t value;
-};
-
-#define STRING_TO_ENUM(string) { #string, string }
-#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
-
-const StringToEnum sDeviceNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_LINE),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPDIF),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
-    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_AMBIENT),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_HDMI),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_LINE),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_SPDIF),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
-    STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
-};
-
-const StringToEnum sOutputFlagNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
-    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
-};
-
-const StringToEnum sInputFlagNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_INPUT_FLAG_FAST),
-    STRING_TO_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
-};
-
-const StringToEnum sFormatNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT),
-    STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
-    STRING_TO_ENUM(AUDIO_FORMAT_MP3),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_MAIN),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LC),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_SSR),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LTP),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_HE_V1),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_ERLC),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LD),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_HE_V2),
-    STRING_TO_ENUM(AUDIO_FORMAT_AAC_ELD),
-    STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
-    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V1),
-    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V2),
-    STRING_TO_ENUM(AUDIO_FORMAT_OPUS),
-    STRING_TO_ENUM(AUDIO_FORMAT_AC3),
-    STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
-};
-
-const StringToEnum sOutChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_QUAD),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
-    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
-};
-
-const StringToEnum sInChannelsNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
-    STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
-};
-
-const StringToEnum sGainModeNameToEnumTable[] = {
-    STRING_TO_ENUM(AUDIO_GAIN_MODE_JOINT),
-    STRING_TO_ENUM(AUDIO_GAIN_MODE_CHANNELS),
-    STRING_TO_ENUM(AUDIO_GAIN_MODE_RAMP),
-};
-
-
-uint32_t AudioPolicyManager::stringToEnum(const struct StringToEnum *table,
-                                              size_t size,
-                                              const char *name)
-{
-    for (size_t i = 0; i < size; i++) {
-        if (strcmp(table[i].name, name) == 0) {
-            ALOGV("stringToEnum() found %s", table[i].name);
-            return table[i].value;
-        }
-    }
-    return 0;
-}
-
-const char *AudioPolicyManager::enumToString(const struct StringToEnum *table,
-                                              size_t size,
-                                              uint32_t value)
-{
-    for (size_t i = 0; i < size; i++) {
-        if (table[i].value == value) {
-            return table[i].name;
-        }
-    }
-    return "";
-}
-
-bool AudioPolicyManager::stringToBool(const char *value)
-{
-    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
-}
-
-
-// ----------------------------------------------------------------------------
-// AudioPolicyInterface implementation
-// ----------------------------------------------------------------------------
-
-status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
-                                                          audio_policy_dev_state_t state,
-                                                  const char *device_address)
-{
-    return setDeviceConnectionStateInt(device, state, device_address);
-}
-
-status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
-                                                         audio_policy_dev_state_t state,
-                                                         const char *device_address)
-{
-    ALOGV("setDeviceConnectionState() device: %x, state %d, address %s",
-            device, state, device_address != NULL ? device_address : "");
-
-    // connect/disconnect only 1 device at a time
-    if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
-
-    sp<DeviceDescriptor> devDesc = getDeviceDescriptor(device, device_address);
-
-    // handle output devices
-    if (audio_is_output_device(device)) {
-        SortedVector <audio_io_handle_t> outputs;
-
-        ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
-
-        // save a copy of the opened output descriptors before any output is opened or closed
-        // by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
-        mPreviousOutputs = mOutputs;
-        switch (state)
-        {
-        // handle output device connection
-        case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
-            if (index >= 0) {
-                ALOGW("setDeviceConnectionState() device already connected: %x", device);
-                return INVALID_OPERATION;
-            }
-            ALOGV("setDeviceConnectionState() connecting device %x", device);
-
-            // register new device as available
-            index = mAvailableOutputDevices.add(devDesc);
-            if (index >= 0) {
-                sp<HwModule> module = getModuleForDevice(device);
-                if (module == 0) {
-                    ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
-                          device);
-                    mAvailableOutputDevices.remove(devDesc);
-                    return INVALID_OPERATION;
-                }
-                mAvailableOutputDevices[index]->mId = nextUniqueId();
-                mAvailableOutputDevices[index]->mModule = module;
-            } else {
-                return NO_MEMORY;
-            }
-
-            if (checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress) != NO_ERROR) {
-                mAvailableOutputDevices.remove(devDesc);
-                return INVALID_OPERATION;
-            }
-            // outputs should never be empty here
-            ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
-                    "checkOutputsForDevice() returned no outputs but status OK");
-            ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
-                  outputs.size());
-
-
-            // Set connect to HALs
-            AudioParameter param = AudioParameter(devDesc->mAddress);
-            param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
-            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
-
-            } break;
-        // handle output device disconnection
-        case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
-            if (index < 0) {
-                ALOGW("setDeviceConnectionState() device not connected: %x", device);
-                return INVALID_OPERATION;
-            }
-
-            ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
-
-            // Set Disconnect to HALs
-            AudioParameter param = AudioParameter(devDesc->mAddress);
-            param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
-            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
-
-            // remove device from available output devices
-            mAvailableOutputDevices.remove(devDesc);
-
-            checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
-            } break;
-
-        default:
-            ALOGE("setDeviceConnectionState() invalid state: %x", state);
-            return BAD_VALUE;
-        }
-
-        // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
-        // output is suspended before any tracks are moved to it
-        checkA2dpSuspend();
-        checkOutputForAllStrategies();
-        // outputs must be closed after checkOutputForAllStrategies() is executed
-        if (!outputs.isEmpty()) {
-            for (size_t i = 0; i < outputs.size(); i++) {
-                sp<AudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
-                // close unused outputs after device disconnection or direct outputs that have been
-                // opened by checkOutputsForDevice() to query dynamic parameters
-                if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
-                        (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
-                         (desc->mDirectOpenCount == 0))) {
-                    closeOutput(outputs[i]);
-                }
-            }
-            // check again after closing A2DP output to reset mA2dpSuspended if needed
-            checkA2dpSuspend();
-        }
-
-        updateDevicesAndOutputs();
-        if (mPhoneState == AUDIO_MODE_IN_CALL) {
-            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
-            updateCallRouting(newDevice);
-        }
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            audio_io_handle_t output = mOutputs.keyAt(i);
-            if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
-                audio_devices_t newDevice = getNewOutputDevice(mOutputs.keyAt(i),
-                                                               true /*fromCache*/);
-                // do not force device change on duplicated output because if device is 0, it will
-                // also force a device 0 for the two outputs it is duplicated to which may override
-                // a valid device selection on those outputs.
-                bool force = !mOutputs.valueAt(i)->isDuplicated()
-                        && (!deviceDistinguishesOnAddress(device)
-                                // always force when disconnecting (a non-duplicated device)
-                                || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
-                setOutputDevice(output, newDevice, force, 0);
-            }
-        }
-
-        mpClientInterface->onAudioPortListUpdate();
-        return NO_ERROR;
-    }  // end if is output device
-
-    // handle input devices
-    if (audio_is_input_device(device)) {
-        SortedVector <audio_io_handle_t> inputs;
-
-        ssize_t index = mAvailableInputDevices.indexOf(devDesc);
-        switch (state)
-        {
-        // handle input device connection
-        case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
-            if (index >= 0) {
-                ALOGW("setDeviceConnectionState() device already connected: %d", device);
-                return INVALID_OPERATION;
-            }
-            sp<HwModule> module = getModuleForDevice(device);
-            if (module == NULL) {
-                ALOGW("setDeviceConnectionState(): could not find HW module for device %08x",
-                      device);
-                return INVALID_OPERATION;
-            }
-            if (checkInputsForDevice(device, state, inputs, devDesc->mAddress) != NO_ERROR) {
-                return INVALID_OPERATION;
-            }
-
-            index = mAvailableInputDevices.add(devDesc);
-            if (index >= 0) {
-                mAvailableInputDevices[index]->mId = nextUniqueId();
-                mAvailableInputDevices[index]->mModule = module;
-            } else {
-                return NO_MEMORY;
-            }
-
-            // Set connect to HALs
-            AudioParameter param = AudioParameter(devDesc->mAddress);
-            param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
-            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
-
-        } break;
-
-        // handle input device disconnection
-        case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
-            if (index < 0) {
-                ALOGW("setDeviceConnectionState() device not connected: %d", device);
-                return INVALID_OPERATION;
-            }
-
-            ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
-
-            // Set Disconnect to HALs
-            AudioParameter param = AudioParameter(devDesc->mAddress);
-            param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
-            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
-
-            checkInputsForDevice(device, state, inputs, devDesc->mAddress);
-            mAvailableInputDevices.remove(devDesc);
-
-        } break;
-
-        default:
-            ALOGE("setDeviceConnectionState() invalid state: %x", state);
-            return BAD_VALUE;
-        }
-
-        closeAllInputs();
-
-        if (mPhoneState == AUDIO_MODE_IN_CALL) {
-            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
-            updateCallRouting(newDevice);
-        }
-
-        mpClientInterface->onAudioPortListUpdate();
-        return NO_ERROR;
-    } // end if is input device
-
-    ALOGW("setDeviceConnectionState() invalid device: %x", device);
-    return BAD_VALUE;
-}
-
-audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
-                                                  const char *device_address)
-{
-    sp<DeviceDescriptor> devDesc = getDeviceDescriptor(device, device_address);
-    DeviceVector *deviceVector;
-
-    if (audio_is_output_device(device)) {
-        deviceVector = &mAvailableOutputDevices;
-    } else if (audio_is_input_device(device)) {
-        deviceVector = &mAvailableInputDevices;
-    } else {
-        ALOGW("getDeviceConnectionState() invalid device type %08x", device);
-        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-    }
-
-    ssize_t index = deviceVector->indexOf(devDesc);
-    if (index >= 0) {
-        return AUDIO_POLICY_DEVICE_STATE_AVAILABLE;
-    } else {
-        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
-    }
-}
-
-sp<AudioPolicyManager::DeviceDescriptor>  AudioPolicyManager::getDeviceDescriptor(
-                                                                    const audio_devices_t device,
-                                                                    const char *device_address)
-{
-    String8 address = (device_address == NULL) ? String8("") : String8(device_address);
-    // handle legacy remote submix case where the address was not always specified
-    if (deviceDistinguishesOnAddress(device) && (address.length() == 0)) {
-        address = String8("0");
-    }
-
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (mHwModules[i]->mHandle == 0) {
-            continue;
-        }
-        DeviceVector deviceList =
-                mHwModules[i]->mDeclaredDevices.getDevicesFromTypeAddr(device, address);
-        if (!deviceList.isEmpty()) {
-            return deviceList.itemAt(0);
-        }
-        deviceList = mHwModules[i]->mDeclaredDevices.getDevicesFromType(device);
-        if (!deviceList.isEmpty()) {
-            return deviceList.itemAt(0);
-        }
-    }
-
-    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(String8(""), device);
-    devDesc->mAddress = address;
-    return devDesc;
-}
-
-void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs)
-{
-    bool createTxPatch = false;
-    struct audio_patch patch;
-    patch.num_sources = 1;
-    patch.num_sinks = 1;
-    status_t status;
-    audio_patch_handle_t afPatchHandle;
-    DeviceVector deviceList;
-
-    audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
-    ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
-
-    // release existing RX patch if any
-    if (mCallRxPatch != 0) {
-        mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
-        mCallRxPatch.clear();
-    }
-    // release TX patch if any
-    if (mCallTxPatch != 0) {
-        mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
-        mCallTxPatch.clear();
-    }
-
-    // If the RX device is on the primary HW module, then use legacy routing method for voice calls
-    // via setOutputDevice() on primary output.
-    // Otherwise, create two audio patches for TX and RX path.
-    if (availablePrimaryOutputDevices() & rxDevice) {
-        setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
-        // If the TX device is also on the primary HW module, setOutputDevice() will take care
-        // of it due to legacy implementation. If not, create a patch.
-        if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
-                == AUDIO_DEVICE_NONE) {
-            createTxPatch = true;
-        }
-    } else {
-        // create RX path audio patch
-        deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
-        ALOG_ASSERT(!deviceList.isEmpty(),
-                    "updateCallRouting() selected device not in output device list");
-        sp<DeviceDescriptor> rxSinkDeviceDesc = deviceList.itemAt(0);
-        deviceList = mAvailableInputDevices.getDevicesFromType(AUDIO_DEVICE_IN_TELEPHONY_RX);
-        ALOG_ASSERT(!deviceList.isEmpty(),
-                    "updateCallRouting() no telephony RX device");
-        sp<DeviceDescriptor> rxSourceDeviceDesc = deviceList.itemAt(0);
-
-        rxSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
-        rxSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
-
-        // request to reuse existing output stream if one is already opened to reach the RX device
-        SortedVector<audio_io_handle_t> outputs =
-                                getOutputsForDevice(rxDevice, mOutputs);
-        audio_io_handle_t output = selectOutput(outputs,
-                                                AUDIO_OUTPUT_FLAG_NONE,
-                                                AUDIO_FORMAT_INVALID);
-        if (output != AUDIO_IO_HANDLE_NONE) {
-            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-            ALOG_ASSERT(!outputDesc->isDuplicated(),
-                        "updateCallRouting() RX device output is duplicated");
-            outputDesc->toAudioPortConfig(&patch.sources[1]);
-            patch.num_sources = 2;
-        }
-
-        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
-        ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
-                                               status);
-        if (status == NO_ERROR) {
-            mCallRxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                       &patch, mUidCached);
-            mCallRxPatch->mAfPatchHandle = afPatchHandle;
-            mCallRxPatch->mUid = mUidCached;
-        }
-        createTxPatch = true;
-    }
-    if (createTxPatch) {
-
-        struct audio_patch patch;
-        patch.num_sources = 1;
-        patch.num_sinks = 1;
-        deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
-        ALOG_ASSERT(!deviceList.isEmpty(),
-                    "updateCallRouting() selected device not in input device list");
-        sp<DeviceDescriptor> txSourceDeviceDesc = deviceList.itemAt(0);
-        txSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
-        deviceList = mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
-        ALOG_ASSERT(!deviceList.isEmpty(),
-                    "updateCallRouting() no telephony TX device");
-        sp<DeviceDescriptor> txSinkDeviceDesc = deviceList.itemAt(0);
-        txSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
-
-        SortedVector<audio_io_handle_t> outputs =
-                                getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
-        audio_io_handle_t output = selectOutput(outputs,
-                                                AUDIO_OUTPUT_FLAG_NONE,
-                                                AUDIO_FORMAT_INVALID);
-        // request to reuse existing output stream if one is already opened to reach the TX
-        // path output device
-        if (output != AUDIO_IO_HANDLE_NONE) {
-            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-            ALOG_ASSERT(!outputDesc->isDuplicated(),
-                        "updateCallRouting() RX device output is duplicated");
-            outputDesc->toAudioPortConfig(&patch.sources[1]);
-            patch.num_sources = 2;
-        }
-
-        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
-        ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
-                                               status);
-        if (status == NO_ERROR) {
-            mCallTxPatch = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                       &patch, mUidCached);
-            mCallTxPatch->mAfPatchHandle = afPatchHandle;
-            mCallTxPatch->mUid = mUidCached;
-        }
-    }
-}
-
-void AudioPolicyManager::setPhoneState(audio_mode_t state)
-{
-    ALOGV("setPhoneState() state %d", state);
-    if (state < 0 || state >= AUDIO_MODE_CNT) {
-        ALOGW("setPhoneState() invalid state %d", state);
-        return;
-    }
-
-    if (state == mPhoneState ) {
-        ALOGW("setPhoneState() setting same state %d", state);
-        return;
-    }
-
-    // if leaving call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
-    if (isInCall()) {
-        ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-            if (stream == AUDIO_STREAM_PATCH) {
-                continue;
-            }
-            handleIncallSonification((audio_stream_type_t)stream, false, true);
-        }
-
-        // force reevaluating accessibility routing when call starts
-        mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
-    }
-
-    // store previous phone state for management of sonification strategy below
-    int oldState = mPhoneState;
-    mPhoneState = state;
-    bool force = false;
-
-    // are we entering or starting a call
-    if (!isStateInCall(oldState) && isStateInCall(state)) {
-        ALOGV("  Entering call in setPhoneState()");
-        // force routing command to audio hardware when starting a call
-        // even if no device change is needed
-        force = true;
-        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
-            mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] =
-                    sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j];
-        }
-    } else if (isStateInCall(oldState) && !isStateInCall(state)) {
-        ALOGV("  Exiting call in setPhoneState()");
-        // force routing command to audio hardware when exiting a call
-        // even if no device change is needed
-        force = true;
-        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
-            mStreams[AUDIO_STREAM_DTMF].mVolumeCurve[j] =
-                    sVolumeProfiles[AUDIO_STREAM_DTMF][j];
-        }
-    } else if (isStateInCall(state) && (state != oldState)) {
-        ALOGV("  Switching between telephony and VoIP in setPhoneState()");
-        // force routing command to audio hardware when switching between telephony and VoIP
-        // even if no device change is needed
-        force = true;
-    }
-
-    // check for device and output changes triggered by new phone state
-    checkA2dpSuspend();
-    checkOutputForAllStrategies();
-    updateDevicesAndOutputs();
-
-    sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
-
-    int delayMs = 0;
-    if (isStateInCall(state)) {
-        nsecs_t sysTime = systemTime();
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-            // mute media and sonification strategies and delay device switch by the largest
-            // latency of any output where either strategy is active.
-            // This avoid sending the ring tone or music tail into the earpiece or headset.
-            if ((desc->isStrategyActive(STRATEGY_MEDIA,
-                                     SONIFICATION_HEADSET_MUSIC_DELAY,
-                                     sysTime) ||
-                    desc->isStrategyActive(STRATEGY_SONIFICATION,
-                                         SONIFICATION_HEADSET_MUSIC_DELAY,
-                                         sysTime)) &&
-                    (delayMs < (int)desc->mLatency*2)) {
-                delayMs = desc->mLatency*2;
-            }
-            setStrategyMute(STRATEGY_MEDIA, true, mOutputs.keyAt(i));
-            setStrategyMute(STRATEGY_MEDIA, false, mOutputs.keyAt(i), MUTE_TIME_MS,
-                getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
-            setStrategyMute(STRATEGY_SONIFICATION, true, mOutputs.keyAt(i));
-            setStrategyMute(STRATEGY_SONIFICATION, false, mOutputs.keyAt(i), MUTE_TIME_MS,
-                getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
-        }
-    }
-
-    // Note that despite the fact that getNewOutputDevice() is called on the primary output,
-    // the device returned is not necessarily reachable via this output
-    audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
-    // force routing command to audio hardware when ending call
-    // even if no device change is needed
-    if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
-        rxDevice = hwOutputDesc->device();
-    }
-
-    if (state == AUDIO_MODE_IN_CALL) {
-        updateCallRouting(rxDevice, delayMs);
-    } else if (oldState == AUDIO_MODE_IN_CALL) {
-        if (mCallRxPatch != 0) {
-            mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
-            mCallRxPatch.clear();
-        }
-        if (mCallTxPatch != 0) {
-            mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
-            mCallTxPatch.clear();
-        }
-        setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
-    } else {
-        setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
-    }
-    // if entering in call state, handle special case of active streams
-    // pertaining to sonification strategy see handleIncallSonification()
-    if (isStateInCall(state)) {
-        ALOGV("setPhoneState() in call state management: new state is %d", state);
-        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-            if (stream == AUDIO_STREAM_PATCH) {
-                continue;
-            }
-            handleIncallSonification((audio_stream_type_t)stream, true, true);
-        }
-    }
-
-    // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
-    if (state == AUDIO_MODE_RINGTONE &&
-        isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
-        mLimitRingtoneVolume = true;
-    } else {
-        mLimitRingtoneVolume = false;
-    }
-}
-
-void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
-                                         audio_policy_forced_cfg_t config)
-{
-    ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mPhoneState);
-
-    bool forceVolumeReeval = false;
-    switch(usage) {
-    case AUDIO_POLICY_FORCE_FOR_COMMUNICATION:
-        if (config != AUDIO_POLICY_FORCE_SPEAKER && config != AUDIO_POLICY_FORCE_BT_SCO &&
-            config != AUDIO_POLICY_FORCE_NONE) {
-            ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
-            return;
-        }
-        forceVolumeReeval = true;
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_MEDIA:
-        if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
-            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
-            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
-            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_NO_BT_A2DP && config != AUDIO_POLICY_FORCE_SPEAKER ) {
-            ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
-            return;
-        }
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_RECORD:
-        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
-            config != AUDIO_POLICY_FORCE_NONE) {
-            ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
-            return;
-        }
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_DOCK:
-        if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
-            config != AUDIO_POLICY_FORCE_BT_DESK_DOCK &&
-            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
-            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
-            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
-            ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
-        }
-        forceVolumeReeval = true;
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_SYSTEM:
-        if (config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-            ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
-        }
-        forceVolumeReeval = true;
-        mForceUse[usage] = config;
-        break;
-    case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
-        if (config != AUDIO_POLICY_FORCE_NONE &&
-            config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
-            ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
-        }
-        mForceUse[usage] = config;
-        break;
-    default:
-        ALOGW("setForceUse() invalid usage %d", usage);
-        break;
-    }
-
-    // check for device and output changes triggered by new force usage
-    checkA2dpSuspend();
-    checkOutputForAllStrategies();
-    updateDevicesAndOutputs();
-    if (mPhoneState == AUDIO_MODE_IN_CALL) {
-        audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
-        updateCallRouting(newDevice);
-    }
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        audio_io_handle_t output = mOutputs.keyAt(i);
-        audio_devices_t newDevice = getNewOutputDevice(output, true /*fromCache*/);
-        if ((mPhoneState != AUDIO_MODE_IN_CALL) || (output != mPrimaryOutput)) {
-            setOutputDevice(output, newDevice, (newDevice != AUDIO_DEVICE_NONE));
-        }
-        if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
-            applyStreamVolumes(output, newDevice, 0, true);
-        }
-    }
-
-    audio_io_handle_t activeInput = getActiveInput();
-    if (activeInput != 0) {
-        setInputDevice(activeInput, getNewInputDevice(activeInput));
-    }
-
-}
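-
-// A minimal illustrative sketch (hypothetical caller, not exercised in this file) of the
-// setForceUse() API above: force media routing to the speaker, then clear the override.
-// "manager" stands for a hypothetical AudioPolicyManager instance.
-//
-//     manager->setForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA, AUDIO_POLICY_FORCE_SPEAKER);
-//     // ... speaker is now forced for the media strategy ...
-//     manager->setForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA, AUDIO_POLICY_FORCE_NONE);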
-
-audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
-{
-    return mForceUse[usage];
-}
-
-void AudioPolicyManager::setSystemProperty(const char* property, const char* value)
-{
-    ALOGV("setSystemProperty() property %s, value %s", property, value);
-}
-
-// Find a direct output profile compatible with the parameters passed, even if the input flags do
-// not explicitly request a direct output
-sp<AudioPolicyManager::IOProfile> AudioPolicyManager::getProfileForDirectOutput(
-                                                               audio_devices_t device,
-                                                               uint32_t samplingRate,
-                                                               audio_format_t format,
-                                                               audio_channel_mask_t channelMask,
-                                                               audio_output_flags_t flags)
-{
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (mHwModules[i]->mHandle == 0) {
-            continue;
-        }
-        for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
-            sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-            bool found = profile->isCompatibleProfile(device, String8(""), samplingRate,
-                    NULL /*updatedSamplingRate*/, format, channelMask,
-                    flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD ?
-                        AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD : AUDIO_OUTPUT_FLAG_DIRECT);
-            if (found && (mAvailableOutputDevices.types() & profile->mSupportedDevices.types())) {
-                return profile;
-            }
-        }
-    }
-    return 0;
-}
-
-audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
-                                    uint32_t samplingRate,
-                                    audio_format_t format,
-                                    audio_channel_mask_t channelMask,
-                                    audio_output_flags_t flags,
-                                    const audio_offload_info_t *offloadInfo)
-{
-    routing_strategy strategy = getStrategy(stream);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
-    ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
-          device, stream, samplingRate, format, channelMask, flags);
-
-    return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE,
-                              stream, samplingRate, format, channelMask,
-                              flags, offloadInfo);
-}
-
-status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
-                                              audio_io_handle_t *output,
-                                              audio_session_t session,
-                                              audio_stream_type_t *stream,
-                                              uint32_t samplingRate,
-                                              audio_format_t format,
-                                              audio_channel_mask_t channelMask,
-                                              audio_output_flags_t flags,
-                                              const audio_offload_info_t *offloadInfo)
-{
-    audio_attributes_t attributes;
-    if (attr != NULL) {
-        if (!isValidAttributes(attr)) {
-            ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
-                  attr->usage, attr->content_type, attr->flags,
-                  attr->tags);
-            return BAD_VALUE;
-        }
-        attributes = *attr;
-    } else {
-        if (*stream < AUDIO_STREAM_MIN || *stream >= AUDIO_STREAM_PUBLIC_CNT) {
-            ALOGE("getOutputForAttr():  invalid stream type");
-            return BAD_VALUE;
-        }
-        stream_type_to_audio_attributes(*stream, &attributes);
-    }
-
-    for (size_t i = 0; i < mPolicyMixes.size(); i++) {
-        sp<AudioOutputDescriptor> desc;
-        if (mPolicyMixes[i]->mMix.mMixType == MIX_TYPE_PLAYERS) {
-            for (size_t j = 0; j < mPolicyMixes[i]->mMix.mCriteria.size(); j++) {
-                if ((RULE_MATCH_ATTRIBUTE_USAGE == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                        mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mUsage == attributes.usage) ||
-                    (RULE_EXCLUDE_ATTRIBUTE_USAGE == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                        mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mUsage != attributes.usage)) {
-                    desc = mPolicyMixes[i]->mOutput;
-                    break;
-                }
-                if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                        strncmp(attributes.tags + strlen("addr="),
-                                mPolicyMixes[i]->mMix.mRegistrationId.string(),
-                                AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                    desc = mPolicyMixes[i]->mOutput;
-                    break;
-                }
-            }
-        } else if (mPolicyMixes[i]->mMix.mMixType == MIX_TYPE_RECORDERS) {
-            if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
-                    strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
-                    strncmp(attributes.tags + strlen("addr="),
-                            mPolicyMixes[i]->mMix.mRegistrationId.string(),
-                            AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
-                desc = mPolicyMixes[i]->mOutput;
-            }
-        }
-        if (desc != 0) {
-            if (!audio_is_linear_pcm(format)) {
-                return BAD_VALUE;
-            }
-            desc->mPolicyMix = &mPolicyMixes[i]->mMix;
-            *stream = streamTypefromAttributesInt(&attributes);
-            *output = desc->mIoHandle;
-            ALOGV("getOutputForAttr() returns output %d", *output);
-            return NO_ERROR;
-        }
-    }
-    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
-        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
-        return BAD_VALUE;
-    }
-
-    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x",
-            attributes.usage, attributes.content_type, attributes.tags, attributes.flags);
-
-    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
-
-    if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
-        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
-    }
-
-    ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
-          device, samplingRate, format, channelMask, flags);
-
-    *stream = streamTypefromAttributesInt(&attributes);
-    *output = getOutputForDevice(device, session, *stream,
-                                 samplingRate, format, channelMask,
-                                 flags, offloadInfo);
-    if (*output == AUDIO_IO_HANDLE_NONE) {
-        return INVALID_OPERATION;
-    }
-    return NO_ERROR;
-}
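-
-// A minimal sketch (illustrative, hypothetical values) of a getOutputForAttr() request that
-// targets a registered policy mix by tagging the attributes with the mix address, which is
-// what the "addr=" comparison above matches against mRegistrationId. "manager" and the
-// address string are hypothetical.
-//
-//     audio_attributes_t attr = {};
-//     attr.usage = AUDIO_USAGE_MEDIA;
-//     strlcpy(attr.tags, "addr=mix_address_0", AUDIO_ATTRIBUTES_TAGS_MAX_SIZE);
-//     audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-//     audio_stream_type_t stream = AUDIO_STREAM_MUSIC;
-//     status_t res = manager->getOutputForAttr(&attr, &output, AUDIO_SESSION_ALLOCATE, &stream,
-//                                              48000, AUDIO_FORMAT_PCM_16_BIT,
-//                                              AUDIO_CHANNEL_OUT_STEREO,
-//                                              AUDIO_OUTPUT_FLAG_NONE, NULL);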
-
-audio_io_handle_t AudioPolicyManager::getOutputForDevice(
-        audio_devices_t device,
-        audio_session_t session __unused,
-        audio_stream_type_t stream,
-        uint32_t samplingRate,
-        audio_format_t format,
-        audio_channel_mask_t channelMask,
-        audio_output_flags_t flags,
-        const audio_offload_info_t *offloadInfo)
-{
-    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-    uint32_t latency = 0;
-    status_t status;
-
-#ifdef AUDIO_POLICY_TEST
-    if (mCurOutput != 0) {
-        ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d",
-                mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
-
-        if (mTestOutputs[mCurOutput] == 0) {
-            ALOGV("getOutput() opening test output");
-            sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(NULL);
-            outputDesc->mDevice = mTestDevice;
-            outputDesc->mLatency = mTestLatencyMs;
-            outputDesc->mFlags =
-                    (audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0);
-            outputDesc->mRefCount[stream] = 0;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = mTestSamplingRate;
-            config.channel_mask = mTestChannels;
-            config.format = mTestFormat;
-            if (offloadInfo != NULL) {
-                config.offload_info = *offloadInfo;
-            }
-            status = mpClientInterface->openOutput(0,
-                                                  &mTestOutputs[mCurOutput],
-                                                  &config,
-                                                  &outputDesc->mDevice,
-                                                  String8(""),
-                                                  &outputDesc->mLatency,
-                                                  outputDesc->mFlags);
-            if (status == NO_ERROR) {
-                outputDesc->mSamplingRate = config.sample_rate;
-                outputDesc->mFormat = config.format;
-                outputDesc->mChannelMask = config.channel_mask;
-                AudioParameter outputCmd = AudioParameter();
-                outputCmd.addInt(String8("set_id"),mCurOutput);
-                mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
-                addOutput(mTestOutputs[mCurOutput], outputDesc);
-            }
-        }
-        return mTestOutputs[mCurOutput];
-    }
-#endif //AUDIO_POLICY_TEST
-
-    // open a direct output if required by specified parameters
-    // force direct flag if offload flag is set: offloading implies a direct output stream,
-    // and all common behaviors are driven by checking only the direct flag.
-    // This should normally be set appropriately in the policy configuration file.
-    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
-    }
-    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
-        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
-    }
-    // only allow deep buffering for music stream type
-    if (stream != AUDIO_STREAM_MUSIC) {
-        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
-    }
-
-    sp<IOProfile> profile;
-
-    // skip direct output selection if the request can obviously be attached to a mixed output
-    // and not explicitly requested
-    if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
-            audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE &&
-            audio_channel_count_from_out_mask(channelMask) <= 2) {
-        goto non_direct_output;
-    }
-
-    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
-    // creating an offloaded track and tearing it down immediately after start when audioflinger
-    // detects there is an active non offloadable effect.
-    // FIXME: We should check the audio session here but we do not have it in this context.
-    // This may prevent offloading in rare situations where effects are left active by apps
-    // in the background.
-
-    if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
-            !isNonOffloadableEffectEnabled()) {
-        profile = getProfileForDirectOutput(device,
-                                           samplingRate,
-                                           format,
-                                           channelMask,
-                                           (audio_output_flags_t)flags);
-    }
-
-    if (profile != 0) {
-        sp<AudioOutputDescriptor> outputDesc = NULL;
-
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated() && (profile == desc->mProfile)) {
-                outputDesc = desc;
-                // reuse direct output if currently open and configured with same parameters
-                if ((samplingRate == outputDesc->mSamplingRate) &&
-                        (format == outputDesc->mFormat) &&
-                        (channelMask == outputDesc->mChannelMask)) {
-                    outputDesc->mDirectOpenCount++;
-                    ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
-                    return mOutputs.keyAt(i);
-                }
-            }
-        }
-        // close direct output if currently open and configured with different parameters
-        if (outputDesc != NULL) {
-            closeOutput(outputDesc->mIoHandle);
-        }
-        outputDesc = new AudioOutputDescriptor(profile);
-        outputDesc->mDevice = device;
-        outputDesc->mLatency = 0;
-        outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
-        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-        config.sample_rate = samplingRate;
-        config.channel_mask = channelMask;
-        config.format = format;
-        if (offloadInfo != NULL) {
-            config.offload_info = *offloadInfo;
-        }
-        status = mpClientInterface->openOutput(profile->mModule->mHandle,
-                                               &output,
-                                               &config,
-                                               &outputDesc->mDevice,
-                                               String8(""),
-                                               &outputDesc->mLatency,
-                                               outputDesc->mFlags);
-
-        // only accept an output with the requested parameters
-        if (status != NO_ERROR ||
-            (samplingRate != 0 && samplingRate != config.sample_rate) ||
-            (format != AUDIO_FORMAT_DEFAULT && format != config.format) ||
-            (channelMask != 0 && channelMask != config.channel_mask)) {
-            ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
-                    "format %d %d, channelMask %04x %04x", output, samplingRate,
-                    outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
-                    outputDesc->mChannelMask);
-            if (output != AUDIO_IO_HANDLE_NONE) {
-                mpClientInterface->closeOutput(output);
-            }
-            // fall back to mixer output if possible when the direct output could not be opened
-            if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
-                goto non_direct_output;
-            }
-            return AUDIO_IO_HANDLE_NONE;
-        }
-        outputDesc->mSamplingRate = config.sample_rate;
-        outputDesc->mChannelMask = config.channel_mask;
-        outputDesc->mFormat = config.format;
-        outputDesc->mRefCount[stream] = 0;
-        outputDesc->mStopTime[stream] = 0;
-        outputDesc->mDirectOpenCount = 1;
-
-        audio_io_handle_t srcOutput = getOutputForEffect();
-        addOutput(output, outputDesc);
-        audio_io_handle_t dstOutput = getOutputForEffect();
-        if (dstOutput == output) {
-            mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput);
-        }
-        mPreviousOutputs = mOutputs;
-        ALOGV("getOutput() returns new direct output %d", output);
-        mpClientInterface->onAudioPortListUpdate();
-        return output;
-    }
-
-non_direct_output:
-
-    // ignoring channel mask due to downmix capability in mixer
-
-    // open a non direct output
-
-    // for non direct outputs, only PCM is supported
-    if (audio_is_linear_pcm(format)) {
-        // get which output is suitable for the specified stream. The actual
-        // routing change will happen when startOutput() will be called
-        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
-
-        // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
-        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
-        output = selectOutput(outputs, flags, format);
-    }
-    ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
-            "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
-
-    ALOGV("getOutput() returns output %d", output);
-
-    return output;
-}
-
-audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
-                                                       audio_output_flags_t flags,
-                                                       audio_format_t format)
-{
-    // select one output among several that provide a path to a particular device or set of
-    // devices (the list was previously built by getOutputsForDevice()).
-    // The priority is as follows:
-    // 1: the output with the highest number of requested policy flags
-    // 2: the primary output
-    // 3: the first output in the list
-
-    if (outputs.size() == 0) {
-        return 0;
-    }
-    if (outputs.size() == 1) {
-        return outputs[0];
-    }
-
-    int maxCommonFlags = 0;
-    audio_io_handle_t outputFlags = 0;
-    audio_io_handle_t outputPrimary = 0;
-
-    for (size_t i = 0; i < outputs.size(); i++) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
-        if (!outputDesc->isDuplicated()) {
-            // if a valid format is specified, skip output if not compatible
-            if (format != AUDIO_FORMAT_INVALID) {
-                if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-                    if (format != outputDesc->mFormat) {
-                        continue;
-                    }
-                } else if (!audio_is_linear_pcm(format)) {
-                    continue;
-                }
-            }
-
-            int commonFlags = popcount(outputDesc->mProfile->mFlags & flags);
-            if (commonFlags > maxCommonFlags) {
-                outputFlags = outputs[i];
-                maxCommonFlags = commonFlags;
-                ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
-            }
-            if (outputDesc->mProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
-                outputPrimary = outputs[i];
-            }
-        }
-    }
-
-    if (outputFlags != 0) {
-        return outputFlags;
-    }
-    if (outputPrimary != 0) {
-        return outputPrimary;
-    }
-
-    return outputs[0];
-}
-
-status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
-                                             audio_stream_type_t stream,
-                                             audio_session_t session)
-{
-    ALOGV("startOutput() output %d, stream %d, session %d", output, stream, session);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("startOutput() unknown output %d", output);
-        return BAD_VALUE;
-    }
-
-    // cannot start playback of STREAM_TTS if any other output is being used
-    uint32_t beaconMuteLatency = 0;
-    if (stream == AUDIO_STREAM_TTS) {
-        ALOGV("\t found BEACON stream");
-        if (isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
-            return INVALID_OPERATION;
-        } else {
-            beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
-        }
-    } else {
-        // some playback other than beacon starts
-        beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
-    }
-
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
-
-    // increment usage count for this stream on the requested output:
-    // NOTE that the usage count is the same for duplicated output and hardware output, which is
-    // necessary for correct control of hardware output routing by startOutput() and stopOutput()
-    outputDesc->changeRefCount(stream, 1);
-
-    if (outputDesc->mRefCount[stream] == 1) {
-        // starting an output being rerouted?
-        audio_devices_t newDevice;
-        if (outputDesc->mPolicyMix != NULL) {
-            newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
-        } else {
-            newDevice = getNewOutputDevice(output, false /*fromCache*/);
-        }
-        routing_strategy strategy = getStrategy(stream);
-        bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
-                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
-                            (beaconMuteLatency > 0);
-        uint32_t waitMs = beaconMuteLatency;
-        bool force = false;
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-            if (desc != outputDesc) {
-                // force a device change if any other output is managed by the same hw
-                // module and has a current device selection that differs from selected device.
-                // In this case, the audio HAL must receive the new device selection so that it can
-                // change the device currently selected by the other active output.
-                if (outputDesc->sharesHwModuleWith(desc) &&
-                    desc->device() != newDevice) {
-                    force = true;
-                }
-                // wait for audio on other active outputs to be presented when starting
-                // a notification, so that the audio focus effect can propagate, or when a
-                // beacon mute/unmute event has occurred
-                uint32_t latency = desc->latency();
-                if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
-                    waitMs = latency;
-                }
-            }
-        }
-        uint32_t muteWaitMs = setOutputDevice(output, newDevice, force);
-
-        // handle special case for sonification while in call
-        if (isInCall()) {
-            handleIncallSonification(stream, true, false);
-        }
-
-        // apply volume rules for current stream and device if necessary
-        checkAndSetVolume(stream,
-                          mStreams[stream].getVolumeIndex(newDevice),
-                          output,
-                          newDevice);
-
-        // update the outputs if starting an output with a stream that can affect notification
-        // routing
-        handleNotificationRoutingForStream(stream);
-
-        // Automatically enable the remote submix input when output is started on a re-routing mix
-        // of type MIX_TYPE_RECORDERS
-        if (audio_is_remote_submix_device(newDevice) && outputDesc->mPolicyMix != NULL &&
-                outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        outputDesc->mPolicyMix->mRegistrationId);
-        }
-
-        // force reevaluating accessibility routing when ringtone or alarm starts
-        if (strategy == STRATEGY_SONIFICATION) {
-            mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
-        }
-
-        if (waitMs > muteWaitMs) {
-            usleep((waitMs - muteWaitMs) * 2 * 1000);
-        }
-    }
-    return NO_ERROR;
-}
-
-
-status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
-                                            audio_stream_type_t stream,
-                                            audio_session_t session)
-{
-    ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("stopOutput() unknown output %d", output);
-        return BAD_VALUE;
-    }
-
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
-
-    // always handle stream stop, check which stream type is stopping
-    handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
-
-    // handle special case for sonification while in call
-    if (isInCall()) {
-        handleIncallSonification(stream, false, false);
-    }
-
-    if (outputDesc->mRefCount[stream] > 0) {
-        // decrement usage count of this stream on the output
-        outputDesc->changeRefCount(stream, -1);
-        // store time at which the stream was stopped - see isStreamActive()
-        if (outputDesc->mRefCount[stream] == 0) {
-            // Automatically disable the remote submix input when output is stopped on a
-            // re-routing mix of type MIX_TYPE_RECORDERS
-            if (audio_is_remote_submix_device(outputDesc->mDevice) &&
-                    outputDesc->mPolicyMix != NULL &&
-                    outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                        AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                        outputDesc->mPolicyMix->mRegistrationId);
-            }
-
-            outputDesc->mStopTime[stream] = systemTime();
-            audio_devices_t newDevice = getNewOutputDevice(output, false /*fromCache*/);
-            // delay the device switch by twice the latency because stopOutput() is executed when
-            // the track stop() command is received and at that time the audio track buffer can
-            // still contain data that needs to be drained. The latency only covers the audio HAL
-            // and kernel buffers. Also the latency does not always include additional delay in the
-            // audio path (audio DSP, CODEC ...)
-            setOutputDevice(output, newDevice, false, outputDesc->mLatency*2);
-
-            // force restoring the device selection on other active outputs if it differs from the
-            // one being selected for this output
-            for (size_t i = 0; i < mOutputs.size(); i++) {
-                audio_io_handle_t curOutput = mOutputs.keyAt(i);
-                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-                if (curOutput != output &&
-                        desc->isActive() &&
-                        outputDesc->sharesHwModuleWith(desc) &&
-                        (newDevice != desc->device())) {
-                    setOutputDevice(curOutput,
-                                    getNewOutputDevice(curOutput, false /*fromCache*/),
-                                    true,
-                                    outputDesc->mLatency*2);
-                }
-            }
-            // update the outputs if stopping one with a stream that can affect notification routing
-            handleNotificationRoutingForStream(stream);
-        }
-        return NO_ERROR;
-    } else {
-        ALOGW("stopOutput() refcount is already 0 for output %d", output);
-        return INVALID_OPERATION;
-    }
-}
-
-void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
-                                       audio_stream_type_t stream __unused,
-                                       audio_session_t session __unused)
-{
-    ALOGV("releaseOutput() %d", output);
-    ssize_t index = mOutputs.indexOfKey(output);
-    if (index < 0) {
-        ALOGW("releaseOutput() releasing unknown output %d", output);
-        return;
-    }
-
-#ifdef AUDIO_POLICY_TEST
-    int testIndex = testOutputIndex(output);
-    if (testIndex != 0) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
-        if (outputDesc->isActive()) {
-            mpClientInterface->closeOutput(output);
-            mOutputs.removeItem(output);
-            mTestOutputs[testIndex] = 0;
-        }
-        return;
-    }
-#endif //AUDIO_POLICY_TEST
-
-    sp<AudioOutputDescriptor> desc = mOutputs.valueAt(index);
-    if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
-        if (desc->mDirectOpenCount <= 0) {
-            ALOGW("releaseOutput() invalid open count %d for output %d",
-                                                              desc->mDirectOpenCount, output);
-            return;
-        }
-        if (--desc->mDirectOpenCount == 0) {
-            closeOutput(output);
-            // If effects were present on the output, audioflinger moved them to the primary
-            // output by default: move them back to the appropriate output.
-            audio_io_handle_t dstOutput = getOutputForEffect();
-            if (dstOutput != mPrimaryOutput) {
-                mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, mPrimaryOutput, dstOutput);
-            }
-            mpClientInterface->onAudioPortListUpdate();
-        }
-    }
-}
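-
-// Expected client sequence, as implied by the reference counting above: an output obtained
-// from getOutput()/getOutputForAttr() is started with startOutput(), stopped with
-// stopOutput(), and finally returned with releaseOutput(), passing the same stream (and
-// session) to each call so that the per-stream and direct-open counts stay balanced.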
-
-
-status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
-                                             audio_io_handle_t *input,
-                                             audio_session_t session,
-                                             uint32_t samplingRate,
-                                             audio_format_t format,
-                                             audio_channel_mask_t channelMask,
-                                             audio_input_flags_t flags,
-                                             input_type_t *inputType)
-{
-    ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
-            "session %d, flags %#x",
-          attr->source, samplingRate, format, channelMask, session, flags);
-
-    *input = AUDIO_IO_HANDLE_NONE;
-    *inputType = API_INPUT_INVALID;
-    audio_devices_t device;
-    // handle legacy remote submix case where the address was not always specified
-    String8 address = String8("");
-    bool isSoundTrigger = false;
-    audio_source_t inputSource = attr->source;
-    audio_source_t halInputSource;
-    AudioMix *policyMix = NULL;
-
-    if (inputSource == AUDIO_SOURCE_DEFAULT) {
-        inputSource = AUDIO_SOURCE_MIC;
-    }
-    halInputSource = inputSource;
-
-    if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
-            strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
-        device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-        address = String8(attr->tags + strlen("addr="));
-        ssize_t index = mPolicyMixes.indexOfKey(address);
-        if (index < 0) {
-            ALOGW("getInputForAttr() no policy for address %s", address.string());
-            return BAD_VALUE;
-        }
-        if (mPolicyMixes[index]->mMix.mMixType != MIX_TYPE_PLAYERS) {
-            ALOGW("getInputForAttr() bad policy mix type for address %s", address.string());
-            return BAD_VALUE;
-        }
-        policyMix = &mPolicyMixes[index]->mMix;
-        *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
-    } else {
-        device = getDeviceAndMixForInputSource(inputSource, &policyMix);
-        if (device == AUDIO_DEVICE_NONE) {
-            ALOGW("getInputForAttr() could not find device for source %d", inputSource);
-            return BAD_VALUE;
-        }
-        if (policyMix != NULL) {
-            address = policyMix->mRegistrationId;
-            if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
-                // there is an external policy, but this input is attached to a mix of recorders,
-                // meaning it receives audio injected into the framework, so the recorder doesn't
-                // know about it and is therefore considered "legacy"
-                *inputType = API_INPUT_LEGACY;
-            } else {
-                // recording a mix of players defined by an external policy, we're rerouting for
-                // an external policy
-                *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
-            }
-        } else if (audio_is_remote_submix_device(device)) {
-            address = String8("0");
-            *inputType = API_INPUT_MIX_CAPTURE;
-        } else {
-            *inputType = API_INPUT_LEGACY;
-        }
-        // adapt channel selection to input source
-        switch (inputSource) {
-        case AUDIO_SOURCE_VOICE_UPLINK:
-            channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
-            break;
-        case AUDIO_SOURCE_VOICE_DOWNLINK:
-            channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK;
-            break;
-        case AUDIO_SOURCE_VOICE_CALL:
-            channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
-            break;
-        default:
-            break;
-        }
-        if (inputSource == AUDIO_SOURCE_HOTWORD) {
-            ssize_t index = mSoundTriggerSessions.indexOfKey(session);
-            if (index >= 0) {
-                *input = mSoundTriggerSessions.valueFor(session);
-                isSoundTrigger = true;
-                flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
-                ALOGV("SoundTrigger capture on session %d input %d", session, *input);
-            } else {
-                halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
-            }
-        }
-    }
-
-    sp<IOProfile> profile = getInputProfile(device, address,
-                                            samplingRate, format, channelMask,
-                                            flags);
-    if (profile == 0) {
-        //retry without flags
-        audio_input_flags_t log_flags = flags;
-        flags = AUDIO_INPUT_FLAG_NONE;
-        profile = getInputProfile(device, address,
-                                  samplingRate, format, channelMask,
-                                  flags);
-        if (profile == 0) {
-            ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u,"
-                    "format %#x, channelMask 0x%X, flags %#x",
-                    device, samplingRate, format, channelMask, log_flags);
-            return BAD_VALUE;
-        }
-    }
-
-    if (profile->mModule->mHandle == 0) {
-        ALOGE("getInputForAttr(): HW module %s not opened", profile->mModule->mName);
-        return NO_INIT;
-    }
-
-    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-    config.sample_rate = samplingRate;
-    config.channel_mask = channelMask;
-    config.format = format;
-
-    status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
-                                                   input,
-                                                   &config,
-                                                   &device,
-                                                   address,
-                                                   halInputSource,
-                                                   flags);
-
-    // only accept input with the exact requested set of parameters
-    if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE ||
-        (samplingRate != config.sample_rate) ||
-        (format != config.format) ||
-        (channelMask != config.channel_mask)) {
-        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d, channelMask %x",
-                samplingRate, format, channelMask);
-        if (*input != AUDIO_IO_HANDLE_NONE) {
-            mpClientInterface->closeInput(*input);
-        }
-        return BAD_VALUE;
-    }
-
-    sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
-    inputDesc->mInputSource = inputSource;
-    inputDesc->mRefCount = 0;
-    inputDesc->mOpenRefCount = 1;
-    inputDesc->mSamplingRate = samplingRate;
-    inputDesc->mFormat = format;
-    inputDesc->mChannelMask = channelMask;
-    inputDesc->mDevice = device;
-    inputDesc->mSessions.add(session);
-    inputDesc->mIsSoundTrigger = isSoundTrigger;
-    inputDesc->mPolicyMix = policyMix;
-
-    ALOGV("getInputForAttr() returns input type = %d", inputType);
-
-    addInput(*input, inputDesc);
-    mpClientInterface->onAudioPortListUpdate();
-    return NO_ERROR;
-}
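-
-// A minimal sketch (illustrative, hypothetical values) of a capture request resolved by
-// getInputForAttr() above; "manager" is a hypothetical AudioPolicyManager instance and
-// "session" is assumed to be a valid audio_session_t allocated by the caller.
-//
-//     audio_attributes_t attr = {};
-//     attr.source = AUDIO_SOURCE_MIC;
-//     audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-//     input_type_t inputType;
-//     status_t res = manager->getInputForAttr(&attr, &input, session,
-//                                             16000, AUDIO_FORMAT_PCM_16_BIT,
-//                                             AUDIO_CHANNEL_IN_MONO,
-//                                             AUDIO_INPUT_FLAG_NONE, &inputType);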
-
-status_t AudioPolicyManager::startInput(audio_io_handle_t input,
-                                        audio_session_t session)
-{
-    ALOGV("startInput() input %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("startInput() unknown input %d", input);
-        return BAD_VALUE;
-    }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-
-    index = inputDesc->mSessions.indexOf(session);
-    if (index < 0) {
-        ALOGW("startInput() unknown session %d on input %d", session, input);
-        return BAD_VALUE;
-    }
-
-    // virtual input devices are compatible with other input devices
-    if (!isVirtualInputDevice(inputDesc->mDevice)) {
-
-        // for a non-virtual input device, check if there is another (non-virtual) active input
-        audio_io_handle_t activeInput = getActiveInput();
-        if (activeInput != 0 && activeInput != input) {
-
-            // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
-            // otherwise the active input continues and the new input cannot be started.
-            sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
-            if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
-                ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
-                stopInput(activeInput, activeDesc->mSessions.itemAt(0));
-                releaseInput(activeInput, activeDesc->mSessions.itemAt(0));
-            } else {
-                ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
-                return INVALID_OPERATION;
-            }
-        }
-    }
-
-    if (inputDesc->mRefCount == 0) {
-        if (activeInputsCount() == 0) {
-            SoundTrigger::setCaptureState(true);
-        }
-        setInputDevice(input, getNewInputDevice(input), true /* force */);
-
-        // automatically enable the remote submix output when input is started if not
-        // used by a policy mix of type MIX_TYPE_RECORDERS
-        // For remote submix (a virtual device), we open only one input per capture request.
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-            String8 address = String8("");
-            if (inputDesc->mPolicyMix == NULL) {
-                address = String8("0");
-            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mRegistrationId;
-            }
-            if (address != "") {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                        address);
-            }
-        }
-    }
-
-    ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
-
-    inputDesc->mRefCount++;
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
-                                       audio_session_t session)
-{
-    ALOGV("stopInput() input %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("stopInput() unknown input %d", input);
-        return BAD_VALUE;
-    }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-
-    index = inputDesc->mSessions.indexOf(session);
-    if (index < 0) {
-        ALOGW("stopInput() unknown session %d on input %d", session, input);
-        return BAD_VALUE;
-    }
-
-    if (inputDesc->mRefCount == 0) {
-        ALOGW("stopInput() input %d already stopped", input);
-        return INVALID_OPERATION;
-    }
-
-    inputDesc->mRefCount--;
-    if (inputDesc->mRefCount == 0) {
-
-        // automatically disable the remote submix output when input is stopped if not
-        // used by a policy mix of type MIX_TYPE_RECORDERS
-        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
-            String8 address = String8("");
-            if (inputDesc->mPolicyMix == NULL) {
-                address = String8("0");
-            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
-                address = inputDesc->mPolicyMix->mRegistrationId;
-            }
-            if (address != "") {
-                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                         address);
-            }
-        }
-
-        resetInputDevice(input);
-
-        if (activeInputsCount() == 0) {
-            SoundTrigger::setCaptureState(false);
-        }
-    }
-    return NO_ERROR;
-}
-
-void AudioPolicyManager::releaseInput(audio_io_handle_t input,
-                                      audio_session_t session)
-{
-    ALOGV("releaseInput() %d", input);
-    ssize_t index = mInputs.indexOfKey(input);
-    if (index < 0) {
-        ALOGW("releaseInput() releasing unknown input %d", input);
-        return;
-    }
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
-    ALOG_ASSERT(inputDesc != 0);
-
-    index = inputDesc->mSessions.indexOf(session);
-    if (index < 0) {
-        ALOGW("releaseInput() unknown session %d on input %d", session, input);
-        return;
-    }
-    inputDesc->mSessions.remove(session);
-    if (inputDesc->mOpenRefCount == 0) {
-        ALOGW("releaseInput() invalid open ref count %d", inputDesc->mOpenRefCount);
-        return;
-    }
-    inputDesc->mOpenRefCount--;
-    if (inputDesc->mOpenRefCount > 0) {
-        ALOGV("releaseInput() exit > 0");
-        return;
-    }
-
-    closeInput(input);
-    mpClientInterface->onAudioPortListUpdate();
-    ALOGV("releaseInput() exit");
-}
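-
-// The matching input sequence, as implied by the counters above: an input obtained from
-// getInputForAttr() is driven by startInput()/stopInput(), which update mRefCount, and is
-// finally freed with releaseInput(), which decrements mOpenRefCount and closes the input
-// once the last session has released it.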
-
-void AudioPolicyManager::closeAllInputs() {
-    bool patchRemoved = false;
-
-    for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
-        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
-        ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
-        if (patch_index >= 0) {
-            sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
-            mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
-            mAudioPatches.removeItemsAt(patch_index);
-            patchRemoved = true;
-        }
-        mpClientInterface->closeInput(mInputs.keyAt(input_index));
-    }
-    mInputs.clear();
-    nextAudioPortGeneration();
-
-    if (patchRemoved) {
-        mpClientInterface->onAudioPatchListUpdate();
-    }
-}
-
-void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
-                                            int indexMin,
-                                            int indexMax)
-{
-    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
-    if (indexMin < 0 || indexMin >= indexMax) {
-        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d", stream , indexMin, indexMax);
-        return;
-    }
-    mStreams[stream].mIndexMin = indexMin;
-    mStreams[stream].mIndexMax = indexMax;
-    //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
-    if (stream == AUDIO_STREAM_MUSIC) {
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mIndexMin = indexMin;
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mIndexMax = indexMax;
-    }
-}
-
-status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
-                                                      int index,
-                                                      audio_devices_t device)
-{
-
-    if ((index < mStreams[stream].mIndexMin) || (index > mStreams[stream].mIndexMax)) {
-        return BAD_VALUE;
-    }
-    if (!audio_is_output_device(device)) {
-        return BAD_VALUE;
-    }
-
-    // Force max volume if stream cannot be muted
-    if (!mStreams[stream].mCanBeMuted) index = mStreams[stream].mIndexMax;
-
-    ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d",
-          stream, device, index);
-
-    // if device is AUDIO_DEVICE_OUT_DEFAULT set default value and
-    // clear all device specific values
-    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
-        mStreams[stream].mIndexCur.clear();
-    }
-    mStreams[stream].mIndexCur.add(device, index);
-
-    // update volume on all outputs whose current device is also selected by the same
-    // strategy as the device specified by the caller
-    audio_devices_t strategyDevice = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
-
-
-    //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
-    audio_devices_t accessibilityDevice = AUDIO_DEVICE_NONE;
-    if (stream == AUDIO_STREAM_MUSIC) {
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mIndexCur.add(device, index);
-        accessibilityDevice = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, true /*fromCache*/);
-    }
-    if ((device != AUDIO_DEVICE_OUT_DEFAULT) &&
-            (device & (strategyDevice | accessibilityDevice)) == 0) {
-        return NO_ERROR;
-    }
-    status_t status = NO_ERROR;
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        audio_devices_t curDevice =
-                getDeviceForVolume(mOutputs.valueAt(i)->device());
-        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) {
-            status_t volStatus = checkAndSetVolume(stream, index, mOutputs.keyAt(i), curDevice);
-            if (volStatus != NO_ERROR) {
-                status = volStatus;
-            }
-        }
-        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)) {
-            checkAndSetVolume(AUDIO_STREAM_ACCESSIBILITY, index, mOutputs.keyAt(i), curDevice);
-        }
-    }
-    return status;
-}
-
-status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
-                                                      int *index,
-                                                      audio_devices_t device)
-{
-    if (index == NULL) {
-        return BAD_VALUE;
-    }
-    if (!audio_is_output_device(device)) {
-        return BAD_VALUE;
-    }
-    // if device is AUDIO_DEVICE_OUT_DEFAULT, return volume for device corresponding to
-    // the strategy the stream belongs to.
-    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
-        device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
-    }
-    device = getDeviceForVolume(device);
-
-    *index = mStreams[stream].getVolumeIndex(device);
-    ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
-    return NO_ERROR;
-}
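-
-// A minimal sketch (illustrative, hypothetical values) of the volume index flow handled by
-// initStreamVolume()/setStreamVolumeIndex()/getStreamVolumeIndex() above; "manager" is a
-// hypothetical AudioPolicyManager instance.
-//
-//     manager->initStreamVolume(AUDIO_STREAM_MUSIC, 0, 15);
-//     manager->setStreamVolumeIndex(AUDIO_STREAM_MUSIC, 10, AUDIO_DEVICE_OUT_SPEAKER);
-//     int index = 0;
-//     manager->getStreamVolumeIndex(AUDIO_STREAM_MUSIC, &index, AUDIO_DEVICE_OUT_SPEAKER);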
-
-audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
-                                            const SortedVector<audio_io_handle_t>& outputs)
-{
-    // select one output among several suitable for global effects.
-    // The priority is as follows:
-    // 1: An offloaded output. If the effect ends up not being offloadable,
-    //    AudioFlinger will invalidate the track and the offloaded output
-    //    will be closed causing the effect to be moved to a PCM output.
-    // 2: A deep buffer output
-    // 3: the first output in the list
-
-    if (outputs.size() == 0) {
-        return 0;
-    }
-
-    audio_io_handle_t outputOffloaded = 0;
-    audio_io_handle_t outputDeepBuffer = 0;
-
-    for (size_t i = 0; i < outputs.size(); i++) {
-        sp<AudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
-        ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags);
-        if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-            outputOffloaded = outputs[i];
-        }
-        if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
-            outputDeepBuffer = outputs[i];
-        }
-    }
-
-    ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d",
-          outputOffloaded, outputDeepBuffer);
-    if (outputOffloaded != 0) {
-        return outputOffloaded;
-    }
-    if (outputDeepBuffer != 0) {
-        return outputDeepBuffer;
-    }
-
-    return outputs[0];
-}
-
-audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)
-{
-    // apply simple rule where global effects are attached to the same output as MUSIC streams
-
-    routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
-    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
-    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(device, mOutputs);
-
-    audio_io_handle_t output = selectOutputForEffects(dstOutputs);
-    ALOGV("getOutputForEffect() got output %d for fx %s flags %x",
-          output, (desc == NULL) ? "unspecified" : desc->name,  (desc == NULL) ? 0 : desc->flags);
-
-    return output;
-}
-
-status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
-                                audio_io_handle_t io,
-                                uint32_t strategy,
-                                int session,
-                                int id)
-{
-    ssize_t index = mOutputs.indexOfKey(io);
-    if (index < 0) {
-        index = mInputs.indexOfKey(io);
-        if (index < 0) {
-            ALOGW("registerEffect() unknown io %d", io);
-            return INVALID_OPERATION;
-        }
-    }
-
-    if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
-        ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
-                desc->name, desc->memoryUsage);
-        return INVALID_OPERATION;
-    }
-    mTotalEffectsMemory += desc->memoryUsage;
-    ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
-            desc->name, io, strategy, session, id);
-    ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
-
-    sp<EffectDescriptor> effectDesc = new EffectDescriptor();
-    memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
-    effectDesc->mIo = io;
-    effectDesc->mStrategy = (routing_strategy)strategy;
-    effectDesc->mSession = session;
-    effectDesc->mEnabled = false;
-
-    mEffects.add(id, effectDesc);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::unregisterEffect(int id)
-{
-    ssize_t index = mEffects.indexOfKey(id);
-    if (index < 0) {
-        ALOGW("unregisterEffect() unknown effect ID %d", id);
-        return INVALID_OPERATION;
-    }
-
-    sp<EffectDescriptor> effectDesc = mEffects.valueAt(index);
-
-    setEffectEnabled(effectDesc, false);
-
-    if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) {
-        ALOGW("unregisterEffect() memory %d too big for total %d",
-                effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
-        effectDesc->mDesc.memoryUsage = mTotalEffectsMemory;
-    }
-    mTotalEffectsMemory -= effectDesc->mDesc.memoryUsage;
-    ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d",
-            effectDesc->mDesc.name, id, effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
-
-    mEffects.removeItem(id);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::setEffectEnabled(int id, bool enabled)
-{
-    ssize_t index = mEffects.indexOfKey(id);
-    if (index < 0) {
-        ALOGW("unregisterEffect() unknown effect ID %d", id);
-        return INVALID_OPERATION;
-    }
-
-    return setEffectEnabled(mEffects.valueAt(index), enabled);
-}
-
-status_t AudioPolicyManager::setEffectEnabled(const sp<EffectDescriptor>& effectDesc, bool enabled)
-{
-    if (enabled == effectDesc->mEnabled) {
-        ALOGV("setEffectEnabled(%s) effect already %s",
-             enabled?"true":"false", enabled?"enabled":"disabled");
-        return INVALID_OPERATION;
-    }
-
-    if (enabled) {
-        if (mTotalEffectsCpuLoad + effectDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) {
-            ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS",
-                 effectDesc->mDesc.name, (float)effectDesc->mDesc.cpuLoad/10);
-            return INVALID_OPERATION;
-        }
-        mTotalEffectsCpuLoad += effectDesc->mDesc.cpuLoad;
-        ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad);
-    } else {
-        if (mTotalEffectsCpuLoad < effectDesc->mDesc.cpuLoad) {
-            ALOGW("setEffectEnabled(false) CPU load %d too high for total %d",
-                    effectDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
-            effectDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
-        }
-        mTotalEffectsCpuLoad -= effectDesc->mDesc.cpuLoad;
-        ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad);
-    }
-    effectDesc->mEnabled = enabled;
-    return NO_ERROR;
-}
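-
-// Note on the accounting above: registerEffect()/unregisterEffect() charge each effect's
-// memoryUsage against the global budget returned by getMaxEffectsMemory(), while
-// setEffectEnabled() charges cpuLoad against getMaxEffectsCpuLoad() only while the effect
-// is enabled; exceeding either budget rejects the request with INVALID_OPERATION.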
-
-bool AudioPolicyManager::isNonOffloadableEffectEnabled()
-{
-    for (size_t i = 0; i < mEffects.size(); i++) {
-        sp<EffectDescriptor> effectDesc = mEffects.valueAt(i);
-        if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
-                ((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) {
-            ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
-                  effectDesc->mDesc.name, effectDesc->mSession);
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
-{
-    nsecs_t sysTime = systemTime();
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream,
-                                                    uint32_t inPastMs) const
-{
-    nsecs_t sysTime = systemTime();
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
-                outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
-            // do not consider re-routing (when the output is going to a dynamic policy)
-            // as "remote playback"
-            if (outputDesc->mPolicyMix == NULL) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::isSourceActive(audio_source_t source) const
-{
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
-        if (inputDescriptor->mRefCount == 0) {
-            continue;
-        }
-        if (inputDescriptor->mInputSource == (int)source) {
-            return true;
-        }
-        // AUDIO_SOURCE_HOTWORD is equivalent to AUDIO_SOURCE_VOICE_RECOGNITION only if it
-        // corresponds to an active capture triggered by a hardware hotword recognition
-        if ((source == AUDIO_SOURCE_VOICE_RECOGNITION) &&
-                 (inputDescriptor->mInputSource == AUDIO_SOURCE_HOTWORD)) {
-            // FIXME: we should not assume that the first session is the active one and keep
-            // activity count per session. Same in startInput().
-            ssize_t index = mSoundTriggerSessions.indexOfKey(inputDescriptor->mSessions.itemAt(0));
-            if (index >= 0) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
-// Register a list of custom mixes with their attributes and format.
-// When a mix is registered, corresponding input and output profiles are
-// added to the remote submix hw module. The profile contains only the
-// parameters (sampling rate, format...) specified by the mix.
-// The corresponding input remote submix device is also connected.
-//
-// When a remote submix device is connected, the address is checked to select the
-// appropriate profile and the corresponding input or output stream is opened.
-//
-// When capture starts, getInputForAttr() will:
-//  - 1 look for a mix matching the address passed in attributes tags if any
-//  - 2 if none found, getDeviceForInputSource() will:
-//     - 2.1 look for a mix matching the attributes source
-//     - 2.2 if none found, default to device selection by policy rules
-// At this time, the corresponding output remote submix device is also connected
-// and active playback use cases can be transferred to this mix if needed when reconnecting
-// after AudioTracks are invalidated
-//
-// When playback starts, getOutputForAttr() will:
-//  - 1 look for a mix matching the address passed in attributes tags if any
-//  - 2 if none found, look for a mix matching the attributes usage
-//  - 3 if none found, default to device and output selection by policy rules.
-
-status_t AudioPolicyManager::registerPolicyMixes(Vector<AudioMix> mixes)
-{
-    sp<HwModule> module;
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
-                mHwModules[i]->mHandle != 0) {
-            module = mHwModules[i];
-            break;
-        }
-    }
-
-    if (module == 0) {
-        return INVALID_OPERATION;
-    }
-
-    ALOGV("registerPolicyMixes() num mixes %d", mixes.size());
-
-    for (size_t i = 0; i < mixes.size(); i++) {
-        String8 address = mixes[i].mRegistrationId;
-        ssize_t index = mPolicyMixes.indexOfKey(address);
-        if (index >= 0) {
-            ALOGE("registerPolicyMixes(): mix for address %s already registered", address.string());
-            continue;
-        }
-        audio_config_t outputConfig = mixes[i].mFormat;
-        audio_config_t inputConfig = mixes[i].mFormat;
-        // NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
-        // stereo and let audio flinger do the channel conversion if needed.
-        outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
-        inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
-        module->addOutputProfile(address, &outputConfig,
-                                 AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
-        module->addInputProfile(address, &inputConfig,
-                                 AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
-        sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
-        policyMix->mMix = mixes[i];
-        mPolicyMixes.add(address, policyMix);
-        if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                                     address.string());
-        } else {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
-                                     address.string());
-        }
-    }
-    return NO_ERROR;
-}
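
The sketch below is a caller-side illustration of the registration flow above: it fills in a single MIX_TYPE_PLAYERS mix of the shape registerPolicyMixes() expects, following the lookup rules documented in the comment block before the function. The member names (mRegistrationId, mFormat, mMixType) and MIX_TYPE_PLAYERS are taken from the function itself; the header path, the default construction of AudioMix, the 48 kHz / 16-bit format and the address string are illustrative assumptions, not part of this code.

#include <media/AudioPolicy.h>   // AudioMix (assumed header location)
#include <system/audio.h>
#include <utils/String8.h>
#include <utils/Vector.h>

using namespace android;

// Build a one-element mix list suitable for passing to registerPolicyMixes().
static Vector<AudioMix> buildExamplePlayerMix()
{
    audio_config_t fmt = AUDIO_CONFIG_INITIALIZER;
    fmt.sample_rate  = 48000;                       // illustrative values
    fmt.format       = AUDIO_FORMAT_PCM_16_BIT;
    fmt.channel_mask = AUDIO_CHANNEL_OUT_STEREO;    // the manager forces stereo on the submix HAL anyway

    AudioMix mix;                                   // assumed default-constructible
    mix.mRegistrationId = String8("addr:example_dynamic_mix");  // address doubles as the submix profile name
    mix.mMixType        = MIX_TYPE_PLAYERS;         // playback capture: IN_REMOTE_SUBMIX gets connected
    mix.mFormat         = fmt;

    Vector<AudioMix> mixes;
    mixes.add(mix);
    return mixes;
}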
-
-status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
-{
-    sp<HwModule> module;
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
-                mHwModules[i]->mHandle != 0) {
-            module = mHwModules[i];
-            break;
-        }
-    }
-
-    if (module == 0) {
-        return INVALID_OPERATION;
-    }
-
-    ALOGV("unregisterPolicyMixes() num mixes %d", mixes.size());
-
-    for (size_t i = 0; i < mixes.size(); i++) {
-        String8 address = mixes[i].mRegistrationId;
-        ssize_t index = mPolicyMixes.indexOfKey(address);
-        if (index < 0) {
-            ALOGE("unregisterPolicyMixes(): mix for address %s not registered", address.string());
-            continue;
-        }
-
-        mPolicyMixes.removeItemsAt(index);
-
-        if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
-                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
-        {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                     address.string());
-        }
-
-        if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
-                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
-        {
-            setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
-                                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
-                                     address.string());
-        }
-        module->removeOutputProfile(address);
-        module->removeInputProfile(address);
-    }
-    return NO_ERROR;
-}
-
-
-status_t AudioPolicyManager::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
-    result.append(buffer);
-
-    snprintf(buffer, SIZE, " Primary Output: %d\n", mPrimaryOutput);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Phone state: %d\n", mPhoneState);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for communications %d\n",
-             mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for media %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA]);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for record %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD]);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for dock %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK]);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for system %d\n", mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM]);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
-            mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO]);
-    result.append(buffer);
-
-    snprintf(buffer, SIZE, " Available output devices:\n");
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
-        mAvailableOutputDevices[i]->dump(fd, 2, i);
-    }
-    snprintf(buffer, SIZE, "\n Available input devices:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
-        mAvailableInputDevices[i]->dump(fd, 2, i);
-    }
-
-    snprintf(buffer, SIZE, "\nHW Modules dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        snprintf(buffer, SIZE, "- HW Module %zu:\n", i + 1);
-        write(fd, buffer, strlen(buffer));
-        mHwModules[i]->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nOutputs dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        snprintf(buffer, SIZE, "- Output %d dump:\n", mOutputs.keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        mOutputs.valueAt(i)->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nInputs dump:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        snprintf(buffer, SIZE, "- Input %d dump:\n", mInputs.keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        mInputs.valueAt(i)->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nStreams dump:\n");
-    write(fd, buffer, strlen(buffer));
-    snprintf(buffer, SIZE,
-             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < AUDIO_STREAM_CNT; i++) {
-        snprintf(buffer, SIZE, " %02zu      ", i);
-        write(fd, buffer, strlen(buffer));
-        mStreams[i].dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
-            (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
-    write(fd, buffer, strlen(buffer));
-
-    snprintf(buffer, SIZE, "Registered effects:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mEffects.size(); i++) {
-        snprintf(buffer, SIZE, "- Effect %d dump:\n", mEffects.keyAt(i));
-        write(fd, buffer, strlen(buffer));
-        mEffects.valueAt(i)->dump(fd);
-    }
-
-    snprintf(buffer, SIZE, "\nAudio Patches:\n");
-    write(fd, buffer, strlen(buffer));
-    for (size_t i = 0; i < mAudioPatches.size(); i++) {
-        mAudioPatches[i]->dump(fd, 2, i);
-    }
-
-    return NO_ERROR;
-}
-
-// This function checks for the parameters which can be offloaded.
-// This can be enhanced depending on the capability of the DSP and policy
-// of the system.
-bool AudioPolicyManager::isOffloadSupported(const audio_offload_info_t& offloadInfo)
-{
-    ALOGV("isOffloadSupported: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d,"
-     " BitRate=%u, duration=%" PRId64 " us, has_video=%d",
-     offloadInfo.sample_rate, offloadInfo.channel_mask,
-     offloadInfo.format,
-     offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
-     offloadInfo.has_video);
-
-    // Check if offload has been disabled
-    char propValue[PROPERTY_VALUE_MAX];
-    if (property_get("audio.offload.disable", propValue, "0")) {
-        if (atoi(propValue) != 0) {
-            ALOGV("offload disabled by audio.offload.disable=%s", propValue );
-            return false;
-        }
-    }
-
-    // Check if stream type is music, then only allow offload as of now.
-    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
-    {
-        ALOGV("isOffloadSupported: stream_type != MUSIC, returning false");
-        return false;
-    }
-
-    //TODO: enable audio offloading with video when ready
-    if (offloadInfo.has_video)
-    {
-        ALOGV("isOffloadSupported: has_video == true, returning false");
-        return false;
-    }
-
-    //If duration is less than minimum value defined in property, return false
-    if (property_get("audio.offload.min.duration.secs", propValue, NULL)) {
-        if (offloadInfo.duration_us < (atoi(propValue) * 1000000 )) {
-            ALOGV("Offload denied by duration < audio.offload.min.duration.secs(=%s)", propValue);
-            return false;
-        }
-    } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
-        ALOGV("Offload denied by duration < default min(=%u)", OFFLOAD_DEFAULT_MIN_DURATION_SECS);
-        return false;
-    }
-
-    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
-    // creating an offloaded track and tearing it down immediately after start when audioflinger
-    // detects there is an active non offloadable effect.
-    // FIXME: We should check the audio session here but we do not have it in this context.
-    // This may prevent offloading in rare situations where effects are left active by apps
-    // in the background.
-    if (isNonOffloadableEffectEnabled()) {
-        return false;
-    }
-
-    // See if there is a direct output profile that can support this format
-    // (the device is ignored: AUDIO_DEVICE_NONE is passed below).
-    sp<IOProfile> profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */,
-                                            offloadInfo.sample_rate,
-                                            offloadInfo.format,
-                                            offloadInfo.channel_mask,
-                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
-    ALOGV("isOffloadSupported() profile %sfound", profile != 0 ? "" : "NOT ");
-    return (profile != 0);
-}
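
For reference, the sketch below builds the kind of clip description isOffloadSupported() evaluates, using only the audio_offload_info_t fields logged at the top of the function. AUDIO_INFO_INITIALIZER is assumed to be the initializer macro from <system/audio.h>, and the concrete bit rate, duration and format values are illustrative only.

#include <system/audio.h>

// Describe a long MP3 music clip with no video: the case the checks above are
// written to accept, provided a compress-offload output profile exists.
static audio_offload_info_t exampleOffloadCandidate()
{
    audio_offload_info_t info = AUDIO_INFO_INITIALIZER;  // assumed initializer macro
    info.sample_rate  = 44100;
    info.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
    info.format       = AUDIO_FORMAT_MP3;
    info.stream_type  = AUDIO_STREAM_MUSIC;              // any other stream type is rejected above
    info.bit_rate     = 320000;
    info.duration_us  = 240LL * 1000000LL;               // well above the minimum-duration property
    info.has_video    = false;                           // A/V offload is not enabled yet
    return info;  // would then be passed to isOffloadSupported()
}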
-
-status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
-                                            audio_port_type_t type,
-                                            unsigned int *num_ports,
-                                            struct audio_port *ports,
-                                            unsigned int *generation)
-{
-    if (num_ports == NULL || (*num_ports != 0 && ports == NULL) ||
-            generation == NULL) {
-        return BAD_VALUE;
-    }
-    ALOGV("listAudioPorts() role %d type %d num_ports %d ports %p", role, type, *num_ports, ports);
-    if (ports == NULL) {
-        *num_ports = 0;
-    }
-
-    size_t portsWritten = 0;
-    size_t portsMax = *num_ports;
-    *num_ports = 0;
-    if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_DEVICE) {
-        if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
-            for (size_t i = 0;
-                    i  < mAvailableOutputDevices.size() && portsWritten < portsMax; i++) {
-                mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
-            }
-            *num_ports += mAvailableOutputDevices.size();
-        }
-        if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
-            for (size_t i = 0;
-                    i  < mAvailableInputDevices.size() && portsWritten < portsMax; i++) {
-                mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
-            }
-            *num_ports += mAvailableInputDevices.size();
-        }
-    }
-    if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_MIX) {
-        if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
-            for (size_t i = 0; i < mInputs.size() && portsWritten < portsMax; i++) {
-                mInputs[i]->toAudioPort(&ports[portsWritten++]);
-            }
-            *num_ports += mInputs.size();
-        }
-        if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
-            size_t numOutputs = 0;
-            for (size_t i = 0; i < mOutputs.size(); i++) {
-                if (!mOutputs[i]->isDuplicated()) {
-                    numOutputs++;
-                    if (portsWritten < portsMax) {
-                        mOutputs[i]->toAudioPort(&ports[portsWritten++]);
-                    }
-                }
-            }
-            *num_ports += numOutputs;
-        }
-    }
-    *generation = curAudioPortGeneration();
-    ALOGV("listAudioPorts() got %zu ports needed %d", portsWritten, *num_ports);
-    return NO_ERROR;
-}
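
A usage sketch for the two-pass convention listAudioPorts() implements: a first call with a NULL array returns only the required count, a second call fills the array, and the generation value detects a port list that changed in between. How the caller obtains an AudioPolicyManager reference, the local header name, and the omission of status checking are assumptions made to keep the sketch short.

#include <stdlib.h>
#include <system/audio.h>
#include "AudioPolicyManager.h"   // assumed local header declaring the class

using namespace android;

// Fetch every port (devices and mixes, both roles), retrying if the generation changes.
static struct audio_port *fetchAllPorts(AudioPolicyManager& apm, unsigned int *count)
{
    struct audio_port *ports = NULL;
    unsigned int generation1 = 0;
    unsigned int generation2 = 0;
    do {
        unsigned int needed = 0;
        // Pass 1: NULL array and *num_ports == 0, so only the total count is returned.
        apm.listAudioPorts(AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_NONE,
                           &needed, NULL, &generation1);
        free(ports);
        ports = (struct audio_port *)calloc(needed, sizeof(struct audio_port));
        *count = needed;
        // Pass 2: fill up to *count entries; *count is rewritten with the total needed.
        apm.listAudioPorts(AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_NONE,
                           count, ports, &generation2);
    } while (generation1 != generation2);  // the port list changed while we were reading it
    return ports;  // caller owns the array; error handling omitted for brevity
}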
-
-status_t AudioPolicyManager::getAudioPort(struct audio_port *port __unused)
-{
-    return NO_ERROR;
-}
-
-sp<AudioPolicyManager::AudioOutputDescriptor> AudioPolicyManager::getOutputFromId(
-                                                                    audio_port_handle_t id) const
-{
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (outputDesc->mId == id) {
-            return outputDesc;
-        }
-    }
-    return NULL;
-}
-
-sp<AudioPolicyManager::AudioInputDescriptor> AudioPolicyManager::getInputFromId(
-                                                                    audio_port_handle_t id) const
-{
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
-        if (inputDesc->mId == id) {
-            return inputDesc;
-        }
-    }
-    return NULL;
-}
-
-sp <AudioPolicyManager::HwModule> AudioPolicyManager::getModuleForDevice(
-                                                                    audio_devices_t device) const
-{
-    sp <HwModule> module;
-
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        if (mHwModules[i]->mHandle == 0) {
-            continue;
-        }
-        if (audio_is_output_device(device)) {
-            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
-            {
-                if (mHwModules[i]->mOutputProfiles[j]->mSupportedDevices.types() & device) {
-                    return mHwModules[i];
-                }
-            }
-        } else {
-            for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++) {
-                if (mHwModules[i]->mInputProfiles[j]->mSupportedDevices.types() &
-                        device & ~AUDIO_DEVICE_BIT_IN) {
-                    return mHwModules[i];
-                }
-            }
-        }
-    }
-    return module;
-}
-
-sp <AudioPolicyManager::HwModule> AudioPolicyManager::getModuleFromName(const char *name) const
-{
-    sp <HwModule> module;
-
-    for (size_t i = 0; i < mHwModules.size(); i++)
-    {
-        if (strcmp(mHwModules[i]->mName, name) == 0) {
-            return mHwModules[i];
-        }
-    }
-    return module;
-}
-
-audio_devices_t AudioPolicyManager::availablePrimaryOutputDevices()
-{
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput);
-    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
-    return devices & mAvailableOutputDevices.types();
-}
-
-audio_devices_t AudioPolicyManager::availablePrimaryInputDevices()
-{
-    audio_module_handle_t primaryHandle =
-                                mOutputs.valueFor(mPrimaryOutput)->mProfile->mModule->mHandle;
-    audio_devices_t devices = AUDIO_DEVICE_NONE;
-    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
-        if (mAvailableInputDevices[i]->mModule->mHandle == primaryHandle) {
-            devices |= mAvailableInputDevices[i]->mDeviceType;
-        }
-    }
-    return devices;
-}
-
-status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
-                                               audio_patch_handle_t *handle,
-                                               uid_t uid)
-{
-    ALOGV("createAudioPatch()");
-
-    if (handle == NULL || patch == NULL) {
-        return BAD_VALUE;
-    }
-    ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
-
-    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
-            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
-        return BAD_VALUE;
-    }
-    // only one source per audio patch supported for now
-    if (patch->num_sources > 1) {
-        return INVALID_OPERATION;
-    }
-
-    if (patch->sources[0].role != AUDIO_PORT_ROLE_SOURCE) {
-        return INVALID_OPERATION;
-    }
-    for (size_t i = 0; i < patch->num_sinks; i++) {
-        if (patch->sinks[i].role != AUDIO_PORT_ROLE_SINK) {
-            return INVALID_OPERATION;
-        }
-    }
-
-    sp<AudioPatch> patchDesc;
-    ssize_t index = mAudioPatches.indexOfKey(*handle);
-
-    ALOGV("createAudioPatch source id %d role %d type %d", patch->sources[0].id,
-                                                           patch->sources[0].role,
-                                                           patch->sources[0].type);
-#if LOG_NDEBUG == 0
-    for (size_t i = 0; i < patch->num_sinks; i++) {
-        ALOGV("createAudioPatch sink %d: id %d role %d type %d", i, patch->sinks[i].id,
-                                                             patch->sinks[i].role,
-                                                             patch->sinks[i].type);
-    }
-#endif
-
-    if (index >= 0) {
-        patchDesc = mAudioPatches.valueAt(index);
-        ALOGV("createAudioPatch() mUidCached %d patchDesc->mUid %d uid %d",
-                                                                  mUidCached, patchDesc->mUid, uid);
-        if (patchDesc->mUid != mUidCached && uid != patchDesc->mUid) {
-            return INVALID_OPERATION;
-        }
-    } else {
-        *handle = 0;
-    }
-
-    if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
-        sp<AudioOutputDescriptor> outputDesc = getOutputFromId(patch->sources[0].id);
-        if (outputDesc == NULL) {
-            ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id);
-            return BAD_VALUE;
-        }
-        ALOG_ASSERT(!outputDesc->isDuplicated(),"duplicated output %d in source in ports",
-                                                outputDesc->mIoHandle);
-        if (patchDesc != 0) {
-            if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
-                ALOGV("createAudioPatch() source id differs for patch current id %d new id %d",
-                                          patchDesc->mPatch.sources[0].id, patch->sources[0].id);
-                return BAD_VALUE;
-            }
-        }
-        DeviceVector devices;
-        for (size_t i = 0; i < patch->num_sinks; i++) {
-            // Only support mix to devices connection
-            // TODO add support for mix to mix connection
-            if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
-                ALOGV("createAudioPatch() source mix but sink is not a device");
-                return INVALID_OPERATION;
-            }
-            sp<DeviceDescriptor> devDesc =
-                    mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
-            if (devDesc == 0) {
-                ALOGV("createAudioPatch() out device not found for id %d", patch->sinks[i].id);
-                return BAD_VALUE;
-            }
-
-            if (!outputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
-                                                           devDesc->mAddress,
-                                                           patch->sources[0].sample_rate,
-                                                           NULL,  // updatedSamplingRate
-                                                           patch->sources[0].format,
-                                                           patch->sources[0].channel_mask,
-                                                           AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
-                ALOGV("createAudioPatch() profile not supported for device %08x",
-                      devDesc->mDeviceType);
-                return INVALID_OPERATION;
-            }
-            devices.add(devDesc);
-        }
-        if (devices.size() == 0) {
-            return INVALID_OPERATION;
-        }
-
-        // TODO: reconfigure output format and channels here
-        ALOGV("createAudioPatch() setting device %08x on output %d",
-              devices.types(), outputDesc->mIoHandle);
-        setOutputDevice(outputDesc->mIoHandle, devices.types(), true, 0, handle);
-        index = mAudioPatches.indexOfKey(*handle);
-        if (index >= 0) {
-            if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
-                ALOGW("createAudioPatch() setOutputDevice() did not reuse the patch provided");
-            }
-            patchDesc = mAudioPatches.valueAt(index);
-            patchDesc->mUid = uid;
-            ALOGV("createAudioPatch() success");
-        } else {
-            ALOGW("createAudioPatch() setOutputDevice() failed to create a patch");
-            return INVALID_OPERATION;
-        }
-    } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
-        if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-            // input device to input mix connection
-            // only one sink supported when connecting an input device to a mix
-            if (patch->num_sinks > 1) {
-                return INVALID_OPERATION;
-            }
-            sp<AudioInputDescriptor> inputDesc = getInputFromId(patch->sinks[0].id);
-            if (inputDesc == NULL) {
-                return BAD_VALUE;
-            }
-            if (patchDesc != 0) {
-                if (patchDesc->mPatch.sinks[0].id != patch->sinks[0].id) {
-                    return BAD_VALUE;
-                }
-            }
-            sp<DeviceDescriptor> devDesc =
-                    mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
-            if (devDesc == 0) {
-                return BAD_VALUE;
-            }
-
-            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->mDeviceType,
-                                                          devDesc->mAddress,
-                                                          patch->sinks[0].sample_rate,
-                                                          NULL, /*updatedSampleRate*/
-                                                          patch->sinks[0].format,
-                                                          patch->sinks[0].channel_mask,
-                                                          // FIXME for the parameter type,
-                                                          // and the NONE
-                                                          (audio_output_flags_t)
-                                                            AUDIO_INPUT_FLAG_NONE)) {
-                return INVALID_OPERATION;
-            }
-            // TODO: reconfigure output format and channels here
-            ALOGV("createAudioPatch() setting device %08x on output %d",
-                                                  devDesc->mDeviceType, inputDesc->mIoHandle);
-            setInputDevice(inputDesc->mIoHandle, devDesc->mDeviceType, true, handle);
-            index = mAudioPatches.indexOfKey(*handle);
-            if (index >= 0) {
-                if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
-                    ALOGW("createAudioPatch() setInputDevice() did not reuse the patch provided");
-                }
-                patchDesc = mAudioPatches.valueAt(index);
-                patchDesc->mUid = uid;
-                ALOGV("createAudioPatch() success");
-            } else {
-                ALOGW("createAudioPatch() setInputDevice() failed to create a patch");
-                return INVALID_OPERATION;
-            }
-        } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
-            // device to device connection
-            if (patchDesc != 0) {
-                if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
-                    return BAD_VALUE;
-                }
-            }
-            sp<DeviceDescriptor> srcDeviceDesc =
-                    mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
-            if (srcDeviceDesc == 0) {
-                return BAD_VALUE;
-            }
-
-            // update source and sink with our own data as the data passed in the patch may
-            // be incomplete.
-            struct audio_patch newPatch = *patch;
-            srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
-
-            for (size_t i = 0; i < patch->num_sinks; i++) {
-                if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
-                    ALOGV("createAudioPatch() source device but one sink is not a device");
-                    return INVALID_OPERATION;
-                }
-
-                sp<DeviceDescriptor> sinkDeviceDesc =
-                        mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
-                if (sinkDeviceDesc == 0) {
-                    return BAD_VALUE;
-                }
-                sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
-
-                if (srcDeviceDesc->mModule != sinkDeviceDesc->mModule) {
-                    // only one sink is supported when connecting devices across HW modules
-                    if (patch->num_sinks > 1) {
-                        return INVALID_OPERATION;
-                    }
-                    SortedVector<audio_io_handle_t> outputs =
-                                            getOutputsForDevice(sinkDeviceDesc->mDeviceType,
-                                                                mOutputs);
-                    // if the sink device is reachable via an opened output stream, request to go via
-                    // this output stream by adding a second source to the patch description
-                    audio_io_handle_t output = selectOutput(outputs,
-                                                            AUDIO_OUTPUT_FLAG_NONE,
-                                                            AUDIO_FORMAT_INVALID);
-                    if (output != AUDIO_IO_HANDLE_NONE) {
-                        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-                        if (outputDesc->isDuplicated()) {
-                            return INVALID_OPERATION;
-                        }
-                        outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]);
-                        newPatch.num_sources = 2;
-                    }
-                }
-            }
-            // TODO: check from routing capabilities in config file and other conflicting patches
-
-            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-            if (index >= 0) {
-                afPatchHandle = patchDesc->mAfPatchHandle;
-            }
-
-            status_t status = mpClientInterface->createAudioPatch(&newPatch,
-                                                                  &afPatchHandle,
-                                                                  0);
-            ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
-                                                                  status, afPatchHandle);
-            if (status == NO_ERROR) {
-                if (index < 0) {
-                    patchDesc = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                               &newPatch, uid);
-                    addAudioPatch(patchDesc->mHandle, patchDesc);
-                } else {
-                    patchDesc->mPatch = newPatch;
-                }
-                patchDesc->mAfPatchHandle = afPatchHandle;
-                *handle = patchDesc->mHandle;
-                nextAudioPortGeneration();
-                mpClientInterface->onAudioPatchListUpdate();
-            } else {
-                ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
-                status);
-                return INVALID_OPERATION;
-            }
-        } else {
-            return BAD_VALUE;
-        }
-    } else {
-        return BAD_VALUE;
-    }
-    return NO_ERROR;
-}
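
To make the device-to-device branch above concrete, the sketch below fills in the minimal audio_patch shape it accepts: exactly one DEVICE source and one DEVICE sink, identified by port ids that would normally come from a ports listing. The helper name and the idea of passing raw port ids as parameters are assumptions for illustration; the struct fields and constants are the ones createAudioPatch() itself reads.

#include <string.h>
#include <system/audio.h>

// Minimal device-to-device patch request; the manager completes the source and
// sink configs from its own device descriptors before calling the patch panel.
static struct audio_patch makeDeviceToDevicePatch(audio_port_handle_t srcDevicePortId,
                                                  audio_port_handle_t sinkDevicePortId)
{
    struct audio_patch patch;
    memset(&patch, 0, sizeof(patch));
    patch.id = AUDIO_PATCH_HANDLE_NONE;        // the real handle comes back through the out parameter

    patch.num_sources     = 1;                 // more than one source is rejected above
    patch.sources[0].id   = srcDevicePortId;   // e.g. an available input device port
    patch.sources[0].role = AUDIO_PORT_ROLE_SOURCE;
    patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;

    patch.num_sinks     = 1;
    patch.sinks[0].id   = sinkDevicePortId;    // e.g. an available output device port
    patch.sinks[0].role = AUDIO_PORT_ROLE_SINK;
    patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;

    return patch;  // then passed as createAudioPatch(&patch, &handle, uid)
}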
-
-status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle,
-                                                  uid_t uid)
-{
-    ALOGV("releaseAudioPatch() patch %d", handle);
-
-    ssize_t index = mAudioPatches.indexOfKey(handle);
-
-    if (index < 0) {
-        return BAD_VALUE;
-    }
-    sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-    ALOGV("releaseAudioPatch() mUidCached %d patchDesc->mUid %d uid %d",
-          mUidCached, patchDesc->mUid, uid);
-    if (patchDesc->mUid != mUidCached && uid != patchDesc->mUid) {
-        return INVALID_OPERATION;
-    }
-
-    struct audio_patch *patch = &patchDesc->mPatch;
-    patchDesc->mUid = mUidCached;
-    if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
-        sp<AudioOutputDescriptor> outputDesc = getOutputFromId(patch->sources[0].id);
-        if (outputDesc == NULL) {
-            ALOGV("releaseAudioPatch() output not found for id %d", patch->sources[0].id);
-            return BAD_VALUE;
-        }
-
-        setOutputDevice(outputDesc->mIoHandle,
-                        getNewOutputDevice(outputDesc->mIoHandle, true /*fromCache*/),
-                        true,
-                        0,
-                        NULL);
-    } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
-        if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
-            sp<AudioInputDescriptor> inputDesc = getInputFromId(patch->sinks[0].id);
-            if (inputDesc == NULL) {
-                ALOGV("releaseAudioPatch() input not found for id %d", patch->sinks[0].id);
-                return BAD_VALUE;
-            }
-            setInputDevice(inputDesc->mIoHandle,
-                           getNewInputDevice(inputDesc->mIoHandle),
-                           true,
-                           NULL);
-        } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
-            audio_patch_handle_t afPatchHandle = patchDesc->mAfPatchHandle;
-            status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
-            ALOGV("releaseAudioPatch() patch panel returned %d patchHandle %d",
-                                                              status, patchDesc->mAfPatchHandle);
-            removeAudioPatch(patchDesc->mHandle);
-            nextAudioPortGeneration();
-            mpClientInterface->onAudioPatchListUpdate();
-        } else {
-            return BAD_VALUE;
-        }
-    } else {
-        return BAD_VALUE;
-    }
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches,
-                                              struct audio_patch *patches,
-                                              unsigned int *generation)
-{
-    if (num_patches == NULL || (*num_patches != 0 && patches == NULL) ||
-            generation == NULL) {
-        return BAD_VALUE;
-    }
-    ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu",
-          *num_patches, patches, mAudioPatches.size());
-    if (patches == NULL) {
-        *num_patches = 0;
-    }
-
-    size_t patchesWritten = 0;
-    size_t patchesMax = *num_patches;
-    for (size_t i = 0;
-            i  < mAudioPatches.size() && patchesWritten < patchesMax; i++) {
-        patches[patchesWritten] = mAudioPatches[i]->mPatch;
-        patches[patchesWritten++].id = mAudioPatches[i]->mHandle;
-        ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d",
-              i, mAudioPatches[i]->mPatch.num_sources, mAudioPatches[i]->mPatch.num_sinks);
-    }
-    *num_patches = mAudioPatches.size();
-
-    *generation = curAudioPortGeneration();
-    ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::setAudioPortConfig(const struct audio_port_config *config)
-{
-    ALOGV("setAudioPortConfig()");
-
-    if (config == NULL) {
-        return BAD_VALUE;
-    }
-    ALOGV("setAudioPortConfig() on port handle %d", config->id);
-    // Only support gain configuration for now
-    if (config->config_mask != AUDIO_PORT_CONFIG_GAIN) {
-        return INVALID_OPERATION;
-    }
-
-    sp<AudioPortConfig> audioPortConfig;
-    if (config->type == AUDIO_PORT_TYPE_MIX) {
-        if (config->role == AUDIO_PORT_ROLE_SOURCE) {
-            sp<AudioOutputDescriptor> outputDesc = getOutputFromId(config->id);
-            if (outputDesc == NULL) {
-                return BAD_VALUE;
-            }
-            ALOG_ASSERT(!outputDesc->isDuplicated(),
-                        "setAudioPortConfig() called on duplicated output %d",
-                        outputDesc->mIoHandle);
-            audioPortConfig = outputDesc;
-        } else if (config->role == AUDIO_PORT_ROLE_SINK) {
-            sp<AudioInputDescriptor> inputDesc = getInputFromId(config->id);
-            if (inputDesc == NULL) {
-                return BAD_VALUE;
-            }
-            audioPortConfig = inputDesc;
-        } else {
-            return BAD_VALUE;
-        }
-    } else if (config->type == AUDIO_PORT_TYPE_DEVICE) {
-        sp<DeviceDescriptor> deviceDesc;
-        if (config->role == AUDIO_PORT_ROLE_SOURCE) {
-            deviceDesc = mAvailableInputDevices.getDeviceFromId(config->id);
-        } else if (config->role == AUDIO_PORT_ROLE_SINK) {
-            deviceDesc = mAvailableOutputDevices.getDeviceFromId(config->id);
-        } else {
-            return BAD_VALUE;
-        }
-        if (deviceDesc == NULL) {
-            return BAD_VALUE;
-        }
-        audioPortConfig = deviceDesc;
-    } else {
-        return BAD_VALUE;
-    }
-
-    struct audio_port_config backupConfig;
-    status_t status = audioPortConfig->applyAudioPortConfig(config, &backupConfig);
-    if (status == NO_ERROR) {
-        struct audio_port_config newConfig;
-        audioPortConfig->toAudioPortConfig(&newConfig, config);
-        status = mpClientInterface->setAudioPortConfig(&newConfig, 0);
-    }
-    if (status != NO_ERROR) {
-        audioPortConfig->applyAudioPortConfig(&backupConfig);
-    }
-
-    return status;
-}
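
Only gain changes are accepted by setAudioPortConfig() above, so a caller-side request reduces to an audio_port_config with AUDIO_PORT_CONFIG_GAIN set and the gain block filled in. The id/role/type and config_mask fields are the ones the function checks; the audio_gain_config member names (index, mode, channel_mask, values, ramp_duration_ms) are recalled from <system/audio.h> and should be treated as assumptions, as are the concrete millibel values.

#include <string.h>
#include <system/audio.h>

// Request a joint (all-channel) gain of -600 mB on an output device port.
static struct audio_port_config exampleGainConfig(audio_port_handle_t devicePortId)
{
    struct audio_port_config config;
    memset(&config, 0, sizeof(config));
    config.id          = devicePortId;            // an available output device port
    config.role        = AUDIO_PORT_ROLE_SINK;    // device sinks are looked up in mAvailableOutputDevices
    config.type        = AUDIO_PORT_TYPE_DEVICE;
    config.config_mask = AUDIO_PORT_CONFIG_GAIN;  // the only mask the manager supports here

    config.gain.index            = 0;             // first gain controller exposed by the port
    config.gain.mode             = AUDIO_GAIN_MODE_JOINT;
    config.gain.channel_mask     = AUDIO_CHANNEL_NONE;
    config.gain.values[0]        = -600;          // millibels, illustrative
    config.gain.ramp_duration_ms = 0;
    return config;  // then passed to setAudioPortConfig(&config)
}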
-
-void AudioPolicyManager::clearAudioPatches(uid_t uid)
-{
-    for (ssize_t i = (ssize_t)mAudioPatches.size() - 1; i >= 0; i--)  {
-        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(i);
-        if (patchDesc->mUid == uid) {
-            releaseAudioPatch(mAudioPatches.keyAt(i), uid);
-        }
-    }
-}
-
-status_t AudioPolicyManager::acquireSoundTriggerSession(audio_session_t *session,
-                                       audio_io_handle_t *ioHandle,
-                                       audio_devices_t *device)
-{
-    *session = (audio_session_t)mpClientInterface->newAudioUniqueId();
-    *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
-    *device = getDeviceAndMixForInputSource(AUDIO_SOURCE_HOTWORD);
-
-    mSoundTriggerSessions.add(*session, *ioHandle);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::releaseSoundTriggerSession(audio_session_t session)
-{
-    ssize_t index = mSoundTriggerSessions.indexOfKey(session);
-    if (index < 0) {
-        ALOGW("acquireSoundTriggerSession() session %d not registered", session);
-        return BAD_VALUE;
-    }
-
-    mSoundTriggerSessions.removeItem(session);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::addAudioPatch(audio_patch_handle_t handle,
-                                           const sp<AudioPatch>& patch)
-{
-    ssize_t index = mAudioPatches.indexOfKey(handle);
-
-    if (index >= 0) {
-        ALOGW("addAudioPatch() patch %d already in", handle);
-        return ALREADY_EXISTS;
-    }
-    mAudioPatches.add(handle, patch);
-    ALOGV("addAudioPatch() handle %d af handle %d num_sources %d num_sinks %d source handle %d"
-            "sink handle %d",
-          handle, patch->mAfPatchHandle, patch->mPatch.num_sources, patch->mPatch.num_sinks,
-          patch->mPatch.sources[0].id, patch->mPatch.sinks[0].id);
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::removeAudioPatch(audio_patch_handle_t handle)
-{
-    ssize_t index = mAudioPatches.indexOfKey(handle);
-
-    if (index < 0) {
-        ALOGW("removeAudioPatch() patch %d not in", handle);
-        return ALREADY_EXISTS;
-    }
-    ALOGV("removeAudioPatch() handle %d af handle %d", handle,
-                      mAudioPatches.valueAt(index)->mAfPatchHandle);
-    mAudioPatches.removeItemsAt(index);
-    return NO_ERROR;
-}
-
-// ----------------------------------------------------------------------------
-// AudioPolicyManager
-// ----------------------------------------------------------------------------
-
-uint32_t AudioPolicyManager::nextUniqueId()
-{
-    return android_atomic_inc(&mNextUniqueId);
-}
-
-uint32_t AudioPolicyManager::nextAudioPortGeneration()
-{
-    return android_atomic_inc(&mAudioPortGeneration);
-}
-
-AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
-    :
-#ifdef AUDIO_POLICY_TEST
-    Thread(false),
-#endif //AUDIO_POLICY_TEST
-    mPrimaryOutput((audio_io_handle_t)0),
-    mPhoneState(AUDIO_MODE_NORMAL),
-    mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
-    mTotalEffectsCpuLoad(0), mTotalEffectsMemory(0),
-    mA2dpSuspended(false),
-    mSpeakerDrcEnabled(false), mNextUniqueId(1),
-    mAudioPortGeneration(1),
-    mBeaconMuteRefCount(0),
-    mBeaconPlayingRefCount(0),
-    mBeaconMuted(false)
-{
-    mUidCached = getuid();
-    mpClientInterface = clientInterface;
-
-    for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
-        mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
-    }
-
-    mDefaultOutputDevice = new DeviceDescriptor(String8(""), AUDIO_DEVICE_OUT_SPEAKER);
-    if (loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE) != NO_ERROR) {
-        if (loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE) != NO_ERROR) {
-            ALOGE("could not load audio policy configuration file, setting defaults");
-            defaultAudioPolicyConfig();
-        }
-    }
-    // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
-
-    // must be done after reading the policy
-    initializeVolumeCurves();
-
-    // open all output streams needed to access attached devices
-    audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
-    audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
-    for (size_t i = 0; i < mHwModules.size(); i++) {
-        mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->mName);
-        if (mHwModules[i]->mHandle == 0) {
-            ALOGW("could not open HW module %s", mHwModules[i]->mName);
-            continue;
-        }
-        // open all output streams needed to access attached devices
-        // except for direct output streams that are only opened when they are actually
-        // required by an app.
-        // This also validates mAvailableOutputDevices list
-        for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
-        {
-            const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
-
-            if (outProfile->mSupportedDevices.isEmpty()) {
-                ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName);
-                continue;
-            }
-
-            if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
-                continue;
-            }
-            audio_devices_t profileType = outProfile->mSupportedDevices.types();
-            if ((profileType & mDefaultOutputDevice->mDeviceType) != AUDIO_DEVICE_NONE) {
-                profileType = mDefaultOutputDevice->mDeviceType;
-            } else {
-                // choose the first device present in mSupportedDevices that is also
-                // part of outputDeviceTypes
-                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                    profileType = outProfile->mSupportedDevices[k]->mDeviceType;
-                    if ((profileType & outputDeviceTypes) != 0) {
-                        break;
-                    }
-                }
-            }
-            if ((profileType & outputDeviceTypes) == 0) {
-                continue;
-            }
-            sp<AudioOutputDescriptor> outputDesc = new AudioOutputDescriptor(outProfile);
-
-            outputDesc->mDevice = profileType;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = outputDesc->mSamplingRate;
-            config.channel_mask = outputDesc->mChannelMask;
-            config.format = outputDesc->mFormat;
-            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openOutput(outProfile->mModule->mHandle,
-                                                            &output,
-                                                            &config,
-                                                            &outputDesc->mDevice,
-                                                            String8(""),
-                                                            &outputDesc->mLatency,
-                                                            outputDesc->mFlags);
-
-            if (status != NO_ERROR) {
-                ALOGW("Cannot open output stream for device %08x on hw module %s",
-                      outputDesc->mDevice,
-                      mHwModules[i]->mName);
-            } else {
-                outputDesc->mSamplingRate = config.sample_rate;
-                outputDesc->mChannelMask = config.channel_mask;
-                outputDesc->mFormat = config.format;
-
-                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
-                    audio_devices_t type = outProfile->mSupportedDevices[k]->mDeviceType;
-                    ssize_t index =
-                            mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[k]);
-                    // give a valid ID to an attached device once confirmed it is reachable
-                    if ((index >= 0) && (mAvailableOutputDevices[index]->mId == 0)) {
-                        mAvailableOutputDevices[index]->mId = nextUniqueId();
-                        mAvailableOutputDevices[index]->mModule = mHwModules[i];
-                    }
-                }
-                if (mPrimaryOutput == 0 &&
-                        outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
-                    mPrimaryOutput = output;
-                }
-                addOutput(output, outputDesc);
-                setOutputDevice(output,
-                                outputDesc->mDevice,
-                                true);
-            }
-        }
-        // open input streams needed to access attached devices to validate
-        // mAvailableInputDevices list
-        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
-        {
-            const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
-
-            if (inProfile->mSupportedDevices.isEmpty()) {
-                ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName);
-                continue;
-            }
-            // choose the first device present in mSupportedDevices that is also
-            // part of inputDeviceTypes
-            audio_devices_t profileType = AUDIO_DEVICE_NONE;
-            for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                profileType = inProfile->mSupportedDevices[k]->mDeviceType;
-                if (profileType & inputDeviceTypes) {
-                    break;
-                }
-            }
-            if ((profileType & inputDeviceTypes) == 0) {
-                continue;
-            }
-            sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
-
-            inputDesc->mInputSource = AUDIO_SOURCE_MIC;
-            inputDesc->mDevice = profileType;
-
-            // find the address
-            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
-            //   the inputs vector must be of size 1, but we don't want to crash here
-            String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
-                    : String8("");
-            ALOGV("  for input device 0x%x using address %s", profileType, address.string());
-            ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
-
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = inputDesc->mSamplingRate;
-            config.channel_mask = inputDesc->mChannelMask;
-            config.format = inputDesc->mFormat;
-            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openInput(inProfile->mModule->mHandle,
-                                                           &input,
-                                                           &config,
-                                                           &inputDesc->mDevice,
-                                                           address,
-                                                           AUDIO_SOURCE_MIC,
-                                                           AUDIO_INPUT_FLAG_NONE);
-
-            if (status == NO_ERROR) {
-                for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
-                    audio_devices_t type = inProfile->mSupportedDevices[k]->mDeviceType;
-                    ssize_t index =
-                            mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[k]);
-                    // give a valid ID to an attached device once confirmed it is reachable
-                    if ((index >= 0) && (mAvailableInputDevices[index]->mId == 0)) {
-                        mAvailableInputDevices[index]->mId = nextUniqueId();
-                        mAvailableInputDevices[index]->mModule = mHwModules[i];
-                    }
-                }
-                mpClientInterface->closeInput(input);
-            } else {
-                ALOGW("Cannot open input stream for device %08x on hw module %s",
-                      inputDesc->mDevice,
-                      mHwModules[i]->mName);
-            }
-        }
-    }
-    // make sure all attached devices have been allocated a unique ID
-    for (size_t i = 0; i  < mAvailableOutputDevices.size();) {
-        if (mAvailableOutputDevices[i]->mId == 0) {
-            ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->mDeviceType);
-            mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
-            continue;
-        }
-        i++;
-    }
-    for (size_t i = 0; i  < mAvailableInputDevices.size();) {
-        if (mAvailableInputDevices[i]->mId == 0) {
-            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->mDeviceType);
-            mAvailableInputDevices.remove(mAvailableInputDevices[i]);
-            continue;
-        }
-        i++;
-    }
-    // make sure default device is reachable
-    if (mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
-        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->mDeviceType);
-    }
-
-    ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output");
-
-    updateDevicesAndOutputs();
-
-#ifdef AUDIO_POLICY_TEST
-    if (mPrimaryOutput != 0) {
-        AudioParameter outputCmd = AudioParameter();
-        outputCmd.addInt(String8("set_id"), 0);
-        mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString());
-
-        mTestDevice = AUDIO_DEVICE_OUT_SPEAKER;
-        mTestSamplingRate = 44100;
-        mTestFormat = AUDIO_FORMAT_PCM_16_BIT;
-        mTestChannels =  AUDIO_CHANNEL_OUT_STEREO;
-        mTestLatencyMs = 0;
-        mCurOutput = 0;
-        mDirectOutput = false;
-        for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
-            mTestOutputs[i] = 0;
-        }
-
-        const size_t SIZE = 256;
-        char buffer[SIZE];
-        snprintf(buffer, SIZE, "AudioPolicyManagerTest");
-        run(buffer, ANDROID_PRIORITY_AUDIO);
-    }
-#endif //AUDIO_POLICY_TEST
-}
-
-AudioPolicyManager::~AudioPolicyManager()
-{
-#ifdef AUDIO_POLICY_TEST
-    exit();
-#endif //AUDIO_POLICY_TEST
-   for (size_t i = 0; i < mOutputs.size(); i++) {
-        mpClientInterface->closeOutput(mOutputs.keyAt(i));
-   }
-   for (size_t i = 0; i < mInputs.size(); i++) {
-        mpClientInterface->closeInput(mInputs.keyAt(i));
-   }
-   mAvailableOutputDevices.clear();
-   mAvailableInputDevices.clear();
-   mOutputs.clear();
-   mInputs.clear();
-   mHwModules.clear();
-}
-
-status_t AudioPolicyManager::initCheck()
-{
-    return (mPrimaryOutput == 0) ? NO_INIT : NO_ERROR;
-}
-
-#ifdef AUDIO_POLICY_TEST
-bool AudioPolicyManager::threadLoop()
-{
-    ALOGV("entering threadLoop()");
-    while (!exitPending())
-    {
-        String8 command;
-        int valueInt;
-        String8 value;
-
-        Mutex::Autolock _l(mLock);
-        mWaitWorkCV.waitRelative(mLock, milliseconds(50));
-
-        command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
-        AudioParameter param = AudioParameter(command);
-
-        if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR &&
-            valueInt != 0) {
-            ALOGV("Test command %s received", command.string());
-            String8 target;
-            if (param.get(String8("target"), target) != NO_ERROR) {
-                target = "Manager";
-            }
-            if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_output"));
-                mCurOutput = valueInt;
-            }
-            if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_direct"));
-                if (value == "false") {
-                    mDirectOutput = false;
-                } else if (value == "true") {
-                    mDirectOutput = true;
-                }
-            }
-            if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_input"));
-                mTestInput = valueInt;
-            }
-
-            if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_format"));
-                int format = AUDIO_FORMAT_INVALID;
-                if (value == "PCM 16 bits") {
-                    format = AUDIO_FORMAT_PCM_16_BIT;
-                } else if (value == "PCM 8 bits") {
-                    format = AUDIO_FORMAT_PCM_8_BIT;
-                } else if (value == "Compressed MP3") {
-                    format = AUDIO_FORMAT_MP3;
-                }
-                if (format != AUDIO_FORMAT_INVALID) {
-                    if (target == "Manager") {
-                        mTestFormat = format;
-                    } else if (mTestOutputs[mCurOutput] != 0) {
-                        AudioParameter outputParam = AudioParameter();
-                        outputParam.addInt(String8("format"), format);
-                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
-                    }
-                }
-            }
-            if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_channels"));
-                int channels = 0;
-
-                if (value == "Channels Stereo") {
-                    channels =  AUDIO_CHANNEL_OUT_STEREO;
-                } else if (value == "Channels Mono") {
-                    channels =  AUDIO_CHANNEL_OUT_MONO;
-                }
-                if (channels != 0) {
-                    if (target == "Manager") {
-                        mTestChannels = channels;
-                    } else if (mTestOutputs[mCurOutput] != 0) {
-                        AudioParameter outputParam = AudioParameter();
-                        outputParam.addInt(String8("channels"), channels);
-                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
-                    }
-                }
-            }
-            if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_sampleRate"));
-                if (valueInt >= 0 && valueInt <= 96000) {
-                    int samplingRate = valueInt;
-                    if (target == "Manager") {
-                        mTestSamplingRate = samplingRate;
-                    } else if (mTestOutputs[mCurOutput] != 0) {
-                        AudioParameter outputParam = AudioParameter();
-                        outputParam.addInt(String8("sampling_rate"), samplingRate);
-                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
-                    }
-                }
-            }
-
-            if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) {
-                param.remove(String8("test_cmd_policy_reopen"));
-
-                sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput);
-                mpClientInterface->closeOutput(mPrimaryOutput);
-
-                audio_module_handle_t moduleHandle = outputDesc->mModule->mHandle;
-
-                mOutputs.removeItem(mPrimaryOutput);
-
-                outputDesc = new AudioOutputDescriptor(NULL);
-                outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER;
-                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-                config.sample_rate = outputDesc->mSamplingRate;
-                config.channel_mask = outputDesc->mChannelMask;
-                config.format = outputDesc->mFormat;
-                status_t status = mpClientInterface->openOutput(moduleHandle,
-                                                                &mPrimaryOutput,
-                                                                &config,
-                                                                &outputDesc->mDevice,
-                                                                String8(""),
-                                                                &outputDesc->mLatency,
-                                                                outputDesc->mFlags);
-                if (status != NO_ERROR) {
-                    ALOGE("Failed to reopen hardware output stream, "
-                        "samplingRate: %d, format %d, channels %d",
-                        outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannelMask);
-                } else {
-                    outputDesc->mSamplingRate = config.sample_rate;
-                    outputDesc->mChannelMask = config.channel_mask;
-                    outputDesc->mFormat = config.format;
-                    AudioParameter outputCmd = AudioParameter();
-                    outputCmd.addInt(String8("set_id"), 0);
-                    mpClientInterface->setParameters(mPrimaryOutput, outputCmd.toString());
-                    addOutput(mPrimaryOutput, outputDesc);
-                }
-            }
-
-
-            mpClientInterface->setParameters(0, String8("test_cmd_policy="));
-        }
-    }
-    return false;
-}
-
-void AudioPolicyManager::exit()
-{
-    {
-        AutoMutex _l(mLock);
-        requestExit();
-        mWaitWorkCV.signal();
-    }
-    requestExitAndWait();
-}
-
-int AudioPolicyManager::testOutputIndex(audio_io_handle_t output)
-{
-    for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
-        if (output == mTestOutputs[i]) return i;
-    }
-    return 0;
-}
-#endif //AUDIO_POLICY_TEST
-
-// ---
-
-void AudioPolicyManager::addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc)
-{
-    outputDesc->mIoHandle = output;
-    outputDesc->mId = nextUniqueId();
-    mOutputs.add(output, outputDesc);
-    nextAudioPortGeneration();
-}
-
-void AudioPolicyManager::addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc)
-{
-    inputDesc->mIoHandle = input;
-    inputDesc->mId = nextUniqueId();
-    mInputs.add(input, inputDesc);
-    nextAudioPortGeneration();
-}
-
-void AudioPolicyManager::findIoHandlesByAddress(sp<AudioOutputDescriptor> desc /*in*/,
-        const audio_devices_t device /*in*/,
-        const String8 address /*in*/,
-        SortedVector<audio_io_handle_t>& outputs /*out*/) {
-    sp<DeviceDescriptor> devDesc =
-        desc->mProfile->mSupportedDevices.getDevice(device, address);
-    if (devDesc != 0) {
-        ALOGV("findIoHandlesByAddress(): adding opened output %d on same address %s",
-              desc->mIoHandle, address.string());
-        outputs.add(desc->mIoHandle);
-    }
-}
-
-status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
-                                                       audio_policy_dev_state_t state,
-                                                       SortedVector<audio_io_handle_t>& outputs,
-                                                       const String8 address)
-{
-    audio_devices_t device = devDesc->mDeviceType;
-    sp<AudioOutputDescriptor> desc;
-    // erase all current sample rates, formats and channel masks
-    devDesc->clearCapabilities();
-
-    if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
-        // first list already open outputs that can be routed to this device
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated() && (desc->mProfile->mSupportedDevices.types() & device)) {
-                if (!deviceDistinguishesOnAddress(device)) {
-                    ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
-                    outputs.add(mOutputs.keyAt(i));
-                } else {
-                    ALOGV("  checking address match due to device 0x%x", device);
-                    findIoHandlesByAddress(desc, device, address, outputs);
-                }
-            }
-        }
-        // then look for output profiles that can be routed to this device
-        SortedVector< sp<IOProfile> > profiles;
-        for (size_t i = 0; i < mHwModules.size(); i++)
-        {
-            if (mHwModules[i]->mHandle == 0) {
-                continue;
-            }
-            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
-            {
-                sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-                if (profile->mSupportedDevices.types() & device) {
-                    if (!deviceDistinguishesOnAddress(device) ||
-                            address == profile->mSupportedDevices[0]->mAddress) {
-                        profiles.add(profile);
-                        ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
-                    }
-                }
-            }
-        }
-
-        ALOGV("  found %zu profiles, %zu outputs", profiles.size(), outputs.size());
-
-        if (profiles.isEmpty() && outputs.isEmpty()) {
-            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
-            return BAD_VALUE;
-        }
-
-        // open outputs for matching profiles if needed. Direct outputs are also opened to
-        // query for dynamic parameters and will be closed later by setDeviceConnectionState()
-        for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
-            sp<IOProfile> profile = profiles[profile_index];
-
-            // nothing to do if one output is already opened for this profile
-            size_t j;
-            for (j = 0; j < outputs.size(); j++) {
-                desc = mOutputs.valueFor(outputs.itemAt(j));
-                if (!desc->isDuplicated() && desc->mProfile == profile) {
-                    // matching profile: save the sample rates, format and channel masks supported
-                    // by the profile in our device descriptor
-                    devDesc->importAudioPort(profile);
-                    break;
-                }
-            }
-            if (j != outputs.size()) {
-                continue;
-            }
-
-            ALOGV("opening output for device %08x with params %s profile %p",
-                                                      device, address.string(), profile.get());
-            desc = new AudioOutputDescriptor(profile);
-            desc->mDevice = device;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = desc->mSamplingRate;
-            config.channel_mask = desc->mChannelMask;
-            config.format = desc->mFormat;
-            config.offload_info.sample_rate = desc->mSamplingRate;
-            config.offload_info.channel_mask = desc->mChannelMask;
-            config.offload_info.format = desc->mFormat;
-            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openOutput(profile->mModule->mHandle,
-                                                            &output,
-                                                            &config,
-                                                            &desc->mDevice,
-                                                            address,
-                                                            &desc->mLatency,
-                                                            desc->mFlags);
-            if (status == NO_ERROR) {
-                desc->mSamplingRate = config.sample_rate;
-                desc->mChannelMask = config.channel_mask;
-                desc->mFormat = config.format;
-
-                // Here is where the out_set_parameters() for card & device gets called
-                if (!address.isEmpty()) {
-                    char *param = audio_device_address_to_parameter(device, address);
-                    mpClientInterface->setParameters(output, String8(param));
-                    free(param);
-                }
-
-                // Here is where we step through and resolve any "dynamic" fields
-                String8 reply;
-                char *value;
-                if (profile->mSamplingRates[0] == 0) {
-                    reply = mpClientInterface->getParameters(output,
-                                            String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
-                    ALOGV("checkOutputsForDevice() supported sampling rates %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadSamplingRates(value + 1);
-                    }
-                }
-                if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                    reply = mpClientInterface->getParameters(output,
-                                                   String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
-                    ALOGV("checkOutputsForDevice() supported formats %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadFormats(value + 1);
-                    }
-                }
-                if (profile->mChannelMasks[0] == 0) {
-                    reply = mpClientInterface->getParameters(output,
-                                                  String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
-                    ALOGV("checkOutputsForDevice() supported channel masks %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadOutChannels(value + 1);
-                    }
-                }
-                if (((profile->mSamplingRates[0] == 0) &&
-                         (profile->mSamplingRates.size() < 2)) ||
-                     ((profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) &&
-                         (profile->mFormats.size() < 2)) ||
-                     ((profile->mChannelMasks[0] == 0) &&
-                         (profile->mChannelMasks.size() < 2))) {
-                    ALOGW("checkOutputsForDevice() missing param");
-                    mpClientInterface->closeOutput(output);
-                    output = AUDIO_IO_HANDLE_NONE;
-                } else if (profile->mSamplingRates[0] == 0 || profile->mFormats[0] == 0 ||
-                            profile->mChannelMasks[0] == 0) {
-                    mpClientInterface->closeOutput(output);
-                    config.sample_rate = profile->pickSamplingRate();
-                    config.channel_mask = profile->pickChannelMask();
-                    config.format = profile->pickFormat();
-                    config.offload_info.sample_rate = config.sample_rate;
-                    config.offload_info.channel_mask = config.channel_mask;
-                    config.offload_info.format = config.format;
-                    status = mpClientInterface->openOutput(profile->mModule->mHandle,
-                                                           &output,
-                                                           &config,
-                                                           &desc->mDevice,
-                                                           address,
-                                                           &desc->mLatency,
-                                                           desc->mFlags);
-                    if (status == NO_ERROR) {
-                        desc->mSamplingRate = config.sample_rate;
-                        desc->mChannelMask = config.channel_mask;
-                        desc->mFormat = config.format;
-                    } else {
-                        output = AUDIO_IO_HANDLE_NONE;
-                    }
-                }
-
-                if (output != AUDIO_IO_HANDLE_NONE) {
-                    addOutput(output, desc);
-                    if (deviceDistinguishesOnAddress(device) && address != "0") {
-                        ssize_t index = mPolicyMixes.indexOfKey(address);
-                        if (index >= 0) {
-                            mPolicyMixes[index]->mOutput = desc;
-                            desc->mPolicyMix = &mPolicyMixes[index]->mMix;
-                        } else {
-                            ALOGE("checkOutputsForDevice() cannot find policy for address %s",
-                                  address.string());
-                        }
-                    } else if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) {
-                        // no duplicated output for direct outputs and
-                        // outputs used by dynamic policy mixes
-                        audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE;
-
-                        // set initial stream volume for device
-                        applyStreamVolumes(output, device, 0, true);
-
-                        //TODO: configure audio effect output stage here
-
-                        // open a duplicating output thread for the new output and the primary output
-                        duplicatedOutput = mpClientInterface->openDuplicateOutput(output,
-                                                                                  mPrimaryOutput);
-                        if (duplicatedOutput != AUDIO_IO_HANDLE_NONE) {
-                            // add duplicated output descriptor
-                            sp<AudioOutputDescriptor> dupOutputDesc =
-                                    new AudioOutputDescriptor(NULL);
-                            dupOutputDesc->mOutput1 = mOutputs.valueFor(mPrimaryOutput);
-                            dupOutputDesc->mOutput2 = mOutputs.valueFor(output);
-                            dupOutputDesc->mSamplingRate = desc->mSamplingRate;
-                            dupOutputDesc->mFormat = desc->mFormat;
-                            dupOutputDesc->mChannelMask = desc->mChannelMask;
-                            dupOutputDesc->mLatency = desc->mLatency;
-                            addOutput(duplicatedOutput, dupOutputDesc);
-                            applyStreamVolumes(duplicatedOutput, device, 0, true);
-                        } else {
-                            ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
-                                    mPrimaryOutput, output);
-                            mpClientInterface->closeOutput(output);
-                            mOutputs.removeItem(output);
-                            nextAudioPortGeneration();
-                            output = AUDIO_IO_HANDLE_NONE;
-                        }
-                    }
-                }
-            } else {
-                output = AUDIO_IO_HANDLE_NONE;
-            }
-            if (output == AUDIO_IO_HANDLE_NONE) {
-                ALOGW("checkOutputsForDevice() could not open output for device %x", device);
-                profiles.removeAt(profile_index);
-                profile_index--;
-            } else {
-                outputs.add(output);
-                devDesc->importAudioPort(profile);
-
-                if (deviceDistinguishesOnAddress(device)) {
-                    ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)",
-                            device, address.string());
-                    setOutputDevice(output, device, true/*force*/, 0/*delay*/,
-                            NULL/*patch handle*/, address.string());
-                }
-                ALOGV("checkOutputsForDevice(): adding output %d", output);
-            }
-        }
-
-        if (profiles.isEmpty()) {
-            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
-            return BAD_VALUE;
-        }
-    } else { // Disconnect
-        // check if one opened output is not needed any more after disconnecting one device
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            desc = mOutputs.valueAt(i);
-            if (!desc->isDuplicated()) {
-                // exact match on device
-                if (deviceDistinguishesOnAddress(device) &&
-                        (desc->mProfile->mSupportedDevices.types() == device)) {
-                    findIoHandlesByAddress(desc, device, address, outputs);
-                } else if (!(desc->mProfile->mSupportedDevices.types()
-                        & mAvailableOutputDevices.types())) {
-                    ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
-                            mOutputs.keyAt(i));
-                    outputs.add(mOutputs.keyAt(i));
-                }
-            }
-        }
-        // Clear any profiles associated with the disconnected device.
-        for (size_t i = 0; i < mHwModules.size(); i++)
-        {
-            if (mHwModules[i]->mHandle == 0) {
-                continue;
-            }
-            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
-            {
-                sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
-                if (profile->mSupportedDevices.types() & device) {
-                    ALOGV("checkOutputsForDevice(): "
-                            "clearing direct output profile %zu on module %zu", j, i);
-                    if (profile->mSamplingRates[0] == 0) {
-                        profile->mSamplingRates.clear();
-                        profile->mSamplingRates.add(0);
-                    }
-                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                        profile->mFormats.clear();
-                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
-                    }
-                    if (profile->mChannelMasks[0] == 0) {
-                        profile->mChannelMasks.clear();
-                        profile->mChannelMasks.add(0);
-                    }
-                }
-            }
-        }
-    }
-    return NO_ERROR;
-}
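
The dynamic-parameter resolution above queries the newly opened stream for its supported sampling rates, formats and channel masks and parses replies of the form "key=value1|value2|...". A minimal standalone sketch of that parsing pattern (the helper name parseIntList is illustrative and not part of AudioPolicyManager):

    // Sketch only: parses a "key=v1|v2|..." reply such as
    // "sup_sampling_rates=44100|48000" into a list of integers.
    #include <cstdio>
    #include <cstdlib>
    #include <sstream>
    #include <string>
    #include <vector>

    static std::vector<int> parseIntList(const std::string &reply) {
        std::vector<int> values;
        // Skip everything up to and including '=' (the code above uses
        // strpbrk(reply, "=") for the same purpose).
        size_t eq = reply.find('=');
        if (eq == std::string::npos) {
            return values;
        }
        std::stringstream ss(reply.substr(eq + 1));
        std::string token;
        while (std::getline(ss, token, '|')) {   // values are '|'-separated
            values.push_back(atoi(token.c_str()));
        }
        return values;
    }

    int main() {
        for (int rate : parseIntList("sup_sampling_rates=44100|48000")) {
            printf("supported sampling rate: %d\n", rate);
        }
        return 0;
    }
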
-
-status_t AudioPolicyManager::checkInputsForDevice(audio_devices_t device,
-                                                      audio_policy_dev_state_t state,
-                                                      SortedVector<audio_io_handle_t>& inputs,
-                                                      const String8 address)
-{
-    sp<AudioInputDescriptor> desc;
-    if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
-        // first list already open inputs that can be routed to this device
-        for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
-            desc = mInputs.valueAt(input_index);
-            if (desc->mProfile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
-                ALOGV("checkInputsForDevice(): adding opened input %d", mInputs.keyAt(input_index));
-                inputs.add(mInputs.keyAt(input_index));
-            }
-        }
-
-        // then look for input profiles that can be routed to this device
-        SortedVector< sp<IOProfile> > profiles;
-        for (size_t module_idx = 0; module_idx < mHwModules.size(); module_idx++)
-        {
-            if (mHwModules[module_idx]->mHandle == 0) {
-                continue;
-            }
-            for (size_t profile_index = 0;
-                 profile_index < mHwModules[module_idx]->mInputProfiles.size();
-                 profile_index++)
-            {
-                sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
-
-                if (profile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
-                    if (!deviceDistinguishesOnAddress(device) ||
-                            address == profile->mSupportedDevices[0]->mAddress) {
-                        profiles.add(profile);
-                        ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
-                              profile_index, module_idx);
-                    }
-                }
-            }
-        }
-
-        if (profiles.isEmpty() && inputs.isEmpty()) {
-            ALOGW("checkInputsForDevice(): No input available for device 0x%X", device);
-            return BAD_VALUE;
-        }
-
-        // open inputs for matching profiles if needed. Direct inputs are also opened to
-        // query for dynamic parameters and will be closed later by setDeviceConnectionState()
-        for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
-
-            sp<IOProfile> profile = profiles[profile_index];
-            // nothing to do if one input is already opened for this profile
-            size_t input_index;
-            for (input_index = 0; input_index < mInputs.size(); input_index++) {
-                desc = mInputs.valueAt(input_index);
-                if (desc->mProfile == profile) {
-                    break;
-                }
-            }
-            if (input_index != mInputs.size()) {
-                continue;
-            }
-
-            ALOGV("opening input for device 0x%X with params %s", device, address.string());
-            desc = new AudioInputDescriptor(profile);
-            desc->mDevice = device;
-            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
-            config.sample_rate = desc->mSamplingRate;
-            config.channel_mask = desc->mChannelMask;
-            config.format = desc->mFormat;
-            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
-            status_t status = mpClientInterface->openInput(profile->mModule->mHandle,
-                                                           &input,
-                                                           &config,
-                                                           &desc->mDevice,
-                                                           address,
-                                                           AUDIO_SOURCE_MIC,
-                                                           AUDIO_INPUT_FLAG_NONE /*FIXME*/);
-
-            if (status == NO_ERROR) {
-                desc->mSamplingRate = config.sample_rate;
-                desc->mChannelMask = config.channel_mask;
-                desc->mFormat = config.format;
-
-                if (!address.isEmpty()) {
-                    char *param = audio_device_address_to_parameter(device, address);
-                    mpClientInterface->setParameters(input, String8(param));
-                    free(param);
-                }
-
-                // Here is where we step through and resolve any "dynamic" fields
-                String8 reply;
-                char *value;
-                if (profile->mSamplingRates[0] == 0) {
-                    reply = mpClientInterface->getParameters(input,
-                                            String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
-                    ALOGV("checkInputsForDevice() direct input sup sampling rates %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadSamplingRates(value + 1);
-                    }
-                }
-                if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                    reply = mpClientInterface->getParameters(input,
-                                                   String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
-                    ALOGV("checkInputsForDevice() direct input sup formats %s", reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadFormats(value + 1);
-                    }
-                }
-                if (profile->mChannelMasks[0] == 0) {
-                    reply = mpClientInterface->getParameters(input,
-                                                  String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
-                    ALOGV("checkInputsForDevice() direct input sup channel masks %s",
-                              reply.string());
-                    value = strpbrk((char *)reply.string(), "=");
-                    if (value != NULL) {
-                        profile->loadInChannels(value + 1);
-                    }
-                }
-                if (((profile->mSamplingRates[0] == 0) && (profile->mSamplingRates.size() < 2)) ||
-                     ((profile->mFormats[0] == 0) && (profile->mFormats.size() < 2)) ||
-                     ((profile->mChannelMasks[0] == 0) && (profile->mChannelMasks.size() < 2))) {
-                    ALOGW("checkInputsForDevice() direct input missing param");
-                    mpClientInterface->closeInput(input);
-                    input = AUDIO_IO_HANDLE_NONE;
-                }
-
-                if (input != AUDIO_IO_HANDLE_NONE) {
-                    addInput(input, desc);
-                }
-            } // endif status == NO_ERROR
-
-            if (input == AUDIO_IO_HANDLE_NONE) {
-                ALOGW("checkInputsForDevice() could not open input for device 0x%X", device);
-                profiles.removeAt(profile_index);
-                profile_index--;
-            } else {
-                inputs.add(input);
-                ALOGV("checkInputsForDevice(): adding input %d", input);
-            }
-        } // end scan profiles
-
-        if (profiles.isEmpty()) {
-            ALOGW("checkInputsForDevice(): No input available for device 0x%X", device);
-            return BAD_VALUE;
-        }
-    } else {
-        // Disconnect
-        // check if one opened input is not needed any more after disconnecting one device
-        for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
-            desc = mInputs.valueAt(input_index);
-            if (!(desc->mProfile->mSupportedDevices.types() & mAvailableInputDevices.types() &
-                    ~AUDIO_DEVICE_BIT_IN)) {
-                ALOGV("checkInputsForDevice(): disconnecting adding input %d",
-                      mInputs.keyAt(input_index));
-                inputs.add(mInputs.keyAt(input_index));
-            }
-        }
-        // Clear any profiles associated with the disconnected device.
-        for (size_t module_index = 0; module_index < mHwModules.size(); module_index++) {
-            if (mHwModules[module_index]->mHandle == 0) {
-                continue;
-            }
-            for (size_t profile_index = 0;
-                 profile_index < mHwModules[module_index]->mInputProfiles.size();
-                 profile_index++) {
-                sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
-                if (profile->mSupportedDevices.types() & device & ~AUDIO_DEVICE_BIT_IN) {
-                    ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
-                          profile_index, module_index);
-                    if (profile->mSamplingRates[0] == 0) {
-                        profile->mSamplingRates.clear();
-                        profile->mSamplingRates.add(0);
-                    }
-                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
-                        profile->mFormats.clear();
-                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
-                    }
-                    if (profile->mChannelMasks[0] == 0) {
-                        profile->mChannelMasks.clear();
-                        profile->mChannelMasks.add(0);
-                    }
-                }
-            }
-        }
-    } // end disconnect
-
-    return NO_ERROR;
-}
-
-
-void AudioPolicyManager::closeOutput(audio_io_handle_t output)
-{
-    ALOGV("closeOutput(%d)", output);
-
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    if (outputDesc == NULL) {
-        ALOGW("closeOutput() unknown output %d", output);
-        return;
-    }
-
-    for (size_t i = 0; i < mPolicyMixes.size(); i++) {
-        if (mPolicyMixes[i]->mOutput == outputDesc) {
-            mPolicyMixes[i]->mOutput.clear();
-        }
-    }
-
-    // look for duplicated outputs connected to the output being removed.
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        sp<AudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i);
-        if (dupOutputDesc->isDuplicated() &&
-                (dupOutputDesc->mOutput1 == outputDesc ||
-                dupOutputDesc->mOutput2 == outputDesc)) {
-            sp<AudioOutputDescriptor> outputDesc2;
-            if (dupOutputDesc->mOutput1 == outputDesc) {
-                outputDesc2 = dupOutputDesc->mOutput2;
-            } else {
-                outputDesc2 = dupOutputDesc->mOutput1;
-            }
-            // As all active tracks on duplicated output will be deleted,
-            // and as they were also referenced on the other output, the reference
-            // count for their stream type must be adjusted accordingly on
-            // the other output.
-            for (int j = 0; j < AUDIO_STREAM_CNT; j++) {
-                int refCount = dupOutputDesc->mRefCount[j];
-                outputDesc2->changeRefCount((audio_stream_type_t)j,-refCount);
-            }
-            audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
-            ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
-
-            mpClientInterface->closeOutput(duplicatedOutput);
-            mOutputs.removeItem(duplicatedOutput);
-        }
-    }
-
-    nextAudioPortGeneration();
-
-    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
-    if (index >= 0) {
-        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
-        mAudioPatches.removeItemsAt(index);
-        mpClientInterface->onAudioPatchListUpdate();
-    }
-
-    AudioParameter param;
-    param.add(String8("closing"), String8("true"));
-    mpClientInterface->setParameters(output, param.toString());
-
-    mpClientInterface->closeOutput(output);
-    mOutputs.removeItem(output);
-    mPreviousOutputs = mOutputs;
-}
-
-void AudioPolicyManager::closeInput(audio_io_handle_t input)
-{
-    ALOGV("closeInput(%d)", input);
-
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
-    if (inputDesc == NULL) {
-        ALOGW("closeInput() unknown input %d", input);
-        return;
-    }
-
-    nextAudioPortGeneration();
-
-    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
-    if (index >= 0) {
-        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
-        mAudioPatches.removeItemsAt(index);
-        mpClientInterface->onAudioPatchListUpdate();
-    }
-
-    mpClientInterface->closeInput(input);
-    mInputs.removeItem(input);
-}
-
-SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(audio_devices_t device,
-                        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > openOutputs)
-{
-    SortedVector<audio_io_handle_t> outputs;
-
-    ALOGVV("getOutputsForDevice() device %04x", device);
-    for (size_t i = 0; i < openOutputs.size(); i++) {
-        ALOGVV("output %zu isDuplicated=%d device=%04x",
-                i, openOutputs.valueAt(i)->isDuplicated(), openOutputs.valueAt(i)->supportedDevices());
-        if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) {
-            ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i));
-            outputs.add(openOutputs.keyAt(i));
-        }
-    }
-    return outputs;
-}
-
-bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                   SortedVector<audio_io_handle_t>& outputs2)
-{
-    if (outputs1.size() != outputs2.size()) {
-        return false;
-    }
-    for (size_t i = 0; i < outputs1.size(); i++) {
-        if (outputs1[i] != outputs2[i]) {
-            return false;
-        }
-    }
-    return true;
-}
-
-void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
-{
-    audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/);
-    audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/);
-    SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mPreviousOutputs);
-    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(newDevice, mOutputs);
-
-    // also take into account external policy-related changes: add all outputs which are
-    // associated with policies in the "before" and "after" output vectors
-    ALOGVV("checkOutputForStrategy(): policy related outputs");
-    for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
-        const sp<AudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
-        if (desc != 0 && desc->mPolicyMix != NULL) {
-            srcOutputs.add(desc->mIoHandle);
-            ALOGVV(" previous outputs: adding %d", desc->mIoHandle);
-        }
-    }
-    for (size_t i = 0 ; i < mOutputs.size() ; i++) {
-        const sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-        if (desc != 0 && desc->mPolicyMix != NULL) {
-            dstOutputs.add(desc->mIoHandle);
-            ALOGVV(" new outputs: adding %d", desc->mIoHandle);
-        }
-    }
-
-    if (!vectorsEqual(srcOutputs,dstOutputs)) {
-        ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
-              strategy, srcOutputs[0], dstOutputs[0]);
-        // mute strategy while moving tracks from one output to another
-        for (size_t i = 0; i < srcOutputs.size(); i++) {
-            sp<AudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]);
-            if (desc->isStrategyActive(strategy)) {
-                setStrategyMute(strategy, true, srcOutputs[i]);
-                setStrategyMute(strategy, false, srcOutputs[i], MUTE_TIME_MS, newDevice);
-            }
-        }
-
-        // Move effects associated to this strategy from previous output to new output
-        if (strategy == STRATEGY_MEDIA) {
-            audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs);
-            SortedVector<audio_io_handle_t> moved;
-            for (size_t i = 0; i < mEffects.size(); i++) {
-                sp<EffectDescriptor> effectDesc = mEffects.valueAt(i);
-                if (effectDesc->mSession == AUDIO_SESSION_OUTPUT_MIX &&
-                        effectDesc->mIo != fxOutput) {
-                    if (moved.indexOf(effectDesc->mIo) < 0) {
-                        ALOGV("checkOutputForStrategy() moving effect %d to output %d",
-                              mEffects.keyAt(i), fxOutput);
-                        mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, effectDesc->mIo,
-                                                       fxOutput);
-                        moved.add(effectDesc->mIo);
-                    }
-                    effectDesc->mIo = fxOutput;
-                }
-            }
-        }
-        // Move tracks associated to this strategy from previous output to new output
-        for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-            if (i == AUDIO_STREAM_PATCH) {
-                continue;
-            }
-            if (getStrategy((audio_stream_type_t)i) == strategy) {
-                mpClientInterface->invalidateStream((audio_stream_type_t)i);
-            }
-        }
-    }
-}
-
-void AudioPolicyManager::checkOutputForAllStrategies()
-{
-    if (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
-        checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
-    checkOutputForStrategy(STRATEGY_PHONE);
-    if (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
-        checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
-    checkOutputForStrategy(STRATEGY_SONIFICATION);
-    checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
-    checkOutputForStrategy(STRATEGY_ACCESSIBILITY);
-    checkOutputForStrategy(STRATEGY_MEDIA);
-    checkOutputForStrategy(STRATEGY_DTMF);
-    checkOutputForStrategy(STRATEGY_REROUTING);
-}
-
-audio_io_handle_t AudioPolicyManager::getA2dpOutput()
-{
-    for (size_t i = 0; i < mOutputs.size(); i++) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-        if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) {
-            return mOutputs.keyAt(i);
-        }
-    }
-
-    return 0;
-}
-
-void AudioPolicyManager::checkA2dpSuspend()
-{
-    audio_io_handle_t a2dpOutput = getA2dpOutput();
-    if (a2dpOutput == 0) {
-        mA2dpSuspended = false;
-        return;
-    }
-
-    bool isScoConnected =
-            ((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
-                    ~AUDIO_DEVICE_BIT_IN) != 0) ||
-            ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
-    // suspend A2DP output if:
-    //      (NOT already suspended) &&
-    //      ((SCO device is connected &&
-    //       (forced usage for communication || for record is SCO))) ||
-    //      (phone state is ringing || in call)
-    //
-    // restore A2DP output if:
-    //      (Already suspended) &&
-    //      ((SCO device is NOT connected ||
-    //       (forced usage NOT for communication && NOT for record is SCO))) &&
-    //      (phone state is NOT ringing && NOT in call)
-    //
-    if (mA2dpSuspended) {
-        if ((!isScoConnected ||
-             ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO) &&
-              (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] != AUDIO_POLICY_FORCE_BT_SCO))) &&
-             ((mPhoneState != AUDIO_MODE_IN_CALL) &&
-              (mPhoneState != AUDIO_MODE_RINGTONE))) {
-
-            mpClientInterface->restoreOutput(a2dpOutput);
-            mA2dpSuspended = false;
-        }
-    } else {
-        if ((isScoConnected &&
-             ((mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
-              (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO))) ||
-             ((mPhoneState == AUDIO_MODE_IN_CALL) ||
-              (mPhoneState == AUDIO_MODE_RINGTONE))) {
-
-            mpClientInterface->suspendOutput(a2dpOutput);
-            mA2dpSuspended = true;
-        }
-    }
-}
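
The suspend/restore rules spelled out in the comment inside checkA2dpSuspend() reduce to a pure function of the current policy state. A minimal sketch, assuming stand-in types rather than the real policy-manager members:

    #include <cstdio>

    enum PhoneState { MODE_NORMAL, MODE_RINGTONE, MODE_IN_CALL };

    struct PolicyState {
        bool a2dpSuspended;     // A2DP output currently suspended
        bool scoConnected;      // a SCO device is available
        bool forceCommToSco;    // forced usage for communication is SCO
        bool forceRecordToSco;  // forced usage for record is SCO
        PhoneState phoneState;
    };

    // Returns the new "suspended" state after applying the rules above.
    static bool evaluateA2dpSuspend(const PolicyState &s) {
        bool inCallOrRinging = (s.phoneState == MODE_IN_CALL) ||
                               (s.phoneState == MODE_RINGTONE);
        bool scoForced = s.forceCommToSco || s.forceRecordToSco;
        if (s.a2dpSuspended) {
            // restore only when SCO is gone or not forced, and the phone is idle
            return !((!s.scoConnected || !scoForced) && !inCallOrRinging);
        }
        // suspend when SCO is connected and forced, or when ringing / in call
        return (s.scoConnected && scoForced) || inCallOrRinging;
    }

    int main() {
        PolicyState s = { false, true, true, false, MODE_NORMAL };
        printf("suspend A2DP: %s\n", evaluateA2dpSuspend(s) ? "yes" : "no");
        return 0;
    }
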
-
-audio_devices_t AudioPolicyManager::getNewOutputDevice(audio_io_handle_t output, bool fromCache)
-{
-    audio_devices_t device = AUDIO_DEVICE_NONE;
-
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-
-    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
-    if (index >= 0) {
-        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        if (patchDesc->mUid != mUidCached) {
-            ALOGV("getNewOutputDevice() device %08x forced by patch %d",
-                  outputDesc->device(), outputDesc->mPatchHandle);
-            return outputDesc->device();
-        }
-    }
-
-    // check the following by order of priority to request a routing change if necessary:
-    // 1: the strategy enforced audible is active and enforced on the output:
-    //      use device for strategy enforced audible
-    // 2: we are in call or the strategy phone is active on the output:
-    //      use device for strategy phone
-    // 3: the strategy for enforced audible is active but not enforced on the output:
-    //      use the device for strategy enforced audible
-    // 4: the strategy sonification is active on the output:
-    //      use device for strategy sonification
-    // 5: the strategy "respectful" sonification is active on the output:
-    //      use device for strategy "respectful" sonification
-    // 6: the strategy accessibility is active on the output:
-    //      use device for strategy accessibility
-    // 7: the strategy media is active on the output:
-    //      use device for strategy media
-    // 8: the strategy DTMF is active on the output:
-    //      use device for strategy DTMF
-    // 9: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
-    //      use device for strategy t-t-s
-    // 10: the strategy rerouting is active on the output:
-    //      use device for strategy rerouting
-    if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE) &&
-        mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
-        device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
-    } else if (isInCall() ||
-                    outputDesc->isStrategyActive(STRATEGY_PHONE)) {
-        device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_ENFORCED_AUDIBLE)) {
-        device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_SONIFICATION)) {
-        device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_SONIFICATION_RESPECTFUL)) {
-        device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_ACCESSIBILITY)) {
-        device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_MEDIA)) {
-        device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_DTMF)) {
-        device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
-        device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
-    } else if (outputDesc->isStrategyActive(STRATEGY_REROUTING)) {
-        device = getDeviceForStrategy(STRATEGY_REROUTING, fromCache);
-    }
-
-    ALOGV("getNewOutputDevice() selected device %x", device);
-    return device;
-}
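
getNewOutputDevice() walks the strategies in a fixed priority order and returns the device of the first strategy active on the output. A simplified standalone sketch of that selection pattern; it uses placeholder types and deliberately omits the in-call and forced-use special cases handled above:

    #include <cstdio>
    #include <functional>

    enum Strategy {
        STRATEGY_ENFORCED_AUDIBLE, STRATEGY_PHONE, STRATEGY_SONIFICATION,
        STRATEGY_SONIFICATION_RESPECTFUL, STRATEGY_ACCESSIBILITY,
        STRATEGY_MEDIA, STRATEGY_DTMF, STRATEGY_NONE
    };

    // Returns the first strategy, in priority order, that the predicate
    // reports as active on the output (STRATEGY_NONE if none is).
    static Strategy pickActiveStrategy(const std::function<bool(Strategy)> &isActive) {
        const Strategy byPriority[] = {
            STRATEGY_ENFORCED_AUDIBLE, STRATEGY_PHONE, STRATEGY_SONIFICATION,
            STRATEGY_SONIFICATION_RESPECTFUL, STRATEGY_ACCESSIBILITY,
            STRATEGY_MEDIA, STRATEGY_DTMF,
        };
        for (Strategy s : byPriority) {
            if (isActive(s)) {
                return s;
            }
        }
        return STRATEGY_NONE;
    }

    int main() {
        Strategy chosen = pickActiveStrategy([](Strategy st) { return st == STRATEGY_MEDIA; });
        printf("selected strategy: %d\n", chosen);
        return 0;
    }
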
-
-audio_devices_t AudioPolicyManager::getNewInputDevice(audio_io_handle_t input)
-{
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
-
-    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
-    if (index >= 0) {
-        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-        if (patchDesc->mUid != mUidCached) {
-            ALOGV("getNewInputDevice() device %08x forced by patch %d",
-                  inputDesc->mDevice, inputDesc->mPatchHandle);
-            return inputDesc->mDevice;
-        }
-    }
-
-    audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->mInputSource);
-
-    ALOGV("getNewInputDevice() selected device %x", device);
-    return device;
-}
-
-uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
-    return (uint32_t)getStrategy(stream);
-}
-
-audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
-    // By checking the range of stream before calling getStrategy, we avoid
-    // getStrategy's behavior for invalid streams.  getStrategy would do a ALOGE
-    // and then return STRATEGY_MEDIA, but we want to return the empty set.
-    if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
-        return AUDIO_DEVICE_NONE;
-    }
-    audio_devices_t devices;
-    AudioPolicyManager::routing_strategy strategy = getStrategy(stream);
-    devices = getDeviceForStrategy(strategy, true /*fromCache*/);
-    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(devices, mOutputs);
-    for (size_t i = 0; i < outputs.size(); i++) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
-        if (outputDesc->isStrategyActive(strategy)) {
-            devices = outputDesc->device();
-            break;
-        }
-    }
-
-    /*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
-      and doesn't really need to.*/
-    if (devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
-        devices |= AUDIO_DEVICE_OUT_SPEAKER;
-        devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-    }
-
-    return devices;
-}
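
The SPEAKER_SAFE filtering at the end of getDevicesForStream() is a plain bitmask remap: report the safe speaker as the regular speaker so clients such as AudioService never see the internal variant. A standalone sketch with placeholder bit values (the real audio_devices_t constants come from system/audio.h):

    #include <cstdint>
    #include <cstdio>

    static const uint32_t OUT_SPEAKER      = 0x1u << 1;   // placeholder bit
    static const uint32_t OUT_SPEAKER_SAFE = 0x1u << 22;  // placeholder bit

    static uint32_t filterSpeakerSafe(uint32_t devices) {
        if (devices & OUT_SPEAKER_SAFE) {
            devices |= OUT_SPEAKER;        // expose it as the regular speaker
            devices &= ~OUT_SPEAKER_SAFE;  // and hide the SAFE variant
        }
        return devices;
    }

    int main() {
        printf("remapped devices: 0x%x\n", (unsigned)filterSpeakerSafe(OUT_SPEAKER_SAFE));
        return 0;
    }
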
-
-AudioPolicyManager::routing_strategy AudioPolicyManager::getStrategy(
-        audio_stream_type_t stream) {
-
-    ALOG_ASSERT(stream != AUDIO_STREAM_PATCH,"getStrategy() called for AUDIO_STREAM_PATCH");
-
-    // stream to strategy mapping
-    switch (stream) {
-    case AUDIO_STREAM_VOICE_CALL:
-    case AUDIO_STREAM_BLUETOOTH_SCO:
-        return STRATEGY_PHONE;
-    case AUDIO_STREAM_RING:
-    case AUDIO_STREAM_ALARM:
-        return STRATEGY_SONIFICATION;
-    case AUDIO_STREAM_NOTIFICATION:
-        return STRATEGY_SONIFICATION_RESPECTFUL;
-    case AUDIO_STREAM_DTMF:
-        return STRATEGY_DTMF;
-    default:
-        ALOGE("unknown stream type %d", stream);
-    case AUDIO_STREAM_SYSTEM:
-        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
-        // while key clicks are played produces a poor result
-    case AUDIO_STREAM_MUSIC:
-        return STRATEGY_MEDIA;
-    case AUDIO_STREAM_ENFORCED_AUDIBLE:
-        return STRATEGY_ENFORCED_AUDIBLE;
-    case AUDIO_STREAM_TTS:
-        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
-    case AUDIO_STREAM_ACCESSIBILITY:
-        return STRATEGY_ACCESSIBILITY;
-    case AUDIO_STREAM_REROUTING:
-        return STRATEGY_REROUTING;
-    }
-}
-
-uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
-    // flags to strategy mapping
-    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
-        return (uint32_t) STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
-    }
-    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
-        return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
-    }
-
-    // usage to strategy mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        if (isStreamActive(AUDIO_STREAM_RING) || isStreamActive(AUDIO_STREAM_ALARM)) {
-            return (uint32_t) STRATEGY_SONIFICATION;
-        }
-        if (isInCall()) {
-            return (uint32_t) STRATEGY_PHONE;
-        }
-        return (uint32_t) STRATEGY_ACCESSIBILITY;
-
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return (uint32_t) STRATEGY_MEDIA;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return (uint32_t) STRATEGY_PHONE;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return (uint32_t) STRATEGY_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return (uint32_t) STRATEGY_SONIFICATION;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return (uint32_t) STRATEGY_SONIFICATION_RESPECTFUL;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return (uint32_t) STRATEGY_MEDIA;
-    }
-}
-
-void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
-    switch(stream) {
-    case AUDIO_STREAM_MUSIC:
-        checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
-        updateDevicesAndOutputs();
-        break;
-    default:
-        break;
-    }
-}
-
-bool AudioPolicyManager::isAnyOutputActive(audio_stream_type_t streamToIgnore) {
-    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
-        if (s == (size_t) streamToIgnore) {
-            continue;
-        }
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            const sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
-            if (outputDesc->mRefCount[s] != 0) {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
-uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
-    switch(event) {
-    case STARTING_OUTPUT:
-        mBeaconMuteRefCount++;
-        break;
-    case STOPPING_OUTPUT:
-        if (mBeaconMuteRefCount > 0) {
-            mBeaconMuteRefCount--;
-        }
-        break;
-    case STARTING_BEACON:
-        mBeaconPlayingRefCount++;
-        break;
-    case STOPPING_BEACON:
-        if (mBeaconPlayingRefCount > 0) {
-            mBeaconPlayingRefCount--;
-        }
-        break;
-    }
-
-    if (mBeaconMuteRefCount > 0) {
-        // any playback causes beacon to be muted
-        return setBeaconMute(true);
-    } else {
-        // no other playback: unmute when beacon starts playing, mute when it stops
-        return setBeaconMute(mBeaconPlayingRefCount == 0);
-    }
-}
-
-uint32_t AudioPolicyManager::setBeaconMute(bool mute) {
-    ALOGV("setBeaconMute(%d) mBeaconMuteRefCount=%d mBeaconPlayingRefCount=%d",
-            mute, mBeaconMuteRefCount, mBeaconPlayingRefCount);
-    // keep track of muted state to avoid repeating mute/unmute operations
-    if (mBeaconMuted != mute) {
-        // mute/unmute AUDIO_STREAM_TTS on all outputs
-        ALOGV("\t muting %d", mute);
-        uint32_t maxLatency = 0;
-        for (size_t i = 0; i < mOutputs.size(); i++) {
-            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-            setStreamMute(AUDIO_STREAM_TTS, mute/*on*/,
-                    desc->mIoHandle,
-                    0 /*delay*/, AUDIO_DEVICE_NONE);
-            const uint32_t latency = desc->latency() * 2;
-            if (latency > maxLatency) {
-                maxLatency = latency;
-            }
-        }
-        mBeaconMuted = mute;
-        return maxLatency;
-    }
-    return 0;
-}
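
handleEventForBeacon() and setBeaconMute() above implement a small ref-counting state machine: any regular playback mutes the beacon (TTS) stream, and with no other playback the beacon is unmuted only while a beacon is actually playing. A compact standalone sketch with assumed names (it returns the mute decision rather than the latency value the real code returns):

    #include <cstdio>

    enum BeaconEvent { STARTING_OUTPUT, STOPPING_OUTPUT, STARTING_BEACON, STOPPING_BEACON };

    struct BeaconState {
        int muteRefCount = 0;     // active non-beacon outputs
        int playingRefCount = 0;  // active beacon outputs

        // Returns true if the beacon stream should be muted after the event.
        bool handleEvent(BeaconEvent event) {
            switch (event) {
            case STARTING_OUTPUT: muteRefCount++; break;
            case STOPPING_OUTPUT: if (muteRefCount > 0) muteRefCount--; break;
            case STARTING_BEACON: playingRefCount++; break;
            case STOPPING_BEACON: if (playingRefCount > 0) playingRefCount--; break;
            }
            if (muteRefCount > 0) {
                return true;                // any playback mutes the beacon
            }
            return playingRefCount == 0;    // unmuted only while a beacon plays
        }
    };

    int main() {
        BeaconState s;
        printf("muted after STARTING_BEACON: %d\n", s.handleEvent(STARTING_BEACON));
        printf("muted after STARTING_OUTPUT: %d\n", s.handleEvent(STARTING_OUTPUT));
        return 0;
    }
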
-
-audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
-                                                             bool fromCache)
-{
-    uint32_t device = AUDIO_DEVICE_NONE;
-
-    if (fromCache) {
-        ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
-              strategy, mDeviceForStrategy[strategy]);
-        return mDeviceForStrategy[strategy];
-    }
-    audio_devices_t availableOutputDeviceTypes = mAvailableOutputDevices.types();
-    switch (strategy) {
-
-    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
-        device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-        if (!device) {
-            ALOGE("getDeviceForStrategy() no device found for "\
-                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
-        }
-        break;
-
-    case STRATEGY_SONIFICATION_RESPECTFUL:
-        if (isInCall()) {
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
-        } else if (isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
-                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
-            // while media is playing on a remote device, use the sonification behavior.
-            // Note that we test this use case before testing if media is playing because
-            //   the isStreamActive() method only informs about the activity of a stream, not
-            //   if it's for local playback. Note also that we use the same delay between both tests
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
-            // use "safe" speaker if available instead of normal speaker to avoid triggering
-            // other acoustic safety mechanisms for notification
-            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
-                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-        } else if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
-            // while media is playing (or has recently played), use the same device
-            device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
-        } else {
-            // when media is not playing anymore, fall back on the sonification behavior
-            device = getDeviceForStrategy(STRATEGY_SONIFICATION, false /*fromCache*/);
-            // use "safe" speaker if available instead of normal speaker to avoid triggering
-            // other acoustic safety mechanisms for notification
-            if (device == AUDIO_DEVICE_OUT_SPEAKER && (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER_SAFE))
-                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
-        }
-
-        break;
-
-    case STRATEGY_DTMF:
-        if (!isInCall()) {
-            // when off call, DTMF strategy follows the same rules as MEDIA strategy
-            device = getDeviceForStrategy(STRATEGY_MEDIA, false /*fromCache*/);
-            break;
-        }
-        // when in call, DTMF and PHONE strategies follow the same rules
-        // FALL THROUGH
-
-    case STRATEGY_PHONE:
-        // Force use of only devices on primary output if:
-        // - in call AND
-        //   - cannot route from voice call RX OR
-        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
-        if (mPhoneState == AUDIO_MODE_IN_CALL) {
-            audio_devices_t txDevice =
-                    getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
-            sp<AudioOutputDescriptor> hwOutputDesc = mOutputs.valueFor(mPrimaryOutput);
-            if (((mAvailableInputDevices.types() &
-                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
-                    (((txDevice & availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN) != 0) &&
-                         (hwOutputDesc->getAudioPort()->mModule->mHalVersion <
-                             AUDIO_DEVICE_API_VERSION_3_0))) {
-                availableOutputDeviceTypes = availablePrimaryOutputDevices();
-            }
-        }
-        // for phone strategy, we first consider the forced use and then the available devices by order
-        // of priority
-        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
-        case AUDIO_POLICY_FORCE_BT_SCO:
-            if (!isInCall() || strategy != STRATEGY_DTMF) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
-            if (device) break;
-            // if SCO device is requested but no SCO device is available, fall back to default case
-            // FALL THROUGH
-
-        default:    // FORCE_NONE
-            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
-            if (!isInCall() &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
-                    (getA2dpOutput() != 0)) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-            if (device) break;
-            if (mPhoneState != AUDIO_MODE_IN_CALL) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_EARPIECE;
-            if (device) break;
-            device = mDefaultOutputDevice->mDeviceType;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
-            }
-            break;
-
-        case AUDIO_POLICY_FORCE_SPEAKER:
-            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
-            // A2DP speaker when forcing to speaker output
-            if (!isInCall() &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
-                    (getA2dpOutput() != 0)) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
-                if (device) break;
-            }
-            if (!isInCall()) {
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-                if (device) break;
-                device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-                if (device) break;
-            }
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
-            if (device) break;
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-            if (device) break;
-            device = mDefaultOutputDevice->mDeviceType;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
-            }
-            break;
-        }
-        break;
-
-    case STRATEGY_SONIFICATION:
-
-        // If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
-        // handleIncallSonification().
-        if (isInCall()) {
-            device = getDeviceForStrategy(STRATEGY_PHONE, false /*fromCache*/);
-            break;
-        }
-        // FALL THROUGH
-
-    case STRATEGY_ENFORCED_AUDIBLE:
-        // strategy STRATEGY_ENFORCED_AUDIBLE uses the same routing policy as
-        // STRATEGY_SONIFICATION, except:
-        //   - when in call, where it does not default to STRATEGY_PHONE behavior
-        //   - in countries where it is not enforced, in which case it follows STRATEGY_MEDIA
-
-        if ((strategy == STRATEGY_SONIFICATION) ||
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
-            device = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-            if (device == AUDIO_DEVICE_NONE) {
-                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
-            }
-        }
-        // The second device used for sonification is the same as the device used by media strategy
-        // FALL THROUGH
-
-    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
-    case STRATEGY_ACCESSIBILITY:
-        if (strategy == STRATEGY_ACCESSIBILITY) {
-            // do not route accessibility prompts to a digital output currently configured with a
-            // compressed format, as they would likely be dropped rather than mixed.
-            for (size_t i = 0; i < mOutputs.size(); i++) {
-                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
-                audio_devices_t devices = desc->device() &
-                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
-                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
-                        devices != AUDIO_DEVICE_NONE) {
-                    availableOutputDeviceTypes = availableOutputDeviceTypes & ~devices;
-                }
-            }
-        }
-        // FALL THROUGH
-
-    case STRATEGY_REROUTING:
-    case STRATEGY_MEDIA: {
-        uint32_t device2 = AUDIO_DEVICE_NONE;
-        if (strategy != STRATEGY_SONIFICATION) {
-            // no sonification on remote submix (e.g. WFD)
-            if (mAvailableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
-                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
-            }
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
-                (getA2dpOutput() != 0)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
-            }
-            if (device2 == AUDIO_DEVICE_NONE) {
-                device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
-            }
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
-            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_LINE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_WIRED_HEADSET;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_ACCESSORY;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_USB_DEVICE;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
-            // no sonification on aux digital (e.g. HDMI)
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_DIGITAL;
-        }
-        if ((device2 == AUDIO_DEVICE_NONE) &&
-                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
-        }
-        if (device2 == AUDIO_DEVICE_NONE) {
-            device2 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPEAKER;
-        }
-        int device3 = AUDIO_DEVICE_NONE;
-        if (strategy == STRATEGY_MEDIA) {
-            // ARC, SPDIF and AUX_LINE can co-exist with others.
-            device3 = availableOutputDeviceTypes & AUDIO_DEVICE_OUT_HDMI_ARC;
-            device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_SPDIF);
-            device3 |= (availableOutputDeviceTypes & AUDIO_DEVICE_OUT_AUX_LINE);
-        }
-
-        device2 |= device3;
-        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
-        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
-        device |= device2;
-
-        // If hdmi system audio mode is on, remove speaker out of output list.
-        if ((strategy == STRATEGY_MEDIA) &&
-            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
-                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
-            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
-        }
-
-        if (device) break;
-        device = mDefaultOutputDevice->mDeviceType;
-        if (device == AUDIO_DEVICE_NONE) {
-            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
-        }
-        } break;
-
-    default:
-        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
-        break;
-    }
-
-    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
-    return device;
-}
-
-void AudioPolicyManager::updateDevicesAndOutputs()
-{
-    for (int i = 0; i < NUM_STRATEGIES; i++) {
-        mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
-    }
-    mPreviousOutputs = mOutputs;
-}
-
-uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc,
-                                                       audio_devices_t prevDevice,
-                                                       uint32_t delayMs)
-{
-    // mute/unmute strategies using an incompatible device combination
-    // if muting, wait for the audio in pcm buffer to be drained before proceeding
-    // if unmuting, unmute only after the specified delay
-    if (outputDesc->isDuplicated()) {
-        return 0;
-    }
-
-    uint32_t muteWaitMs = 0;
-    audio_devices_t device = outputDesc->device();
-    bool shouldMute = outputDesc->isActive() && (popcount(device) >= 2);
-
-    for (size_t i = 0; i < NUM_STRATEGIES; i++) {
-        audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
-        curDevice = curDevice & outputDesc->mProfile->mSupportedDevices.types();
-        bool mute = shouldMute && (curDevice & device) && (curDevice != device);
-        bool doMute = false;
-
-        if (mute && !outputDesc->mStrategyMutedByDevice[i]) {
-            doMute = true;
-            outputDesc->mStrategyMutedByDevice[i] = true;
-        } else if (!mute && outputDesc->mStrategyMutedByDevice[i]){
-            doMute = true;
-            outputDesc->mStrategyMutedByDevice[i] = false;
-        }
-        if (doMute) {
-            for (size_t j = 0; j < mOutputs.size(); j++) {
-                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(j);
-                // skip output if it does not share any device with current output
-                if ((desc->supportedDevices() & outputDesc->supportedDevices())
-                        == AUDIO_DEVICE_NONE) {
-                    continue;
-                }
-                audio_io_handle_t curOutput = mOutputs.keyAt(j);
-                ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x) on output %d",
-                      mute ? "muting" : "unmuting", i, curDevice, curOutput);
-                setStrategyMute((routing_strategy)i, mute, curOutput, mute ? 0 : delayMs);
-                if (desc->isStrategyActive((routing_strategy)i)) {
-                    if (mute) {
-                        // FIXME: should not need to double latency if volume could be applied
-                        // immediately by the audioflinger mixer. We must account for the delay
-                        // between now and the next time the audioflinger thread for this output
-                        // will process a buffer (which corresponds to one buffer size,
-                        // usually 1/2 or 1/4 of the latency).
-                        if (muteWaitMs < desc->latency() * 2) {
-                            muteWaitMs = desc->latency() * 2;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    // temporarily mute the output if the device selection changes, to avoid volume bursts due to
-    // different per-device volumes
-    if (outputDesc->isActive() && (device != prevDevice)) {
-        if (muteWaitMs < outputDesc->latency() * 2) {
-            muteWaitMs = outputDesc->latency() * 2;
-        }
-        for (size_t i = 0; i < NUM_STRATEGIES; i++) {
-            if (outputDesc->isStrategyActive((routing_strategy)i)) {
-                setStrategyMute((routing_strategy)i, true, outputDesc->mIoHandle);
-                // schedule the unmute after twice the mute wait time
-                setStrategyMute((routing_strategy)i, false, outputDesc->mIoHandle,
-                                muteWaitMs * 2, device);
-            }
-        }
-    }
-
-    // wait for the PCM output buffers to empty before proceeding with the rest of the command
-    if (muteWaitMs > delayMs) {
-        muteWaitMs -= delayMs;
-        usleep(muteWaitMs * 1000);
-        return muteWaitMs;
-    }
-    return 0;
-}
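A minimal sketch of the wait arithmetic at the end of checkDeviceMuteStrategies() above, assuming only the 2x-latency cap and the caller's pending delay; the helper name residualMuteWaitMs is illustrative, not part of the class:

#include <cstdint>

// Sketch only: the residual wait applied before checkDeviceMuteStrategies()
// returns. The mute wait is capped at twice the output latency, the delay the
// caller already plans to apply is credited against it, and only the
// remainder is actually slept.
static uint32_t residualMuteWaitMs(uint32_t outputLatencyMs, uint32_t callerDelayMs) {
    uint32_t muteWaitMs = outputLatencyMs * 2;  // 2x covers the mixer buffer drain
    return (muteWaitMs > callerDelayMs) ? (muteWaitMs - callerDelayMs) : 0;
}

For example, a 40 ms output with a 50 ms routing delay leaves a 30 ms residual wait.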
-
-uint32_t AudioPolicyManager::setOutputDevice(audio_io_handle_t output,
-                                             audio_devices_t device,
-                                             bool force,
-                                             int delayMs,
-                                             audio_patch_handle_t *patchHandle,
-                                             const char* address)
-{
-    ALOGV("setOutputDevice() output %d device %04x delayMs %d", output, device, delayMs);
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    AudioParameter param;
-    uint32_t muteWaitMs;
-
-    if (outputDesc->isDuplicated()) {
-        muteWaitMs = setOutputDevice(outputDesc->mOutput1->mIoHandle, device, force, delayMs);
-        muteWaitMs += setOutputDevice(outputDesc->mOutput2->mIoHandle, device, force, delayMs);
-        return muteWaitMs;
-    }
-    // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
-    // output profile
-    if ((device != AUDIO_DEVICE_NONE) &&
-            ((device & outputDesc->mProfile->mSupportedDevices.types()) == 0)) {
-        return 0;
-    }
-
-    // filter devices according to output selected
-    device = (audio_devices_t)(device & outputDesc->mProfile->mSupportedDevices.types());
-
-    audio_devices_t prevDevice = outputDesc->mDevice;
-
-    ALOGV("setOutputDevice() prevDevice %04x", prevDevice);
-
-    if (device != AUDIO_DEVICE_NONE) {
-        outputDesc->mDevice = device;
-    }
-    muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
-
-    // Do not change the routing if:
-    //      the requested device is AUDIO_DEVICE_NONE
-    //      OR the requested device is the same as current device
-    //  AND force is not specified
-    //  AND the output is connected by a valid audio patch.
-    // Doing this check here allows the caller to call setOutputDevice() without conditions
-    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) && !force &&
-            outputDesc->mPatchHandle != 0) {
-        ALOGV("setOutputDevice() setting same device %04x or null device for output %d",
-              device, output);
-        return muteWaitMs;
-    }
-
-    ALOGV("setOutputDevice() changing device");
-
-    // do the routing
-    if (device == AUDIO_DEVICE_NONE) {
-        resetOutputDevice(output, delayMs, NULL);
-    } else {
-        DeviceVector deviceList = (address == NULL) ?
-                mAvailableOutputDevices.getDevicesFromType(device)
-                : mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
-        if (!deviceList.isEmpty()) {
-            struct audio_patch patch;
-            outputDesc->toAudioPortConfig(&patch.sources[0]);
-            patch.num_sources = 1;
-            patch.num_sinks = 0;
-            for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
-                deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
-                patch.num_sinks++;
-            }
-            ssize_t index;
-            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-                index = mAudioPatches.indexOfKey(*patchHandle);
-            } else {
-                index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
-            }
-            sp<AudioPatch> patchDesc;
-            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-            if (index >= 0) {
-                patchDesc = mAudioPatches.valueAt(index);
-                afPatchHandle = patchDesc->mAfPatchHandle;
-            }
-
-            status_t status = mpClientInterface->createAudioPatch(&patch,
-                                                                   &afPatchHandle,
-                                                                   delayMs);
-            ALOGV("setOutputDevice() createAudioPatch returned %d patchHandle %d"
-                    " num_sources %d num_sinks %d",
-                    status, afPatchHandle, patch.num_sources, patch.num_sinks);
-            if (status == NO_ERROR) {
-                if (index < 0) {
-                    patchDesc = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                               &patch, mUidCached);
-                    addAudioPatch(patchDesc->mHandle, patchDesc);
-                } else {
-                    patchDesc->mPatch = patch;
-                }
-                patchDesc->mAfPatchHandle = afPatchHandle;
-                patchDesc->mUid = mUidCached;
-                if (patchHandle) {
-                    *patchHandle = patchDesc->mHandle;
-                }
-                outputDesc->mPatchHandle = patchDesc->mHandle;
-                nextAudioPortGeneration();
-                mpClientInterface->onAudioPatchListUpdate();
-            }
-        }
-
-        // inform all inputs as well
-        for (size_t i = 0; i < mInputs.size(); i++) {
-            const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
-            if (!isVirtualInputDevice(inputDescriptor->mDevice)) {
-                AudioParameter inputCmd = AudioParameter();
-                ALOGV("%s: inform input %d of device:%d", __func__,
-                      inputDescriptor->mIoHandle, device);
-                inputCmd.addInt(String8(AudioParameter::keyRouting),device);
-                mpClientInterface->setParameters(inputDescriptor->mIoHandle,
-                                                 inputCmd.toString(),
-                                                 delayMs);
-            }
-        }
-    }
-
-    // update stream volumes according to new device
-    applyStreamVolumes(output, device, delayMs);
-
-    return muteWaitMs;
-}
-
-status_t AudioPolicyManager::resetOutputDevice(audio_io_handle_t output,
-                                               int delayMs,
-                                               audio_patch_handle_t *patchHandle)
-{
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    ssize_t index;
-    if (patchHandle) {
-        index = mAudioPatches.indexOfKey(*patchHandle);
-    } else {
-        index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
-    }
-    if (index < 0) {
-        return INVALID_OPERATION;
-    }
-    sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-    status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, delayMs);
-    ALOGV("resetOutputDevice() releaseAudioPatch returned %d", status);
-    outputDesc->mPatchHandle = 0;
-    removeAudioPatch(patchDesc->mHandle);
-    nextAudioPortGeneration();
-    mpClientInterface->onAudioPatchListUpdate();
-    return status;
-}
-
-status_t AudioPolicyManager::setInputDevice(audio_io_handle_t input,
-                                            audio_devices_t device,
-                                            bool force,
-                                            audio_patch_handle_t *patchHandle)
-{
-    status_t status = NO_ERROR;
-
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
-    if ((device != AUDIO_DEVICE_NONE) && ((device != inputDesc->mDevice) || force)) {
-        inputDesc->mDevice = device;
-
-        DeviceVector deviceList = mAvailableInputDevices.getDevicesFromType(device);
-        if (!deviceList.isEmpty()) {
-            struct audio_patch patch;
-            inputDesc->toAudioPortConfig(&patch.sinks[0]);
-            // AUDIO_SOURCE_HOTWORD is for internal use only:
-            // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
-            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
-                    !inputDesc->mIsSoundTrigger) {
-                patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
-            }
-            patch.num_sinks = 1;
-            // only one input device for now
-            deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
-            patch.num_sources = 1;
-            ssize_t index;
-            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
-                index = mAudioPatches.indexOfKey(*patchHandle);
-            } else {
-                index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
-            }
-            sp<AudioPatch> patchDesc;
-            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
-            if (index >= 0) {
-                patchDesc = mAudioPatches.valueAt(index);
-                afPatchHandle = patchDesc->mAfPatchHandle;
-            }
-
-            status = mpClientInterface->createAudioPatch(&patch,
-                                                         &afPatchHandle,
-                                                         0);
-            ALOGV("setInputDevice() createAudioPatch returned %d patchHandle %d",
-                                                                          status, afPatchHandle);
-            if (status == NO_ERROR) {
-                if (index < 0) {
-                    patchDesc = new AudioPatch((audio_patch_handle_t)nextUniqueId(),
-                                               &patch, mUidCached);
-                    addAudioPatch(patchDesc->mHandle, patchDesc);
-                } else {
-                    patchDesc->mPatch = patch;
-                }
-                patchDesc->mAfPatchHandle = afPatchHandle;
-                patchDesc->mUid = mUidCached;
-                if (patchHandle) {
-                    *patchHandle = patchDesc->mHandle;
-                }
-                inputDesc->mPatchHandle = patchDesc->mHandle;
-                nextAudioPortGeneration();
-                mpClientInterface->onAudioPatchListUpdate();
-            }
-        }
-    }
-    return status;
-}
-
-status_t AudioPolicyManager::resetInputDevice(audio_io_handle_t input,
-                                              audio_patch_handle_t *patchHandle)
-{
-    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
-    ssize_t index;
-    if (patchHandle) {
-        index = mAudioPatches.indexOfKey(*patchHandle);
-    } else {
-        index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
-    }
-    if (index < 0) {
-        return INVALID_OPERATION;
-    }
-    sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
-    status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
-    ALOGV("resetInputDevice() releaseAudioPatch returned %d", status);
-    inputDesc->mPatchHandle = 0;
-    removeAudioPatch(patchDesc->mHandle);
-    nextAudioPortGeneration();
-    mpClientInterface->onAudioPatchListUpdate();
-    return status;
-}
-
-sp<AudioPolicyManager::IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
-                                                   String8 address,
-                                                   uint32_t& samplingRate,
-                                                   audio_format_t format,
-                                                   audio_channel_mask_t channelMask,
-                                                   audio_input_flags_t flags)
-{
-    // Choose an input profile based on the requested capture parameters: select the first available
-    // profile supporting all requested parameters.
-
-    for (size_t i = 0; i < mHwModules.size(); i++)
-    {
-        if (mHwModules[i]->mHandle == 0) {
-            continue;
-        }
-        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
-        {
-            sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
-            // profile->log();
-            if (profile->isCompatibleProfile(device, address, samplingRate,
-                                             &samplingRate /*updatedSamplingRate*/,
-                                             format, channelMask, (audio_output_flags_t) flags)) {
-
-                return profile;
-            }
-        }
-    }
-    return NULL;
-}
-
-
-audio_devices_t AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                            AudioMix **policyMix)
-{
-    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() &
-                                            ~AUDIO_DEVICE_BIT_IN;
-
-    for (size_t i = 0; i < mPolicyMixes.size(); i++) {
-        if (mPolicyMixes[i]->mMix.mMixType != MIX_TYPE_RECORDERS) {
-            continue;
-        }
-        for (size_t j = 0; j < mPolicyMixes[i]->mMix.mCriteria.size(); j++) {
-            if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                    mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mSource == inputSource) ||
-               (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mPolicyMixes[i]->mMix.mCriteria[j].mRule &&
-                    mPolicyMixes[i]->mMix.mCriteria[j].mAttr.mSource != inputSource)) {
-                if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
-                    if (policyMix != NULL) {
-                        *policyMix = &mPolicyMixes[i]->mMix;
-                    }
-                    return AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-                }
-                break;
-            }
-        }
-    }
-
-    return getDeviceForInputSource(inputSource);
-}
-
-audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
-{
-    uint32_t device = AUDIO_DEVICE_NONE;
-    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() &
-                                            ~AUDIO_DEVICE_BIT_IN;
-
-    switch (inputSource) {
-    case AUDIO_SOURCE_VOICE_UPLINK:
-      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
-          device = AUDIO_DEVICE_IN_VOICE_CALL;
-          break;
-      }
-      break;
-
-    case AUDIO_SOURCE_DEFAULT:
-    case AUDIO_SOURCE_MIC:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
-            device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
-        } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) &&
-                (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
-            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-            device = AUDIO_DEVICE_IN_USB_DEVICE;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        }
-        break;
-
-    case AUDIO_SOURCE_VOICE_COMMUNICATION:
-        // Allow only use of devices on primary input if in call and HAL does not support routing
-        // to voice call path.
-        if ((mPhoneState == AUDIO_MODE_IN_CALL) &&
-                (mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
-            availableDeviceTypes = availablePrimaryInputDevices() & ~AUDIO_DEVICE_BIT_IN;
-        }
-
-        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
-        case AUDIO_POLICY_FORCE_BT_SCO:
-            // if SCO device is requested but no SCO device is available, fall back to default case
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
-                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-                break;
-            }
-            // FALL THROUGH
-
-        default:    // FORCE_NONE
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-                device = AUDIO_DEVICE_IN_USB_DEVICE;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-            }
-            break;
-
-        case AUDIO_POLICY_FORCE_SPEAKER:
-            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
-                device = AUDIO_DEVICE_IN_BACK_MIC;
-            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-            }
-            break;
-        }
-        break;
-
-    case AUDIO_SOURCE_VOICE_RECOGNITION:
-    case AUDIO_SOURCE_HOTWORD:
-        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
-                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
-            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
-            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
-            device = AUDIO_DEVICE_IN_USB_DEVICE;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        }
-        break;
-    case AUDIO_SOURCE_CAMCORDER:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
-            device = AUDIO_DEVICE_IN_BACK_MIC;
-        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
-            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
-        }
-        break;
-    case AUDIO_SOURCE_VOICE_DOWNLINK:
-    case AUDIO_SOURCE_VOICE_CALL:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
-            device = AUDIO_DEVICE_IN_VOICE_CALL;
-        }
-        break;
-    case AUDIO_SOURCE_REMOTE_SUBMIX:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
-            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
-        }
-        break;
-    case AUDIO_SOURCE_FM_TUNER:
-        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
-            device = AUDIO_DEVICE_IN_FM_TUNER;
-        }
-        break;
-    default:
-        ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
-        break;
-    }
-    ALOGV("getDeviceForInputSource() input source %d, device %08x", inputSource, device);
-    return device;
-}
-
-bool AudioPolicyManager::isVirtualInputDevice(audio_devices_t device)
-{
-    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
-        device &= ~AUDIO_DEVICE_BIT_IN;
-        if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
-            return true;
-    }
-    return false;
-}
-
-bool AudioPolicyManager::deviceDistinguishesOnAddress(audio_devices_t device) {
-    return ((device & APM_AUDIO_DEVICE_MATCH_ADDRESS_ALL & ~AUDIO_DEVICE_BIT_IN) != 0);
-}
-
-audio_io_handle_t AudioPolicyManager::getActiveInput(bool ignoreVirtualInputs)
-{
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        const sp<AudioInputDescriptor>  input_descriptor = mInputs.valueAt(i);
-        if ((input_descriptor->mRefCount > 0)
-                && (!ignoreVirtualInputs || !isVirtualInputDevice(input_descriptor->mDevice))) {
-            return mInputs.keyAt(i);
-        }
-    }
-    return 0;
-}
-
-uint32_t AudioPolicyManager::activeInputsCount() const
-{
-    uint32_t count = 0;
-    for (size_t i = 0; i < mInputs.size(); i++) {
-        const sp<AudioInputDescriptor>  desc = mInputs.valueAt(i);
-        if (desc->mRefCount > 0) {
-            count++;
-        }
-    }
-    return count;
-}
-
-
-audio_devices_t AudioPolicyManager::getDeviceForVolume(audio_devices_t device)
-{
-    if (device == AUDIO_DEVICE_NONE) {
-        // this happens when forcing a route update and no track is active on an output.
-        // In this case the returned category is not important.
-        device =  AUDIO_DEVICE_OUT_SPEAKER;
-    } else if (popcount(device) > 1) {
-        // Multiple device selection is either:
-        //  - speaker + one other device: give priority to speaker in this case.
-        //  - one A2DP device + another device: happens with duplicated output. In this case
-        // retain the device on the A2DP output as the other must not correspond to an active
-        // selection if not the speaker.
-        //  - HDMI-CEC system audio mode only output: give priority to available item in order.
-        if (device & AUDIO_DEVICE_OUT_SPEAKER) {
-            device = AUDIO_DEVICE_OUT_SPEAKER;
-        } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
-            device = AUDIO_DEVICE_OUT_HDMI_ARC;
-        } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
-            device = AUDIO_DEVICE_OUT_AUX_LINE;
-        } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
-            device = AUDIO_DEVICE_OUT_SPDIF;
-        } else {
-            device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
-        }
-    }
-
-    /* SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control */
-    if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
-        device = AUDIO_DEVICE_OUT_SPEAKER;
-
-    ALOGW_IF(popcount(device) != 1,
-            "getDeviceForVolume() invalid device combination: %08x",
-            device);
-
-    return device;
-}
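To make the priority order in getDeviceForVolume() concrete, here is a small standalone sketch; the device bits are stand-ins, not the real audio_devices_t values from system/audio.h:

#include <cstdio>

// Stand-in bits for illustration only.
enum : unsigned { kSpeaker = 0x1, kHdmiArc = 0x2, kA2dp = 0x4 };

// Mirrors the priority applied when several devices are selected:
// speaker first, then HDMI ARC, otherwise keep the A2DP bit(s).
static unsigned deviceForVolumeSketch(unsigned mask) {
    if (mask & kSpeaker) return kSpeaker;
    if (mask & kHdmiArc) return kHdmiArc;
    return mask & kA2dp;
}

int main() {
    printf("%u\n", deviceForVolumeSketch(kSpeaker | kA2dp)); // prints 1: speaker wins
    return 0;
}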
-
-AudioPolicyManager::device_category AudioPolicyManager::getDeviceCategory(audio_devices_t device)
-{
-    switch(getDeviceForVolume(device)) {
-        case AUDIO_DEVICE_OUT_EARPIECE:
-            return DEVICE_CATEGORY_EARPIECE;
-        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
-        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
-            return DEVICE_CATEGORY_HEADSET;
-        case AUDIO_DEVICE_OUT_LINE:
-        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
-        /*USB?  Remote submix?*/
-            return DEVICE_CATEGORY_EXT_MEDIA;
-        case AUDIO_DEVICE_OUT_SPEAKER:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
-        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
-        case AUDIO_DEVICE_OUT_USB_ACCESSORY:
-        case AUDIO_DEVICE_OUT_USB_DEVICE:
-        case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
-        default:
-            return DEVICE_CATEGORY_SPEAKER;
-    }
-}
-
-/* static */
-float AudioPolicyManager::volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
-        int indexInUi)
-{
-    device_category deviceCategory = getDeviceCategory(device);
-    const VolumeCurvePoint *curve = streamDesc.mVolumeCurve[deviceCategory];
-
-    // the volume index in the UI is relative to the min and max volume indices for this stream type
-    int nbSteps = 1 + curve[VOLMAX].mIndex - curve[VOLMIN].mIndex;
-    int volIdx = (nbSteps * (indexInUi - streamDesc.mIndexMin)) /
-            (streamDesc.mIndexMax - streamDesc.mIndexMin);
-
-    // find what part of the curve this index volume belongs to, or if it's out of bounds
-    int segment = 0;
-    if (volIdx < curve[VOLMIN].mIndex) {         // out of bounds
-        return 0.0f;
-    } else if (volIdx < curve[VOLKNEE1].mIndex) {
-        segment = 0;
-    } else if (volIdx < curve[VOLKNEE2].mIndex) {
-        segment = 1;
-    } else if (volIdx <= curve[VOLMAX].mIndex) {
-        segment = 2;
-    } else {                                                               // out of bounds
-        return 1.0f;
-    }
-
-    // linear interpolation in the attenuation table in dB
-    float decibels = curve[segment].mDBAttenuation +
-            ((float)(volIdx - curve[segment].mIndex)) *
-                ( (curve[segment+1].mDBAttenuation -
-                        curve[segment].mDBAttenuation) /
-                    ((float)(curve[segment+1].mIndex -
-                            curve[segment].mIndex)) );
-
-    float amplification = exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
-
-    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f] ampl=%.5f",
-            curve[segment].mIndex, volIdx,
-            curve[segment+1].mIndex,
-            curve[segment].mDBAttenuation,
-            decibels,
-            curve[segment+1].mDBAttenuation,
-            amplification);
-
-    return amplification;
-}
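The dB-to-linear step above can be checked in isolation. A minimal sketch of the exp(dB * ln(10) / 20) conversion follows; dbToAmpl is an illustrative name, not an API of this class:

#include <cmath>
#include <cstdio>

// Convert a dB attenuation to a linear amplitude factor, matching the
// exp(dB * ln(10) / 20) form used by volIndexToAmpl().
static float dbToAmpl(float db) {
    return std::exp(db * 0.115129f);  // 0.115129 ~= ln(10) / 20
}

int main() {
    // -20 dB maps to ~0.1, -6 dB to ~0.5, 0 dB to 1.0
    printf("%.3f %.3f %.3f\n", dbToAmpl(-20.0f), dbToAmpl(-6.0f), dbToAmpl(0.0f));
    return 0;
}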
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sDefaultVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sExtMediaSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sSpeakerMediaVolumeCurveDrc[AudioPolicyManager::VOLCNT] = {
-    {1, -55.0f}, {20, -43.0f}, {86, -12.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT] = {
-    {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
-};
-
-// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
-// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
-// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
-// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sDefaultSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sDefaultSystemVolumeCurveDrc[AudioPolicyManager::VOLCNT] = {
-    {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sLinearVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sSilentVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-    AudioPolicyManager::sFullScaleVolumeCurve[AudioPolicyManager::VOLCNT] = {
-    {0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
-};
-
-const AudioPolicyManager::VolumeCurvePoint
-            *AudioPolicyManager::sVolumeProfiles[AUDIO_STREAM_CNT]
-                                                   [AudioPolicyManager::DEVICE_CATEGORY_CNT] = {
-    { // AUDIO_STREAM_VOICE_CALL
-        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_SYSTEM
-        sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_RING
-        sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_MUSIC
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_ALARM
-        sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_NOTIFICATION
-        sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
-        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_BLUETOOTH_SCO
-        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_ENFORCED_AUDIBLE
-        sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    {  // AUDIO_STREAM_DTMF
-        sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_TTS
-      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
-        sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_ACCESSIBILITY
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_REROUTING
-        sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-    { // AUDIO_STREAM_PATCH
-        sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
-        sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
-        sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
-        sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
-    },
-};
-
-void AudioPolicyManager::initializeVolumeCurves()
-{
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-        for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
-            mStreams[i].mVolumeCurve[j] =
-                    sVolumeProfiles[i][j];
-        }
-    }
-
-    // Check availability of DRC on speaker path: if available, override some of the speaker curves
-    if (mSpeakerDrcEnabled) {
-        mStreams[AUDIO_STREAM_SYSTEM].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
-                sDefaultSystemVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_RING].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
-                sSpeakerSonificationVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_ALARM].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
-                sSpeakerSonificationVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_NOTIFICATION].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
-                sSpeakerSonificationVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_MUSIC].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
-                sSpeakerMediaVolumeCurveDrc;
-        mStreams[AUDIO_STREAM_ACCESSIBILITY].mVolumeCurve[DEVICE_CATEGORY_SPEAKER] =
-                sSpeakerMediaVolumeCurveDrc;
-    }
-}
-
-float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
-                                            int index,
-                                            audio_io_handle_t output,
-                                            audio_devices_t device)
-{
-    float volume = 1.0;
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    StreamDescriptor &streamDesc = mStreams[stream];
-
-    if (device == AUDIO_DEVICE_NONE) {
-        device = outputDesc->device();
-    }
-
-    volume = volIndexToAmpl(device, streamDesc, index);
-
-    // if a headset is connected, apply the following rules to ring tones and notifications
-    // to avoid sound level bursts in user's ears:
-    // - always attenuate ring tones and notifications volume by 6dB
-    // - if music is playing, always limit the volume to current music volume,
-    // with a minimum threshold at -36dB so that notification is always perceived.
-    const routing_strategy stream_strategy = getStrategy(stream);
-    if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
-            AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
-            AUDIO_DEVICE_OUT_WIRED_HEADSET |
-            AUDIO_DEVICE_OUT_WIRED_HEADPHONE)) &&
-        ((stream_strategy == STRATEGY_SONIFICATION)
-                || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
-                || (stream == AUDIO_STREAM_SYSTEM)
-                || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
-                    (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) &&
-        streamDesc.mCanBeMuted) {
-        volume *= SONIFICATION_HEADSET_VOLUME_FACTOR;
-        // when the phone is ringing, consider that music could have been paused just before by the
-        // music application: behave as if music were active if the last music track was just
-        // stopped
-        if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
-                mLimitRingtoneVolume) {
-            audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
-            float musicVol = computeVolume(AUDIO_STREAM_MUSIC,
-                               mStreams[AUDIO_STREAM_MUSIC].getVolumeIndex(musicDevice),
-                               output,
-                               musicDevice);
-            float minVol = (musicVol > SONIFICATION_HEADSET_VOLUME_MIN) ?
-                                musicVol : SONIFICATION_HEADSET_VOLUME_MIN;
-            if (volume > minVol) {
-                volume = minVol;
-                ALOGV("computeVolume limiting volume to %f musicVol %f", minVol, musicVol);
-            }
-        }
-    }
-
-    return volume;
-}
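A sketch of the headset limiting rule applied in computeVolume() above, with stand-in values for the SONIFICATION_HEADSET_* constants; the real values live in AudioPolicyManager.h and may differ:

#include <algorithm>

// Stand-in values, for illustration only.
static const float kHeadsetAttenuation = 0.5f;   // SONIFICATION_HEADSET_VOLUME_FACTOR
static const float kHeadsetVolumeFloor = 0.016f; // SONIFICATION_HEADSET_VOLUME_MIN

// Sketch of the rule: attenuate ringtones/notifications on headset, and while
// music is (or was recently) playing, cap them at the louder of the current
// music volume and a floor that keeps them audible.
static float limitSonificationOnHeadset(float vol, float musicVol, bool musicActiveOrRinging) {
    vol *= kHeadsetAttenuation;
    if (musicActiveOrRinging) {
        vol = std::min(vol, std::max(musicVol, kHeadsetVolumeFloor));
    }
    return vol;
}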
-
-status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
-                                                   int index,
-                                                   audio_io_handle_t output,
-                                                   audio_devices_t device,
-                                                   int delayMs,
-                                                   bool force)
-{
-
-    // do not change actual stream volume if the stream is muted
-    if (mOutputs.valueFor(output)->mMuteCount[stream] != 0) {
-        ALOGVV("checkAndSetVolume() stream %d muted count %d",
-              stream, mOutputs.valueFor(output)->mMuteCount[stream]);
-        return NO_ERROR;
-    }
-
-    // do not change in call volume if bluetooth is connected and vice versa
-    if ((stream == AUDIO_STREAM_VOICE_CALL &&
-            mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] == AUDIO_POLICY_FORCE_BT_SCO) ||
-        (stream == AUDIO_STREAM_BLUETOOTH_SCO &&
-                mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION] != AUDIO_POLICY_FORCE_BT_SCO)) {
-        ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
-             stream, mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]);
-        return INVALID_OPERATION;
-    }
-
-    float volume = computeVolume(stream, index, output, device);
-    // unit gain if rerouting to external policy
-    if (device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX) {
-        ssize_t index = mOutputs.indexOfKey(output);
-        if (index >= 0) {
-            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
-            if (outputDesc->mPolicyMix != NULL) {
-                ALOGV("max gain when rerouting for output=%d", output);
-                volume = 1.0f;
-            }
-        }
-
-    }
-    // We actually change the volume if:
-    // - the float value returned by computeVolume() changed
-    // - the force flag is set
-    if (volume != mOutputs.valueFor(output)->mCurVolume[stream] ||
-            force) {
-        mOutputs.valueFor(output)->mCurVolume[stream] = volume;
-        ALOGVV("checkAndSetVolume() for output %d stream %d, volume %f, delay %d", output, stream, volume, delayMs);
-        // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
-        // enabled
-        if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
-            mpClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volume, output, delayMs);
-        }
-        mpClientInterface->setStreamVolume(stream, volume, output, delayMs);
-    }
-
-    if (stream == AUDIO_STREAM_VOICE_CALL ||
-        stream == AUDIO_STREAM_BLUETOOTH_SCO) {
-        float voiceVolume;
-        // Force voice volume to max for bluetooth SCO as volume is managed by the headset
-        if (stream == AUDIO_STREAM_VOICE_CALL) {
-            voiceVolume = (float)index/(float)mStreams[stream].mIndexMax;
-        } else {
-            voiceVolume = 1.0;
-        }
-
-        if (voiceVolume != mLastVoiceVolume && output == mPrimaryOutput) {
-            mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
-            mLastVoiceVolume = voiceVolume;
-        }
-    }
-
-    return NO_ERROR;
-}
-
-void AudioPolicyManager::applyStreamVolumes(audio_io_handle_t output,
-                                                audio_devices_t device,
-                                                int delayMs,
-                                                bool force)
-{
-    ALOGVV("applyStreamVolumes() for output %d and device %x", output, device);
-
-    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-        if (stream == AUDIO_STREAM_PATCH) {
-            continue;
-        }
-        checkAndSetVolume((audio_stream_type_t)stream,
-                          mStreams[stream].getVolumeIndex(device),
-                          output,
-                          device,
-                          delayMs,
-                          force);
-    }
-}
-
-void AudioPolicyManager::setStrategyMute(routing_strategy strategy,
-                                             bool on,
-                                             audio_io_handle_t output,
-                                             int delayMs,
-                                             audio_devices_t device)
-{
-    ALOGVV("setStrategyMute() strategy %d, mute %d, output %d", strategy, on, output);
-    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
-        if (stream == AUDIO_STREAM_PATCH) {
-            continue;
-        }
-        if (getStrategy((audio_stream_type_t)stream) == strategy) {
-            setStreamMute((audio_stream_type_t)stream, on, output, delayMs, device);
-        }
-    }
-}
-
-void AudioPolicyManager::setStreamMute(audio_stream_type_t stream,
-                                           bool on,
-                                           audio_io_handle_t output,
-                                           int delayMs,
-                                           audio_devices_t device)
-{
-    StreamDescriptor &streamDesc = mStreams[stream];
-    sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
-    if (device == AUDIO_DEVICE_NONE) {
-        device = outputDesc->device();
-    }
-
-    ALOGVV("setStreamMute() stream %d, mute %d, output %d, mMuteCount %d device %04x",
-          stream, on, output, outputDesc->mMuteCount[stream], device);
-
-    if (on) {
-        if (outputDesc->mMuteCount[stream] == 0) {
-            if (streamDesc.mCanBeMuted &&
-                    ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
-                     (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_NONE))) {
-                checkAndSetVolume(stream, 0, output, device, delayMs);
-            }
-        }
-        // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
-        outputDesc->mMuteCount[stream]++;
-    } else {
-        if (outputDesc->mMuteCount[stream] == 0) {
-            ALOGV("setStreamMute() unmuting non muted stream!");
-            return;
-        }
-        if (--outputDesc->mMuteCount[stream] == 0) {
-            checkAndSetVolume(stream,
-                              streamDesc.getVolumeIndex(device),
-                              output,
-                              device,
-                              delayMs);
-        }
-    }
-}
-
-void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
-                                                      bool starting, bool stateChange)
-{
-    // if the stream pertains to a sonification strategy and we are in call, we must mute the
-    // stream if it is low visibility. If it is high visibility, we play a tone on the device
-    // used for the phone strategy instead, and also mute the stream when its output device
-    // interferes with the device used for the phone strategy.
-    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
-    // many times as there are active tracks on the output
-    const routing_strategy stream_strategy = getStrategy(stream);
-    if ((stream_strategy == STRATEGY_SONIFICATION) ||
-            (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)) {
-        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(mPrimaryOutput);
-        ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
-                stream, starting, outputDesc->mDevice, stateChange);
-        if (outputDesc->mRefCount[stream]) {
-            int muteCount = 1;
-            if (stateChange) {
-                muteCount = outputDesc->mRefCount[stream];
-            }
-            if (audio_is_low_visibility(stream)) {
-                ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
-                for (int i = 0; i < muteCount; i++) {
-                    setStreamMute(stream, starting, mPrimaryOutput);
-                }
-            } else {
-                ALOGV("handleIncallSonification() high visibility");
-                if (outputDesc->device() &
-                        getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
-                    ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
-                    for (int i = 0; i < muteCount; i++) {
-                        setStreamMute(stream, starting, mPrimaryOutput);
-                    }
-                }
-                if (starting) {
-                    mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
-                                                 AUDIO_STREAM_VOICE_CALL);
-                } else {
-                    mpClientInterface->stopTone();
-                }
-            }
-        }
-    }
-}
-
-bool AudioPolicyManager::isInCall()
-{
-    return isStateInCall(mPhoneState);
-}
-
-bool AudioPolicyManager::isStateInCall(int state) {
-    return ((state == AUDIO_MODE_IN_CALL) ||
-            (state == AUDIO_MODE_IN_COMMUNICATION));
-}
-
-uint32_t AudioPolicyManager::getMaxEffectsCpuLoad()
-{
-    return MAX_EFFECTS_CPU_LOAD;
-}
-
-uint32_t AudioPolicyManager::getMaxEffectsMemory()
-{
-    return MAX_EFFECTS_MEMORY;
-}
-
-
-// --- AudioOutputDescriptor class implementation
-
-AudioPolicyManager::AudioOutputDescriptor::AudioOutputDescriptor(
-        const sp<IOProfile>& profile)
-    : mId(0), mIoHandle(0), mLatency(0),
-    mFlags((audio_output_flags_t)0), mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
-    mPatchHandle(0),
-    mOutput1(0), mOutput2(0), mProfile(profile), mDirectOpenCount(0)
-{
-    // clear usage count for all stream types
-    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
-        mRefCount[i] = 0;
-        mCurVolume[i] = -1.0;
-        mMuteCount[i] = 0;
-        mStopTime[i] = 0;
-    }
-    for (int i = 0; i < NUM_STRATEGIES; i++) {
-        mStrategyMutedByDevice[i] = false;
-    }
-    if (profile != NULL) {
-        mFlags = (audio_output_flags_t)profile->mFlags;
-        mSamplingRate = profile->pickSamplingRate();
-        mFormat = profile->pickFormat();
-        mChannelMask = profile->pickChannelMask();
-        if (profile->mGains.size() > 0) {
-            profile->mGains[0]->getDefaultConfig(&mGain);
-        }
-    }
-}
-
-audio_devices_t AudioPolicyManager::AudioOutputDescriptor::device() const
-{
-    if (isDuplicated()) {
-        return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice);
-    } else {
-        return mDevice;
-    }
-}
-
-uint32_t AudioPolicyManager::AudioOutputDescriptor::latency()
-{
-    if (isDuplicated()) {
-        return (mOutput1->mLatency > mOutput2->mLatency) ? mOutput1->mLatency : mOutput2->mLatency;
-    } else {
-        return mLatency;
-    }
-}
-
-bool AudioPolicyManager::AudioOutputDescriptor::sharesHwModuleWith(
-        const sp<AudioOutputDescriptor> outputDesc)
-{
-    if (isDuplicated()) {
-        return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc);
-    } else if (outputDesc->isDuplicated()){
-        return sharesHwModuleWith(outputDesc->mOutput1) || sharesHwModuleWith(outputDesc->mOutput2);
-    } else {
-        return (mProfile->mModule == outputDesc->mProfile->mModule);
-    }
-}
-
-void AudioPolicyManager::AudioOutputDescriptor::changeRefCount(audio_stream_type_t stream,
-                                                                   int delta)
-{
-    // forward usage count change to attached outputs
-    if (isDuplicated()) {
-        mOutput1->changeRefCount(stream, delta);
-        mOutput2->changeRefCount(stream, delta);
-    }
-    if ((delta + (int)mRefCount[stream]) < 0) {
-        ALOGW("changeRefCount() invalid delta %d for stream %d, refCount %d",
-              delta, stream, mRefCount[stream]);
-        mRefCount[stream] = 0;
-        return;
-    }
-    mRefCount[stream] += delta;
-    ALOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]);
-}
-
-audio_devices_t AudioPolicyManager::AudioOutputDescriptor::supportedDevices()
-{
-    if (isDuplicated()) {
-        return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices());
-    } else {
-        return mProfile->mSupportedDevices.types();
-    }
-}
-
-bool AudioPolicyManager::AudioOutputDescriptor::isActive(uint32_t inPastMs) const
-{
-    return isStrategyActive(NUM_STRATEGIES, inPastMs);
-}
-
-bool AudioPolicyManager::AudioOutputDescriptor::isStrategyActive(routing_strategy strategy,
-                                                                       uint32_t inPastMs,
-                                                                       nsecs_t sysTime) const
-{
-    if ((sysTime == 0) && (inPastMs != 0)) {
-        sysTime = systemTime();
-    }
-    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
-        if (i == AUDIO_STREAM_PATCH) {
-            continue;
-        }
-        if (((getStrategy((audio_stream_type_t)i) == strategy) ||
-                (NUM_STRATEGIES == strategy)) &&
-                isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
-            return true;
-        }
-    }
-    return false;
-}
-
-bool AudioPolicyManager::AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
-                                                                       uint32_t inPastMs,
-                                                                       nsecs_t sysTime) const
-{
-    if (mRefCount[stream] != 0) {
-        return true;
-    }
-    if (inPastMs == 0) {
-        return false;
-    }
-    if (sysTime == 0) {
-        sysTime = systemTime();
-    }
-    if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) {
-        return true;
-    }
-    return false;
-}
-
-void AudioPolicyManager::AudioOutputDescriptor::toAudioPortConfig(
-                                                 struct audio_port_config *dstConfig,
-                                                 const struct audio_port_config *srcConfig) const
-{
-    ALOG_ASSERT(!isDuplicated(), "toAudioPortConfig() called on duplicated output %d", mIoHandle);
-
-    dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
-                            AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
-    if (srcConfig != NULL) {
-        dstConfig->config_mask |= srcConfig->config_mask;
-    }
-    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-
-    dstConfig->id = mId;
-    dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
-    dstConfig->type = AUDIO_PORT_TYPE_MIX;
-    dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle;
-    dstConfig->ext.mix.handle = mIoHandle;
-    dstConfig->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
-}
-
-void AudioPolicyManager::AudioOutputDescriptor::toAudioPort(
-                                                    struct audio_port *port) const
-{
-    ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle);
-    mProfile->toAudioPort(port);
-    port->id = mId;
-    toAudioPortConfig(&port->active_config);
-    port->ext.mix.hw_module = mProfile->mModule->mHandle;
-    port->ext.mix.handle = mIoHandle;
-    port->ext.mix.latency_class =
-            mFlags & AUDIO_OUTPUT_FLAG_FAST ? AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL;
-}
-
-status_t AudioPolicyManager::AudioOutputDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, " ID: %d\n", mId);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Format: %08x\n", mFormat);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Latency: %d\n", mLatency);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Flags %08x\n", mFlags);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Devices %08x\n", device());
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Stream volume refCount muteCount\n");
-    result.append(buffer);
-    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
-        snprintf(buffer, SIZE, " %02d     %.03f     %02d       %02d\n",
-                 i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
-        result.append(buffer);
-    }
-    write(fd, result.string(), result.size());
-
-    return NO_ERROR;
-}
-
-// --- AudioInputDescriptor class implementation
-
-AudioPolicyManager::AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
-    : mId(0), mIoHandle(0),
-      mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0), mRefCount(0),
-      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false)
-{
-    if (profile != NULL) {
-        mSamplingRate = profile->pickSamplingRate();
-        mFormat = profile->pickFormat();
-        mChannelMask = profile->pickChannelMask();
-        if (profile->mGains.size() > 0) {
-            profile->mGains[0]->getDefaultConfig(&mGain);
-        }
-    }
-}
-
-void AudioPolicyManager::AudioInputDescriptor::toAudioPortConfig(
-                                                   struct audio_port_config *dstConfig,
-                                                   const struct audio_port_config *srcConfig) const
-{
-    ALOG_ASSERT(mProfile != 0,
-                "toAudioPortConfig() called on input with null profile %d", mIoHandle);
-    dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
-                            AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
-    if (srcConfig != NULL) {
-        dstConfig->config_mask |= srcConfig->config_mask;
-    }
-
-    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-
-    dstConfig->id = mId;
-    dstConfig->role = AUDIO_PORT_ROLE_SINK;
-    dstConfig->type = AUDIO_PORT_TYPE_MIX;
-    dstConfig->ext.mix.hw_module = mProfile->mModule->mHandle;
-    dstConfig->ext.mix.handle = mIoHandle;
-    dstConfig->ext.mix.usecase.source = mInputSource;
-}
-
-void AudioPolicyManager::AudioInputDescriptor::toAudioPort(
-                                                    struct audio_port *port) const
-{
-    ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle);
-
-    mProfile->toAudioPort(port);
-    port->id = mId;
-    toAudioPortConfig(&port->active_config);
-    port->ext.mix.hw_module = mProfile->mModule->mHandle;
-    port->ext.mix.handle = mIoHandle;
-    port->ext.mix.latency_class = AUDIO_LATENCY_NORMAL;
-}
-
-status_t AudioPolicyManager::AudioInputDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, " ID: %d\n", mId);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Format: %d\n", mFormat);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
-    result.append(buffer);
-
-    write(fd, result.string(), result.size());
-
-    return NO_ERROR;
-}
-
-// --- StreamDescriptor class implementation
-
-AudioPolicyManager::StreamDescriptor::StreamDescriptor()
-    :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
-{
-    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
-}
-
-int AudioPolicyManager::StreamDescriptor::getVolumeIndex(audio_devices_t device)
-{
-    device = AudioPolicyManager::getDeviceForVolume(device);
-    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT
-    if (mIndexCur.indexOfKey(device) < 0) {
-        device = AUDIO_DEVICE_OUT_DEFAULT;
-    }
-    return mIndexCur.valueFor(device);
-}
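
// [Editor's sketch] getVolumeIndex() above resolves a per-device volume index and falls
// back to the AUDIO_DEVICE_OUT_DEFAULT entry when the (simplified) device has no entry.
// A minimal standalone illustration of that lookup pattern, using std::map and plain
// ints rather than the KeyedVector/audio_devices_t types (names here are invented for
// the sketch, not AOSP API):

#include <map>

namespace sketch {

constexpr int kDefaultDevice = 0;  // stands in for AUDIO_DEVICE_OUT_DEFAULT

struct StreamVolumes {
    // Per-device volume index; an entry for the default device always exists.
    std::map<int, int> indexForDevice{{kDefaultDevice, 0}};

    int volumeIndex(int device) const {
        auto it = indexForDevice.find(device);
        if (it == indexForDevice.end()) {
            it = indexForDevice.find(kDefaultDevice);  // guaranteed fallback entry
        }
        return it->second;
    }
};

}  // namespace sketch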
-
-void AudioPolicyManager::StreamDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "%s         %02d         %02d         ",
-             mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
-    result.append(buffer);
-    for (size_t i = 0; i < mIndexCur.size(); i++) {
-        snprintf(buffer, SIZE, "%04x : %02d, ",
-                 mIndexCur.keyAt(i),
-                 mIndexCur.valueAt(i));
-        result.append(buffer);
-    }
-    result.append("\n");
-
-    write(fd, result.string(), result.size());
-}
-
-// --- EffectDescriptor class implementation
-
-status_t AudioPolicyManager::EffectDescriptor::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, " I/O: %d\n", mIo);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Session: %d\n", mSession);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " Name: %s\n",  mDesc.name);
-    result.append(buffer);
-    snprintf(buffer, SIZE, " %s\n",  mEnabled ? "Enabled" : "Disabled");
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-
-    return NO_ERROR;
-}
-
-// --- HwModule class implementation
-
-AudioPolicyManager::HwModule::HwModule(const char *name)
-    : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)),
-      mHalVersion(AUDIO_DEVICE_API_VERSION_MIN), mHandle(0)
-{
-}
-
-AudioPolicyManager::HwModule::~HwModule()
-{
-    for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-        mOutputProfiles[i]->mSupportedDevices.clear();
-    }
-    for (size_t i = 0; i < mInputProfiles.size(); i++) {
-        mInputProfiles[i]->mSupportedDevices.clear();
-    }
-    free((void *)mName);
-}
-
-status_t AudioPolicyManager::HwModule::loadInput(cnode *root)
-{
-    cnode *node = root->first_child;
-
-    sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SINK, this);
-
-    while (node) {
-        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
-            profile->loadSamplingRates((char *)node->value);
-        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
-            profile->loadFormats((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            profile->loadInChannels((char *)node->value);
-        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices.loadDevicesFromName((char *)node->value,
-                                                           mDeclaredDevices);
-        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
-            profile->mFlags = parseInputFlagNames((char *)node->value);
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            profile->loadGains(node);
-        }
-        node = node->next;
-    }
-    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
-            "loadInput() invalid supported devices");
-    ALOGW_IF(profile->mChannelMasks.size() == 0,
-            "loadInput() invalid supported channel masks");
-    ALOGW_IF(profile->mSamplingRates.size() == 0,
-            "loadInput() invalid supported sampling rates");
-    ALOGW_IF(profile->mFormats.size() == 0,
-            "loadInput() invalid supported formats");
-    if (!profile->mSupportedDevices.isEmpty() &&
-            (profile->mChannelMasks.size() != 0) &&
-            (profile->mSamplingRates.size() != 0) &&
-            (profile->mFormats.size() != 0)) {
-
-        ALOGV("loadInput() adding input Supported Devices %04x",
-              profile->mSupportedDevices.types());
-
-        mInputProfiles.add(profile);
-        return NO_ERROR;
-    } else {
-        return BAD_VALUE;
-    }
-}
-
-status_t AudioPolicyManager::HwModule::loadOutput(cnode *root)
-{
-    cnode *node = root->first_child;
-
-    sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SOURCE, this);
-
-    while (node) {
-        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
-            profile->loadSamplingRates((char *)node->value);
-        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
-            profile->loadFormats((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            profile->loadOutChannels((char *)node->value);
-        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
-            profile->mSupportedDevices.loadDevicesFromName((char *)node->value,
-                                                           mDeclaredDevices);
-        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
-            profile->mFlags = parseOutputFlagNames((char *)node->value);
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            profile->loadGains(node);
-        }
-        node = node->next;
-    }
-    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
-            "loadOutput() invalid supported devices");
-    ALOGW_IF(profile->mChannelMasks.size() == 0,
-            "loadOutput() invalid supported channel masks");
-    ALOGW_IF(profile->mSamplingRates.size() == 0,
-            "loadOutput() invalid supported sampling rates");
-    ALOGW_IF(profile->mFormats.size() == 0,
-            "loadOutput() invalid supported formats");
-    if (!profile->mSupportedDevices.isEmpty() &&
-            (profile->mChannelMasks.size() != 0) &&
-            (profile->mSamplingRates.size() != 0) &&
-            (profile->mFormats.size() != 0)) {
-
-        ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x",
-              profile->mSupportedDevices.types(), profile->mFlags);
-
-        mOutputProfiles.add(profile);
-        return NO_ERROR;
-    } else {
-        return BAD_VALUE;
-    }
-}
-
-status_t AudioPolicyManager::HwModule::loadDevice(cnode *root)
-{
-    cnode *node = root->first_child;
-
-    audio_devices_t type = AUDIO_DEVICE_NONE;
-    while (node) {
-        if (strcmp(node->name, DEVICE_TYPE) == 0) {
-            type = parseDeviceNames((char *)node->value);
-            break;
-        }
-        node = node->next;
-    }
-    if (type == AUDIO_DEVICE_NONE ||
-            (!audio_is_input_device(type) && !audio_is_output_device(type))) {
-        ALOGW("loadDevice() bad type %08x", type);
-        return BAD_VALUE;
-    }
-    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(String8(root->name), type);
-    deviceDesc->mModule = this;
-
-    node = root->first_child;
-    while (node) {
-        if (strcmp(node->name, DEVICE_ADDRESS) == 0) {
-            deviceDesc->mAddress = String8((char *)node->value);
-        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
-            if (audio_is_input_device(type)) {
-                deviceDesc->loadInChannels((char *)node->value);
-            } else {
-                deviceDesc->loadOutChannels((char *)node->value);
-            }
-        } else if (strcmp(node->name, GAINS_TAG) == 0) {
-            deviceDesc->loadGains(node);
-        }
-        node = node->next;
-    }
-
-    ALOGV("loadDevice() adding device name %s type %08x address %s",
-          deviceDesc->mName.string(), type, deviceDesc->mAddress.string());
-
-    mDeclaredDevices.add(deviceDesc);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::HwModule::addOutputProfile(String8 name, const audio_config_t *config,
-                                                  audio_devices_t device, String8 address)
-{
-    sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SOURCE, this);
-
-    profile->mSamplingRates.add(config->sample_rate);
-    profile->mChannelMasks.add(config->channel_mask);
-    profile->mFormats.add(config->format);
-
-    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(String8(""), device);
-    devDesc->mAddress = address;
-    profile->mSupportedDevices.add(devDesc);
-
-    mOutputProfiles.add(profile);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::HwModule::removeOutputProfile(String8 name)
-{
-    for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-        if (mOutputProfiles[i]->mName == name) {
-            mOutputProfiles.removeAt(i);
-            break;
-        }
-    }
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::HwModule::addInputProfile(String8 name, const audio_config_t *config,
-                                                  audio_devices_t device, String8 address)
-{
-    sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SINK, this);
-
-    profile->mSamplingRates.add(config->sample_rate);
-    profile->mChannelMasks.add(config->channel_mask);
-    profile->mFormats.add(config->format);
-
-    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(String8(""), device);
-    devDesc->mAddress = address;
-    profile->mSupportedDevices.add(devDesc);
-
-    ALOGV("addInputProfile() name %s rate %d mask 0x08", name.string(), config->sample_rate, config->channel_mask);
-
-    mInputProfiles.add(profile);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::HwModule::removeInputProfile(String8 name)
-{
-    for (size_t i = 0; i < mInputProfiles.size(); i++) {
-        if (mInputProfiles[i]->mName == name) {
-            mInputProfiles.removeAt(i);
-            break;
-        }
-    }
-
-    return NO_ERROR;
-}
-
-
-void AudioPolicyManager::HwModule::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "  - name: %s\n", mName);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "  - handle: %d\n", mHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "  - version: %u.%u\n", mHalVersion >> 8, mHalVersion & 0xFF);
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-    if (mOutputProfiles.size()) {
-        write(fd, "  - outputs:\n", strlen("  - outputs:\n"));
-        for (size_t i = 0; i < mOutputProfiles.size(); i++) {
-            snprintf(buffer, SIZE, "    output %zu:\n", i);
-            write(fd, buffer, strlen(buffer));
-            mOutputProfiles[i]->dump(fd);
-        }
-    }
-    if (mInputProfiles.size()) {
-        write(fd, "  - inputs:\n", strlen("  - inputs:\n"));
-        for (size_t i = 0; i < mInputProfiles.size(); i++) {
-            snprintf(buffer, SIZE, "    input %zu:\n", i);
-            write(fd, buffer, strlen(buffer));
-            mInputProfiles[i]->dump(fd);
-        }
-    }
-    if (mDeclaredDevices.size()) {
-        write(fd, "  - devices:\n", strlen("  - devices:\n"));
-        for (size_t i = 0; i < mDeclaredDevices.size(); i++) {
-            mDeclaredDevices[i]->dump(fd, 4, i);
-        }
-    }
-}
-
-// --- AudioPort class implementation
-
-
-AudioPolicyManager::AudioPort::AudioPort(const String8& name, audio_port_type_t type,
-          audio_port_role_t role, const sp<HwModule>& module) :
-    mName(name), mType(type), mRole(role), mModule(module), mFlags(0)
-{
-    mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) ||
-                    ((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK));
-}
-
-void AudioPolicyManager::AudioPort::toAudioPort(struct audio_port *port) const
-{
-    port->role = mRole;
-    port->type = mType;
-    unsigned int i;
-    for (i = 0; i < mSamplingRates.size() && i < AUDIO_PORT_MAX_SAMPLING_RATES; i++) {
-        if (mSamplingRates[i] != 0) {
-            port->sample_rates[i] = mSamplingRates[i];
-        }
-    }
-    port->num_sample_rates = i;
-    for (i = 0; i < mChannelMasks.size() && i < AUDIO_PORT_MAX_CHANNEL_MASKS; i++) {
-        if (mChannelMasks[i] != 0) {
-            port->channel_masks[i] = mChannelMasks[i];
-        }
-    }
-    port->num_channel_masks = i;
-    for (i = 0; i < mFormats.size() && i < AUDIO_PORT_MAX_FORMATS; i++) {
-        if (mFormats[i] != 0) {
-            port->formats[i] = mFormats[i];
-        }
-    }
-    port->num_formats = i;
-
-    ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
-
-    for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) {
-        port->gains[i] = mGains[i]->mGain;
-    }
-    port->num_gains = i;
-}
-
-void AudioPolicyManager::AudioPort::importAudioPort(const sp<AudioPort> port) {
-    for (size_t k = 0 ; k < port->mSamplingRates.size() ; k++) {
-        const uint32_t rate = port->mSamplingRates.itemAt(k);
-        if (rate != 0) { // skip "dynamic" rates
-            bool hasRate = false;
-            for (size_t l = 0 ; l < mSamplingRates.size() ; l++) {
-                if (rate == mSamplingRates.itemAt(l)) {
-                    hasRate = true;
-                    break;
-                }
-            }
-            if (!hasRate) { // never import a sampling rate twice
-                mSamplingRates.add(rate);
-            }
-        }
-    }
-    for (size_t k = 0 ; k < port->mChannelMasks.size() ; k++) {
-        const audio_channel_mask_t mask = port->mChannelMasks.itemAt(k);
-        if (mask != 0) { // skip "dynamic" masks
-            bool hasMask = false;
-            for (size_t l = 0 ; l < mChannelMasks.size() ; l++) {
-                if (mask == mChannelMasks.itemAt(l)) {
-                    hasMask = true;
-                    break;
-                }
-            }
-            if (!hasMask) { // never import a channel mask twice
-                mChannelMasks.add(mask);
-            }
-        }
-    }
-    for (size_t k = 0 ; k < port->mFormats.size() ; k++) {
-        const audio_format_t format = port->mFormats.itemAt(k);
-        if (format != 0) { // skip "dynamic" formats
-            bool hasFormat = false;
-            for (size_t l = 0 ; l < mFormats.size() ; l++) {
-                if (format == mFormats.itemAt(l)) {
-                    hasFormat = true;
-                    break;
-                }
-            }
-            if (!hasFormat) { // never import a format twice
-                mFormats.add(format);
-            }
-        }
-    }
-    for (size_t k = 0 ; k < port->mGains.size() ; k++) {
-        sp<AudioGain> gain = port->mGains.itemAt(k);
-        if (gain != 0) {
-            bool hasGain = false;
-            for (size_t l = 0 ; l < mGains.size() ; l++) {
-                if (gain == mGains.itemAt(l)) {
-                    hasGain = true;
-                    break;
-                }
-            }
-            if (!hasGain) { // never import a gain twice
-                mGains.add(gain);
-            }
-        }
-    }
-}
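
// [Editor's sketch] importAudioPort() above merges another port's capabilities while
// skipping zero ("dynamic") placeholders and avoiding duplicates with a linear scan.
// A compact illustration of the same merge policy using std::vector/std::find; the
// helper name is invented for the sketch and the real code keeps AOSP's Vector types:

#include <algorithm>
#include <cstdint>
#include <vector>

namespace sketch {

// Append values from 'src' into 'dst', ignoring 0 entries and values already present.
inline void importUnique(std::vector<uint32_t>& dst, const std::vector<uint32_t>& src) {
    for (uint32_t value : src) {
        if (value != 0 && std::find(dst.begin(), dst.end(), value) == dst.end()) {
            dst.push_back(value);
        }
    }
}

}  // namespace sketch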
-
-void AudioPolicyManager::AudioPort::clearCapabilities() {
-    mChannelMasks.clear();
-    mFormats.clear();
-    mSamplingRates.clear();
-    mGains.clear();
-}
-
-void AudioPolicyManager::AudioPort::loadSamplingRates(char *name)
-{
-    char *str = strtok(name, "|");
-
-    // by convention, "0' in the first entry in mSamplingRates indicates the supported sampling
-    // rates should be read from the output stream after it is opened for the first time
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mSamplingRates.add(0);
-        return;
-    }
-
-    while (str != NULL) {
-        uint32_t rate = atoi(str);
-        if (rate != 0) {
-            ALOGV("loadSamplingRates() adding rate %d", rate);
-            mSamplingRates.add(rate);
-        }
-        str = strtok(NULL, "|");
-    }
-}
-
-void AudioPolicyManager::AudioPort::loadFormats(char *name)
-{
-    char *str = strtok(name, "|");
-
-    // by convention, "0' in the first entry in mFormats indicates the supported formats
-    // should be read from the output stream after it is opened for the first time
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mFormats.add(AUDIO_FORMAT_DEFAULT);
-        return;
-    }
-
-    while (str != NULL) {
-        audio_format_t format = (audio_format_t)stringToEnum(sFormatNameToEnumTable,
-                                                             ARRAY_SIZE(sFormatNameToEnumTable),
-                                                             str);
-        if (format != AUDIO_FORMAT_DEFAULT) {
-            mFormats.add(format);
-        }
-        str = strtok(NULL, "|");
-    }
-}
-
-void AudioPolicyManager::AudioPort::loadInChannels(char *name)
-{
-    const char *str = strtok(name, "|");
-
-    ALOGV("loadInChannels() %s", name);
-
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mChannelMasks.add(0);
-        return;
-    }
-
-    while (str != NULL) {
-        audio_channel_mask_t channelMask =
-                (audio_channel_mask_t)stringToEnum(sInChannelsNameToEnumTable,
-                                                   ARRAY_SIZE(sInChannelsNameToEnumTable),
-                                                   str);
-        if (channelMask != 0) {
-            ALOGV("loadInChannels() adding channelMask %04x", channelMask);
-            mChannelMasks.add(channelMask);
-        }
-        str = strtok(NULL, "|");
-    }
-}
-
-void AudioPolicyManager::AudioPort::loadOutChannels(char *name)
-{
-    const char *str = strtok(name, "|");
-
-    ALOGV("loadOutChannels() %s", name);
-
-    // by convention, "0' in the first entry in mChannelMasks indicates the supported channel
-    // masks should be read from the output stream after it is opened for the first time
-    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
-        mChannelMasks.add(0);
-        return;
-    }
-
-    while (str != NULL) {
-        audio_channel_mask_t channelMask =
-                (audio_channel_mask_t)stringToEnum(sOutChannelsNameToEnumTable,
-                                                   ARRAY_SIZE(sOutChannelsNameToEnumTable),
-                                                   str);
-        if (channelMask != 0) {
-            mChannelMasks.add(channelMask);
-        }
-        str = strtok(NULL, "|");
-    }
-    return;
-}
-
-audio_gain_mode_t AudioPolicyManager::AudioPort::loadGainMode(char *name)
-{
-    const char *str = strtok(name, "|");
-
-    ALOGV("loadGainMode() %s", name);
-    audio_gain_mode_t mode = 0;
-    while (str != NULL) {
-        mode |= (audio_gain_mode_t)stringToEnum(sGainModeNameToEnumTable,
-                                                ARRAY_SIZE(sGainModeNameToEnumTable),
-                                                str);
-        str = strtok(NULL, "|");
-    }
-    return mode;
-}
-
-void AudioPolicyManager::AudioPort::loadGain(cnode *root, int index)
-{
-    cnode *node = root->first_child;
-
-    sp<AudioGain> gain = new AudioGain(index, mUseInChannelMask);
-
-    while (node) {
-        if (strcmp(node->name, GAIN_MODE) == 0) {
-            gain->mGain.mode = loadGainMode((char *)node->value);
-        } else if (strcmp(node->name, GAIN_CHANNELS) == 0) {
-            if (mUseInChannelMask) {
-                gain->mGain.channel_mask =
-                        (audio_channel_mask_t)stringToEnum(sInChannelsNameToEnumTable,
-                                                           ARRAY_SIZE(sInChannelsNameToEnumTable),
-                                                           (char *)node->value);
-            } else {
-                gain->mGain.channel_mask =
-                        (audio_channel_mask_t)stringToEnum(sOutChannelsNameToEnumTable,
-                                                           ARRAY_SIZE(sOutChannelsNameToEnumTable),
-                                                           (char *)node->value);
-            }
-        } else if (strcmp(node->name, GAIN_MIN_VALUE) == 0) {
-            gain->mGain.min_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_MAX_VALUE) == 0) {
-            gain->mGain.max_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_DEFAULT_VALUE) == 0) {
-            gain->mGain.default_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_STEP_VALUE) == 0) {
-            gain->mGain.step_value = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_MIN_RAMP_MS) == 0) {
-            gain->mGain.min_ramp_ms = atoi((char *)node->value);
-        } else if (strcmp(node->name, GAIN_MAX_RAMP_MS) == 0) {
-            gain->mGain.max_ramp_ms = atoi((char *)node->value);
-        }
-        node = node->next;
-    }
-
-    ALOGV("loadGain() adding new gain mode %08x channel mask %08x min mB %d max mB %d",
-          gain->mGain.mode, gain->mGain.channel_mask, gain->mGain.min_value, gain->mGain.max_value);
-
-    if (gain->mGain.mode == 0) {
-        return;
-    }
-    mGains.add(gain);
-}
-
-void AudioPolicyManager::AudioPort::loadGains(cnode *root)
-{
-    cnode *node = root->first_child;
-    int index = 0;
-    while (node) {
-        ALOGV("loadGains() loading gain %s", node->name);
-        loadGain(node, index++);
-        node = node->next;
-    }
-}
-
-status_t AudioPolicyManager::AudioPort::checkExactSamplingRate(uint32_t samplingRate) const
-{
-    if (mSamplingRates.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (size_t i = 0; i < mSamplingRates.size(); i ++) {
-        if (mSamplingRates[i] == samplingRate) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioPolicyManager::AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate,
-        uint32_t *updatedSamplingRate) const
-{
-    if (mSamplingRates.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    // Search for the closest supported sampling rate that is above (preferred)
-    // or below (acceptable) the desired sampling rate, within a permitted ratio.
-    // The sampling rates do not need to be sorted in ascending order.
-    ssize_t maxBelow = -1;
-    ssize_t minAbove = -1;
-    uint32_t candidate;
-    for (size_t i = 0; i < mSamplingRates.size(); i++) {
-        candidate = mSamplingRates[i];
-        if (candidate == samplingRate) {
-            if (updatedSamplingRate != NULL) {
-                *updatedSamplingRate = candidate;
-            }
-            return NO_ERROR;
-        }
-        // candidate < desired
-        if (candidate < samplingRate) {
-            if (maxBelow < 0 || candidate > mSamplingRates[maxBelow]) {
-                maxBelow = i;
-            }
-        // candidate > desired
-        } else {
-            if (minAbove < 0 || candidate < mSamplingRates[minAbove]) {
-                minAbove = i;
-            }
-        }
-    }
-    // This uses hard-coded knowledge about AudioFlinger resampling ratios.
-    // TODO Move these assumptions out.
-    static const uint32_t kMaxDownSampleRatio = 6;  // beyond this aliasing occurs
-    static const uint32_t kMaxUpSampleRatio = 256;  // beyond this sample rate inaccuracies occur
-                                                    // due to approximation by an int32_t of the
-                                                    // phase increments
-    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
-    if (minAbove >= 0) {
-        candidate = mSamplingRates[minAbove];
-        if (candidate / kMaxDownSampleRatio <= samplingRate) {
-            if (updatedSamplingRate != NULL) {
-                *updatedSamplingRate = candidate;
-            }
-            return NO_ERROR;
-        }
-    }
-    // But if we have to up-sample from a lower sampling rate, that's OK.
-    if (maxBelow >= 0) {
-        candidate = mSamplingRates[maxBelow];
-        if (candidate * kMaxUpSampleRatio >= samplingRate) {
-            if (updatedSamplingRate != NULL) {
-                *updatedSamplingRate = candidate;
-            }
-            return NO_ERROR;
-        }
-    }
-    // leave updatedSamplingRate unmodified
-    return BAD_VALUE;
-}
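
// [Editor's sketch] The search above prefers the smallest supported rate at or above the
// request (down-sampling preserves the full spectrum) and only then accepts the largest
// rate below it, each subject to the mixer ratio limits hard-coded above. A standalone
// illustration of that selection; the function name is invented for the sketch:

#include <cstdint>
#include <optional>
#include <vector>

namespace sketch {

constexpr uint32_t kMaxDownSampleRatio = 6;    // beyond this, aliasing occurs
constexpr uint32_t kMaxUpSampleRatio = 256;    // beyond this, phase-increment error grows

inline std::optional<uint32_t> pickCompatibleRate(const std::vector<uint32_t>& supported,
                                                  uint32_t requested) {
    std::optional<uint32_t> bestAbove;  // smallest supported rate above the request
    std::optional<uint32_t> bestBelow;  // largest supported rate below the request
    for (uint32_t rate : supported) {
        if (rate == requested) {
            return rate;                // exact match wins immediately
        }
        if (rate > requested) {
            if (!bestAbove || rate < *bestAbove) bestAbove = rate;
        } else if (!bestBelow || rate > *bestBelow) {
            bestBelow = rate;
        }
    }
    if (bestAbove && *bestAbove / kMaxDownSampleRatio <= requested) return bestAbove;
    if (bestBelow && *bestBelow * kMaxUpSampleRatio >= requested) return bestBelow;
    return std::nullopt;                // nothing within the permitted ratios
}

}  // namespace sketch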
-
-status_t AudioPolicyManager::AudioPort::checkExactChannelMask(audio_channel_mask_t channelMask) const
-{
-    if (mChannelMasks.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (size_t i = 0; i < mChannelMasks.size(); i++) {
-        if (mChannelMasks[i] == channelMask) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-status_t AudioPolicyManager::AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask)
-        const
-{
-    if (mChannelMasks.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
-    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
-        // FIXME Does not handle multi-channel automatic conversions yet
-        audio_channel_mask_t supported = mChannelMasks[i];
-        if (supported == channelMask) {
-            return NO_ERROR;
-        }
-        if (isRecordThread) {
-            // This uses hard-coded knowledge that AudioFlinger can silently down-mix and up-mix.
-            // FIXME Abstract this out to a table.
-            if (((supported == AUDIO_CHANNEL_IN_FRONT_BACK || supported == AUDIO_CHANNEL_IN_STEREO)
-                    && channelMask == AUDIO_CHANNEL_IN_MONO) ||
-                (supported == AUDIO_CHANNEL_IN_MONO && (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
-                    || channelMask == AUDIO_CHANNEL_IN_STEREO))) {
-                return NO_ERROR;
-            }
-        }
-    }
-    return BAD_VALUE;
-}
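
// [Editor's sketch] For capture profiles the loop above also accepts channel masks that
// AudioFlinger can convert implicitly (mono vs. stereo or front-back). A tiny
// illustration of that relaxed match with invented mask constants, not the audio.h values:

namespace sketch {

enum Mask { kMono, kStereo, kFrontBack };

inline bool implicitlyConvertible(Mask supported, Mask requested) {
    if (supported == requested) {
        return true;
    }
    const bool supportedIsTwoChannel = (supported == kStereo || supported == kFrontBack);
    const bool requestedIsTwoChannel = (requested == kStereo || requested == kFrontBack);
    // Mono can be up-mixed to stereo/front-back, and either can be down-mixed to mono.
    return (supportedIsTwoChannel && requested == kMono) ||
           (supported == kMono && requestedIsTwoChannel);
}

}  // namespace sketch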
-
-status_t AudioPolicyManager::AudioPort::checkFormat(audio_format_t format) const
-{
-    if (mFormats.isEmpty()) {
-        return NO_ERROR;
-    }
-
-    for (size_t i = 0; i < mFormats.size(); i ++) {
-        if (mFormats[i] == format) {
-            return NO_ERROR;
-        }
-    }
-    return BAD_VALUE;
-}
-
-
-uint32_t AudioPolicyManager::AudioPort::pickSamplingRate() const
-{
-    // special case for uninitialized dynamic profile
-    if (mSamplingRates.size() == 1 && mSamplingRates[0] == 0) {
-        return 0;
-    }
-
-    // For direct outputs, pick minimum sampling rate: this helps ensuring that the
-    // channel count / sampling rate combination chosen will be supported by the connected
-    // sink
-    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
-        uint32_t samplingRate = UINT_MAX;
-        for (size_t i = 0; i < mSamplingRates.size(); i ++) {
-            if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
-                samplingRate = mSamplingRates[i];
-            }
-        }
-        return (samplingRate == UINT_MAX) ? 0 : samplingRate;
-    }
-
-    uint32_t samplingRate = 0;
-    uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
-
-    // For mixed output and inputs, use max mixer sampling rates. Do not
-    // limit sampling rate otherwise
-    if (mType != AUDIO_PORT_TYPE_MIX) {
-        maxRate = UINT_MAX;
-    }
-    for (size_t i = 0; i < mSamplingRates.size(); i ++) {
-        if ((mSamplingRates[i] > samplingRate) && (mSamplingRates[i] <= maxRate)) {
-            samplingRate = mSamplingRates[i];
-        }
-    }
-    return samplingRate;
-}
-
-audio_channel_mask_t AudioPolicyManager::AudioPort::pickChannelMask() const
-{
-    // special case for uninitialized dynamic profile
-    if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
-        return AUDIO_CHANNEL_NONE;
-    }
-    audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
-
-    // For direct outputs, pick minimum channel count: this helps ensuring that the
-    // channel count / sampling rate combination chosen will be supported by the connected
-    // sink
-    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
-            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
-        uint32_t channelCount = UINT_MAX;
-        for (size_t i = 0; i < mChannelMasks.size(); i ++) {
-            uint32_t cnlCount;
-            if (mUseInChannelMask) {
-                cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
-            } else {
-                cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
-            }
-            if ((cnlCount < channelCount) && (cnlCount > 0)) {
-                channelMask = mChannelMasks[i];
-                channelCount = cnlCount;
-            }
-        }
-        return channelMask;
-    }
-
-    uint32_t channelCount = 0;
-    uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
-
-    // For mixed output and inputs, use max mixer channel count. Do not
-    // limit channel count otherwise
-    if (mType != AUDIO_PORT_TYPE_MIX) {
-        maxCount = UINT_MAX;
-    }
-    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
-        uint32_t cnlCount;
-        if (mUseInChannelMask) {
-            cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
-        } else {
-            cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
-        }
-        if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
-            channelMask = mChannelMasks[i];
-            channelCount = cnlCount;
-        }
-    }
-    return channelMask;
-}
-
-/* format in order of increasing preference */
-const audio_format_t AudioPolicyManager::AudioPort::sPcmFormatCompareTable[] = {
-        AUDIO_FORMAT_DEFAULT,
-        AUDIO_FORMAT_PCM_16_BIT,
-        AUDIO_FORMAT_PCM_8_24_BIT,
-        AUDIO_FORMAT_PCM_24_BIT_PACKED,
-        AUDIO_FORMAT_PCM_32_BIT,
-        AUDIO_FORMAT_PCM_FLOAT,
-};
-
-int AudioPolicyManager::AudioPort::compareFormats(audio_format_t format1,
-                                                  audio_format_t format2)
-{
-    // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
-    // compressed format and better than any PCM format. This is by design of pickFormat()
-    if (!audio_is_linear_pcm(format1)) {
-        if (!audio_is_linear_pcm(format2)) {
-            return 0;
-        }
-        return 1;
-    }
-    if (!audio_is_linear_pcm(format2)) {
-        return -1;
-    }
-
-    int index1 = -1, index2 = -1;
-    for (size_t i = 0;
-            (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
-            i ++) {
-        if (sPcmFormatCompareTable[i] == format1) {
-            index1 = i;
-        }
-        if (sPcmFormatCompareTable[i] == format2) {
-            index2 = i;
-        }
-    }
-    // format1 not found => index1 < 0 => format2 > format1
-    // format2 not found => index2 < 0 => format2 < format1
-    return index1 - index2;
-}
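
// [Editor's sketch] compareFormats() above ranks formats by position in a small
// preference table, with non-PCM (and invalid) formats sorting above every PCM format.
// A standalone illustration of that table-driven comparison using invented enum names
// rather than the real audio_format_t values:

#include <algorithm>
#include <array>

namespace sketch {

enum class Fmt { Default, Pcm16, Pcm8_24, Pcm24Packed, Pcm32, PcmFloat, Compressed };

// Ordered from least to most preferred, mirroring sPcmFormatCompareTable above.
constexpr std::array<Fmt, 6> kPcmPreference = {
    Fmt::Default, Fmt::Pcm16, Fmt::Pcm8_24, Fmt::Pcm24Packed, Fmt::Pcm32, Fmt::PcmFloat,
};

inline bool isPcm(Fmt f) { return f != Fmt::Compressed; }

// Returns > 0 if 'a' is preferred over 'b', < 0 if 'b' is preferred, 0 if equivalent.
inline int compareFmt(Fmt a, Fmt b) {
    if (!isPcm(a)) return isPcm(b) ? 1 : 0;   // a compressed format beats any PCM format
    if (!isPcm(b)) return -1;
    auto rank = [](Fmt f) {
        return static_cast<int>(std::find(kPcmPreference.begin(), kPcmPreference.end(), f) -
                                kPcmPreference.begin());
    };
    return rank(a) - rank(b);
}

}  // namespace sketch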
-
-audio_format_t AudioPolicyManager::AudioPort::pickFormat() const
-{
-    // special case for uninitialized dynamic profile
-    if (mFormats.size() == 1 && mFormats[0] == 0) {
-        return AUDIO_FORMAT_DEFAULT;
-    }
-
-    audio_format_t format = AUDIO_FORMAT_DEFAULT;
-    audio_format_t bestFormat =
-            AudioPolicyManager::AudioPort::sPcmFormatCompareTable[
-                ARRAY_SIZE(AudioPolicyManager::AudioPort::sPcmFormatCompareTable) - 1];
-    // For mixed output and inputs, use best mixer output format. Do not
-    // limit format otherwise
-    if ((mType != AUDIO_PORT_TYPE_MIX) ||
-            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
-             (((mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) != 0)))) {
-        bestFormat = AUDIO_FORMAT_INVALID;
-    }
-
-    for (size_t i = 0; i < mFormats.size(); i ++) {
-        if ((compareFormats(mFormats[i], format) > 0) &&
-                (compareFormats(mFormats[i], bestFormat) <= 0)) {
-            format = mFormats[i];
-        }
-    }
-    return format;
-}
-
-status_t AudioPolicyManager::AudioPort::checkGain(const struct audio_gain_config *gainConfig,
-                                                  int index) const
-{
-    if (index < 0 || (size_t)index >= mGains.size()) {
-        return BAD_VALUE;
-    }
-    return mGains[index]->checkConfig(gainConfig);
-}
-
-void AudioPolicyManager::AudioPort::dump(int fd, int spaces) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    if (mName.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- name: %s\n", spaces, "", mName.string());
-        result.append(buffer);
-    }
-
-    if (mSamplingRates.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- sampling rates: ", spaces, "");
-        result.append(buffer);
-        for (size_t i = 0; i < mSamplingRates.size(); i++) {
-            if (i == 0 && mSamplingRates[i] == 0) {
-                snprintf(buffer, SIZE, "Dynamic");
-            } else {
-                snprintf(buffer, SIZE, "%d", mSamplingRates[i]);
-            }
-            result.append(buffer);
-            result.append(i == (mSamplingRates.size() - 1) ? "" : ", ");
-        }
-        result.append("\n");
-    }
-
-    if (mChannelMasks.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- channel masks: ", spaces, "");
-        result.append(buffer);
-        for (size_t i = 0; i < mChannelMasks.size(); i++) {
-            ALOGV("AudioPort::dump mChannelMasks %zu %08x", i, mChannelMasks[i]);
-
-            if (i == 0 && mChannelMasks[i] == 0) {
-                snprintf(buffer, SIZE, "Dynamic");
-            } else {
-                snprintf(buffer, SIZE, "0x%04x", mChannelMasks[i]);
-            }
-            result.append(buffer);
-            result.append(i == (mChannelMasks.size() - 1) ? "" : ", ");
-        }
-        result.append("\n");
-    }
-
-    if (mFormats.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- formats: ", spaces, "");
-        result.append(buffer);
-        for (size_t i = 0; i < mFormats.size(); i++) {
-            const char *formatStr = enumToString(sFormatNameToEnumTable,
-                                                 ARRAY_SIZE(sFormatNameToEnumTable),
-                                                 mFormats[i]);
-            if (i == 0 && strcmp(formatStr, "") == 0) {
-                snprintf(buffer, SIZE, "Dynamic");
-            } else {
-                snprintf(buffer, SIZE, "%s", formatStr);
-            }
-            result.append(buffer);
-            result.append(i == (mFormats.size() - 1) ? "" : ", ");
-        }
-        result.append("\n");
-    }
-    write(fd, result.string(), result.size());
-    if (mGains.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- gains:\n", spaces, "");
-        write(fd, buffer, strlen(buffer));
-        result.append(buffer);
-        for (size_t i = 0; i < mGains.size(); i++) {
-            mGains[i]->dump(fd, spaces + 2, i);
-        }
-    }
-}
-
-// --- AudioGain class implementation
-
-AudioPolicyManager::AudioGain::AudioGain(int index, bool useInChannelMask)
-{
-    mIndex = index;
-    mUseInChannelMask = useInChannelMask;
-    memset(&mGain, 0, sizeof(struct audio_gain));
-}
-
-void AudioPolicyManager::AudioGain::getDefaultConfig(struct audio_gain_config *config)
-{
-    config->index = mIndex;
-    config->mode = mGain.mode;
-    config->channel_mask = mGain.channel_mask;
-    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        config->values[0] = mGain.default_value;
-    } else {
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            config->values[i] = mGain.default_value;
-        }
-    }
-    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        config->ramp_duration_ms = mGain.min_ramp_ms;
-    }
-}
-
-status_t AudioPolicyManager::AudioGain::checkConfig(const struct audio_gain_config *config)
-{
-    if ((config->mode & ~mGain.mode) != 0) {
-        return BAD_VALUE;
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
-        if ((config->values[0] < mGain.min_value) ||
-                    (config->values[0] > mGain.max_value)) {
-            return BAD_VALUE;
-        }
-    } else {
-        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
-            return BAD_VALUE;
-        }
-        uint32_t numValues;
-        if (mUseInChannelMask) {
-            numValues = audio_channel_count_from_in_mask(config->channel_mask);
-        } else {
-            numValues = audio_channel_count_from_out_mask(config->channel_mask);
-        }
-        for (size_t i = 0; i < numValues; i++) {
-            if ((config->values[i] < mGain.min_value) ||
-                    (config->values[i] > mGain.max_value)) {
-                return BAD_VALUE;
-            }
-        }
-    }
-    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
-        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
-                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
-            return BAD_VALUE;
-        }
-    }
-    return NO_ERROR;
-}
-
-void AudioPolicyManager::AudioGain::dump(int fd, int spaces, int index) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "%*sGain %d:\n", spaces, "", index+1);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- mode: %08x\n", spaces, "", mGain.mode);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
-    result.append(buffer);
-
-    write(fd, result.string(), result.size());
-}
-
-// --- AudioPortConfig class implementation
-
-AudioPolicyManager::AudioPortConfig::AudioPortConfig()
-{
-    mSamplingRate = 0;
-    mChannelMask = AUDIO_CHANNEL_NONE;
-    mFormat = AUDIO_FORMAT_INVALID;
-    mGain.index = -1;
-}
-
-status_t AudioPolicyManager::AudioPortConfig::applyAudioPortConfig(
-                                                        const struct audio_port_config *config,
-                                                        struct audio_port_config *backupConfig)
-{
-    struct audio_port_config localBackupConfig;
-    status_t status = NO_ERROR;
-
-    localBackupConfig.config_mask = config->config_mask;
-    toAudioPortConfig(&localBackupConfig);
-
-    sp<AudioPort> audioport = getAudioPort();
-    if (audioport == 0) {
-        status = NO_INIT;
-        goto exit;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        status = audioport->checkExactSamplingRate(config->sample_rate);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
-        mSamplingRate = config->sample_rate;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        status = audioport->checkExactChannelMask(config->channel_mask);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
-        mChannelMask = config->channel_mask;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        status = audioport->checkFormat(config->format);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
-        mFormat = config->format;
-    }
-    if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        status = audioport->checkGain(&config->gain, config->gain.index);
-        if (status != NO_ERROR) {
-            goto exit;
-        }
-        mGain = config->gain;
-    }
-
-exit:
-    if (status != NO_ERROR) {
-        applyAudioPortConfig(&localBackupConfig);
-    }
-    if (backupConfig != NULL) {
-        *backupConfig = localBackupConfig;
-    }
-    return status;
-}
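
// [Editor's sketch] applyAudioPortConfig() above snapshots the current values, applies the
// requested fields one by one, and restores the snapshot if any check fails. A minimal
// illustration of that validate-or-rollback pattern on a plain struct; the type and field
// names are invented for the sketch:

namespace sketch {

struct PortConfig {
    unsigned sampleRate = 0;
    unsigned channels = 0;
};

struct ConfigurablePort {
    PortConfig current;

    static bool isSupported(const PortConfig& c) {
        return c.sampleRate >= 8000 && c.sampleRate <= 192000 &&
               c.channels >= 1 && c.channels <= 8;
    }

    // Returns true on success; on failure 'current' is left exactly as it was.
    bool apply(const PortConfig& requested) {
        const PortConfig backup = current;   // snapshot before mutating
        current = requested;
        if (!isSupported(current)) {
            current = backup;                // roll back on validation failure
            return false;
        }
        return true;
    }
};

}  // namespace sketch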
-
-void AudioPolicyManager::AudioPortConfig::toAudioPortConfig(
-                                                    struct audio_port_config *dstConfig,
-                                                    const struct audio_port_config *srcConfig) const
-{
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
-        dstConfig->sample_rate = mSamplingRate;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) {
-            dstConfig->sample_rate = srcConfig->sample_rate;
-        }
-    } else {
-        dstConfig->sample_rate = 0;
-    }
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
-        dstConfig->channel_mask = mChannelMask;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) {
-            dstConfig->channel_mask = srcConfig->channel_mask;
-        }
-    } else {
-        dstConfig->channel_mask = AUDIO_CHANNEL_NONE;
-    }
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
-        dstConfig->format = mFormat;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) {
-            dstConfig->format = srcConfig->format;
-        }
-    } else {
-        dstConfig->format = AUDIO_FORMAT_INVALID;
-    }
-    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) {
-        dstConfig->gain = mGain;
-        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)) {
-            dstConfig->gain = srcConfig->gain;
-        }
-    } else {
-        dstConfig->gain.index = -1;
-    }
-    if (dstConfig->gain.index != -1) {
-        dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN;
-    } else {
-        dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
-    }
-}
-
-// --- IOProfile class implementation
-
-AudioPolicyManager::IOProfile::IOProfile(const String8& name, audio_port_role_t role,
-                                         const sp<HwModule>& module)
-    : AudioPort(name, AUDIO_PORT_TYPE_MIX, role, module)
-{
-}
-
-AudioPolicyManager::IOProfile::~IOProfile()
-{
-}
-
-// checks if the IO profile is compatible with specified parameters.
-// Sampling rate, format and channel mask must be specified in order to
-// get a valid match
-bool AudioPolicyManager::IOProfile::isCompatibleProfile(audio_devices_t device,
-                                                        String8 address,
-                                                        uint32_t samplingRate,
-                                                        uint32_t *updatedSamplingRate,
-                                                        audio_format_t format,
-                                                        audio_channel_mask_t channelMask,
-                                                        uint32_t flags) const
-{
-    const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
-    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
-    ALOG_ASSERT(isPlaybackThread != isRecordThread);
-
-    if (device != AUDIO_DEVICE_NONE && mSupportedDevices.getDevice(device, address) == 0) {
-        return false;
-    }
-
-    if (samplingRate == 0) {
-         return false;
-    }
-    uint32_t myUpdatedSamplingRate = samplingRate;
-    if (isPlaybackThread && checkExactSamplingRate(samplingRate) != NO_ERROR) {
-         return false;
-    }
-    if (isRecordThread && checkCompatibleSamplingRate(samplingRate, &myUpdatedSamplingRate) !=
-            NO_ERROR) {
-         return false;
-    }
-
-    if (!audio_is_valid_format(format) || checkFormat(format) != NO_ERROR) {
-        return false;
-    }
-
-    if (isPlaybackThread && (!audio_is_output_channel(channelMask) ||
-            checkExactChannelMask(channelMask) != NO_ERROR)) {
-        return false;
-    }
-    if (isRecordThread && (!audio_is_input_channel(channelMask) ||
-            checkCompatibleChannelMask(channelMask) != NO_ERROR)) {
-        return false;
-    }
-
-    if (isPlaybackThread && (mFlags & flags) != flags) {
-        return false;
-    }
-    // The only input flag that is allowed to be different is the fast flag.
-    // An existing fast stream is compatible with a normal track request.
-    // An existing normal stream is compatible with a fast track request,
-    // but the fast request will be denied by AudioFlinger and converted to normal track.
-    if (isRecordThread && ((mFlags ^ flags) &
-            ~AUDIO_INPUT_FLAG_FAST)) {
-        return false;
-    }
-
-    if (updatedSamplingRate != NULL) {
-        *updatedSamplingRate = myUpdatedSamplingRate;
-    }
-    return true;
-}
-
-void AudioPolicyManager::IOProfile::dump(int fd)
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    AudioPort::dump(fd, 4);
-
-    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", mFlags);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "    - devices:\n");
-    result.append(buffer);
-    write(fd, result.string(), result.size());
-    for (size_t i = 0; i < mSupportedDevices.size(); i++) {
-        mSupportedDevices[i]->dump(fd, 6, i);
-    }
-}
-
-void AudioPolicyManager::IOProfile::log()
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    ALOGV("    - sampling rates: ");
-    for (size_t i = 0; i < mSamplingRates.size(); i++) {
-        ALOGV("  %d", mSamplingRates[i]);
-    }
-
-    ALOGV("    - channel masks: ");
-    for (size_t i = 0; i < mChannelMasks.size(); i++) {
-        ALOGV("  0x%04x", mChannelMasks[i]);
-    }
-
-    ALOGV("    - formats: ");
-    for (size_t i = 0; i < mFormats.size(); i++) {
-        ALOGV("  0x%08x", mFormats[i]);
-    }
-
-    ALOGV("    - devices: 0x%04x\n", mSupportedDevices.types());
-    ALOGV("    - flags: 0x%04x\n", mFlags);
-}
-
-
-// --- DeviceDescriptor implementation
-
-
-AudioPolicyManager::DeviceDescriptor::DeviceDescriptor(const String8& name, audio_devices_t type) :
-                     AudioPort(name, AUDIO_PORT_TYPE_DEVICE,
-                               audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
-                                                              AUDIO_PORT_ROLE_SOURCE,
-                             NULL),
-                     mDeviceType(type), mAddress(""), mId(0)
-{
-}
-
-bool AudioPolicyManager::DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
-{
-    // Devices are considered equal if they:
-    // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
-    // - have the same address or one device does not specify the address
-    // - have the same channel mask or one device does not specify the channel mask
-    return (mDeviceType == other->mDeviceType) &&
-           (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
-           (mChannelMask == 0 || other->mChannelMask == 0 ||
-                mChannelMask == other->mChannelMask);
-}
-
-void AudioPolicyManager::DeviceDescriptor::loadGains(cnode *root)
-{
-    AudioPort::loadGains(root);
-    if (mGains.size() > 0) {
-        mGains[0]->getDefaultConfig(&mGain);
-    }
-}
-
-
-void AudioPolicyManager::DeviceVector::refreshTypes()
-{
-    mDeviceTypes = AUDIO_DEVICE_NONE;
-    for(size_t i = 0; i < size(); i++) {
-        mDeviceTypes |= itemAt(i)->mDeviceType;
-    }
-    ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
-}
-
-ssize_t AudioPolicyManager::DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
-{
-    for(size_t i = 0; i < size(); i++) {
-        if (item->equals(itemAt(i))) {
-            return i;
-        }
-    }
-    return -1;
-}
-
-ssize_t AudioPolicyManager::DeviceVector::add(const sp<DeviceDescriptor>& item)
-{
-    ssize_t ret = indexOf(item);
-
-    if (ret < 0) {
-        ret = SortedVector::add(item);
-        if (ret >= 0) {
-            refreshTypes();
-        }
-    } else {
-        ALOGW("DeviceVector::add device %08x already in", item->mDeviceType);
-        ret = -1;
-    }
-    return ret;
-}
-
-ssize_t AudioPolicyManager::DeviceVector::remove(const sp<DeviceDescriptor>& item)
-{
-    size_t i;
-    ssize_t ret = indexOf(item);
-
-    if (ret < 0) {
-        ALOGW("DeviceVector::remove device %08x not in", item->mDeviceType);
-    } else {
-        ret = SortedVector::removeAt(ret);
-        if (ret >= 0) {
-            refreshTypes();
-        }
-    }
-    return ret;
-}
-
-void AudioPolicyManager::DeviceVector::loadDevicesFromType(audio_devices_t types)
-{
-    DeviceVector deviceList;
-
-    uint32_t role_bit = AUDIO_DEVICE_BIT_IN & types;
-    types &= ~role_bit;
-
-    while (types) {
-        uint32_t i = 31 - __builtin_clz(types);
-        uint32_t type = 1 << i;
-        types &= ~type;
-        add(new DeviceDescriptor(String8(""), type | role_bit));
-    }
-}
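// A minimal standalone sketch of the bit-decomposition idiom used by
// loadDevicesFromType() above: the input/output role bit is set aside first,
// then each remaining set bit is peeled off from the highest position down
// with __builtin_clz() and re-combined with the role bit. The plain uint32_t
// values and the kDeviceBitIn constant below are illustrative stand-ins for
// the audio_devices_t definitions from <system/audio.h>.
#include <stdint.h>
#include <stdio.h>

static const uint32_t kDeviceBitIn = 0x80000000u;  // stand-in for AUDIO_DEVICE_BIT_IN

static void decomposeDeviceMask(uint32_t types)
{
    uint32_t roleBit = types & kDeviceBitIn;  // remember whether this is an input mask
    types &= ~roleBit;

    while (types != 0) {
        uint32_t i = 31 - __builtin_clz(types);  // index of the highest set bit
        uint32_t type = 1u << i;
        types &= ~type;                          // clear it so the loop terminates
        printf("single device type: 0x%08x\n", type | roleBit);
    }
}

// Example: decomposeDeviceMask(0x80000014u) prints the two single-bit input
// device types 0x80000010 and 0x80000004, highest bit first.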
-
-void AudioPolicyManager::DeviceVector::loadDevicesFromName(char *name,
-                                                           const DeviceVector& declaredDevices)
-{
-    char *devName = strtok(name, "|");
-    while (devName != NULL) {
-        if (strlen(devName) != 0) {
-            audio_devices_t type = stringToEnum(sDeviceNameToEnumTable,
-                                 ARRAY_SIZE(sDeviceNameToEnumTable),
-                                 devName);
-            if (type != AUDIO_DEVICE_NONE) {
-                sp<DeviceDescriptor> dev = new DeviceDescriptor(String8(""), type);
-                if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
-                        type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
-                    dev->mAddress = String8("0");
-                }
-                add(dev);
-            } else {
-                sp<DeviceDescriptor> deviceDesc =
-                        declaredDevices.getDeviceFromName(String8(devName));
-                if (deviceDesc != 0) {
-                    add(deviceDesc);
-                }
-            }
-         }
-         devName = strtok(NULL, "|");
-     }
-}
-
-sp<AudioPolicyManager::DeviceDescriptor> AudioPolicyManager::DeviceVector::getDevice(
-                                                        audio_devices_t type, String8 address) const
-{
-    sp<DeviceDescriptor> device;
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mDeviceType == type) {
-            if (address == "" || itemAt(i)->mAddress == address) {
-                device = itemAt(i);
-                if (itemAt(i)->mAddress == address) {
-                    break;
-                }
-            }
-        }
-    }
-    ALOGV("DeviceVector::getDevice() for type %08x address %s found %p",
-          type, address.string(), device.get());
-    return device;
-}
-
-sp<AudioPolicyManager::DeviceDescriptor> AudioPolicyManager::DeviceVector::getDeviceFromId(
-                                                                    audio_port_handle_t id) const
-{
-    sp<DeviceDescriptor> device;
-    for (size_t i = 0; i < size(); i++) {
-        ALOGV("DeviceVector::getDeviceFromId(%d) itemAt(%zu)->mId %d", id, i, itemAt(i)->mId);
-        if (itemAt(i)->mId == id) {
-            device = itemAt(i);
-            break;
-        }
-    }
-    return device;
-}
-
-AudioPolicyManager::DeviceVector AudioPolicyManager::DeviceVector::getDevicesFromType(
-                                                                        audio_devices_t type) const
-{
-    DeviceVector devices;
-    bool isOutput = audio_is_output_devices(type);
-    type &= ~AUDIO_DEVICE_BIT_IN;
-    for (size_t i = 0; (i < size()) && (type != AUDIO_DEVICE_NONE); i++) {
-        bool curIsOutput = audio_is_output_devices(itemAt(i)->mDeviceType);
-        audio_devices_t curType = itemAt(i)->mDeviceType & ~AUDIO_DEVICE_BIT_IN;
-        if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
-            devices.add(itemAt(i));
-            type &= ~curType;
-            ALOGV("DeviceVector::getDevicesFromType() for type %x found %p",
-                  itemAt(i)->mDeviceType, itemAt(i).get());
-        }
-    }
-    return devices;
-}
-
-AudioPolicyManager::DeviceVector AudioPolicyManager::DeviceVector::getDevicesFromTypeAddr(
-        audio_devices_t type, String8 address) const
-{
-    DeviceVector devices;
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mDeviceType == type) {
-            if (itemAt(i)->mAddress == address) {
-                devices.add(itemAt(i));
-            }
-        }
-    }
-    return devices;
-}
-
-sp<AudioPolicyManager::DeviceDescriptor> AudioPolicyManager::DeviceVector::getDeviceFromName(
-        const String8& name) const
-{
-    sp<DeviceDescriptor> device;
-    for (size_t i = 0; i < size(); i++) {
-        if (itemAt(i)->mName == name) {
-            device = itemAt(i);
-            break;
-        }
-    }
-    return device;
-}
-
-void AudioPolicyManager::DeviceDescriptor::toAudioPortConfig(
-                                                    struct audio_port_config *dstConfig,
-                                                    const struct audio_port_config *srcConfig) const
-{
-    dstConfig->config_mask = AUDIO_PORT_CONFIG_CHANNEL_MASK|AUDIO_PORT_CONFIG_GAIN;
-    if (srcConfig != NULL) {
-        dstConfig->config_mask |= srcConfig->config_mask;
-    }
-
-    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
-
-    dstConfig->id = mId;
-    dstConfig->role = audio_is_output_device(mDeviceType) ?
-                        AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
-    dstConfig->type = AUDIO_PORT_TYPE_DEVICE;
-    dstConfig->ext.device.type = mDeviceType;
-    dstConfig->ext.device.hw_module = mModule->mHandle;
-    strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
-}
-
-void AudioPolicyManager::DeviceDescriptor::toAudioPort(struct audio_port *port) const
-{
-    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
-    AudioPort::toAudioPort(port);
-    port->id = mId;
-    toAudioPortConfig(&port->active_config);
-    port->ext.device.type = mDeviceType;
-    port->ext.device.hw_module = mModule->mHandle;
-    strncpy(port->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
-}
-
-status_t AudioPolicyManager::DeviceDescriptor::dump(int fd, int spaces, int index) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-    snprintf(buffer, SIZE, "%*sDevice %d:\n", spaces, "", index+1);
-    result.append(buffer);
-    if (mId != 0) {
-        snprintf(buffer, SIZE, "%*s- id: %2d\n", spaces, "", mId);
-        result.append(buffer);
-    }
-    snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "",
-                                              enumToString(sDeviceNameToEnumTable,
-                                                           ARRAY_SIZE(sDeviceNameToEnumTable),
-                                                           mDeviceType));
-    result.append(buffer);
-    if (mAddress.size() != 0) {
-        snprintf(buffer, SIZE, "%*s- address: %-32s\n", spaces, "", mAddress.string());
-        result.append(buffer);
-    }
-    write(fd, result.string(), result.size());
-    AudioPort::dump(fd, spaces);
-
-    return NO_ERROR;
-}
-
-status_t AudioPolicyManager::AudioPatch::dump(int fd, int spaces, int index) const
-{
-    const size_t SIZE = 256;
-    char buffer[SIZE];
-    String8 result;
-
-
-    snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
-    result.append(buffer);
-    snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
-    result.append(buffer);
-    for (size_t i = 0; i < mPatch.num_sources; i++) {
-        if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
-            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sources[i].id, enumToString(sDeviceNameToEnumTable,
-                                                        ARRAY_SIZE(sDeviceNameToEnumTable),
-                                                        mPatch.sources[i].ext.device.type));
-        } else {
-            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
-                     mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
-        }
-        result.append(buffer);
-    }
-    snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
-    result.append(buffer);
-    for (size_t i = 0; i < mPatch.num_sinks; i++) {
-        if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
-            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
-                     mPatch.sinks[i].id, enumToString(sDeviceNameToEnumTable,
-                                                        ARRAY_SIZE(sDeviceNameToEnumTable),
-                                                        mPatch.sinks[i].ext.device.type));
-        } else {
-            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
-                     mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
-        }
-        result.append(buffer);
-    }
-
-    write(fd, result.string(), result.size());
-    return NO_ERROR;
-}
-
-// --- audio_policy.conf file parsing
-
-uint32_t AudioPolicyManager::parseOutputFlagNames(char *name)
-{
-    uint32_t flag = 0;
-
-    // it is OK to cast name to non const here as we are not going to use it after
-    // strtok() modifies it
-    char *flagName = strtok(name, "|");
-    while (flagName != NULL) {
-        if (strlen(flagName) != 0) {
-            flag |= stringToEnum(sOutputFlagNameToEnumTable,
-                               ARRAY_SIZE(sOutputFlagNameToEnumTable),
-                               flagName);
-        }
-        flagName = strtok(NULL, "|");
-    }
-    // force direct flag if offload flag is set: offloading implies a direct output stream
-    // and all common behaviors are driven by checking only the direct flag;
-    // this should normally be set appropriately in the policy configuration file
-    if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
-        flag |= AUDIO_OUTPUT_FLAG_DIRECT;
-    }
-
-    return flag;
-}
-
-uint32_t AudioPolicyManager::parseInputFlagNames(char *name)
-{
-    uint32_t flag = 0;
-
-    // it is OK to cast name to non const here as we are not going to use it after
-    // strtok() modifies it
-    char *flagName = strtok(name, "|");
-    while (flagName != NULL) {
-        if (strlen(flagName) != 0) {
-            flag |= stringToEnum(sInputFlagNameToEnumTable,
-                               ARRAY_SIZE(sInputFlagNameToEnumTable),
-                               flagName);
-        }
-        flagName = strtok(NULL, "|");
-    }
-    return flag;
-}
-
-audio_devices_t AudioPolicyManager::parseDeviceNames(char *name)
-{
-    uint32_t device = 0;
-
-    char *devName = strtok(name, "|");
-    while (devName != NULL) {
-        if (strlen(devName) != 0) {
-            device |= stringToEnum(sDeviceNameToEnumTable,
-                                 ARRAY_SIZE(sDeviceNameToEnumTable),
-                                 devName);
-         }
-        devName = strtok(NULL, "|");
-     }
-    return device;
-}
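// A self-contained sketch of the '|'-separated name parsing pattern shared by
// parseOutputFlagNames(), parseInputFlagNames() and parseDeviceNames() above:
// strtok() splits a writable string on '|' and each token is looked up in a
// name-to-value table, with the matches OR-ed together. The tiny table and the
// flag values below are illustrative stand-ins for sOutputFlagNameToEnumTable
// and the audio_output_flags_t constants.
#include <stdint.h>
#include <string.h>

struct NameToValue {
    const char *name;
    uint32_t    value;
};

static const NameToValue kFlagTable[] = {
    { "AUDIO_OUTPUT_FLAG_DIRECT",           0x1 },
    { "AUDIO_OUTPUT_FLAG_PRIMARY",          0x2 },
    { "AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD", 0x10 },
};

static uint32_t parseFlagNamesSketch(char *names)
{
    uint32_t flags = 0;
    for (char *token = strtok(names, "|"); token != NULL; token = strtok(NULL, "|")) {
        for (size_t i = 0; i < sizeof(kFlagTable) / sizeof(kFlagTable[0]); i++) {
            if (strcmp(kFlagTable[i].name, token) == 0) {
                flags |= kFlagTable[i].value;
                break;
            }
        }
    }
    return flags;
}

// Usage: strtok() modifies its argument, so the caller passes a writable buffer,
// e.g. char buf[] = "AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD";
// parseFlagNamesSketch(buf) then returns 0x11.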
-
-void AudioPolicyManager::loadHwModule(cnode *root)
-{
-    status_t status = NAME_NOT_FOUND;
-    cnode *node;
-    sp<HwModule> module = new HwModule(root->name);
-
-    node = config_find(root, DEVICES_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading device %s", node->name);
-            status_t tmpStatus = module->loadDevice(node);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    node = config_find(root, OUTPUTS_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading output %s", node->name);
-            status_t tmpStatus = module->loadOutput(node);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    node = config_find(root, INPUTS_TAG);
-    if (node != NULL) {
-        node = node->first_child;
-        while (node) {
-            ALOGV("loadHwModule() loading input %s", node->name);
-            status_t tmpStatus = module->loadInput(node);
-            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
-                status = tmpStatus;
-            }
-            node = node->next;
-        }
-    }
-    loadGlobalConfig(root, module);
-
-    if (status == NO_ERROR) {
-        mHwModules.add(module);
-    }
-}
-
-void AudioPolicyManager::loadHwModules(cnode *root)
-{
-    cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
-    if (node == NULL) {
-        return;
-    }
-
-    node = node->first_child;
-    while (node) {
-        ALOGV("loadHwModules() loading module %s", node->name);
-        loadHwModule(node);
-        node = node->next;
-    }
-}
-
-void AudioPolicyManager::loadGlobalConfig(cnode *root, const sp<HwModule>& module)
-{
-    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
-
-    if (node == NULL) {
-        return;
-    }
-    DeviceVector declaredDevices;
-    if (module != NULL) {
-        declaredDevices = module->mDeclaredDevices;
-    }
-
-    node = node->first_child;
-    while (node) {
-        if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
-            mAvailableOutputDevices.loadDevicesFromName((char *)node->value,
-                                                        declaredDevices);
-            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
-                  mAvailableOutputDevices.types());
-        } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
-            audio_devices_t device = (audio_devices_t)stringToEnum(sDeviceNameToEnumTable,
-                                              ARRAY_SIZE(sDeviceNameToEnumTable),
-                                              (char *)node->value);
-            if (device != AUDIO_DEVICE_NONE) {
-                mDefaultOutputDevice = new DeviceDescriptor(String8(""), device);
-            } else {
-                ALOGW("loadGlobalConfig() default device not specified");
-            }
-            ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", mDefaultOutputDevice->mDeviceType);
-        } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
-            mAvailableInputDevices.loadDevicesFromName((char *)node->value,
-                                                       declaredDevices);
-            ALOGV("loadGlobalConfig() Available InputDevices %08x", mAvailableInputDevices.types());
-        } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
-            mSpeakerDrcEnabled = stringToBool((char *)node->value);
-            ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", mSpeakerDrcEnabled);
-        } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
-            uint32_t major, minor;
-            sscanf((char *)node->value, "%u.%u", &major, &minor);
-            module->mHalVersion = HARDWARE_DEVICE_API_VERSION(major, minor);
-            ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
-                  module->mHalVersion, major, minor);
-        }
-        node = node->next;
-    }
-}
-
-status_t AudioPolicyManager::loadAudioPolicyConfig(const char *path)
-{
-    cnode *root;
-    char *data;
-
-    data = (char *)load_file(path, NULL);
-    if (data == NULL) {
-        return -ENODEV;
-    }
-    root = config_node("", "");
-    config_load(root, data);
-
-    loadHwModules(root);
-    // legacy audio_policy.conf files have one global_configuration section
-    loadGlobalConfig(root, getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY));
-    config_free(root);
-    free(root);
-    free(data);
-
-    ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
-
-    return NO_ERROR;
-}
-
-void AudioPolicyManager::defaultAudioPolicyConfig(void)
-{
-    sp<HwModule> module;
-    sp<IOProfile> profile;
-    sp<DeviceDescriptor> defaultInputDevice = new DeviceDescriptor(String8(""),
-                                                                   AUDIO_DEVICE_IN_BUILTIN_MIC);
-    mAvailableOutputDevices.add(mDefaultOutputDevice);
-    mAvailableInputDevices.add(defaultInputDevice);
-
-    module = new HwModule("primary");
-
-    profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SOURCE, module);
-    profile->mSamplingRates.add(44100);
-    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
-    profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO);
-    profile->mSupportedDevices.add(mDefaultOutputDevice);
-    profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY;
-    module->mOutputProfiles.add(profile);
-
-    profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SINK, module);
-    profile->mSamplingRates.add(8000);
-    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
-    profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO);
-    profile->mSupportedDevices.add(defaultInputDevice);
-    module->mInputProfiles.add(profile);
-
-    mHwModules.add(module);
-}
-
-audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
-{
-    // flags to stream type mapping
-    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
-        return AUDIO_STREAM_ENFORCED_AUDIBLE;
-    }
-    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
-        return AUDIO_STREAM_BLUETOOTH_SCO;
-    }
-    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
-        return AUDIO_STREAM_TTS;
-    }
-
-    // usage to stream type mapping
-    switch (attr->usage) {
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-        return AUDIO_STREAM_MUSIC;
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-        if (isStreamActive(AUDIO_STREAM_ALARM)) {
-            return AUDIO_STREAM_ALARM;
-        }
-        if (isStreamActive(AUDIO_STREAM_RING)) {
-            return AUDIO_STREAM_RING;
-        }
-        if (isInCall()) {
-            return AUDIO_STREAM_VOICE_CALL;
-        }
-        return AUDIO_STREAM_ACCESSIBILITY;
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-        return AUDIO_STREAM_SYSTEM;
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-        return AUDIO_STREAM_VOICE_CALL;
-
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-        return AUDIO_STREAM_DTMF;
-
-    case AUDIO_USAGE_ALARM:
-        return AUDIO_STREAM_ALARM;
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-        return AUDIO_STREAM_RING;
-
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-        return AUDIO_STREAM_NOTIFICATION;
-
-    case AUDIO_USAGE_UNKNOWN:
-    default:
-        return AUDIO_STREAM_MUSIC;
-    }
-}
-
-bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa) {
-    // has flags that map to a strategy?
-    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
-        return true;
-    }
-
-    // has known usage?
-    switch (paa->usage) {
-    case AUDIO_USAGE_UNKNOWN:
-    case AUDIO_USAGE_MEDIA:
-    case AUDIO_USAGE_VOICE_COMMUNICATION:
-    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
-    case AUDIO_USAGE_ALARM:
-    case AUDIO_USAGE_NOTIFICATION:
-    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
-    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
-    case AUDIO_USAGE_NOTIFICATION_EVENT:
-    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
-    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
-    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
-    case AUDIO_USAGE_GAME:
-    case AUDIO_USAGE_VIRTUAL_SOURCE:
-        break;
-    default:
-        return false;
-    }
-    return true;
-}
-
-}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyManager.h b/services/audiopolicy/AudioPolicyManager.h
deleted file mode 100644
index cbdafa6..0000000
--- a/services/audiopolicy/AudioPolicyManager.h
+++ /dev/null
@@ -1,937 +0,0 @@
-/*
- * Copyright (C) 2009 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-
-#include <stdint.h>
-#include <sys/types.h>
-#include <cutils/config_utils.h>
-#include <cutils/misc.h>
-#include <utils/Timers.h>
-#include <utils/Errors.h>
-#include <utils/KeyedVector.h>
-#include <utils/SortedVector.h>
-#include <media/AudioPolicy.h>
-#include "AudioPolicyInterface.h"
-
-
-namespace android {
-
-// ----------------------------------------------------------------------------
-
-// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
-#define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
-// Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB
-#define SONIFICATION_HEADSET_VOLUME_MIN  0.016
-// Time in milliseconds during which we consider that music is still active after a music
-// track was stopped - see computeVolume()
-#define SONIFICATION_HEADSET_MUSIC_DELAY  5000
-// Time in milliseconds after media stopped playing during which we consider that the
-// sonification should be as unobtrusive as during the time media was playing.
-#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
-// Time in milliseconds during which some streams are muted while the audio path
-// is switched
-#define MUTE_TIME_MS 2000
-
-#define NUM_TEST_OUTPUTS 5
-
-#define NUM_VOL_CURVE_KNEES 2
-
-// Default minimum length allowed for offloading a compressed track
-// Can be overridden by the audio.offload.min.duration.secs property
-#define OFFLOAD_DEFAULT_MIN_DURATION_SECS 60
-
-#define MAX_MIXER_SAMPLING_RATE 48000
-#define MAX_MIXER_CHANNEL_COUNT 8
-
-// ----------------------------------------------------------------------------
-// AudioPolicyManager implements audio policy manager behavior common to all platforms.
-// ----------------------------------------------------------------------------
-
-class AudioPolicyManager: public AudioPolicyInterface
-#ifdef AUDIO_POLICY_TEST
-    , public Thread
-#endif //AUDIO_POLICY_TEST
-{
-
-public:
-                AudioPolicyManager(AudioPolicyClientInterface *clientInterface);
-        virtual ~AudioPolicyManager();
-
-        // AudioPolicyInterface
-        virtual status_t setDeviceConnectionState(audio_devices_t device,
-                                                          audio_policy_dev_state_t state,
-                                                          const char *device_address);
-        virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
-                                                                              const char *device_address);
-        virtual void setPhoneState(audio_mode_t state);
-        virtual void setForceUse(audio_policy_force_use_t usage,
-                                 audio_policy_forced_cfg_t config);
-        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
-        virtual void setSystemProperty(const char* property, const char* value);
-        virtual status_t initCheck();
-        virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
-                                            uint32_t samplingRate,
-                                            audio_format_t format,
-                                            audio_channel_mask_t channelMask,
-                                            audio_output_flags_t flags,
-                                            const audio_offload_info_t *offloadInfo);
-        virtual status_t getOutputForAttr(const audio_attributes_t *attr,
-                                          audio_io_handle_t *output,
-                                          audio_session_t session,
-                                          audio_stream_type_t *stream,
-                                          uint32_t samplingRate,
-                                          audio_format_t format,
-                                          audio_channel_mask_t channelMask,
-                                          audio_output_flags_t flags,
-                                          const audio_offload_info_t *offloadInfo);
-        virtual status_t startOutput(audio_io_handle_t output,
-                                     audio_stream_type_t stream,
-                                     audio_session_t session);
-        virtual status_t stopOutput(audio_io_handle_t output,
-                                    audio_stream_type_t stream,
-                                    audio_session_t session);
-        virtual void releaseOutput(audio_io_handle_t output,
-                                   audio_stream_type_t stream,
-                                   audio_session_t session);
-        virtual status_t getInputForAttr(const audio_attributes_t *attr,
-                                         audio_io_handle_t *input,
-                                         audio_session_t session,
-                                         uint32_t samplingRate,
-                                         audio_format_t format,
-                                         audio_channel_mask_t channelMask,
-                                         audio_input_flags_t flags,
-                                         input_type_t *inputType);
-
-        // indicates to the audio policy manager that the input starts being used.
-        virtual status_t startInput(audio_io_handle_t input,
-                                    audio_session_t session);
-
-        // indicates to the audio policy manager that the input stops being used.
-        virtual status_t stopInput(audio_io_handle_t input,
-                                   audio_session_t session);
-        virtual void releaseInput(audio_io_handle_t input,
-                                  audio_session_t session);
-        virtual void closeAllInputs();
-        virtual void initStreamVolume(audio_stream_type_t stream,
-                                                    int indexMin,
-                                                    int indexMax);
-        virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
-                                              int index,
-                                              audio_devices_t device);
-        virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
-                                              int *index,
-                                              audio_devices_t device);
-
-        // return the strategy corresponding to a given stream type
-        virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
-        // return the strategy corresponding to the given audio attributes
-        virtual uint32_t getStrategyForAttr(const audio_attributes_t *attr);
-
-        // return the enabled output devices for the given stream type
-        virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
-
-        virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc = NULL);
-        virtual status_t registerEffect(const effect_descriptor_t *desc,
-                                        audio_io_handle_t io,
-                                        uint32_t strategy,
-                                        int session,
-                                        int id);
-        virtual status_t unregisterEffect(int id);
-        virtual status_t setEffectEnabled(int id, bool enabled);
-
-        virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
-        // return whether a stream is playing remotely, override to change the definition of
-        //   local/remote playback, used for instance by notification manager to not make
-        //   media players lose audio focus when not playing locally.
-        //   For the base implementation, "remotely" means playing during screen mirroring, which
-        //   uses an output for playback with a non-empty, non "0" address.
-        virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
-        virtual bool isSourceActive(audio_source_t source) const;
-
-        virtual status_t dump(int fd);
-
-        virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
-
-        virtual status_t listAudioPorts(audio_port_role_t role,
-                                        audio_port_type_t type,
-                                        unsigned int *num_ports,
-                                        struct audio_port *ports,
-                                        unsigned int *generation);
-        virtual status_t getAudioPort(struct audio_port *port);
-        virtual status_t createAudioPatch(const struct audio_patch *patch,
-                                           audio_patch_handle_t *handle,
-                                           uid_t uid);
-        virtual status_t releaseAudioPatch(audio_patch_handle_t handle,
-                                              uid_t uid);
-        virtual status_t listAudioPatches(unsigned int *num_patches,
-                                          struct audio_patch *patches,
-                                          unsigned int *generation);
-        virtual status_t setAudioPortConfig(const struct audio_port_config *config);
-        virtual void clearAudioPatches(uid_t uid);
-
-        virtual status_t acquireSoundTriggerSession(audio_session_t *session,
-                                               audio_io_handle_t *ioHandle,
-                                               audio_devices_t *device);
-
-        virtual status_t releaseSoundTriggerSession(audio_session_t session);
-
-        virtual status_t registerPolicyMixes(Vector<AudioMix> mixes);
-        virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
-
-protected:
-
-        enum routing_strategy {
-            STRATEGY_MEDIA,
-            STRATEGY_PHONE,
-            STRATEGY_SONIFICATION,
-            STRATEGY_SONIFICATION_RESPECTFUL,
-            STRATEGY_DTMF,
-            STRATEGY_ENFORCED_AUDIBLE,
-            STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
-            STRATEGY_ACCESSIBILITY,
-            STRATEGY_REROUTING,
-            NUM_STRATEGIES
-        };
-
-        // 4 points to define the volume attenuation curve, each characterized by the volume
-        // index (from 0 to 100) at which they apply, and the attenuation in dB at that index.
-        // we use 100 steps to avoid rounding errors when computing the volume in volIndexToAmpl()
-
-        enum { VOLMIN = 0, VOLKNEE1 = 1, VOLKNEE2 = 2, VOLMAX = 3, VOLCNT = 4};
-
-        class VolumeCurvePoint
-        {
-        public:
-            int mIndex;
-            float mDBAttenuation;
-        };
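// A sketch of how a 4-point curve of this shape can be evaluated: locate the
// segment containing the volume index (0..100), interpolate the dB attenuation
// linearly between the two surrounding curve points, then convert dB to a
// linear gain. This only illustrates the intent described in the comment above;
// the actual computation is done by volIndexToAmpl() in the .cpp file and may
// differ in its details.
#include <math.h>

struct CurvePointSketch { int index; float dbAttenuation; };

static float curveToGain(const CurvePointSketch curve[4], int volIndex)
{
    if (volIndex <= curve[0].index) {
        return 0.0f;                                   // at or below the lowest knee: muted
    }
    if (volIndex >= curve[3].index) {
        return powf(10.0f, curve[3].dbAttenuation / 20.0f);
    }
    int seg = 0;
    while (volIndex > curve[seg + 1].index) {
        seg++;                                         // find the segment holding volIndex
    }
    float span = (float)(curve[seg + 1].index - curve[seg].index);
    float frac = (float)(volIndex - curve[seg].index) / span;
    float db = curve[seg].dbAttenuation +
               frac * (curve[seg + 1].dbAttenuation - curve[seg].dbAttenuation);
    return powf(10.0f, db / 20.0f);                    // dB attenuation to linear gain
}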
-
-        // device categories used for volume curve management.
-        enum device_category {
-            DEVICE_CATEGORY_HEADSET,
-            DEVICE_CATEGORY_SPEAKER,
-            DEVICE_CATEGORY_EARPIECE,
-            DEVICE_CATEGORY_EXT_MEDIA,
-            DEVICE_CATEGORY_CNT
-        };
-
-        class HwModule;
-
-        class AudioGain: public RefBase
-        {
-        public:
-            AudioGain(int index, bool useInChannelMask);
-            virtual ~AudioGain() {}
-
-            void dump(int fd, int spaces, int index) const;
-
-            void getDefaultConfig(struct audio_gain_config *config);
-            status_t checkConfig(const struct audio_gain_config *config);
-            int               mIndex;
-            struct audio_gain mGain;
-            bool              mUseInChannelMask;
-        };
-
-        class AudioPort: public virtual RefBase
-        {
-        public:
-            AudioPort(const String8& name, audio_port_type_t type,
-                      audio_port_role_t role, const sp<HwModule>& module);
-            virtual ~AudioPort() {}
-
-            virtual void toAudioPort(struct audio_port *port) const;
-
-            void importAudioPort(const sp<AudioPort> port);
-            void clearCapabilities();
-
-            void loadSamplingRates(char *name);
-            void loadFormats(char *name);
-            void loadOutChannels(char *name);
-            void loadInChannels(char *name);
-
-            audio_gain_mode_t loadGainMode(char *name);
-            void loadGain(cnode *root, int index);
-            virtual void loadGains(cnode *root);
-
-            // searches for an exact match
-            status_t checkExactSamplingRate(uint32_t samplingRate) const;
-            // searches for a compatible match, and returns the best match via updatedSamplingRate
-            status_t checkCompatibleSamplingRate(uint32_t samplingRate,
-                    uint32_t *updatedSamplingRate) const;
-            // searches for an exact match
-            status_t checkExactChannelMask(audio_channel_mask_t channelMask) const;
-            // searches for a compatible match, currently implemented for input channel masks only
-            status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask) const;
-            status_t checkFormat(audio_format_t format) const;
-            status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
-
-            uint32_t pickSamplingRate() const;
-            audio_channel_mask_t pickChannelMask() const;
-            audio_format_t pickFormat() const;
-
-            static const audio_format_t sPcmFormatCompareTable[];
-            static int compareFormats(audio_format_t format1, audio_format_t format2);
-
-            void dump(int fd, int spaces) const;
-
-            String8           mName;
-            audio_port_type_t mType;
-            audio_port_role_t mRole;
-            bool              mUseInChannelMask;
-            // by convention, "0" in the first entry in mSamplingRates, mChannelMasks or mFormats
-            // indicates the supported parameters should be read from the output stream
-            // after it is opened for the first time
-            Vector <uint32_t> mSamplingRates; // supported sampling rates
-            Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks
-            Vector <audio_format_t> mFormats; // supported audio formats
-            Vector < sp<AudioGain> > mGains; // gain controllers
-            sp<HwModule> mModule;                 // audio HW module exposing this I/O stream
-            uint32_t mFlags; // attribute flags (e.g. primary output,
-                                                // direct output...).
-        };
-
-        class AudioPortConfig: public virtual RefBase
-        {
-        public:
-            AudioPortConfig();
-            virtual ~AudioPortConfig() {}
-
-            status_t applyAudioPortConfig(const struct audio_port_config *config,
-                                          struct audio_port_config *backupConfig = NULL);
-            virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
-                                   const struct audio_port_config *srcConfig = NULL) const = 0;
-            virtual sp<AudioPort> getAudioPort() const = 0;
-            uint32_t mSamplingRate;
-            audio_format_t mFormat;
-            audio_channel_mask_t mChannelMask;
-            struct audio_gain_config mGain;
-        };
-
-
-        class AudioPatch: public RefBase
-        {
-        public:
-            AudioPatch(audio_patch_handle_t handle,
-                       const struct audio_patch *patch, uid_t uid) :
-                           mHandle(handle), mPatch(*patch), mUid(uid), mAfPatchHandle(0) {}
-
-            status_t dump(int fd, int spaces, int index) const;
-
-            audio_patch_handle_t mHandle;
-            struct audio_patch mPatch;
-            uid_t mUid;
-            audio_patch_handle_t mAfPatchHandle;
-        };
-
-        class DeviceDescriptor: public AudioPort, public AudioPortConfig
-        {
-        public:
-            DeviceDescriptor(const String8& name, audio_devices_t type);
-
-            virtual ~DeviceDescriptor() {}
-
-            bool equals(const sp<DeviceDescriptor>& other) const;
-
-            // AudioPortConfig
-            virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
-            virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
-                                   const struct audio_port_config *srcConfig = NULL) const;
-
-            // AudioPort
-            virtual void loadGains(cnode *root);
-            virtual void toAudioPort(struct audio_port *port) const;
-
-            status_t dump(int fd, int spaces, int index) const;
-
-            audio_devices_t mDeviceType;
-            String8 mAddress;
-            audio_port_handle_t mId;
-        };
-
-        class DeviceVector : public SortedVector< sp<DeviceDescriptor> >
-        {
-        public:
-            DeviceVector() : SortedVector(), mDeviceTypes(AUDIO_DEVICE_NONE) {}
-
-            ssize_t         add(const sp<DeviceDescriptor>& item);
-            ssize_t         remove(const sp<DeviceDescriptor>& item);
-            ssize_t         indexOf(const sp<DeviceDescriptor>& item) const;
-
-            audio_devices_t types() const { return mDeviceTypes; }
-
-            void loadDevicesFromType(audio_devices_t types);
-            void loadDevicesFromName(char *name, const DeviceVector& declaredDevices);
-
-            sp<DeviceDescriptor> getDevice(audio_devices_t type, String8 address) const;
-            DeviceVector getDevicesFromType(audio_devices_t types) const;
-            sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
-            sp<DeviceDescriptor> getDeviceFromName(const String8& name) const;
-            DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address)
-                    const;
-
-        private:
-            void refreshTypes();
-            audio_devices_t mDeviceTypes;
-        };
-
-        // the IOProfile class describes the capabilities of an output or input stream.
-        // It is currently assumed that all combinations of the listed parameters are supported.
-        // It is used by the policy manager to determine if an output or input is suitable for
-        // a given use case, open/close it accordingly and connect/disconnect audio tracks
-        // to/from it.
-        class IOProfile : public AudioPort
-        {
-        public:
-            IOProfile(const String8& name, audio_port_role_t role, const sp<HwModule>& module);
-            virtual ~IOProfile();
-
-            // This method is used for both output and input.
-            // If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
-            // For input, flags is interpreted as audio_input_flags_t.
-            // TODO: merge audio_output_flags_t and audio_input_flags_t.
-            bool isCompatibleProfile(audio_devices_t device,
-                                     String8 address,
-                                     uint32_t samplingRate,
-                                     uint32_t *updatedSamplingRate,
-                                     audio_format_t format,
-                                     audio_channel_mask_t channelMask,
-                                     uint32_t flags) const;
-
-            void dump(int fd);
-            void log();
-
-            DeviceVector  mSupportedDevices; // supported devices
-                                             // (devices this output can be routed to)
-        };
-
-        class HwModule : public RefBase
-        {
-        public:
-                    HwModule(const char *name);
-                    ~HwModule();
-
-            status_t loadOutput(cnode *root);
-            status_t loadInput(cnode *root);
-            status_t loadDevice(cnode *root);
-
-            status_t addOutputProfile(String8 name, const audio_config_t *config,
-                                      audio_devices_t device, String8 address);
-            status_t removeOutputProfile(String8 name);
-            status_t addInputProfile(String8 name, const audio_config_t *config,
-                                      audio_devices_t device, String8 address);
-            status_t removeInputProfile(String8 name);
-
-            void dump(int fd);
-
-            const char *const        mName; // base name of the audio HW module (primary, a2dp ...)
-            uint32_t                 mHalVersion; // audio HAL API version
-            audio_module_handle_t    mHandle;
-            Vector < sp<IOProfile> > mOutputProfiles; // output profiles exposed by this module
-            Vector < sp<IOProfile> > mInputProfiles;  // input profiles exposed by this module
-            DeviceVector             mDeclaredDevices; // devices declared in audio_policy.conf
-
-        };
-
-        // default volume curve
-        static const VolumeCurvePoint sDefaultVolumeCurve[AudioPolicyManager::VOLCNT];
-        // default volume curve for media strategy
-        static const VolumeCurvePoint sDefaultMediaVolumeCurve[AudioPolicyManager::VOLCNT];
-        // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
-        static const VolumeCurvePoint sExtMediaSystemVolumeCurve[AudioPolicyManager::VOLCNT];
-        // volume curve for media strategy on speakers
-        static const VolumeCurvePoint sSpeakerMediaVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[AudioPolicyManager::VOLCNT];
-        // volume curve for sonification strategy on speakers
-        static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sDefaultSystemVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sHeadsetSystemVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sDefaultVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sLinearVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sSilentVolumeCurve[AudioPolicyManager::VOLCNT];
-        static const VolumeCurvePoint sFullScaleVolumeCurve[AudioPolicyManager::VOLCNT];
-        // default volume curves per stream and device category. See initializeVolumeCurves()
-        static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][DEVICE_CATEGORY_CNT];
-
-        // descriptor for audio outputs. Used to maintain current configuration of each opened audio output
-        // and keep track of the usage of this output by each audio stream type.
-        class AudioOutputDescriptor: public AudioPortConfig
-        {
-        public:
-            AudioOutputDescriptor(const sp<IOProfile>& profile);
-
-            status_t    dump(int fd);
-
-            audio_devices_t device() const;
-            void changeRefCount(audio_stream_type_t stream, int delta);
-
-            bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
-            audio_devices_t supportedDevices();
-            uint32_t latency();
-            bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc);
-            bool isActive(uint32_t inPastMs = 0) const;
-            bool isStreamActive(audio_stream_type_t stream,
-                                uint32_t inPastMs = 0,
-                                nsecs_t sysTime = 0) const;
-            bool isStrategyActive(routing_strategy strategy,
-                             uint32_t inPastMs = 0,
-                             nsecs_t sysTime = 0) const;
-
-            virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
-                                   const struct audio_port_config *srcConfig = NULL) const;
-            virtual sp<AudioPort> getAudioPort() const { return mProfile; }
-            void toAudioPort(struct audio_port *port) const;
-
-            audio_port_handle_t mId;
-            audio_io_handle_t mIoHandle;              // output handle
-            uint32_t mLatency;                  //
-            audio_output_flags_t mFlags;   //
-            audio_devices_t mDevice;                   // current device this output is routed to
-            AudioMix *mPolicyMix;             // non NULL when used by a dynamic policy
-            audio_patch_handle_t mPatchHandle;
-            uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
-            nsecs_t mStopTime[AUDIO_STREAM_CNT];
-            sp<AudioOutputDescriptor> mOutput1;    // used by duplicated outputs: first output
-            sp<AudioOutputDescriptor> mOutput2;    // used by duplicated outputs: second output
-            float mCurVolume[AUDIO_STREAM_CNT];   // current stream volume
-            int mMuteCount[AUDIO_STREAM_CNT];     // mute request counter
-            const sp<IOProfile> mProfile;          // I/O profile this output derives from
-            bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible
-                                                // device selection. See checkDeviceMuteStrategies()
-            uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
-        };
-
-        // descriptor for audio inputs. Used to maintain current configuration of each opened audio input
-        // and keep track of the usage of this input.
-        class AudioInputDescriptor: public AudioPortConfig
-        {
-        public:
-            AudioInputDescriptor(const sp<IOProfile>& profile);
-
-            status_t    dump(int fd);
-
-            audio_port_handle_t           mId;
-            audio_io_handle_t             mIoHandle;       // input handle
-            audio_devices_t               mDevice;         // current device this input is routed to
-            AudioMix                      *mPolicyMix;     // non NULL when used by a dynamic policy
-            audio_patch_handle_t          mPatchHandle;
-            uint32_t                      mRefCount;       // number of AudioRecord clients using
-                                                           // this input
-            uint32_t                      mOpenRefCount;
-            audio_source_t                mInputSource;    // input source selected by application
-                                                           //(mediarecorder.h)
-            const sp<IOProfile>           mProfile;        // I/O profile this output derives from
-            SortedVector<audio_session_t> mSessions;       // audio sessions attached to this input
-            bool                          mIsSoundTrigger; // used by a soundtrigger capture
-
-            virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
-                                   const struct audio_port_config *srcConfig = NULL) const;
-            virtual sp<AudioPort> getAudioPort() const { return mProfile; }
-            void toAudioPort(struct audio_port *port) const;
-        };
-
-        // stream descriptor used for volume control
-        class StreamDescriptor
-        {
-        public:
-            StreamDescriptor();
-
-            int getVolumeIndex(audio_devices_t device);
-            void dump(int fd);
-
-            int mIndexMin;      // min volume index
-            int mIndexMax;      // max volume index
-            KeyedVector<audio_devices_t, int> mIndexCur;   // current volume index per device
-            bool mCanBeMuted;   // true if the stream can be muted
-
-            const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT];
-        };
-
-        // stream descriptor used for volume control
-        class EffectDescriptor : public RefBase
-        {
-        public:
-
-            status_t dump(int fd);
-
-            int mIo;                // io the effect is attached to
-            routing_strategy mStrategy; // routing strategy the effect is associated to
-            int mSession;               // audio session the effect is on
-            effect_descriptor_t mDesc;  // effect descriptor
-            bool mEnabled;              // enabled state: CPU load being used or not
-        };
-
-        void addOutput(audio_io_handle_t output, sp<AudioOutputDescriptor> outputDesc);
-        void addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc);
-
-        // return the strategy corresponding to a given stream type
-        static routing_strategy getStrategy(audio_stream_type_t stream);
-
-        // return appropriate device for streams handled by the specified strategy according to current
-        // phone state, connected devices...
-        // if fromCache is true, the device is returned from mDeviceForStrategy[],
-        // otherwise it is determined by the current state
-        // (device connected, phone state, force use, a2dp output...)
-        // This allows us to:
-        //  1 speed up the process when the state is stable (when starting or stopping an output)
-        //  2 access either the current device selection (fromCache == true) or the
-        // "future" device selection (fromCache == false) when called from a context
-        //  where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
-        //  before updateDevicesAndOutputs() is called.
-        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
-                                                     bool fromCache);
-
-        // change the route of the specified output. Returns the number of ms we have slept to
-        // allow new routing to take effect in certain cases.
-        virtual uint32_t setOutputDevice(audio_io_handle_t output,
-                             audio_devices_t device,
-                             bool force = false,
-                             int delayMs = 0,
-                             audio_patch_handle_t *patchHandle = NULL,
-                             const char* address = NULL);
-        status_t resetOutputDevice(audio_io_handle_t output,
-                                   int delayMs = 0,
-                                   audio_patch_handle_t *patchHandle = NULL);
-        status_t setInputDevice(audio_io_handle_t input,
-                                audio_devices_t device,
-                                bool force = false,
-                                audio_patch_handle_t *patchHandle = NULL);
-        status_t resetInputDevice(audio_io_handle_t input,
-                                  audio_patch_handle_t *patchHandle = NULL);
-
-        // select input device corresponding to requested audio source
-        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource);
-
-        // return io handle of active input or 0 if no input is active
-        //    Only considers inputs from physical devices (e.g. main mic, headset mic) when
-        //    ignoreVirtualInputs is true.
-        audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
-
-        uint32_t activeInputsCount() const;
-
-        // initialize volume curves for each strategy and device category
-        void initializeVolumeCurves();
-
-        // compute the actual volume for a given stream according to the requested index and a particular
-        // device
-        virtual float computeVolume(audio_stream_type_t stream, int index,
-                                    audio_io_handle_t output, audio_devices_t device);
-
-        // check that volume change is permitted, compute and send new volume to audio hardware
-        virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
-                                           audio_io_handle_t output,
-                                           audio_devices_t device,
-                                           int delayMs = 0, bool force = false);
-
-        // apply all stream volumes to the specified output and device
-        void applyStreamVolumes(audio_io_handle_t output, audio_devices_t device, int delayMs = 0, bool force = false);
-
-        // Mute or unmute all streams handled by the specified strategy on the specified output
-        void setStrategyMute(routing_strategy strategy,
-                             bool on,
-                             audio_io_handle_t output,
-                             int delayMs = 0,
-                             audio_devices_t device = (audio_devices_t)0);
-
-        // Mute or unmute the stream on the specified output
-        void setStreamMute(audio_stream_type_t stream,
-                           bool on,
-                           audio_io_handle_t output,
-                           int delayMs = 0,
-                           audio_devices_t device = (audio_devices_t)0);
-
-        // handle special cases for the sonification strategy while in call: mute streams or replace
-        // them with a special tone on the device used for communication
-        void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
-
-        // true if device is in a telephony or VoIP call
-        virtual bool isInCall();
-
-        // true if given state represents a device in a telephony or VoIP call
-        virtual bool isStateInCall(int state);
-
-        // when a device is connected, checks if an open output can be routed
-        // to this device. If none is open, tries to open one of the available outputs.
-        // Returns an output suitable for this device or 0.
-        // when a device is disconnected, checks if an output is no longer used and
-        // returns its handle if any.
-        // transfers the audio tracks and effects from one output thread to another accordingly.
-        status_t checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
-                                       audio_policy_dev_state_t state,
-                                       SortedVector<audio_io_handle_t>& outputs,
-                                       const String8 address);
-
-        status_t checkInputsForDevice(audio_devices_t device,
-                                      audio_policy_dev_state_t state,
-                                      SortedVector<audio_io_handle_t>& inputs,
-                                      const String8 address);
-
-        // close an output and its companion duplicating output.
-        void closeOutput(audio_io_handle_t output);
-
-        // close an input.
-        void closeInput(audio_io_handle_t input);
-
-        // checks and if necessary changes outputs used for all strategies.
-        // must be called every time a condition that affects the output choice for a given strategy
-        // changes: connected device, phone state, force use...
-        // Must be called before updateDevicesAndOutputs()
-        void checkOutputForStrategy(routing_strategy strategy);
-
-        // Same as checkOutputForStrategy() but for all strategies in order of priority
-        void checkOutputForAllStrategies();
-
-        // manages A2DP output suspend/restore according to phone state and BT SCO usage
-        void checkA2dpSuspend();
-
-        // returns the A2DP output handle if it is open or 0 otherwise
-        audio_io_handle_t getA2dpOutput();
-
-        // selects the most appropriate device on output for current state
-        // must be called every time a condition that affects the device choice for a given output is
-        // changed: connected device, phone state, force use, output start, output stop..
-        // see getDeviceForStrategy() for the use of fromCache parameter
-        audio_devices_t getNewOutputDevice(audio_io_handle_t output, bool fromCache);
-
-        // updates cache of device used by all strategies (mDeviceForStrategy[])
-        // must be called every time a condition that affects the device choice for a given strategy is
-        // changed: connected device, phone state, force use...
-        // cached values are used by getDeviceForStrategy() if parameter fromCache is true.
-        // Must be called after checkOutputForAllStrategies()
-        void updateDevicesAndOutputs();
-
-        // selects the most appropriate device on input for current state
-        audio_devices_t getNewInputDevice(audio_io_handle_t input);
-
-        virtual uint32_t getMaxEffectsCpuLoad();
-        virtual uint32_t getMaxEffectsMemory();
-#ifdef AUDIO_POLICY_TEST
-        virtual     bool        threadLoop();
-                    void        exit();
-        int testOutputIndex(audio_io_handle_t output);
-#endif //AUDIO_POLICY_TEST
-
-        status_t setEffectEnabled(const sp<EffectDescriptor>& effectDesc, bool enabled);
-
-        // returns the category the device belongs to with regard to volume curve management
-        static device_category getDeviceCategory(audio_devices_t device);
-
-        // extract one device relevant for volume control from multiple device selection
-        static audio_devices_t getDeviceForVolume(audio_devices_t device);
-
-        SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
-                        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > openOutputs);
-        bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
-                                           SortedVector<audio_io_handle_t>& outputs2);
-
-        // mute/unmute strategies using an incompatible device combination
-        // if muting, wait for the audio in the PCM buffer to be drained before proceeding
-        // if unmuting, unmute only after the specified delay
-        // Returns the number of ms waited
-        virtual uint32_t  checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc,
-                                            audio_devices_t prevDevice,
-                                            uint32_t delayMs);
-
-        audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
-                                       audio_output_flags_t flags,
-                                       audio_format_t format);
-        // samplingRate parameter is an in/out and so may be modified
-        sp<IOProfile> getInputProfile(audio_devices_t device,
-                                      String8 address,
-                                      uint32_t& samplingRate,
-                                      audio_format_t format,
-                                      audio_channel_mask_t channelMask,
-                                      audio_input_flags_t flags);
-        sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
-                                                       uint32_t samplingRate,
-                                                       audio_format_t format,
-                                                       audio_channel_mask_t channelMask,
-                                                       audio_output_flags_t flags);
-
-        audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs);
-
-        bool isNonOffloadableEffectEnabled();
-
-        virtual status_t addAudioPatch(audio_patch_handle_t handle,
-                               const sp<AudioPatch>& patch);
-        virtual status_t removeAudioPatch(audio_patch_handle_t handle);
-
-        sp<AudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const;
-        sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
-        sp<HwModule> getModuleForDevice(audio_devices_t device) const;
-        sp<HwModule> getModuleFromName(const char *name) const;
-        audio_devices_t availablePrimaryOutputDevices();
-        audio_devices_t availablePrimaryInputDevices();
-
-        void updateCallRouting(audio_devices_t rxDevice, int delayMs = 0);
-
-        //
-        // Audio policy configuration file parsing (audio_policy.conf)
-        //
-        static uint32_t stringToEnum(const struct StringToEnum *table,
-                                     size_t size,
-                                     const char *name);
-        static const char *enumToString(const struct StringToEnum *table,
-                                      size_t size,
-                                      uint32_t value);
-        static bool stringToBool(const char *value);
-        static uint32_t parseOutputFlagNames(char *name);
-        static uint32_t parseInputFlagNames(char *name);
-        static audio_devices_t parseDeviceNames(char *name);
-        void loadHwModule(cnode *root);
-        void loadHwModules(cnode *root);
-        void loadGlobalConfig(cnode *root, const sp<HwModule>& module);
-        status_t loadAudioPolicyConfig(const char *path);
-        void defaultAudioPolicyConfig(void);
-
-
-        uid_t mUidCached;
-        AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
-        audio_io_handle_t mPrimaryOutput;              // primary output handle
-        // list of descriptors for outputs currently opened
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > mOutputs;
-        // copy of mOutputs before setDeviceConnectionState() opens new outputs
-        // reset to mOutputs when updateDevicesAndOutputs() is called.
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioOutputDescriptor> > mPreviousOutputs;
-        DefaultKeyedVector<audio_io_handle_t, sp<AudioInputDescriptor> > mInputs;     // list of input descriptors
-        DeviceVector  mAvailableOutputDevices; // all available output devices
-        DeviceVector  mAvailableInputDevices;  // all available input devices
-        int mPhoneState;                                                    // current phone state
-        audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];   // current forced use configuration
-
-        StreamDescriptor mStreams[AUDIO_STREAM_CNT];           // stream descriptors for volume control
-        bool    mLimitRingtoneVolume;                                       // limit ringtone volume to music volume if headset connected
-        audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
-        float   mLastVoiceVolume;                                           // last voice volume value sent to audio HAL
-
-        // Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
-        static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
-        // Maximum memory allocated to audio effects in KB
-        static const uint32_t MAX_EFFECTS_MEMORY = 512;
-        uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
-        uint32_t mTotalEffectsMemory;  // current memory used by effects
-        KeyedVector<int, sp<EffectDescriptor> > mEffects;  // list of registered audio effects
-        bool    mA2dpSuspended;  // true if A2DP output is suspended
-        sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
-        bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path
-                                // to boost soft sounds, used to adjust volume curves accordingly
-
-        Vector < sp<HwModule> > mHwModules;
-        volatile int32_t mNextUniqueId;
-        volatile int32_t mAudioPortGeneration;
-
-        DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> > mAudioPatches;
-
-        DefaultKeyedVector<audio_session_t, audio_io_handle_t> mSoundTriggerSessions;
-
-        sp<AudioPatch> mCallTxPatch;
-        sp<AudioPatch> mCallRxPatch;
-
-        // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
-        // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
-        enum {
-            STARTING_OUTPUT,
-            STARTING_BEACON,
-            STOPPING_OUTPUT,
-            STOPPING_BEACON
-        };
-        uint32_t mBeaconMuteRefCount;   // ref count for stream that would mute beacon
-        uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
-        bool mBeaconMuted;              // has STREAM_TTS been muted
-
-        // custom mix entry in mPolicyMixes
-        class AudioPolicyMix : public RefBase {
-        public:
-            AudioPolicyMix() {}
-
-            AudioMix    mMix;                   // Audio policy mix descriptor
-            sp<AudioOutputDescriptor> mOutput;  // Corresponding output stream
-        };
-        DefaultKeyedVector<String8, sp<AudioPolicyMix> > mPolicyMixes; // list of registered mixes
-
-
-#ifdef AUDIO_POLICY_TEST
-        Mutex   mLock;
-        Condition mWaitWorkCV;
-
-        int             mCurOutput;
-        bool            mDirectOutput;
-        audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS];
-        int             mTestInput;
-        uint32_t        mTestDevice;
-        uint32_t        mTestSamplingRate;
-        uint32_t        mTestFormat;
-        uint32_t        mTestChannels;
-        uint32_t        mTestLatencyMs;
-#endif //AUDIO_POLICY_TEST
-        static float volIndexToAmpl(audio_devices_t device, const StreamDescriptor& streamDesc,
-                int indexInUi);
-        static bool isVirtualInputDevice(audio_devices_t device);
-        uint32_t nextUniqueId();
-        uint32_t nextAudioPortGeneration();
-private:
-        // updates device caching and output for streams that can influence the
-        //    routing of notifications
-        void handleNotificationRoutingForStream(audio_stream_type_t stream);
-        static bool deviceDistinguishesOnAddress(audio_devices_t device);
-        // find the outputs on a given output descriptor that have the given address.
-        // To be called on an AudioOutputDescriptor whose supported devices (as defined
-        //   in mProfile->mSupportedDevices) match the device whose address is to be matched.
-        // see deviceDistinguishesOnAddress(audio_devices_t) for whether the device type is one
-        //   where addresses are used to distinguish between one connected device and another.
-        void findIoHandlesByAddress(sp<AudioOutputDescriptor> desc /*in*/,
-                const audio_devices_t device /*in*/,
-                const String8 address /*in*/,
-                SortedVector<audio_io_handle_t>& outputs /*out*/);
-        uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
-        // internal method to return the output handle for the given device and format
-        audio_io_handle_t getOutputForDevice(
-                audio_devices_t device,
-                audio_session_t session,
-                audio_stream_type_t stream,
-                uint32_t samplingRate,
-                audio_format_t format,
-                audio_channel_mask_t channelMask,
-                audio_output_flags_t flags,
-                const audio_offload_info_t *offloadInfo);
-        // internal function to derive a stream type value from audio attributes
-        audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
-        // return true if any output is playing anything besides the stream to ignore
-        bool isAnyOutputActive(audio_stream_type_t streamToIgnore);
-        // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
-        // returns 0 if no mute/unmute event happened, otherwise the largest latency of the
-        //   device where the mute/unmute happened
-        uint32_t handleEventForBeacon(int event);
-        uint32_t setBeaconMute(bool mute);
-        bool     isValidAttributes(const audio_attributes_t *paa);
-
-        // select input device corresponding to requested audio source and return associated policy
-        // mix if any. Calls getDeviceForInputSource().
-        audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
-                                                        AudioMix **policyMix = NULL);
-
-        // Called by setDeviceConnectionState().
-        status_t setDeviceConnectionStateInt(audio_devices_t device,
-                                                          audio_policy_dev_state_t state,
-                                                          const char *device_address);
-        sp<DeviceDescriptor>  getDeviceDescriptor(const audio_devices_t device,
-                                                  const char *device_address);
-
-};
-
-};
diff --git a/services/audiopolicy/common/Android.mk b/services/audiopolicy/common/Android.mk
new file mode 100644
index 0000000..dcce8e3
--- /dev/null
+++ b/services/audiopolicy/common/Android.mk
@@ -0,0 +1,9 @@
+
+LOCAL_PATH := $(call my-dir)
+include $(CLEAR_VARS)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
diff --git a/services/audiopolicy/common/include/RoutingStrategy.h b/services/audiopolicy/common/include/RoutingStrategy.h
new file mode 100644
index 0000000..d38967e
--- /dev/null
+++ b/services/audiopolicy/common/include/RoutingStrategy.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace android {
+
+// Time in milliseconds after media has stopped playing during which we consider that the
+// sonification should be as unobtrusive as while media was playing.
+#define SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY 5000
+
+enum routing_strategy {
+    STRATEGY_MEDIA,
+    STRATEGY_PHONE,
+    STRATEGY_SONIFICATION,
+    STRATEGY_SONIFICATION_RESPECTFUL,
+    STRATEGY_DTMF,
+    STRATEGY_ENFORCED_AUDIBLE,
+    STRATEGY_TRANSMITTED_THROUGH_SPEAKER,
+    STRATEGY_ACCESSIBILITY,
+    STRATEGY_REROUTING,
+    NUM_STRATEGIES
+};
+
+}; //namespace android
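
The NUM_STRATEGIES terminator above is what the policy manager uses to size its
per-strategy caches (see mDeviceForStrategy[NUM_STRATEGIES] in the removed
AudioPolicyManager header earlier in this change). A minimal sketch of that sizing
pattern, assuming RoutingStrategy.h is reachable through the include path exported
by the new common makefiles; the StrategyDeviceCache type is illustrative only:

    #include <cstdint>
    #include <cstdio>
    // Assumed include path: services/audiopolicy/common/include (see Android.mk above).
    #include <RoutingStrategy.h>

    namespace android {
    // Hypothetical per-strategy cache, sized by the NUM_STRATEGIES terminator the
    // same way mDeviceForStrategy[] is sized in the policy manager.
    struct StrategyDeviceCache {
        uint32_t deviceForStrategy[NUM_STRATEGIES] = {};
        void set(routing_strategy s, uint32_t device) { deviceForStrategy[s] = device; }
        uint32_t get(routing_strategy s) const { return deviceForStrategy[s]; }
    };
    } // namespace android

    int main() {
        android::StrategyDeviceCache cache;
        cache.set(android::STRATEGY_MEDIA, 0x2u /* e.g. AUDIO_DEVICE_OUT_SPEAKER */);
        printf("media strategy device: %#x\n", (unsigned)cache.get(android::STRATEGY_MEDIA));
        return 0;
    }
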
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
new file mode 100755
index 0000000..712f7a7
--- /dev/null
+++ b/services/audiopolicy/common/include/Volume.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <math.h>
+
+// Absolute min volume in dB (can be represented in single precision normal float value)
+#define VOLUME_MIN_DB (-758)
+
+class VolumeCurvePoint
+{
+public:
+    int mIndex;
+    float mDBAttenuation;
+};
+
+class Volume
+{
+public:
+    /**
+     * 4 points to define the volume attenuation curve, each characterized by the volume
+     * index (from 0 to 100) at which it applies, and the attenuation in dB at that index.
+     * We use 100 steps to avoid rounding errors when computing the volume in volIndexToDb().
+     *
+     * @todo shall become configurable
+     */
+    enum {
+        VOLMIN = 0,
+        VOLKNEE1 = 1,
+        VOLKNEE2 = 2,
+        VOLMAX = 3,
+
+        VOLCNT = 4
+    };
+
+    /**
+     * device categories used for volume curve management.
+     */
+    enum device_category {
+        DEVICE_CATEGORY_HEADSET,
+        DEVICE_CATEGORY_SPEAKER,
+        DEVICE_CATEGORY_EARPIECE,
+        DEVICE_CATEGORY_EXT_MEDIA,
+        DEVICE_CATEGORY_CNT
+    };
+
+    /**
+     * extract one device relevant for volume control from multiple device selection
+     *
+     * @param[in] device (or combination of devices) from which the volume device is derived
+     *
+     * @return single device retained for volume control, used to limit the number of volume categories
+     */
+    static audio_devices_t getDeviceForVolume(audio_devices_t device)
+    {
+        if (device == AUDIO_DEVICE_NONE) {
+            // this happens when forcing a route update and no track is active on an output.
+            // In this case the returned category is not important.
+            device =  AUDIO_DEVICE_OUT_SPEAKER;
+        } else if (popcount(device) > 1) {
+            // Multiple device selection is either:
+            //  - speaker + one other device: give priority to speaker in this case.
+            //  - one A2DP device + another device: happens with duplicated output. In this case
+            // retain the device on the A2DP output as the other must not correspond to an active
+            // selection if not the speaker.
+            //  - HDMI-CEC system audio mode only output: give priority to available item in order.
+            if (device & AUDIO_DEVICE_OUT_SPEAKER) {
+                device = AUDIO_DEVICE_OUT_SPEAKER;
+            } else if (device & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
+                device = AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+            } else if (device & AUDIO_DEVICE_OUT_HDMI_ARC) {
+                device = AUDIO_DEVICE_OUT_HDMI_ARC;
+            } else if (device & AUDIO_DEVICE_OUT_AUX_LINE) {
+                device = AUDIO_DEVICE_OUT_AUX_LINE;
+            } else if (device & AUDIO_DEVICE_OUT_SPDIF) {
+                device = AUDIO_DEVICE_OUT_SPDIF;
+            } else {
+                device = (audio_devices_t)(device & AUDIO_DEVICE_OUT_ALL_A2DP);
+            }
+        }
+
+        /*SPEAKER_SAFE is an alias of SPEAKER for purposes of volume control*/
+        if (device == AUDIO_DEVICE_OUT_SPEAKER_SAFE)
+            device = AUDIO_DEVICE_OUT_SPEAKER;
+
+        ALOGW_IF(popcount(device) != 1,
+                 "getDeviceForVolume() invalid device combination: %08x",
+                 device);
+
+        return device;
+    }
+
+    /**
+     * returns the category the device belongs to with regard to volume curve management
+     *
+     * @param[in] device whose volume category is to be determined.
+     *
+     * @return device category.
+     */
+    static device_category getDeviceCategory(audio_devices_t device)
+    {
+        switch(getDeviceForVolume(device)) {
+        case AUDIO_DEVICE_OUT_EARPIECE:
+            return DEVICE_CATEGORY_EARPIECE;
+        case AUDIO_DEVICE_OUT_WIRED_HEADSET:
+        case AUDIO_DEVICE_OUT_WIRED_HEADPHONE:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
+            return DEVICE_CATEGORY_HEADSET;
+        case AUDIO_DEVICE_OUT_LINE:
+        case AUDIO_DEVICE_OUT_AUX_DIGITAL:
+            /*USB?  Remote submix?*/
+            return DEVICE_CATEGORY_EXT_MEDIA;
+        case AUDIO_DEVICE_OUT_SPEAKER:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT:
+        case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER:
+        case AUDIO_DEVICE_OUT_USB_ACCESSORY:
+        case AUDIO_DEVICE_OUT_USB_DEVICE:
+        case AUDIO_DEVICE_OUT_REMOTE_SUBMIX:
+        default:
+            return DEVICE_CATEGORY_SPEAKER;
+        }
+    }
+
+    static inline float DbToAmpl(float decibels)
+    {
+        if (decibels <= VOLUME_MIN_DB) {
+            return 0.0f;
+        }
+        return exp( decibels * 0.115129f); // exp( dB * ln(10) / 20 )
+    }
+
+    static inline float AmplToDb(float amplification)
+    {
+        if (amplification == 0) {
+            return VOLUME_MIN_DB;
+        }
+        return 20 * log10(amplification);
+    }
+
+};
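
To illustrate how the four curve points described above are typically consumed (the
actual interpolation lives in the policy manager's volIndexToDb()/volIndexToAmpl(),
not in this header), here is a minimal, self-contained sketch that linearly
interpolates a UI index between two curve points and converts the resulting
attenuation to a linear amplitude with the same exp(dB * ln(10) / 20) formula used
by Volume::DbToAmpl(). The CurvePoint struct, the indexToAmpl() helper and the
sample speaker curve values are illustrative assumptions, not values from this change:

    #include <cmath>
    #include <cstdio>

    struct CurvePoint { int index; float dB; };   // mirrors VolumeCurvePoint

    // Piecewise-linear interpolation over a 4-point curve (indices 0..100), then
    // dB -> amplitude as in Volume::DbToAmpl(): exp(dB * ln(10) / 20).
    // Assumes the curve indices are strictly increasing.
    static float indexToAmpl(const CurvePoint curve[4], int uiIndex) {
        if (uiIndex <= curve[0].index) return 0.0f;         // at or below VOLMIN: silence
        int seg = 0;
        while (seg < 3 && uiIndex > curve[seg + 1].index) ++seg;
        const CurvePoint& lo = curve[seg];
        const CurvePoint& hi = curve[seg + 1];
        float t = float(uiIndex - lo.index) / float(hi.index - lo.index);
        float dB = lo.dB + t * (hi.dB - lo.dB);
        return std::exp(dB * 0.115129f);                    // same constant as DbToAmpl()
    }

    int main() {
        // Hypothetical speaker curve: VOLMIN, VOLKNEE1, VOLKNEE2, VOLMAX
        const CurvePoint speaker[4] = { {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f} };
        const int samples[] = {1, 20, 60, 100};
        for (int idx : samples)
            printf("index %3d -> amplitude %.4f\n", idx, indexToAmpl(speaker, idx));
        return 0;
    }
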
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
new file mode 100755
index 0000000..4b73e3c
--- /dev/null
+++ b/services/audiopolicy/common/include/policy.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+
+// For mixed output and inputs, the policy will use max mixer sampling rates.
+// Do not limit sampling rate otherwise
+#define MAX_MIXER_SAMPLING_RATE 192000
+
+// For mixed output and inputs, the policy will use max mixer channel count.
+// Do not limit channel count otherwise
+#define MAX_MIXER_CHANNEL_COUNT 8
+
+/**
+ * A device mask for all audio input devices that are considered "virtual" when evaluating
+ * active inputs in getActiveInput()
+ */
+#define APM_AUDIO_IN_DEVICE_VIRTUAL_ALL  (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_FM_TUNER)
+
+
+/**
+ * A device mask for all audio input and output devices where matching inputs/outputs on device
+ * type alone is not enough: the address must match too
+ */
+#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
+
+#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
+
+/**
+ * Check if the given state corresponds to an in-call state.
+ * @TODO find a better name for widely call state
+ *
+ * @param[in] state to consider
+ *
+ * @return true if given state represents a device in a telephony or VoIP call
+ */
+static inline bool is_state_in_call(int state)
+{
+    return (state == AUDIO_MODE_IN_CALL) || (state == AUDIO_MODE_IN_COMMUNICATION);
+}
+
+/**
+ * Check if the given input device is considered a virtual device.
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device is a virtual one, false otherwise.
+ */
+static inline bool is_virtual_input_device(audio_devices_t device)
+{
+    if ((device & AUDIO_DEVICE_BIT_IN) != 0) {
+        device &= ~AUDIO_DEVICE_BIT_IN;
+        if ((popcount(device) == 1) && ((device & ~APM_AUDIO_IN_DEVICE_VIRTUAL_ALL) == 0))
+            return true;
+    }
+    return false;
+}
+
+/**
+ * Check whether the device type is one
+ * where addresses are used to distinguish between one connected device and another
+ *
+ * @param[in] device to consider
+ *
+ * @return true if the device must be distinguished by address, false otherwise.
+ */
+static inline bool device_distinguishes_on_address(audio_devices_t device)
+{
+    return (((device & AUDIO_DEVICE_BIT_IN) != 0) &&
+            ((~AUDIO_DEVICE_BIT_IN & device & APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL) != 0)) ||
+           (((device & AUDIO_DEVICE_BIT_IN) == 0) &&
+            ((device & APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL) != 0));
+}
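
A hedged usage sketch of the inline helpers above, assuming an AOSP build
environment where <system/audio.h> and this policy.h are on the include path
(as arranged by the audiopolicy makefiles); the standalone main() is for
illustration only:

    #include <cstdio>
    #include <system/audio.h>
    #include <policy.h>

    int main() {
        // Remote submix capture is "virtual": it does not come from a physical
        // microphone, so getActiveInput() skips it when ignoreVirtualInputs is true.
        printf("remote submix input virtual: %d\n",
               is_virtual_input_device(AUDIO_DEVICE_IN_REMOTE_SUBMIX));
        printf("built-in mic virtual:        %d\n",
               is_virtual_input_device(AUDIO_DEVICE_IN_BUILTIN_MIC));

        // Remote submix endpoints are matched by address as well as by type.
        printf("submix output needs address match: %d\n",
               device_distinguishes_on_address(AUDIO_DEVICE_OUT_REMOTE_SUBMIX));

        // Both telephony and VoIP modes count as "in call" for policy decisions.
        printf("AUDIO_MODE_IN_COMMUNICATION is in call: %d\n",
               is_state_in_call(AUDIO_MODE_IN_COMMUNICATION));
        return 0;
    }
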
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
new file mode 100644
index 0000000..8728ff3
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -0,0 +1,36 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES:= \
+    src/DeviceDescriptor.cpp \
+    src/AudioGain.cpp \
+    src/StreamDescriptor.cpp \
+    src/HwModule.cpp \
+    src/IOProfile.cpp \
+    src/AudioPort.cpp \
+    src/AudioPolicyMix.cpp \
+    src/AudioPatch.cpp \
+    src/AudioInputDescriptor.cpp \
+    src/AudioOutputDescriptor.cpp \
+    src/EffectDescriptor.cpp \
+    src/ConfigParsingUtils.cpp \
+    src/SoundTriggerSession.cpp \
+    src/SessionRoute.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libutils \
+    liblog \
+
+LOCAL_C_INCLUDES += \
+    $(LOCAL_PATH)/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    $(LOCAL_PATH)/include
+
+LOCAL_MODULE := libaudiopolicycomponents
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
new file mode 100644
index 0000000..21fbf9b
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <system/audio.h>
+
+namespace android {
+
+class AudioGain: public RefBase
+{
+public:
+    AudioGain(int index, bool useInChannelMask);
+    virtual ~AudioGain() {}
+
+    void dump(int fd, int spaces, int index) const;
+
+    void getDefaultConfig(struct audio_gain_config *config);
+    status_t checkConfig(const struct audio_gain_config *config);
+    int               mIndex;
+    struct audio_gain mGain;
+    bool              mUseInChannelMask;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
new file mode 100644
index 0000000..18bcfdb
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioPort.h"
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+class IOProfile;
+class AudioMix;
+
+// descriptor for audio inputs. Used to maintain current configuration of each opened audio input
+// and keep track of the usage of this input.
+class AudioInputDescriptor: public AudioPortConfig
+{
+public:
+    AudioInputDescriptor(const sp<IOProfile>& profile);
+    void setIoHandle(audio_io_handle_t ioHandle);
+    audio_port_handle_t getId() const;
+    audio_module_handle_t getModuleHandle() const;
+
+    status_t    dump(int fd);
+
+    audio_io_handle_t             mIoHandle;       // input handle
+    audio_devices_t               mDevice;         // current device this input is routed to
+    AudioMix                      *mPolicyMix;     // non NULL when used by a dynamic policy
+    audio_patch_handle_t          mPatchHandle;
+    uint32_t                      mRefCount;       // number of AudioRecord clients using
+    // this input
+    uint32_t                      mOpenRefCount;
+    audio_source_t                mInputSource;    // input source selected by application
+    //(mediarecorder.h)
+    const sp<IOProfile>           mProfile;        // I/O profile this input derives from
+    SortedVector<audio_session_t> mSessions;       // audio sessions attached to this input
+    bool                          mIsSoundTrigger; // used by a soundtrigger capture
+
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+            const struct audio_port_config *srcConfig = NULL) const;
+    virtual sp<AudioPort> getAudioPort() const { return mProfile; }
+    void toAudioPort(struct audio_port *port) const;
+
+private:
+    audio_port_handle_t           mId;
+};
+
+class AudioInputCollection :
+        public DefaultKeyedVector< audio_io_handle_t, sp<AudioInputDescriptor> >
+{
+public:
+    bool isSourceActive(audio_source_t source) const;
+
+    sp<AudioInputDescriptor> getInputFromId(audio_port_handle_t id) const;
+
+    uint32_t activeInputsCount() const;
+
+    /**
+     * return io handle of active input or 0 if no input is active
+     * Only considers inputs from physical devices (e.g. main mic, headset mic) when
+     * ignoreVirtualInputs is true.
+     */
+    audio_io_handle_t getActiveInput(bool ignoreVirtualInputs = true);
+
+    audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
+
+    status_t dump(int fd) const;
+};
+
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
new file mode 100644
index 0000000..50f622d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioPort.h"
+#include <RoutingStrategy.h>
+#include <utils/Errors.h>
+#include <utils/Timers.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+
+namespace android {
+
+class IOProfile;
+class AudioMix;
+class AudioPolicyClientInterface;
+
+// descriptor for audio outputs. Used to maintain current configuration of each opened audio output
+// and keep track of the usage of this output by each audio stream type.
+class AudioOutputDescriptor: public AudioPortConfig
+{
+public:
+    AudioOutputDescriptor(const sp<AudioPort>& port,
+                          AudioPolicyClientInterface *clientInterface);
+    virtual ~AudioOutputDescriptor() {}
+
+    status_t    dump(int fd);
+    void        log(const char* indent);
+
+    audio_port_handle_t getId() const;
+    virtual audio_devices_t device() const;
+    virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc);
+    virtual audio_devices_t supportedDevices();
+    virtual bool isDuplicated() const { return false; }
+    virtual uint32_t latency() { return 0; }
+    virtual bool isFixedVolume(audio_devices_t device);
+    virtual sp<AudioOutputDescriptor> subOutput1() { return 0; }
+    virtual sp<AudioOutputDescriptor> subOutput2() { return 0; }
+    virtual bool setVolume(float volume,
+                           audio_stream_type_t stream,
+                           audio_devices_t device,
+                           uint32_t delayMs,
+                           bool force);
+    virtual void changeRefCount(audio_stream_type_t stream, int delta);
+
+    bool isActive(uint32_t inPastMs = 0) const;
+    bool isStreamActive(audio_stream_type_t stream,
+                        uint32_t inPastMs = 0,
+                        nsecs_t sysTime = 0) const;
+
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+                           const struct audio_port_config *srcConfig = NULL) const;
+    virtual sp<AudioPort> getAudioPort() const { return mPort; }
+    virtual void toAudioPort(struct audio_port *port) const;
+
+    audio_module_handle_t getModuleHandle() const;
+
+    sp<AudioPort>       mPort;
+    audio_devices_t mDevice;                   // current device this output is routed to
+    audio_patch_handle_t mPatchHandle;
+    uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
+    nsecs_t mStopTime[AUDIO_STREAM_CNT];
+    float mCurVolume[AUDIO_STREAM_CNT];   // current stream volume in dB
+    int mMuteCount[AUDIO_STREAM_CNT];     // mute request counter
+    bool mStrategyMutedByDevice[NUM_STRATEGIES]; // strategies muted because of incompatible
+                                        // device selection. See checkDeviceMuteStrategies()
+    AudioPolicyClientInterface *mClientInterface;
+
+protected:
+    audio_port_handle_t mId;
+};
+
+// Audio output driven by a software mixer in audio flinger.
+class SwAudioOutputDescriptor: public AudioOutputDescriptor
+{
+public:
+    SwAudioOutputDescriptor(const sp<IOProfile>& profile,
+                            AudioPolicyClientInterface *clientInterface);
+    virtual ~SwAudioOutputDescriptor() {}
+
+    status_t    dump(int fd);
+
+    void setIoHandle(audio_io_handle_t ioHandle);
+
+    virtual audio_devices_t device() const;
+    virtual bool sharesHwModuleWith(const sp<AudioOutputDescriptor> outputDesc);
+    virtual audio_devices_t supportedDevices();
+    virtual uint32_t latency();
+    virtual bool isDuplicated() const { return (mOutput1 != NULL && mOutput2 != NULL); }
+    virtual bool isFixedVolume(audio_devices_t device);
+    virtual sp<AudioOutputDescriptor> subOutput1() { return mOutput1; }
+    virtual sp<AudioOutputDescriptor> subOutput2() { return mOutput2; }
+    virtual void changeRefCount(audio_stream_type_t stream, int delta);
+    virtual bool setVolume(float volume,
+                           audio_stream_type_t stream,
+                           audio_devices_t device,
+                           uint32_t delayMs,
+                           bool force);
+
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+                           const struct audio_port_config *srcConfig = NULL) const;
+    virtual void toAudioPort(struct audio_port *port) const;
+
+    const sp<IOProfile> mProfile;          // I/O profile this output derives from
+    audio_io_handle_t mIoHandle;           // output handle
+    uint32_t mLatency;                  //
+    audio_output_flags_t mFlags;   //
+    AudioMix *mPolicyMix;             // non NULL when used by a dynamic policy
+    sp<SwAudioOutputDescriptor> mOutput1;    // used by duplicated outputs: first output
+    sp<SwAudioOutputDescriptor> mOutput2;    // used by duplicated outputs: second output
+    uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
+    uint32_t mGlobalRefCount;  // non-stream-specific ref count
+};
+
+class SwAudioOutputCollection :
+        public DefaultKeyedVector< audio_io_handle_t, sp<SwAudioOutputDescriptor> >
+{
+public:
+    bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+
+    /**
+     * return whether a stream is playing remotely; override to change the definition of
+     * local/remote playback, used for instance by the notification manager so that media
+     * players do not lose audio focus when not playing locally.
+     * For the base implementation, "remotely" means playing during screen mirroring, which
+     * uses an output for playback with a non-empty, non-"0" address.
+     */
+    bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+
+    /**
+     * returns the A2DP output handle if it is open or 0 otherwise
+     */
+    audio_io_handle_t getA2dpOutput() const;
+
+    sp<SwAudioOutputDescriptor> getOutputFromId(audio_port_handle_t id) const;
+
+    sp<SwAudioOutputDescriptor> getPrimaryOutput() const;
+
+    /**
+     * return true if any output is playing anything besides the stream to ignore
+     */
+    bool isAnyOutputActive(audio_stream_type_t streamToIgnore) const;
+
+    audio_devices_t getSupportedDevices(audio_io_handle_t handle) const;
+
+    status_t dump(int fd) const;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
new file mode 100644
index 0000000..385f257
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPatch.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+
+class AudioPatch : public RefBase
+{
+public:
+    AudioPatch(const struct audio_patch *patch, uid_t uid);
+
+    status_t dump(int fd, int spaces, int index) const;
+
+    audio_patch_handle_t mHandle;
+    struct audio_patch mPatch;
+    uid_t mUid;
+    audio_patch_handle_t mAfPatchHandle;
+
+private:
+    static volatile int32_t mNextUniqueId;
+};
+
+class AudioPatchCollection : public DefaultKeyedVector<audio_patch_handle_t, sp<AudioPatch> >
+{
+public:
+    status_t addAudioPatch(audio_patch_handle_t handle, const sp<AudioPatch>& patch);
+
+    status_t removeAudioPatch(audio_patch_handle_t handle);
+
+    status_t listAudioPatches(unsigned int *num_patches, struct audio_patch *patches) const;
+
+    status_t dump(int fd) const;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
new file mode 100644
index 0000000..d51f4e1
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/RefBase.h>
+#include <media/AudioPolicy.h>
+#include <utils/KeyedVector.h>
+#include <hardware/audio.h>
+#include <utils/String8.h>
+
+namespace android {
+
+class SwAudioOutputDescriptor;
+
+/**
+ * custom mix entry in mPolicyMixes
+ */
+class AudioPolicyMix : public RefBase {
+public:
+    AudioPolicyMix() {}
+
+    const sp<SwAudioOutputDescriptor> &getOutput() const;
+
+    void setOutput(sp<SwAudioOutputDescriptor> &output);
+
+    void clearOutput();
+
+    android::AudioMix *getMix();
+
+    void setMix(AudioMix &mix);
+
+private:
+    AudioMix    mMix;                   // Audio policy mix descriptor
+    sp<SwAudioOutputDescriptor> mOutput;  // Corresponding output stream
+};
+
+
+class AudioPolicyMixCollection : public DefaultKeyedVector<String8, sp<AudioPolicyMix> >
+{
+public:
+    status_t getAudioPolicyMix(String8 address, sp<AudioPolicyMix> &policyMix) const;
+
+    status_t registerMix(String8 address, AudioMix mix);
+
+    status_t unregisterMix(String8 address);
+
+    void closeOutput(sp<SwAudioOutputDescriptor> &desc);
+
+    /**
+     * Try to find an output descriptor for the given attributes.
+     *
+     * @param[in] attributes to consider for the search of an output descriptor.
+     * @param[out] desc to return if an output could be found.
+     *
+     * @return NO_ERROR if an output was found for the given attribute (in this case, the
+     *                  descriptor output param is initialized), error code otherwise.
+     */
+    status_t getOutputForAttr(audio_attributes_t attributes, sp<SwAudioOutputDescriptor> &desc);
+
+    audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                  audio_devices_t availableDeviceTypes,
+                                                  AudioMix **policyMix);
+
+    status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
new file mode 100644
index 0000000..4fdf5b4
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/String8.h>
+#include <utils/Vector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class HwModule;
+class AudioGain;
+
+class AudioPort : public virtual RefBase
+{
+public:
+    AudioPort(const String8& name, audio_port_type_t type,
+              audio_port_role_t role);
+    virtual ~AudioPort() {}
+
+    virtual void attach(const sp<HwModule>& module);
+    bool isAttached() { return mModule != 0; }
+
+    static audio_port_handle_t getNextUniqueId();
+
+    virtual void toAudioPort(struct audio_port *port) const;
+
+    virtual void importAudioPort(const sp<AudioPort> port);
+    void clearCapabilities();
+
+    void loadSamplingRates(char *name);
+    void loadFormats(char *name);
+    void loadOutChannels(char *name);
+    void loadInChannels(char *name);
+
+    audio_gain_mode_t loadGainMode(char *name);
+    void loadGain(cnode *root, int index);
+    virtual void loadGains(cnode *root);
+
+    // searches for an exact match
+    status_t checkExactSamplingRate(uint32_t samplingRate) const;
+    // searches for a compatible match, and returns the best match via updatedSamplingRate
+    status_t checkCompatibleSamplingRate(uint32_t samplingRate,
+            uint32_t *updatedSamplingRate) const;
+    // searches for an exact match
+    status_t checkExactChannelMask(audio_channel_mask_t channelMask) const;
+    // searches for a compatible match, currently implemented for input channel masks only
+    status_t checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+            audio_channel_mask_t *updatedChannelMask) const;
+
+    status_t checkExactFormat(audio_format_t format) const;
+    // searches for a compatible match, currently implemented for input formats only
+    status_t checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat) const;
+    status_t checkGain(const struct audio_gain_config *gainConfig, int index) const;
+
+    uint32_t pickSamplingRate() const;
+    audio_channel_mask_t pickChannelMask() const;
+    audio_format_t pickFormat() const;
+
+    static const audio_format_t sPcmFormatCompareTable[];
+    static int compareFormats(const audio_format_t *format1, const audio_format_t *format2) {
+        return compareFormats(*format1, *format2);
+    }
+    static int compareFormats(audio_format_t format1, audio_format_t format2);
+
+    audio_module_handle_t getModuleHandle() const;
+    uint32_t getModuleVersion() const;
+    const char *getModuleName() const;
+
+    void dump(int fd, int spaces) const;
+    void log(const char* indent) const;
+
+    String8           mName;
+    audio_port_type_t mType;
+    audio_port_role_t mRole;
+    bool              mUseInChannelMask;
+    // by convention, "0" in the first entry in mSamplingRates, mChannelMasks or mFormats
+    // indicates the supported parameters should be read from the output stream
+    // after it is opened for the first time
+    Vector <uint32_t> mSamplingRates; // supported sampling rates
+    Vector <audio_channel_mask_t> mChannelMasks; // supported channel masks
+    Vector <audio_format_t> mFormats; // supported audio formats
+    Vector < sp<AudioGain> > mGains; // gain controllers
+    sp<HwModule> mModule;                 // audio HW module exposing this I/O stream
+    uint32_t mFlags; // attribute flags (e.g. primary output,
+                     // direct output...).
+
+private:
+    static volatile int32_t mNextUniqueId;
+};
+
+class AudioPortConfig : public virtual RefBase
+{
+public:
+    AudioPortConfig();
+    virtual ~AudioPortConfig() {}
+
+    status_t applyAudioPortConfig(const struct audio_port_config *config,
+            struct audio_port_config *backupConfig = NULL);
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+            const struct audio_port_config *srcConfig = NULL) const = 0;
+    virtual sp<AudioPort> getAudioPort() const = 0;
+    uint32_t mSamplingRate;
+    audio_format_t mFormat;
+    audio_channel_mask_t mChannelMask;
+    struct audio_gain_config mGain;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
new file mode 100644
index 0000000..78d2cdf
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/ConfigParsingUtils.h
@@ -0,0 +1,275 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "DeviceDescriptor.h"
+#include "HwModule.h"
+#include "audio_policy_conf.h"
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <utils/Vector.h>
+#include <utils/SortedVector.h>
+#include <cutils/config_utils.h>
+#include <utils/RefBase.h>
+#include <system/audio_policy.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+// Definitions for audio_policy.conf file parsing
+// ----------------------------------------------------------------------------
+
+struct StringToEnum {
+    const char *name;
+    uint32_t value;
+};
+
+// TODO: move to a separate file. Should be in sync with audio.h.
+#define STRING_TO_ENUM(string) { #string, (uint32_t)string } // uint32_t cast removes warning
+#define NAME_TO_ENUM(name, value) { name, value }
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+const StringToEnum sDeviceTypeToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_EARPIECE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPEAKER_SAFE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_DIGITAL),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_ACCESSORY),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_USB_DEVICE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_ALL_USB),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_TELEPHONY_TX),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_LINE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_HDMI_ARC),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_SPDIF),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_FM),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
+    STRING_TO_ENUM(AUDIO_DEVICE_OUT_IP),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_AMBIENT),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_ALL_SCO),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_WIRED_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_AUX_DIGITAL),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_HDMI),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_TELEPHONY_RX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_VOICE_CALL),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BACK_MIC),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_ACCESSORY),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_USB_DEVICE),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_FM_TUNER),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_TV_TUNER),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_LINE),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_SPDIF),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
+    STRING_TO_ENUM(AUDIO_DEVICE_IN_IP),
+};
+
+const StringToEnum sDeviceNameToEnumTable[] = {
+    NAME_TO_ENUM("Earpiece", AUDIO_DEVICE_OUT_EARPIECE),
+    NAME_TO_ENUM("Speaker", AUDIO_DEVICE_OUT_SPEAKER),
+    NAME_TO_ENUM("Speaker Protected", AUDIO_DEVICE_OUT_SPEAKER_SAFE),
+    NAME_TO_ENUM("Wired Headset", AUDIO_DEVICE_OUT_WIRED_HEADSET),
+    NAME_TO_ENUM("Wired Headphones", AUDIO_DEVICE_OUT_WIRED_HEADPHONE),
+    NAME_TO_ENUM("BT SCO", AUDIO_DEVICE_OUT_BLUETOOTH_SCO),
+    NAME_TO_ENUM("BT SCO Headset", AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET),
+    NAME_TO_ENUM("BT SCO Car Kit", AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT),
+    NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_SCO),
+    NAME_TO_ENUM("BT A2DP Out", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP),
+    NAME_TO_ENUM("BT A2DP Headphones", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES),
+    NAME_TO_ENUM("BT A2DP Speaker", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER),
+    NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_A2DP),
+    NAME_TO_ENUM("HDMI Out", AUDIO_DEVICE_OUT_AUX_DIGITAL),
+    NAME_TO_ENUM("HDMI Out", AUDIO_DEVICE_OUT_HDMI),
+    NAME_TO_ENUM("Analog Dock Out", AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET),
+    NAME_TO_ENUM("Digital Dock Out", AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET),
+    NAME_TO_ENUM("USB Host Out", AUDIO_DEVICE_OUT_USB_ACCESSORY),
+    NAME_TO_ENUM("USB Device Out", AUDIO_DEVICE_OUT_USB_DEVICE),
+    NAME_TO_ENUM("", AUDIO_DEVICE_OUT_ALL_USB),
+    NAME_TO_ENUM("Reroute Submix Out", AUDIO_DEVICE_OUT_REMOTE_SUBMIX),
+    NAME_TO_ENUM("Telephony Tx", AUDIO_DEVICE_OUT_TELEPHONY_TX),
+    NAME_TO_ENUM("Line Out", AUDIO_DEVICE_OUT_LINE),
+    NAME_TO_ENUM("HDMI ARC Out", AUDIO_DEVICE_OUT_HDMI_ARC),
+    NAME_TO_ENUM("S/PDIF Out", AUDIO_DEVICE_OUT_SPDIF),
+    NAME_TO_ENUM("FM transceiver Out", AUDIO_DEVICE_OUT_FM),
+    NAME_TO_ENUM("Aux Line Out", AUDIO_DEVICE_OUT_AUX_LINE),
+    NAME_TO_ENUM("IP Out", AUDIO_DEVICE_OUT_IP),
+    NAME_TO_ENUM("Ambient Mic", AUDIO_DEVICE_IN_AMBIENT),
+    NAME_TO_ENUM("Built-In Mic", AUDIO_DEVICE_IN_BUILTIN_MIC),
+    NAME_TO_ENUM("BT SCO Headset Mic", AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
+    NAME_TO_ENUM("", AUDIO_DEVICE_IN_ALL_SCO),
+    NAME_TO_ENUM("Wired Headset Mic", AUDIO_DEVICE_IN_WIRED_HEADSET),
+    NAME_TO_ENUM("HDMI In", AUDIO_DEVICE_IN_AUX_DIGITAL),
+    NAME_TO_ENUM("HDMI In", AUDIO_DEVICE_IN_HDMI),
+    NAME_TO_ENUM("Telephony Rx", AUDIO_DEVICE_IN_TELEPHONY_RX),
+    NAME_TO_ENUM("Telephony Rx", AUDIO_DEVICE_IN_VOICE_CALL),
+    NAME_TO_ENUM("Built-In Back Mic", AUDIO_DEVICE_IN_BACK_MIC),
+    NAME_TO_ENUM("Reroute Submix In", AUDIO_DEVICE_IN_REMOTE_SUBMIX),
+    NAME_TO_ENUM("Analog Dock In", AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET),
+    NAME_TO_ENUM("Digital Dock In", AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET),
+    NAME_TO_ENUM("USB Host In", AUDIO_DEVICE_IN_USB_ACCESSORY),
+    NAME_TO_ENUM("USB Device In", AUDIO_DEVICE_IN_USB_DEVICE),
+    NAME_TO_ENUM("FM Tuner In", AUDIO_DEVICE_IN_FM_TUNER),
+    NAME_TO_ENUM("TV Tuner In", AUDIO_DEVICE_IN_TV_TUNER),
+    NAME_TO_ENUM("Line In", AUDIO_DEVICE_IN_LINE),
+    NAME_TO_ENUM("S/PDIF In", AUDIO_DEVICE_IN_SPDIF),
+    NAME_TO_ENUM("BT A2DP In", AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
+    NAME_TO_ENUM("Loopback In", AUDIO_DEVICE_IN_LOOPBACK),
+    NAME_TO_ENUM("IP In", AUDIO_DEVICE_IN_IP),
+};
+
+const StringToEnum sOutputFlagNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DIRECT),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_PRIMARY),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_FAST),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_NON_BLOCKING),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_HW_AV_SYNC),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_TTS),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_RAW),
+    STRING_TO_ENUM(AUDIO_OUTPUT_FLAG_SYNC),
+};
+
+const StringToEnum sInputFlagNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_INPUT_FLAG_FAST),
+    STRING_TO_ENUM(AUDIO_INPUT_FLAG_HW_HOTWORD),
+    STRING_TO_ENUM(AUDIO_INPUT_FLAG_RAW),
+    STRING_TO_ENUM(AUDIO_INPUT_FLAG_SYNC),
+};
+
+const StringToEnum sFormatNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_16_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_32_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_8_24_BIT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_FLOAT),
+    STRING_TO_ENUM(AUDIO_FORMAT_PCM_24_BIT_PACKED),
+    STRING_TO_ENUM(AUDIO_FORMAT_MP3),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_MAIN),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LC),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_SSR),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LTP),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_HE_V1),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_SCALABLE),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_ERLC),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_LD),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_HE_V2),
+    STRING_TO_ENUM(AUDIO_FORMAT_AAC_ELD),
+    STRING_TO_ENUM(AUDIO_FORMAT_VORBIS),
+    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V1),
+    STRING_TO_ENUM(AUDIO_FORMAT_HE_AAC_V2),
+    STRING_TO_ENUM(AUDIO_FORMAT_OPUS),
+    STRING_TO_ENUM(AUDIO_FORMAT_AC3),
+    STRING_TO_ENUM(AUDIO_FORMAT_E_AC3),
+    STRING_TO_ENUM(AUDIO_FORMAT_DTS),
+    STRING_TO_ENUM(AUDIO_FORMAT_DTS_HD),
+};
+
+const StringToEnum sOutChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_MONO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_QUAD),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_5POINT1),
+    STRING_TO_ENUM(AUDIO_CHANNEL_OUT_7POINT1),
+};
+
+const StringToEnum sInChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_MONO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_STEREO),
+    STRING_TO_ENUM(AUDIO_CHANNEL_IN_FRONT_BACK),
+};
+
+const StringToEnum sIndexChannelsNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_1),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_2),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_3),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_4),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_5),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_6),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_7),
+    STRING_TO_ENUM(AUDIO_CHANNEL_INDEX_MASK_8),
+};
+
+const StringToEnum sGainModeNameToEnumTable[] = {
+    STRING_TO_ENUM(AUDIO_GAIN_MODE_JOINT),
+    STRING_TO_ENUM(AUDIO_GAIN_MODE_CHANNELS),
+    STRING_TO_ENUM(AUDIO_GAIN_MODE_RAMP),
+};
+
+class ConfigParsingUtils
+{
+public:
+    static uint32_t stringToEnum(const struct StringToEnum *table,
+            size_t size,
+            const char *name);
+    static const char *enumToString(const struct StringToEnum *table,
+            size_t size,
+            uint32_t value);
+    static bool stringToBool(const char *value);
+    static uint32_t parseOutputFlagNames(char *name);
+    static uint32_t parseInputFlagNames(char *name);
+    static audio_devices_t parseDeviceNames(char *name);
+
+    static void loadHwModules(cnode *root, HwModuleCollection &hwModules,
+                              DeviceVector &availableInputDevices,
+                              DeviceVector &availableOutputDevices,
+                              sp<DeviceDescriptor> &defaultOutputDevices,
+                              bool &isSpeakerDrcEnabled);
+
+    static void loadGlobalConfig(cnode *root, const sp<HwModule>& module,
+                                 DeviceVector &availableInputDevices,
+                                 DeviceVector &availableOutputDevices,
+                                 sp<DeviceDescriptor> &defaultOutputDevices,
+                                 bool &isSpeakerDrcEnabled);
+
+    static status_t loadAudioPolicyConfig(const char *path,
+                                          HwModuleCollection &hwModules,
+                                          DeviceVector &availableInputDevices,
+                                          DeviceVector &availableOutputDevices,
+                                          sp<DeviceDescriptor> &defaultOutputDevices,
+                                          bool &isSpeakerDrcEnabled);
+
+private:
+    static void loadHwModule(cnode *root, HwModuleCollection &hwModules,
+                             DeviceVector &availableInputDevices,
+                             DeviceVector &availableOutputDevices,
+                             sp<DeviceDescriptor> &defaultOutputDevices,
+                             bool &isSpeakerDrcEnabled);
+};
+
+}; // namespace android
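
The StringToEnum tables above pair the textual names used in audio_policy.conf with audio.h enum values, and ConfigParsingUtils (declared above, implemented elsewhere in this patch) resolves single names or '|'-separated flag lists into bitmasks. The sketch below is not that implementation, only a minimal standalone illustration of the lookup idea, using a made-up miniature table and plain uint32_t values instead of the real AOSP types.

    // Standalone sketch: table lookup plus a '|'-separated flag-name parser.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <sstream>
    #include <string>

    struct StringToEnum {
        const char *name;
        uint32_t    value;
    };

    // Hypothetical miniature table, for illustration only.
    static const StringToEnum kFlagTable[] = {
        { "AUDIO_OUTPUT_FLAG_DIRECT",      0x1 },
        { "AUDIO_OUTPUT_FLAG_PRIMARY",     0x2 },
        { "AUDIO_OUTPUT_FLAG_FAST",        0x4 },
        { "AUDIO_OUTPUT_FLAG_DEEP_BUFFER", 0x8 },
    };

    static uint32_t stringToEnum(const StringToEnum *table, size_t size, const char *name)
    {
        for (size_t i = 0; i < size; i++) {
            if (strcmp(table[i].name, name) == 0) {
                return table[i].value;
            }
        }
        return 0; // unknown names contribute nothing to the mask
    }

    // Combine "FLAG_A|FLAG_B" into a single bitmask, the general shape a
    // parseOutputFlagNames-style helper would take.
    static uint32_t parseFlagNames(const std::string &names)
    {
        uint32_t mask = 0;
        std::stringstream ss(names);
        std::string token;
        while (std::getline(ss, token, '|')) {
            mask |= stringToEnum(kFlagTable,
                                 sizeof(kFlagTable) / sizeof(kFlagTable[0]),
                                 token.c_str());
        }
        return mask;
    }

    int main()
    {
        printf("mask = 0x%x\n",
               parseFlagNames("AUDIO_OUTPUT_FLAG_FAST|AUDIO_OUTPUT_FLAG_PRIMARY"));
        return 0;
    }
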
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
new file mode 100644
index 0000000..c42ece6
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioPort.h"
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <cutils/config_utils.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+
+namespace android {
+
+class DeviceDescriptor : public AudioPort, public AudioPortConfig
+{
+public:
+    DeviceDescriptor(audio_devices_t type);
+
+    virtual ~DeviceDescriptor() {}
+
+    bool equals(const sp<DeviceDescriptor>& other) const;
+
+    // AudioPortConfig
+    virtual sp<AudioPort> getAudioPort() const { return (AudioPort*) this; }
+    virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
+            const struct audio_port_config *srcConfig = NULL) const;
+
+    // AudioPort
+    virtual void attach(const sp<HwModule>& module);
+    virtual void loadGains(cnode *root);
+    virtual void toAudioPort(struct audio_port *port) const;
+    virtual void importAudioPort(const sp<AudioPort> port);
+
+    audio_port_handle_t getId() const;
+    audio_devices_t type() const { return mDeviceType; }
+    status_t dump(int fd, int spaces, int index) const;
+    void log() const;
+
+    String8 mTag;
+    String8 mAddress;
+
+private:
+    audio_devices_t     mDeviceType;
+    audio_port_handle_t mId;
+
+friend class DeviceVector;
+};
+
+class DeviceVector : public SortedVector< sp<DeviceDescriptor> >
+{
+public:
+    DeviceVector() : SortedVector(), mDeviceTypes(AUDIO_DEVICE_NONE) {}
+
+    ssize_t add(const sp<DeviceDescriptor>& item);
+    ssize_t remove(const sp<DeviceDescriptor>& item);
+    ssize_t indexOf(const sp<DeviceDescriptor>& item) const;
+
+    audio_devices_t types() const { return mDeviceTypes; }
+
+    void loadDevicesFromType(audio_devices_t types);
+    void loadDevicesFromTag(char *tag, const DeviceVector& declaredDevices);
+
+    sp<DeviceDescriptor> getDevice(audio_devices_t type, String8 address) const;
+    DeviceVector getDevicesFromType(audio_devices_t types) const;
+    sp<DeviceDescriptor> getDeviceFromId(audio_port_handle_t id) const;
+    sp<DeviceDescriptor> getDeviceFromTag(const String8& tag) const;
+    DeviceVector getDevicesFromTypeAddr(audio_devices_t type, String8 address) const;
+
+    audio_devices_t getDevicesFromHwModule(audio_module_handle_t moduleHandle) const;
+
+    audio_policy_dev_state_t getDeviceConnectionState(const sp<DeviceDescriptor> &devDesc) const;
+
+    status_t dump(int fd, const String8 &direction) const;
+
+private:
+    void refreshTypes();
+    audio_devices_t mDeviceTypes;
+};
+
+}; // namespace android
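
DeviceVector keeps an aggregated audio_devices_t bitmask (mDeviceTypes) alongside the sorted collection, refreshed as descriptors are added and removed, so types() and getDevicesFromType() can operate on a plain mask. A minimal standalone sketch of that bookkeeping, with simplified stand-in types rather than the AOSP classes:

    // Standalone sketch: keep an aggregated type mask in sync with the collection.
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <vector>

    using audio_devices_t = uint32_t;          // assumption: plain bitmask
    constexpr audio_devices_t SPEAKER = 0x2;
    constexpr audio_devices_t HEADSET = 0x4;

    struct Device {
        audio_devices_t type;
    };

    class DeviceList {
    public:
        void add(std::shared_ptr<Device> d) { mDevices.push_back(std::move(d)); refreshTypes(); }
        audio_devices_t types() const       { return mTypes; }

        // Return only the devices whose type is covered by the requested mask,
        // analogous to getDevicesFromType().
        std::vector<std::shared_ptr<Device>> fromType(audio_devices_t mask) const {
            std::vector<std::shared_ptr<Device>> out;
            for (const auto &d : mDevices) {
                if (d->type & mask) out.push_back(d);
            }
            return out;
        }

    private:
        void refreshTypes() {
            mTypes = 0;
            for (const auto &d : mDevices) mTypes |= d->type;
        }
        std::vector<std::shared_ptr<Device>> mDevices;
        audio_devices_t mTypes = 0;
    };

    int main() {
        DeviceList list;
        list.add(std::make_shared<Device>(Device{SPEAKER}));
        list.add(std::make_shared<Device>(Device{HEADSET}));
        printf("types mask: 0x%x, headsets: %zu\n",
               list.types(), list.fromType(HEADSET).size());
        return 0;
    }
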
diff --git a/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
new file mode 100644
index 0000000..c9783a1
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/EffectDescriptor.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <RoutingStrategy.h>
+#include <hardware/audio_effect.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+
+class EffectDescriptor : public RefBase
+{
+public:
+    status_t dump(int fd);
+
+    int mIo;                    // I/O handle the effect is attached to
+    routing_strategy mStrategy; // routing strategy the effect is associated with
+    int mSession;               // audio session the effect is on
+    effect_descriptor_t mDesc;  // effect descriptor
+    bool mEnabled;              // enabled state: when true, the effect's CPU load is accounted for
+};
+
+class EffectDescriptorCollection : public KeyedVector<int, sp<EffectDescriptor> >
+{
+public:
+    EffectDescriptorCollection();
+
+    status_t registerEffect(const effect_descriptor_t *desc, audio_io_handle_t io,
+                            uint32_t strategy, int session, int id);
+    status_t unregisterEffect(int id);
+    status_t setEffectEnabled(int id, bool enabled);
+    uint32_t getMaxEffectsCpuLoad() const;
+    uint32_t getMaxEffectsMemory() const;
+    bool isNonOffloadableEffectEnabled();
+
+    status_t dump(int fd);
+
+private:
+    status_t setEffectEnabled(const sp<EffectDescriptor> &effectDesc, bool enabled);
+
+    uint32_t mTotalEffectsCpuLoad; // current CPU load used by effects
+    uint32_t mTotalEffectsMemory;  // current memory used by effects
+
+    /**
+     * Maximum CPU load allocated to audio effects in 0.1 MIPS (ARMv5TE, 0 WS memory) units
+     */
+    static const uint32_t MAX_EFFECTS_CPU_LOAD = 1000;
+    /**
+     * Maximum memory allocated to audio effects in KB
+     */
+    static const uint32_t MAX_EFFECTS_MEMORY = 512;
+};
+
+}; // namespace android
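
The collection tracks running CPU and memory totals (mTotalEffectsCpuLoad, mTotalEffectsMemory) against the MAX_EFFECTS_CPU_LOAD and MAX_EFFECTS_MEMORY limits declared above. The registration logic itself is not shown in this header, so the sketch below is only an assumed illustration of that budget accounting, with illustrative effect figures:

    // Standalone sketch: admit effects only while the global budget holds.
    #include <cstdint>
    #include <cstdio>

    struct EffectInfo {
        uint32_t cpuLoad;  // in 0.1 MIPS units
        uint32_t memory;   // in KB
    };

    class EffectBudget {
    public:
        // Returns true if the effect fits the remaining budget and was accepted.
        bool registerEffect(const EffectInfo &e) {
            if (mTotalCpu + e.cpuLoad > kMaxCpuLoad ||
                mTotalMemory + e.memory > kMaxMemory) {
                return false;  // would exceed the global effect budget
            }
            mTotalCpu    += e.cpuLoad;
            mTotalMemory += e.memory;
            return true;
        }
        void unregisterEffect(const EffectInfo &e) {
            mTotalCpu    -= e.cpuLoad;
            mTotalMemory -= e.memory;
        }

    private:
        static const uint32_t kMaxCpuLoad = 1000;  // 0.1 MIPS units, as above
        static const uint32_t kMaxMemory  = 512;   // KB, as above
        uint32_t mTotalCpu = 0;
        uint32_t mTotalMemory = 0;
    };

    int main() {
        EffectBudget budget;
        printf("reverb accepted: %d\n", budget.registerEffect({300, 128}));
        printf("heavy effect accepted: %d\n", budget.registerEffect({900, 64}));  // over CPU budget
        return 0;
    }
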
diff --git a/services/audiopolicy/common/managerdefinitions/include/HwModule.h b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
new file mode 100644
index 0000000..92c3ea2
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/HwModule.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "DeviceDescriptor.h"
+#include <utils/RefBase.h>
+#include <utils/String8.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+
+namespace android {
+
+class IOProfile;
+
+class HwModule : public RefBase
+{
+public:
+    HwModule(const char *name);
+    ~HwModule();
+
+    status_t loadOutput(cnode *root);
+    status_t loadInput(cnode *root);
+    status_t loadDevice(cnode *root);
+
+    status_t addOutputProfile(String8 name, const audio_config_t *config,
+            audio_devices_t device, String8 address);
+    status_t removeOutputProfile(String8 name);
+    status_t addInputProfile(String8 name, const audio_config_t *config,
+            audio_devices_t device, String8 address);
+    status_t removeInputProfile(String8 name);
+
+    audio_module_handle_t getHandle() const { return mHandle; }
+
+    void dump(int fd);
+
+    const char *const        mName; // base name of the audio HW module (primary, a2dp ...)
+    uint32_t                 mHalVersion; // audio HAL API version
+    audio_module_handle_t    mHandle;
+    Vector < sp<IOProfile> > mOutputProfiles; // output profiles exposed by this module
+    Vector < sp<IOProfile> > mInputProfiles;  // input profiles exposed by this module
+    DeviceVector             mDeclaredDevices; // devices declared in audio_policy.conf
+};
+
+class HwModuleCollection : public Vector< sp<HwModule> >
+{
+public:
+    sp<HwModule> getModuleFromName(const char *name) const;
+
+    sp <HwModule> getModuleForDevice(audio_devices_t device) const;
+
+    sp<DeviceDescriptor>  getDeviceDescriptor(const audio_devices_t device,
+                                              const char *device_address,
+                                              const char *device_name) const;
+
+    status_t dump(int fd) const;
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
new file mode 100644
index 0000000..ab6fcc1
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "AudioPort.h"
+#include "DeviceDescriptor.h"
+#include <utils/String8.h>
+#include <system/audio.h>
+
+namespace android {
+
+class HwModule;
+
+// The IOProfile class describes the capabilities of an output or input stream.
+// It is currently assumed that all combinations of the listed parameters are supported.
+// It is used by the policy manager to determine whether an output or input is suitable
+// for a given use case, open/close it accordingly, and connect/disconnect audio tracks
+// to/from it.
+class IOProfile : public AudioPort
+{
+public:
+    IOProfile(const String8& name, audio_port_role_t role);
+    virtual ~IOProfile();
+
+    // This method is used for both output and input.
+    // If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
+    // For input, flags is interpreted as audio_input_flags_t.
+    // TODO: merge audio_output_flags_t and audio_input_flags_t.
+    bool isCompatibleProfile(audio_devices_t device,
+                             String8 address,
+                             uint32_t samplingRate,
+                             uint32_t *updatedSamplingRate,
+                             audio_format_t format,
+                             audio_format_t *updatedFormat,
+                             audio_channel_mask_t channelMask,
+                             audio_channel_mask_t *updatedChannelMask,
+                             uint32_t flags) const;
+
+    void dump(int fd);
+    void log();
+
+    DeviceVector  mSupportedDevices; // supported devices
+                                     // (devices this output can be routed to)
+};
+
+}; // namespace android
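
isCompatibleProfile() decides whether a profile can serve a request by checking the requested device, sample rate, format, and channel mask against the profile's supported sets (with the caveat noted above that every listed combination is assumed to work). A simplified standalone sketch of that kind of check, using plain integers in place of the audio_* types:

    // Standalone sketch: request is compatible only if every parameter is supported.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Profile {
        uint32_t supportedDevices;               // bitmask
        std::vector<uint32_t> samplingRates;
        std::vector<uint32_t> formats;
        std::vector<uint32_t> channelMasks;

        bool isCompatible(uint32_t device, uint32_t rate,
                          uint32_t format, uint32_t channelMask) const {
            auto has = [](const std::vector<uint32_t> &v, uint32_t x) {
                return std::find(v.begin(), v.end(), x) != v.end();
            };
            return (supportedDevices & device) == device &&
                   has(samplingRates, rate) &&
                   has(formats, format) &&
                   has(channelMasks, channelMask);
        }
    };

    int main() {
        // Illustrative values only, not real audio.h constants.
        Profile p{0x2 /*speaker*/, {44100, 48000}, {1 /*pcm16*/}, {3 /*stereo*/}};
        printf("48k pcm16 stereo on speaker: %d\n", p.isCompatible(0x2, 48000, 1, 3));
        printf("96k requested:               %d\n", p.isCompatible(0x2, 96000, 1, 3));
        return 0;
    }
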
diff --git a/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
new file mode 100644
index 0000000..b4feaf0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/SessionRoute.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <utils/KeyedVector.h>
+#include <utils/RefBase.h>
+#include <utils/Errors.h>
+
+namespace android {
+
+class DeviceDescriptor;
+
+class SessionRoute : public RefBase
+{
+public:
+    // For Input (Source) routes, use STREAM_TYPE_NA ("NA" = "not applicable") for the
+    // streamType argument
+    static const audio_stream_type_t STREAM_TYPE_NA = AUDIO_STREAM_DEFAULT;
+
+    // For Output (Sink) routes, use SOURCE_TYPE_NA ("NA" = "not applicable") for the
+    // source argument
+
+    static const audio_source_t SOURCE_TYPE_NA = AUDIO_SOURCE_DEFAULT;
+
+    SessionRoute(audio_session_t session,
+                 audio_stream_type_t streamType,
+                 audio_source_t source,
+                 sp<DeviceDescriptor> deviceDescriptor,
+                 uid_t uid)
+        : mUid(uid),
+          mSession(session),
+          mDeviceDescriptor(deviceDescriptor),
+          mRefCount(0),
+          mActivityCount(0),
+          mChanged(false),
+          mStreamType(streamType),
+          mSource(source)
+    {}
+
+    void log(const char* prefix);
+
+    bool isActive() {
+        return (mDeviceDescriptor != 0) && (mChanged || (mActivityCount > 0));
+    }
+
+    uid_t                       mUid;
+    audio_session_t             mSession;
+    sp<DeviceDescriptor>        mDeviceDescriptor;
+
+    // "reference" counting
+    int                         mRefCount;      // +/- on references
+    int                         mActivityCount; // +/- on start/stop
+    bool                        mChanged;
+    // for outputs
+    const audio_stream_type_t   mStreamType;
+    // for inputs
+    const audio_source_t        mSource;
+};
+
+class SessionRouteMap: public KeyedVector<audio_session_t, sp<SessionRoute> >
+{
+public:
+    // These constants identify whether the SessionRouteMap holds input routes or
+    // output routes.  An error will occur if an attempt is made to add a SessionRoute
+    // object with mStreamType == STREAM_TYPE_NA (i.e. an input SessionRoute) to a
+    // SessionRouteMap that is marked for output (i.e. mMapType == MAPTYPE_OUTPUT),
+    // and similarly for output SessionRoutes and input SessionRouteMaps.
+    typedef enum
+    {
+        MAPTYPE_INPUT = 0,
+        MAPTYPE_OUTPUT = 1
+    } session_route_map_type_t;
+
+    SessionRouteMap(session_route_map_type_t mapType) :
+        mMapType(mapType)
+    {}
+
+    bool hasRoute(audio_session_t session);
+
+    void removeRoute(audio_session_t session);
+
+    int incRouteActivity(audio_session_t session);
+    int decRouteActivity(audio_session_t session);
+    bool hasRouteChanged(audio_session_t session); // also clears the changed flag
+    void log(const char* caption);
+
+    // Specify an Output (Sink) route by passing SessionRoute::SOURCE_TYPE_NA in the
+    // source argument.
+    // Specify an Input (Source) route by passing SessionRoute::STREAM_TYPE_NA
+    // in the streamType argument.
+    void addRoute(audio_session_t session,
+                  audio_stream_type_t streamType,
+                  audio_source_t source,
+                  sp<DeviceDescriptor> deviceDescriptor,
+                  uid_t uid);
+
+private:
+    // Used to mark a SessionRouteMap as holding either input routes (mMapType == MAPTYPE_INPUT)
+    // or output routes (mMapType == MAPTYPE_OUTPUT)
+    const session_route_map_type_t mMapType;
+};
+
+}; // namespace android
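
SessionRoute::isActive() combines three pieces of state: the route must still reference a device, and it counts as active either right after a routing change (mChanged) or while at least one client is started (mActivityCount > 0). A standalone sketch of that bookkeeping with plain fields in place of the AOSP members:

    // Standalone sketch: the active/changed bookkeeping described above.
    #include <cstdio>

    struct RouteState {
        bool hasDevice     = false;
        bool changed       = false;
        int  activityCount = 0;   // +/- on start/stop

        bool isActive() const {
            return hasDevice && (changed || activityCount > 0);
        }
    };

    int main() {
        RouteState r;
        r.hasDevice = true;
        r.changed   = true;            // freshly (re)routed, nothing started yet
        printf("after routing change: %d\n", r.isActive());   // 1

        r.changed = false;
        r.activityCount++;             // a client started
        printf("while playing:        %d\n", r.isActive());   // 1

        r.activityCount--;             // client stopped
        printf("after stop:           %d\n", r.isActive());   // 0
        return 0;
    }
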
diff --git a/services/audiopolicy/AudioPolicyFactory.cpp b/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
similarity index 60%
copy from services/audiopolicy/AudioPolicyFactory.cpp
copy to services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
index 2ae7bc1..420e6d7 100644
--- a/services/audiopolicy/AudioPolicyFactory.cpp
+++ b/services/audiopolicy/common/managerdefinitions/include/SoundTriggerSession.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2014 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,19 +14,20 @@
  * limitations under the License.
  */
 
-#include "AudioPolicyManager.h"
+#pragma once
+
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
 
 namespace android {
 
-extern "C" AudioPolicyInterface* createAudioPolicyManager(
-        AudioPolicyClientInterface *clientInterface)
+class SoundTriggerSessionCollection : public DefaultKeyedVector<audio_session_t, audio_io_handle_t>
 {
-    return new AudioPolicyManager(clientInterface);
-}
+public:
+    status_t releaseSession(audio_session_t session);
 
-extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
-{
-    delete interface;
-}
+    status_t acquireSession(audio_session_t session, audio_io_handle_t ioHandle);
+};
 
 }; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
new file mode 100644
index 0000000..84db5ab
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/StreamDescriptor.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <Volume.h>
+#include <utils/KeyedVector.h>
+#include <utils/StrongPointer.h>
+#include <utils/SortedVector.h>
+#include <hardware/audio.h>
+
+namespace android {
+
+// stream descriptor used for volume control
+class StreamDescriptor
+{
+public:
+    StreamDescriptor();
+
+    int getVolumeIndex(audio_devices_t device) const;
+    bool canBeMuted() const { return mCanBeMuted; }
+    void clearCurrentVolumeIndex();
+    void addCurrentVolumeIndex(audio_devices_t device, int index);
+    int getVolumeIndexMin() const { return mIndexMin; }
+    int getVolumeIndexMax() const { return mIndexMax; }
+    void setVolumeIndexMin(int volIndexMin);
+    void setVolumeIndexMax(int volIndexMax);
+
+    void dump(int fd) const;
+
+    void setVolumeCurvePoint(Volume::device_category deviceCategory, const VolumeCurvePoint *point);
+    const VolumeCurvePoint *getVolumeCurvePoint(Volume::device_category deviceCategory) const
+    {
+        return mVolumeCurve[deviceCategory];
+    }
+
+private:
+    const VolumeCurvePoint *mVolumeCurve[Volume::DEVICE_CATEGORY_CNT];
+    KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+    int mIndexMin; /**< min volume index. */
+    int mIndexMax; /**< max volume index. */
+    bool mCanBeMuted; /**< true if the stream can be muted. */
+};
+
+/**
+ * stream descriptors collection for volume control
+ */
+class StreamDescriptorCollection : public DefaultKeyedVector<audio_stream_type_t, StreamDescriptor>
+{
+public:
+    StreamDescriptorCollection();
+
+    void clearCurrentVolumeIndex(audio_stream_type_t stream);
+    void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index);
+
+    bool canBeMuted(audio_stream_type_t stream);
+
+    status_t dump(int fd) const;
+
+    void setVolumeCurvePoint(audio_stream_type_t stream,
+                             Volume::device_category deviceCategory,
+                             const VolumeCurvePoint *point);
+
+    const VolumeCurvePoint *getVolumeCurvePoint(audio_stream_type_t stream,
+                                                Volume::device_category deviceCategory) const;
+
+    void setVolumeIndexMin(audio_stream_type_t stream, int volIndexMin);
+    void setVolumeIndexMax(audio_stream_type_t stream, int volIndexMax);
+
+};
+
+}; // namespace android
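
StreamDescriptor keeps one volume index per device key (mIndexCur), bounded by the [mIndexMin, mIndexMax] range. A simplified standalone sketch of that per-device bookkeeping; the fallback to a default key when a device has no explicit entry is an assumption made for illustration, not something this header states:

    // Standalone sketch: per-device volume indices with clamping and a default fallback.
    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <map>

    class StreamVolume {
    public:
        StreamVolume(int indexMin, int indexMax) : mMin(indexMin), mMax(indexMax) {}

        void setIndex(uint32_t device, int index) {
            mIndexCur[device] = std::max(mMin, std::min(index, mMax));
        }

        // Assumed behavior: fall back to a "default" key when the device has no entry.
        int getIndex(uint32_t device, uint32_t defaultKey = 0) const {
            auto it = mIndexCur.find(device);
            if (it == mIndexCur.end()) {
                it = mIndexCur.find(defaultKey);
            }
            return it != mIndexCur.end() ? it->second : mMin;
        }

    private:
        std::map<uint32_t, int> mIndexCur;
        int mMin;
        int mMax;
    };

    int main() {
        StreamVolume music(0, 15);
        music.setIndex(0 /*default key*/, 10);
        music.setIndex(0x4 /*headset*/, 5);
        printf("headset: %d, speaker (falls back): %d\n",
               music.getIndex(0x4), music.getIndex(0x2));
        return 0;
    }
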
diff --git a/services/audiopolicy/audio_policy_conf.h b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
similarity index 91%
rename from services/audiopolicy/audio_policy_conf.h
rename to services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
index 2535a67..a393e3b 100644
--- a/services/audiopolicy/audio_policy_conf.h
+++ b/services/audiopolicy/common/managerdefinitions/include/audio_policy_conf.h
@@ -14,9 +14,7 @@
  * limitations under the License.
  */
 
-
-#ifndef ANDROID_AUDIO_POLICY_CONF_H
-#define ANDROID_AUDIO_POLICY_CONF_H
+#pragma once
 
 
 /////////////////////////////////////////////////
@@ -53,9 +51,9 @@
                                     // "formats" in outputs descriptors indicating that supported
                                     // values should be queried after opening the output.
 
-#define DEVICES_TAG "devices"
-#define DEVICE_TYPE "type"
-#define DEVICE_ADDRESS "address"
+#define APM_DEVICES_TAG "devices"
+#define APM_DEVICE_TYPE "type"
+#define APM_DEVICE_ADDRESS "address"
 
 #define MIXERS_TAG "mixers"
 #define MIXER_TYPE "type"
@@ -71,7 +69,3 @@
 #define GAIN_STEP_VALUE "step_value_mB"
 #define GAIN_MIN_RAMP_MS "min_ramp_ms"
 #define GAIN_MAX_RAMP_MS "max_ramp_ms"
-
-
-
-#endif  // ANDROID_AUDIO_POLICY_CONF_H
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
new file mode 100644
index 0000000..fc7b0cc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioGain.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioGain"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "AudioGain.h"
+#include "StreamDescriptor.h"
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+#include <math.h>
+
+namespace android {
+
+AudioGain::AudioGain(int index, bool useInChannelMask)
+{
+    mIndex = index;
+    mUseInChannelMask = useInChannelMask;
+    memset(&mGain, 0, sizeof(struct audio_gain));
+}
+
+void AudioGain::getDefaultConfig(struct audio_gain_config *config)
+{
+    config->index = mIndex;
+    config->mode = mGain.mode;
+    config->channel_mask = mGain.channel_mask;
+    if ((mGain.mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        config->values[0] = mGain.default_value;
+    } else {
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(mGain.channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(mGain.channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            config->values[i] = mGain.default_value;
+        }
+    }
+    if ((mGain.mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        config->ramp_duration_ms = mGain.min_ramp_ms;
+    }
+}
+
+status_t AudioGain::checkConfig(const struct audio_gain_config *config)
+{
+    if ((config->mode & ~mGain.mode) != 0) {
+        return BAD_VALUE;
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_JOINT) == AUDIO_GAIN_MODE_JOINT) {
+        if ((config->values[0] < mGain.min_value) ||
+                    (config->values[0] > mGain.max_value)) {
+            return BAD_VALUE;
+        }
+    } else {
+        if ((config->channel_mask & ~mGain.channel_mask) != 0) {
+            return BAD_VALUE;
+        }
+        uint32_t numValues;
+        if (mUseInChannelMask) {
+            numValues = audio_channel_count_from_in_mask(config->channel_mask);
+        } else {
+            numValues = audio_channel_count_from_out_mask(config->channel_mask);
+        }
+        for (size_t i = 0; i < numValues; i++) {
+            if ((config->values[i] < mGain.min_value) ||
+                    (config->values[i] > mGain.max_value)) {
+                return BAD_VALUE;
+            }
+        }
+    }
+    if ((config->mode & AUDIO_GAIN_MODE_RAMP) == AUDIO_GAIN_MODE_RAMP) {
+        if ((config->ramp_duration_ms < mGain.min_ramp_ms) ||
+                    (config->ramp_duration_ms > mGain.max_ramp_ms)) {
+            return BAD_VALUE;
+        }
+    }
+    return NO_ERROR;
+}
+
+void AudioGain::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sGain %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- mode: %08x\n", spaces, "", mGain.mode);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- channel_mask: %08x\n", spaces, "", mGain.channel_mask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- min_value: %d mB\n", spaces, "", mGain.min_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- max_value: %d mB\n", spaces, "", mGain.max_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- default_value: %d mB\n", spaces, "", mGain.default_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- step_value: %d mB\n", spaces, "", mGain.step_value);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- min_ramp_ms: %d ms\n", spaces, "", mGain.min_ramp_ms);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- max_ramp_ms: %d ms\n", spaces, "", mGain.max_ramp_ms);
+    result.append(buffer);
+
+    write(fd, result.string(), result.size());
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
new file mode 100644
index 0000000..937160b
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -0,0 +1,195 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioInputDescriptor"
+//#define LOG_NDEBUG 0
+
+#include "AudioInputDescriptor.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include "HwModule.h"
+#include <media/AudioPolicy.h>
+#include <policy.h>
+
+namespace android {
+
+AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
+    : mIoHandle(0),
+      mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0), mRefCount(0),
+      mInputSource(AUDIO_SOURCE_DEFAULT), mProfile(profile), mIsSoundTrigger(false), mId(0)
+{
+    if (profile != NULL) {
+        mSamplingRate = profile->pickSamplingRate();
+        mFormat = profile->pickFormat();
+        mChannelMask = profile->pickChannelMask();
+        if (profile->mGains.size() > 0) {
+            profile->mGains[0]->getDefaultConfig(&mGain);
+        }
+    }
+}
+
+void AudioInputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
+{
+    mId = AudioPort::getNextUniqueId();
+    mIoHandle = ioHandle;
+}
+
+audio_module_handle_t AudioInputDescriptor::getModuleHandle() const
+{
+    if (mProfile == 0) {
+        return 0;
+    }
+    return mProfile->getModuleHandle();
+}
+
+audio_port_handle_t AudioInputDescriptor::getId() const
+{
+    return mId;
+}
+
+void AudioInputDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                             const struct audio_port_config *srcConfig) const
+{
+    ALOG_ASSERT(mProfile != 0,
+                "toAudioPortConfig() called on input with null profile %d", mIoHandle);
+    dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
+                            AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
+    if (srcConfig != NULL) {
+        dstConfig->config_mask |= srcConfig->config_mask;
+    }
+
+    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->id = mId;
+    dstConfig->role = AUDIO_PORT_ROLE_SINK;
+    dstConfig->type = AUDIO_PORT_TYPE_MIX;
+    dstConfig->ext.mix.hw_module = getModuleHandle();
+    dstConfig->ext.mix.handle = mIoHandle;
+    dstConfig->ext.mix.usecase.source = mInputSource;
+}
+
+void AudioInputDescriptor::toAudioPort(struct audio_port *port) const
+{
+    ALOG_ASSERT(mProfile != 0, "toAudioPort() called on input with null profile %d", mIoHandle);
+
+    mProfile->toAudioPort(port);
+    port->id = mId;
+    toAudioPortConfig(&port->active_config);
+    port->ext.mix.hw_module = getModuleHandle();
+    port->ext.mix.handle = mIoHandle;
+    port->ext.mix.latency_class = AUDIO_LATENCY_NORMAL;
+}
+
+status_t AudioInputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " ID: %d\n", getId());
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Format: %d\n", mFormat);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Devices %08x\n", mDevice);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Ref Count %d\n", mRefCount);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Open Ref Count %d\n", mOpenRefCount);
+    result.append(buffer);
+
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+bool AudioInputCollection::isSourceActive(audio_source_t source) const
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioInputDescriptor>  inputDescriptor = valueAt(i);
+        if (inputDescriptor->mRefCount == 0) {
+            continue;
+        }
+        if (inputDescriptor->mInputSource == (int)source) {
+            return true;
+        }
+    }
+    return false;
+}
+
+sp<AudioInputDescriptor> AudioInputCollection::getInputFromId(audio_port_handle_t id) const
+{
+    sp<AudioInputDescriptor> inputDesc = NULL;
+    for (size_t i = 0; i < size(); i++) {
+        inputDesc = valueAt(i);
+        if (inputDesc->getId() == id) {
+            break;
+        }
+    }
+    return inputDesc;
+}
+
+uint32_t AudioInputCollection::activeInputsCount() const
+{
+    uint32_t count = 0;
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioInputDescriptor>  desc = valueAt(i);
+        if (desc->mRefCount > 0) {
+            count++;
+        }
+    }
+    return count;
+}
+
+audio_io_handle_t AudioInputCollection::getActiveInput(bool ignoreVirtualInputs)
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<AudioInputDescriptor>  input_descriptor = valueAt(i);
+        if ((input_descriptor->mRefCount > 0)
+                && (!ignoreVirtualInputs || !is_virtual_input_device(input_descriptor->mDevice))) {
+            return keyAt(i);
+        }
+    }
+    return 0;
+}
+
+audio_devices_t AudioInputCollection::getSupportedDevices(audio_io_handle_t handle) const
+{
+    sp<AudioInputDescriptor> inputDesc = valueFor(handle);
+    audio_devices_t devices = inputDesc->mProfile->mSupportedDevices.types();
+    return devices;
+}
+
+status_t AudioInputCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nInputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Input %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
+}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
new file mode 100644
index 0000000..a278375
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioOutputDescriptor"
+//#define LOG_NDEBUG 0
+
+#include <AudioPolicyInterface.h>
+#include "AudioOutputDescriptor.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include "Volume.h"
+#include "HwModule.h"
+#include <media/AudioPolicy.h>
+
+// A device mask for all audio output devices that are considered "remote" when evaluating
+// active output devices in isStreamActiveRemotely()
+#define APM_AUDIO_OUT_DEVICE_REMOTE_ALL  AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+
+namespace android {
+
+AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
+                                             AudioPolicyClientInterface *clientInterface)
+    : mPort(port), mDevice(AUDIO_DEVICE_NONE),
+      mPatchHandle(0), mClientInterface(clientInterface), mId(0)
+{
+    // clear usage count for all stream types
+    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+        mRefCount[i] = 0;
+        mCurVolume[i] = -1.0;
+        mMuteCount[i] = 0;
+        mStopTime[i] = 0;
+    }
+    for (int i = 0; i < NUM_STRATEGIES; i++) {
+        mStrategyMutedByDevice[i] = false;
+    }
+    if (port != NULL) {
+        mSamplingRate = port->pickSamplingRate();
+        mFormat = port->pickFormat();
+        mChannelMask = port->pickChannelMask();
+        if (port->mGains.size() > 0) {
+            port->mGains[0]->getDefaultConfig(&mGain);
+        }
+    }
+}
+
+audio_module_handle_t AudioOutputDescriptor::getModuleHandle() const
+{
+    return mPort->getModuleHandle();
+}
+
+audio_port_handle_t AudioOutputDescriptor::getId() const
+{
+    return mId;
+}
+
+audio_devices_t AudioOutputDescriptor::device() const
+{
+    return mDevice;
+}
+
+audio_devices_t AudioOutputDescriptor::supportedDevices()
+{
+    return mDevice;
+}
+
+bool AudioOutputDescriptor::sharesHwModuleWith(
+        const sp<AudioOutputDescriptor> outputDesc)
+{
+    if (outputDesc->isDuplicated()) {
+        return sharesHwModuleWith(outputDesc->subOutput1()) ||
+                    sharesHwModuleWith(outputDesc->subOutput2());
+    } else {
+        return (getModuleHandle() == outputDesc->getModuleHandle());
+    }
+}
+
+void AudioOutputDescriptor::changeRefCount(audio_stream_type_t stream,
+                                                                   int delta)
+{
+    if ((delta + (int)mRefCount[stream]) < 0) {
+        ALOGW("changeRefCount() invalid delta %d for stream %d, refCount %d",
+              delta, stream, mRefCount[stream]);
+        mRefCount[stream] = 0;
+        return;
+    }
+    mRefCount[stream] += delta;
+    ALOGV("changeRefCount() stream %d, count %d", stream, mRefCount[stream]);
+}
+
+bool AudioOutputDescriptor::isActive(uint32_t inPastMs) const
+{
+    nsecs_t sysTime = 0;
+    if (inPastMs != 0) {
+        sysTime = systemTime();
+    }
+    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
+        if (i == AUDIO_STREAM_PATCH) {
+            continue;
+        }
+        if (isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool AudioOutputDescriptor::isStreamActive(audio_stream_type_t stream,
+                                           uint32_t inPastMs,
+                                           nsecs_t sysTime) const
+{
+    if (mRefCount[stream] != 0) {
+        return true;
+    }
+    if (inPastMs == 0) {
+        return false;
+    }
+    if (sysTime == 0) {
+        sysTime = systemTime();
+    }
+    if (ns2ms(sysTime - mStopTime[stream]) < inPastMs) {
+        return true;
+    }
+    return false;
+}
+
+
+bool AudioOutputDescriptor::isFixedVolume(audio_devices_t device __unused)
+{
+    return false;
+}
+
+bool AudioOutputDescriptor::setVolume(float volume,
+                                      audio_stream_type_t stream,
+                                      audio_devices_t device __unused,
+                                      uint32_t delayMs,
+                                      bool force)
+{
+    // We actually change the volume if:
+    // - the float value returned by computeVolume() changed
+    // - the force flag is set
+    if (volume != mCurVolume[stream] || force) {
+        ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs);
+        mCurVolume[stream] = volume;
+        return true;
+    }
+    return false;
+}
+
+void AudioOutputDescriptor::toAudioPortConfig(
+                                                 struct audio_port_config *dstConfig,
+                                                 const struct audio_port_config *srcConfig) const
+{
+    dstConfig->config_mask = AUDIO_PORT_CONFIG_SAMPLE_RATE|AUDIO_PORT_CONFIG_CHANNEL_MASK|
+                            AUDIO_PORT_CONFIG_FORMAT|AUDIO_PORT_CONFIG_GAIN;
+    if (srcConfig != NULL) {
+        dstConfig->config_mask |= srcConfig->config_mask;
+    }
+    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->id = mId;
+    dstConfig->role = AUDIO_PORT_ROLE_SOURCE;
+    dstConfig->type = AUDIO_PORT_TYPE_MIX;
+    dstConfig->ext.mix.hw_module = getModuleHandle();
+    dstConfig->ext.mix.usecase.stream = AUDIO_STREAM_DEFAULT;
+}
+
+void AudioOutputDescriptor::toAudioPort(
+                                                    struct audio_port *port) const
+{
+    mPort->toAudioPort(port);
+    port->id = mId;
+    port->ext.mix.hw_module = getModuleHandle();
+}
+
+status_t AudioOutputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " ID: %d\n", mId);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Sampling rate: %d\n", mSamplingRate);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Format: %08x\n", mFormat);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Channels: %08x\n", mChannelMask);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Devices %08x\n", device());
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Stream volume refCount muteCount\n");
+    result.append(buffer);
+    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
+        snprintf(buffer, SIZE, " %02d     %.03f     %02d       %02d\n",
+                 i, mCurVolume[i], mRefCount[i], mMuteCount[i]);
+        result.append(buffer);
+    }
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+void AudioOutputDescriptor::log(const char* indent)
+{
+    ALOGI("%sID: %d,0x%X, [rt:%d fmt:0x%X ch:0x%X]",
+          indent, mId, mId, mSamplingRate, mFormat, mChannelMask);
+}
+
+// SwAudioOutputDescriptor implementation
+SwAudioOutputDescriptor::SwAudioOutputDescriptor(
+        const sp<IOProfile>& profile, AudioPolicyClientInterface *clientInterface)
+    : AudioOutputDescriptor(profile, clientInterface),
+    mProfile(profile), mIoHandle(0), mLatency(0),
+    mFlags((audio_output_flags_t)0), mPolicyMix(NULL),
+    mOutput1(0), mOutput2(0), mDirectOpenCount(0), mGlobalRefCount(0)
+{
+    if (profile != NULL) {
+        mFlags = (audio_output_flags_t)profile->mFlags;
+    }
+}
+
+void SwAudioOutputDescriptor::setIoHandle(audio_io_handle_t ioHandle)
+{
+    mId = AudioPort::getNextUniqueId();
+    mIoHandle = ioHandle;
+}
+
+
+status_t SwAudioOutputDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " Latency: %d\n", mLatency);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Flags %08x\n", mFlags);
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+
+    AudioOutputDescriptor::dump(fd);
+
+    return NO_ERROR;
+}
+
+audio_devices_t SwAudioOutputDescriptor::device() const
+{
+    if (isDuplicated()) {
+        return (audio_devices_t)(mOutput1->mDevice | mOutput2->mDevice);
+    } else {
+        return mDevice;
+    }
+}
+
+bool SwAudioOutputDescriptor::sharesHwModuleWith(
+        const sp<AudioOutputDescriptor> outputDesc)
+{
+    if (isDuplicated()) {
+        return mOutput1->sharesHwModuleWith(outputDesc) || mOutput2->sharesHwModuleWith(outputDesc);
+    } else if (outputDesc->isDuplicated()){
+        return sharesHwModuleWith(outputDesc->subOutput1()) ||
+                    sharesHwModuleWith(outputDesc->subOutput2());
+    } else {
+        return AudioOutputDescriptor::sharesHwModuleWith(outputDesc);
+    }
+}
+
+audio_devices_t SwAudioOutputDescriptor::supportedDevices()
+{
+    if (isDuplicated()) {
+        return (audio_devices_t)(mOutput1->supportedDevices() | mOutput2->supportedDevices());
+    } else {
+        return mProfile->mSupportedDevices.types();
+    }
+}
+
+uint32_t SwAudioOutputDescriptor::latency()
+{
+    if (isDuplicated()) {
+        return (mOutput1->mLatency > mOutput2->mLatency) ? mOutput1->mLatency : mOutput2->mLatency;
+    } else {
+        return mLatency;
+    }
+}
+
+void SwAudioOutputDescriptor::changeRefCount(audio_stream_type_t stream,
+                                                                   int delta)
+{
+    // forward usage count change to attached outputs
+    if (isDuplicated()) {
+        mOutput1->changeRefCount(stream, delta);
+        mOutput2->changeRefCount(stream, delta);
+    }
+    AudioOutputDescriptor::changeRefCount(stream, delta);
+
+    // handle stream-independent ref count
+    uint32_t oldGlobalRefCount = mGlobalRefCount;
+    if ((delta + (int)mGlobalRefCount) < 0) {
+        ALOGW("changeRefCount() invalid delta %d globalRefCount %d", delta, mGlobalRefCount);
+        mGlobalRefCount = 0;
+    } else {
+        mGlobalRefCount += delta;
+    }
+    if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
+                    MIX_STATE_MIXING);
+        }
+
+    } else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
+        if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+        {
+            mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
+                    MIX_STATE_IDLE);
+        }
+    }
+}
+
+
+bool SwAudioOutputDescriptor::isFixedVolume(audio_devices_t device)
+{
+    // unit gain if rerouting to external policy
+    if (device == AUDIO_DEVICE_OUT_REMOTE_SUBMIX) {
+        if (mPolicyMix != NULL) {
+            ALOGV("max gain when rerouting for output=%d", mIoHandle);
+            return true;
+        }
+    }
+    return false;
+}
+
+void SwAudioOutputDescriptor::toAudioPortConfig(
+                                                 struct audio_port_config *dstConfig,
+                                                 const struct audio_port_config *srcConfig) const
+{
+
+    ALOG_ASSERT(!isDuplicated(), "toAudioPortConfig() called on duplicated output %d", mIoHandle);
+    AudioOutputDescriptor::toAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->ext.mix.handle = mIoHandle;
+}
+
+void SwAudioOutputDescriptor::toAudioPort(
+                                                    struct audio_port *port) const
+{
+    ALOG_ASSERT(!isDuplicated(), "toAudioPort() called on duplicated output %d", mIoHandle);
+
+    AudioOutputDescriptor::toAudioPort(port);
+
+    toAudioPortConfig(&port->active_config);
+    port->ext.mix.handle = mIoHandle;
+    port->ext.mix.latency_class =
+            mFlags & AUDIO_OUTPUT_FLAG_FAST ? AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL;
+}
+
+bool SwAudioOutputDescriptor::setVolume(float volume,
+                                        audio_stream_type_t stream,
+                                        audio_devices_t device,
+                                        uint32_t delayMs,
+                                        bool force)
+{
+    bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
+
+    if (changed) {
+        // Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is
+        // enabled
+        float volume = Volume::DbToAmpl(mCurVolume[stream]);
+        if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+            mClientInterface->setStreamVolume(
+                    AUDIO_STREAM_VOICE_CALL, volume, mIoHandle, delayMs);
+        }
+        mClientInterface->setStreamVolume(stream, volume, mIoHandle, delayMs);
+    }
+    return changed;
+}
+
+// SwAudioOutputCollection implementation
+
+bool SwAudioOutputCollection::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    nsecs_t sysTime = systemTime();
+    for (size_t i = 0; i < this->size(); i++) {
+        const sp<SwAudioOutputDescriptor> outputDesc = this->valueAt(i);
+        if (outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+bool SwAudioOutputCollection::isStreamActiveRemotely(audio_stream_type_t stream,
+                                                   uint32_t inPastMs) const
+{
+    nsecs_t sysTime = systemTime();
+    for (size_t i = 0; i < size(); i++) {
+        const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+        if (((outputDesc->device() & APM_AUDIO_OUT_DEVICE_REMOTE_ALL) != 0) &&
+                outputDesc->isStreamActive(stream, inPastMs, sysTime)) {
+            // do not consider rerouting (when the output is going to a dynamic policy)
+            // as "remote playback"
+            if (outputDesc->mPolicyMix == NULL) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+audio_io_handle_t SwAudioOutputCollection::getA2dpOutput() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+        if (!outputDesc->isDuplicated() && outputDesc->device() & AUDIO_DEVICE_OUT_ALL_A2DP) {
+            return this->keyAt(i);
+        }
+    }
+    return 0;
+}
+
+sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getPrimaryOutput() const
+{
+    for (size_t i = 0; i < size(); i++) {
+        const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+        if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+            return outputDesc;
+        }
+    }
+    return NULL;
+}
+
+sp<SwAudioOutputDescriptor> SwAudioOutputCollection::getOutputFromId(audio_port_handle_t id) const
+{
+    sp<SwAudioOutputDescriptor> outputDesc = NULL;
+    for (size_t i = 0; i < size(); i++) {
+        outputDesc = valueAt(i);
+        if (outputDesc->getId() == id) {
+            break;
+        }
+    }
+    return outputDesc;
+}
+
+bool SwAudioOutputCollection::isAnyOutputActive(audio_stream_type_t streamToIgnore) const
+{
+    for (size_t s = 0 ; s < AUDIO_STREAM_CNT ; s++) {
+        if (s == (size_t) streamToIgnore) {
+            continue;
+        }
+        for (size_t i = 0; i < size(); i++) {
+            const sp<SwAudioOutputDescriptor> outputDesc = valueAt(i);
+            if (outputDesc->mRefCount[s] != 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+audio_devices_t SwAudioOutputCollection::getSupportedDevices(audio_io_handle_t handle) const
+{
+    sp<SwAudioOutputDescriptor> outputDesc = valueFor(handle);
+    audio_devices_t devices = outputDesc->mProfile->mSupportedDevices.types();
+    return devices;
+}
+
+
+status_t SwAudioOutputCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nOutputs dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Output %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
+}; //namespace android
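
The collection added above is what the policy manager queries to answer routing questions. A minimal usage sketch follows; the mOutputs member name and the 5000 ms window are illustrative and not taken from this patch:

    // Has music been active on any open output within the last 5 seconds?
    bool musicActive = mOutputs.isStreamActive(AUDIO_STREAM_MUSIC, 5000);
    // Descriptor of the output opened with AUDIO_OUTPUT_FLAG_PRIMARY, or NULL if none.
    sp<SwAudioOutputDescriptor> primary = mOutputs.getPrimaryOutput();
    // I/O handle of the first non-duplicated A2DP output, or 0 when no A2DP output is open.
    audio_io_handle_t a2dpOutput = mOutputs.getA2dpOutput();
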
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
new file mode 100644
index 0000000..a06d867
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPatch"
+//#define LOG_NDEBUG 0
+
+#include "AudioPatch.h"
+#include "AudioGain.h"
+#include "ConfigParsingUtils.h"
+#include <cutils/log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+int32_t volatile AudioPatch::mNextUniqueId = 1;
+
+AudioPatch::AudioPatch(const struct audio_patch *patch, uid_t uid) :
+    mHandle(static_cast<audio_patch_handle_t>(android_atomic_inc(&mNextUniqueId))),
+    mPatch(*patch),
+    mUid(uid),
+    mAfPatchHandle(0)
+{
+}
+
+status_t AudioPatch::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sAudio patch %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- handle: %2d\n", spaces, "", mHandle);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- audio flinger handle: %2d\n", spaces, "", mAfPatchHandle);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- owner uid: %2d\n", spaces, "", mUid);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "%*s- %d sources:\n", spaces, "", mPatch.num_sources);
+    result.append(buffer);
+    for (size_t i = 0; i < mPatch.num_sources; i++) {
+        if (mPatch.sources[i].type == AUDIO_PORT_TYPE_DEVICE) {
+            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
+                     mPatch.sources[i].id, ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable,
+                                                        ARRAY_SIZE(sDeviceTypeToEnumTable),
+                                                        mPatch.sources[i].ext.device.type));
+        } else {
+            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
+                     mPatch.sources[i].id, mPatch.sources[i].ext.mix.handle);
+        }
+        result.append(buffer);
+    }
+    snprintf(buffer, SIZE, "%*s- %d sinks:\n", spaces, "", mPatch.num_sinks);
+    result.append(buffer);
+    for (size_t i = 0; i < mPatch.num_sinks; i++) {
+        if (mPatch.sinks[i].type == AUDIO_PORT_TYPE_DEVICE) {
+            snprintf(buffer, SIZE, "%*s- Device ID %d %s\n", spaces + 2, "",
+                     mPatch.sinks[i].id, ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable,
+                                                        ARRAY_SIZE(sDeviceTypeToEnumTable),
+                                                        mPatch.sinks[i].ext.device.type));
+        } else {
+            snprintf(buffer, SIZE, "%*s- Mix ID %d I/O handle %d\n", spaces + 2, "",
+                     mPatch.sinks[i].id, mPatch.sinks[i].ext.mix.handle);
+        }
+        result.append(buffer);
+    }
+
+    write(fd, result.string(), result.size());
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::addAudioPatch(audio_patch_handle_t handle,
+                                             const sp<AudioPatch>& patch)
+{
+    ssize_t index = indexOfKey(handle);
+
+    if (index >= 0) {
+        ALOGW("addAudioPatch() patch %d already in", handle);
+        return ALREADY_EXISTS;
+    }
+    add(handle, patch);
+    ALOGV("addAudioPatch() handle %d af handle %d num_sources %d num_sinks %d source handle %d"
+            " sink handle %d",
+          handle, patch->mAfPatchHandle, patch->mPatch.num_sources, patch->mPatch.num_sinks,
+          patch->mPatch.sources[0].id, patch->mPatch.sinks[0].id);
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::removeAudioPatch(audio_patch_handle_t handle)
+{
+    ssize_t index = indexOfKey(handle);
+
+    if (index < 0) {
+        ALOGW("removeAudioPatch() patch %d not in", handle);
+        return ALREADY_EXISTS;
+    }
+    ALOGV("removeAudioPatch() handle %d af handle %d", handle, valueAt(index)->mAfPatchHandle);
+    removeItemsAt(index);
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::listAudioPatches(unsigned int *num_patches,
+                                                struct audio_patch *patches) const
+{
+    if (num_patches == NULL || (*num_patches != 0 && patches == NULL)) {
+        return BAD_VALUE;
+    }
+    ALOGV("listAudioPatches() num_patches %d patches %p available patches %zu",
+          *num_patches, patches, size());
+    if (patches == NULL) {
+        *num_patches = 0;
+    }
+
+    size_t patchesWritten = 0;
+    size_t patchesMax = *num_patches;
+    for (size_t i = 0; i  < size() && patchesWritten < patchesMax; i++) {
+        const sp<AudioPatch>  patch = valueAt(i);
+        patches[patchesWritten] = patch->mPatch;
+        patches[patchesWritten++].id = patch->mHandle;
+        ALOGV("listAudioPatches() patch %zu num_sources %d num_sinks %d",
+              i, patch->mPatch.num_sources, patch->mPatch.num_sinks);
+    }
+    *num_patches = size();
+
+    ALOGV("listAudioPatches() got %zu patches needed %d", patchesWritten, *num_patches);
+    return NO_ERROR;
+}
+
+status_t AudioPatchCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    snprintf(buffer, SIZE, "\nAudio Patches:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        valueAt(i)->dump(fd, 2, i);
+    }
+    return NO_ERROR;
+}
+
+}; // namespace android
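
listAudioPatches() supports the usual two-call pattern: a first call with a NULL array only reports how many patches exist, and a second call fills a caller-allocated array, writing each patch handle into the id field. A hedged in-tree sketch; the patchCollection variable name is illustrative:

    unsigned int count = 0;
    patchCollection.listAudioPatches(&count, NULL);        // first pass: query the patch count
    struct audio_patch *buf = new struct audio_patch[count];
    patchCollection.listAudioPatches(&count, buf);         // second pass: fill the array
    // use buf[0 .. count-1]; each entry's id carries the patch handle
    delete[] buf;
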
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
new file mode 100644
index 0000000..6f1998c
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -0,0 +1,193 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyMix"
+//#define LOG_NDEBUG 0
+
+#include "AudioPolicyMix.h"
+#include "HwModule.h"
+#include "AudioPort.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include <AudioOutputDescriptor.h>
+
+namespace android {
+
+void AudioPolicyMix::setOutput(sp<SwAudioOutputDescriptor> &output)
+{
+    mOutput = output;
+}
+
+const sp<SwAudioOutputDescriptor> &AudioPolicyMix::getOutput() const
+{
+    return mOutput;
+}
+
+void AudioPolicyMix::clearOutput()
+{
+    mOutput.clear();
+}
+
+void AudioPolicyMix::setMix(AudioMix &mix)
+{
+    mMix = mix;
+}
+
+android::AudioMix *AudioPolicyMix::getMix()
+{
+    return &mMix;
+}
+
+status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix)
+{
+    ssize_t index = indexOfKey(address);
+    if (index >= 0) {
+        ALOGE("registerPolicyMixes(): mix for address %s already registered", address.string());
+        return BAD_VALUE;
+    }
+    sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
+    policyMix->setMix(mix);
+    add(address, policyMix);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyMixCollection::unregisterMix(String8 address)
+{
+    ssize_t index = indexOfKey(address);
+    if (index < 0) {
+        ALOGE("unregisterPolicyMixes(): mix for address %s not registered", address.string());
+        return BAD_VALUE;
+    }
+
+    removeItemsAt(index);
+    return NO_ERROR;
+}
+
+status_t AudioPolicyMixCollection::getAudioPolicyMix(String8 address,
+                                                     sp<AudioPolicyMix> &policyMix) const
+{
+    ssize_t index = indexOfKey(address);
+    if (index < 0) {
+        ALOGE("getAudioPolicyMix(): mix for address %s not registered", address.string());
+        return BAD_VALUE;
+    }
+    policyMix = valueAt(index);
+    return NO_ERROR;
+}
+
+void AudioPolicyMixCollection::closeOutput(sp<SwAudioOutputDescriptor> &desc)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioPolicyMix> policyMix = valueAt(i);
+        if (policyMix->getOutput() == desc) {
+            policyMix->clearOutput();
+        }
+    }
+}
+
+status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes,
+                                                    sp<SwAudioOutputDescriptor> &desc)
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<AudioPolicyMix> policyMix = valueAt(i);
+        AudioMix *mix = policyMix->getMix();
+
+        if (mix->mMixType == MIX_TYPE_PLAYERS) {
+            for (size_t j = 0; j < mix->mCriteria.size(); j++) {
+                if ((RULE_MATCH_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule &&
+                     mix->mCriteria[j].mAttr.mUsage == attributes.usage) ||
+                        (RULE_EXCLUDE_ATTRIBUTE_USAGE == mix->mCriteria[j].mRule &&
+                         mix->mCriteria[j].mAttr.mUsage != attributes.usage)) {
+                    desc = policyMix->getOutput();
+                    break;
+                }
+                if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+                        strncmp(attributes.tags + strlen("addr="),
+                                mix->mRegistrationId.string(),
+                                AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
+                    desc = policyMix->getOutput();
+                    break;
+                }
+            }
+        } else if (mix->mMixType == MIX_TYPE_RECORDERS) {
+            if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
+                    strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
+                    strncmp(attributes.tags + strlen("addr="),
+                            mix->mRegistrationId.string(),
+                            AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
+                desc = policyMix->getOutput();
+            }
+        }
+        if (desc != 0) {
+            desc->mPolicyMix = mix;
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+audio_devices_t AudioPolicyMixCollection::getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                                        audio_devices_t availDevices,
+                                                                        AudioMix **policyMix)
+{
+    for (size_t i = 0; i < size(); i++) {
+        AudioMix *mix = valueAt(i)->getMix();
+
+        if (mix->mMixType != MIX_TYPE_RECORDERS) {
+            continue;
+        }
+        for (size_t j = 0; j < mix->mCriteria.size(); j++) {
+            if ((RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
+                    mix->mCriteria[j].mAttr.mSource == inputSource) ||
+               (RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET == mix->mCriteria[j].mRule &&
+                    mix->mCriteria[j].mAttr.mSource != inputSource)) {
+                if (availDevices & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+                    if (policyMix != NULL) {
+                        *policyMix = mix;
+                    }
+                    return AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+                }
+                break;
+            }
+        }
+    }
+    return AUDIO_DEVICE_NONE;
+}
+
+status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix)
+{
+    if (strncmp(attr.tags, "addr=", strlen("addr=")) != 0) {
+        return BAD_VALUE;
+    }
+    String8 address(attr.tags + strlen("addr="));
+
+    ssize_t index = indexOfKey(address);
+    if (index < 0) {
+        ALOGW("getInputMixForAttr() no policy for address %s", address.string());
+        return BAD_VALUE;
+    }
+    sp<AudioPolicyMix> audioPolicyMix = valueAt(index);
+    AudioMix *mix = audioPolicyMix->getMix();
+
+    if (mix->mMixType != MIX_TYPE_PLAYERS) {
+        ALOGW("getInputMixForAttr() bad policy mix type for address %s", address.string());
+        return BAD_VALUE;
+    }
+    *policyMix = mix;
+    return NO_ERROR;
+}
+
+}; //namespace android
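
Both getOutputForAttr() and getInputMixForAttr() key off an "addr=" prefix in audio_attributes_t::tags, matched against the address the mix was registered under. A caller-side sketch; the mPolicyMixes member and the "bus0_media_out" address are illustrative only:

    audio_attributes_t attr;
    memset(&attr, 0, sizeof(attr));
    attr.usage = AUDIO_USAGE_MEDIA;
    strncpy(attr.tags, "addr=bus0_media_out", AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1);
    sp<SwAudioOutputDescriptor> desc;
    if (mPolicyMixes.getOutputForAttr(attr, desc) == NO_ERROR) {
        // desc is the output currently attached to the mix registered for "bus0_media_out"
    }
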
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
new file mode 100644
index 0000000..4e24f19
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -0,0 +1,929 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPort"
+//#define LOG_NDEBUG 0
+#include <media/AudioResamplerPublic.h>
+#include "AudioPort.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+#include "ConfigParsingUtils.h"
+#include "audio_policy_conf.h"
+#include <policy.h>
+
+namespace android {
+
+int32_t volatile AudioPort::mNextUniqueId = 1;
+
+// --- AudioPort class implementation
+
+AudioPort::AudioPort(const String8& name, audio_port_type_t type,
+                     audio_port_role_t role) :
+    mName(name), mType(type), mRole(role), mFlags(0)
+{
+    mUseInChannelMask = ((type == AUDIO_PORT_TYPE_DEVICE) && (role == AUDIO_PORT_ROLE_SOURCE)) ||
+                    ((type == AUDIO_PORT_TYPE_MIX) && (role == AUDIO_PORT_ROLE_SINK));
+}
+
+void AudioPort::attach(const sp<HwModule>& module)
+{
+    mModule = module;
+}
+
+audio_port_handle_t AudioPort::getNextUniqueId()
+{
+    return static_cast<audio_port_handle_t>(android_atomic_inc(&mNextUniqueId));
+}
+
+audio_module_handle_t AudioPort::getModuleHandle() const
+{
+    if (mModule == 0) {
+        return 0;
+    }
+    return mModule->mHandle;
+}
+
+uint32_t AudioPort::getModuleVersion() const
+{
+    if (mModule == 0) {
+        return 0;
+    }
+    return mModule->mHalVersion;
+}
+
+const char *AudioPort::getModuleName() const
+{
+    if (mModule == 0) {
+        return "";
+    }
+    return mModule->mName;
+}
+
+void AudioPort::toAudioPort(struct audio_port *port) const
+{
+    port->role = mRole;
+    port->type = mType;
+    strlcpy(port->name, mName, AUDIO_PORT_MAX_NAME_LEN);
+    unsigned int i;
+    for (i = 0; i < mSamplingRates.size() && i < AUDIO_PORT_MAX_SAMPLING_RATES; i++) {
+        if (mSamplingRates[i] != 0) {
+            port->sample_rates[i] = mSamplingRates[i];
+        }
+    }
+    port->num_sample_rates = i;
+    for (i = 0; i < mChannelMasks.size() && i < AUDIO_PORT_MAX_CHANNEL_MASKS; i++) {
+        if (mChannelMasks[i] != 0) {
+            port->channel_masks[i] = mChannelMasks[i];
+        }
+    }
+    port->num_channel_masks = i;
+    for (i = 0; i < mFormats.size() && i < AUDIO_PORT_MAX_FORMATS; i++) {
+        if (mFormats[i] != 0) {
+            port->formats[i] = mFormats[i];
+        }
+    }
+    port->num_formats = i;
+
+    ALOGV("AudioPort::toAudioPort() num gains %zu", mGains.size());
+
+    for (i = 0; i < mGains.size() && i < AUDIO_PORT_MAX_GAINS; i++) {
+        port->gains[i] = mGains[i]->mGain;
+    }
+    port->num_gains = i;
+}
+
+void AudioPort::importAudioPort(const sp<AudioPort> port) {
+    for (size_t k = 0 ; k < port->mSamplingRates.size() ; k++) {
+        const uint32_t rate = port->mSamplingRates.itemAt(k);
+        if (rate != 0) { // skip "dynamic" rates
+            bool hasRate = false;
+            for (size_t l = 0 ; l < mSamplingRates.size() ; l++) {
+                if (rate == mSamplingRates.itemAt(l)) {
+                    hasRate = true;
+                    break;
+                }
+            }
+            if (!hasRate) { // never import a sampling rate twice
+                mSamplingRates.add(rate);
+            }
+        }
+    }
+    for (size_t k = 0 ; k < port->mChannelMasks.size() ; k++) {
+        const audio_channel_mask_t mask = port->mChannelMasks.itemAt(k);
+        if (mask != 0) { // skip "dynamic" masks
+            bool hasMask = false;
+            for (size_t l = 0 ; l < mChannelMasks.size() ; l++) {
+                if (mask == mChannelMasks.itemAt(l)) {
+                    hasMask = true;
+                    break;
+                }
+            }
+            if (!hasMask) { // never import a channel mask twice
+                mChannelMasks.add(mask);
+            }
+        }
+    }
+    for (size_t k = 0 ; k < port->mFormats.size() ; k++) {
+        const audio_format_t format = port->mFormats.itemAt(k);
+        if (format != 0) { // skip "dynamic" formats
+            bool hasFormat = false;
+            for (size_t l = 0 ; l < mFormats.size() ; l++) {
+                if (format == mFormats.itemAt(l)) {
+                    hasFormat = true;
+                    break;
+                }
+            }
+            if (!hasFormat) { // never import a format twice
+                mFormats.add(format);
+            }
+        }
+    }
+}
+
+void AudioPort::clearCapabilities() {
+    mChannelMasks.clear();
+    mFormats.clear();
+    mSamplingRates.clear();
+}
+
+void AudioPort::loadSamplingRates(char *name)
+{
+    char *str = strtok(name, "|");
+
+    // by convention, "0" in the first entry in mSamplingRates indicates the supported sampling
+    // rates should be read from the output stream after it is opened for the first time
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        mSamplingRates.add(0);
+        return;
+    }
+
+    while (str != NULL) {
+        uint32_t rate = atoi(str);
+        if (rate != 0) {
+            ALOGV("loadSamplingRates() adding rate %d", rate);
+            mSamplingRates.add(rate);
+        }
+        str = strtok(NULL, "|");
+    }
+}
+
+void AudioPort::loadFormats(char *name)
+{
+    char *str = strtok(name, "|");
+
+    // by convention, "0" in the first entry in mFormats indicates the supported formats
+    // should be read from the output stream after it is opened for the first time
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        mFormats.add(AUDIO_FORMAT_DEFAULT);
+        return;
+    }
+
+    while (str != NULL) {
+        audio_format_t format = (audio_format_t)ConfigParsingUtils::stringToEnum(sFormatNameToEnumTable,
+                                                             ARRAY_SIZE(sFormatNameToEnumTable),
+                                                             str);
+        if (format != AUDIO_FORMAT_DEFAULT) {
+            mFormats.add(format);
+        }
+        str = strtok(NULL, "|");
+    }
+    // we sort from worst to best, so that AUDIO_FORMAT_DEFAULT is always the first entry.
+    // TODO: compareFormats could be a lambda to convert between pointer-to-format to format:
+    // [](const audio_format_t *format1, const audio_format_t *format2) {
+    //     return compareFormats(*format1, *format2);
+    // }
+    mFormats.sort(compareFormats);
+}
+
+void AudioPort::loadInChannels(char *name)
+{
+    const char *str = strtok(name, "|");
+
+    ALOGV("loadInChannels() %s", name);
+
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        mChannelMasks.add(0);
+        return;
+    }
+
+    while (str != NULL) {
+        audio_channel_mask_t channelMask =
+                (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sInChannelsNameToEnumTable,
+                                                   ARRAY_SIZE(sInChannelsNameToEnumTable),
+                                                   str);
+        if (channelMask == 0) { // if not found, check the channel index table
+            channelMask = (audio_channel_mask_t)
+                      ConfigParsingUtils::stringToEnum(sIndexChannelsNameToEnumTable,
+                              ARRAY_SIZE(sIndexChannelsNameToEnumTable),
+                              str);
+        }
+        if (channelMask != 0) {
+            ALOGV("loadInChannels() adding channelMask %#x", channelMask);
+            mChannelMasks.add(channelMask);
+        }
+        str = strtok(NULL, "|");
+    }
+}
+
+void AudioPort::loadOutChannels(char *name)
+{
+    const char *str = strtok(name, "|");
+
+    ALOGV("loadOutChannels() %s", name);
+
+    // by convention, "0" in the first entry in mChannelMasks indicates the supported channel
+    // masks should be read from the output stream after it is opened for the first time
+    if (str != NULL && strcmp(str, DYNAMIC_VALUE_TAG) == 0) {
+        mChannelMasks.add(0);
+        return;
+    }
+
+    while (str != NULL) {
+        audio_channel_mask_t channelMask =
+                (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sOutChannelsNameToEnumTable,
+                                                   ARRAY_SIZE(sOutChannelsNameToEnumTable),
+                                                   str);
+        if (channelMask == 0) { // if not found, check the channel index table
+            channelMask = (audio_channel_mask_t)
+                      ConfigParsingUtils::stringToEnum(sIndexChannelsNameToEnumTable,
+                              ARRAY_SIZE(sIndexChannelsNameToEnumTable),
+                              str);
+        }
+        if (channelMask != 0) {
+            mChannelMasks.add(channelMask);
+        }
+        str = strtok(NULL, "|");
+    }
+    return;
+}
+
+audio_gain_mode_t AudioPort::loadGainMode(char *name)
+{
+    const char *str = strtok(name, "|");
+
+    ALOGV("loadGainMode() %s", name);
+    audio_gain_mode_t mode = 0;
+    while (str != NULL) {
+        mode |= (audio_gain_mode_t)ConfigParsingUtils::stringToEnum(sGainModeNameToEnumTable,
+                                                ARRAY_SIZE(sGainModeNameToEnumTable),
+                                                str);
+        str = strtok(NULL, "|");
+    }
+    return mode;
+}
+
+void AudioPort::loadGain(cnode *root, int index)
+{
+    cnode *node = root->first_child;
+
+    sp<AudioGain> gain = new AudioGain(index, mUseInChannelMask);
+
+    while (node) {
+        if (strcmp(node->name, GAIN_MODE) == 0) {
+            gain->mGain.mode = loadGainMode((char *)node->value);
+        } else if (strcmp(node->name, GAIN_CHANNELS) == 0) {
+            if (mUseInChannelMask) {
+                gain->mGain.channel_mask =
+                        (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sInChannelsNameToEnumTable,
+                                                           ARRAY_SIZE(sInChannelsNameToEnumTable),
+                                                           (char *)node->value);
+            } else {
+                gain->mGain.channel_mask =
+                        (audio_channel_mask_t)ConfigParsingUtils::stringToEnum(sOutChannelsNameToEnumTable,
+                                                           ARRAY_SIZE(sOutChannelsNameToEnumTable),
+                                                           (char *)node->value);
+            }
+        } else if (strcmp(node->name, GAIN_MIN_VALUE) == 0) {
+            gain->mGain.min_value = atoi((char *)node->value);
+        } else if (strcmp(node->name, GAIN_MAX_VALUE) == 0) {
+            gain->mGain.max_value = atoi((char *)node->value);
+        } else if (strcmp(node->name, GAIN_DEFAULT_VALUE) == 0) {
+            gain->mGain.default_value = atoi((char *)node->value);
+        } else if (strcmp(node->name, GAIN_STEP_VALUE) == 0) {
+            gain->mGain.step_value = atoi((char *)node->value);
+        } else if (strcmp(node->name, GAIN_MIN_RAMP_MS) == 0) {
+            gain->mGain.min_ramp_ms = atoi((char *)node->value);
+        } else if (strcmp(node->name, GAIN_MAX_RAMP_MS) == 0) {
+            gain->mGain.max_ramp_ms = atoi((char *)node->value);
+        }
+        node = node->next;
+    }
+
+    ALOGV("loadGain() adding new gain mode %08x channel mask %08x min mB %d max mB %d",
+          gain->mGain.mode, gain->mGain.channel_mask, gain->mGain.min_value, gain->mGain.max_value);
+
+    if (gain->mGain.mode == 0) {
+        return;
+    }
+    mGains.add(gain);
+}
+
+void AudioPort::loadGains(cnode *root)
+{
+    cnode *node = root->first_child;
+    int index = 0;
+    while (node) {
+        ALOGV("loadGains() loading gain %s", node->name);
+        loadGain(node, index++);
+        node = node->next;
+    }
+}
+
+status_t AudioPort::checkExactSamplingRate(uint32_t samplingRate) const
+{
+    if (mSamplingRates.isEmpty()) {
+        return NO_ERROR;
+    }
+
+    for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+        if (mSamplingRates[i] == samplingRate) {
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+status_t AudioPort::checkCompatibleSamplingRate(uint32_t samplingRate,
+        uint32_t *updatedSamplingRate) const
+{
+    if (mSamplingRates.isEmpty()) {
+        if (updatedSamplingRate != NULL) {
+            *updatedSamplingRate = samplingRate;
+        }
+        return NO_ERROR;
+    }
+
+    // Search for the closest supported sampling rate that is above (preferred)
+    // or below (acceptable) the desired sampling rate, within a permitted ratio.
+    // The sampling rates do not need to be sorted in ascending order.
+    ssize_t maxBelow = -1;
+    ssize_t minAbove = -1;
+    uint32_t candidate;
+    for (size_t i = 0; i < mSamplingRates.size(); i++) {
+        candidate = mSamplingRates[i];
+        if (candidate == samplingRate) {
+            if (updatedSamplingRate != NULL) {
+                *updatedSamplingRate = candidate;
+            }
+            return NO_ERROR;
+        }
+        // candidate < desired
+        if (candidate < samplingRate) {
+            if (maxBelow < 0 || candidate > mSamplingRates[maxBelow]) {
+                maxBelow = i;
+            }
+        // candidate > desired
+        } else {
+            if (minAbove < 0 || candidate < mSamplingRates[minAbove]) {
+                minAbove = i;
+            }
+        }
+    }
+
+    // Prefer to down-sample from a higher sampling rate, as we get the desired frequency spectrum.
+    if (minAbove >= 0) {
+        candidate = mSamplingRates[minAbove];
+        if (candidate / AUDIO_RESAMPLER_DOWN_RATIO_MAX <= samplingRate) {
+            if (updatedSamplingRate != NULL) {
+                *updatedSamplingRate = candidate;
+            }
+            return NO_ERROR;
+        }
+    }
+    // But if we have to up-sample from a lower sampling rate, that's OK.
+    if (maxBelow >= 0) {
+        candidate = mSamplingRates[maxBelow];
+        if (candidate * AUDIO_RESAMPLER_UP_RATIO_MAX >= samplingRate) {
+            if (updatedSamplingRate != NULL) {
+                *updatedSamplingRate = candidate;
+            }
+            return NO_ERROR;
+        }
+    }
+    // leave updatedSamplingRate unmodified
+    return BAD_VALUE;
+}
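
The selection rule above reduces to: exact match first, then the closest supported rate above the request (down-sampling preserves the requested spectrum), then the closest rate below, and failure when neither is within the resampler ratio limits. The following is a standalone mirror of that rule, not the class method itself; the ratio limits passed in main() are placeholders for AUDIO_RESAMPLER_DOWN_RATIO_MAX and AUDIO_RESAMPLER_UP_RATIO_MAX:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Mirrors the preference order implemented by checkCompatibleSamplingRate().
    static bool pickCompatibleRate(const std::vector<uint32_t> &supported, uint32_t wanted,
                                   uint32_t maxDownRatio, uint32_t maxUpRatio, uint32_t *out) {
        int maxBelow = -1, minAbove = -1;
        for (size_t i = 0; i < supported.size(); i++) {
            uint32_t c = supported[i];
            if (c == wanted) { *out = c; return true; }          // exact match always wins
            if (c < wanted) {
                if (maxBelow < 0 || c > supported[maxBelow]) maxBelow = (int)i;
            } else {
                if (minAbove < 0 || c < supported[minAbove]) minAbove = (int)i;
            }
        }
        if (minAbove >= 0 && supported[minAbove] / maxDownRatio <= wanted) {
            *out = supported[minAbove];                          // prefer down-sampling
            return true;
        }
        if (maxBelow >= 0 && supported[maxBelow] * maxUpRatio >= wanted) {
            *out = supported[maxBelow];                          // fall back to up-sampling
            return true;
        }
        return false;
    }

    int main() {
        uint32_t picked = 0;
        // A 44100 Hz request against {16000, 48000}: 48000 is chosen over 16000.
        assert(pickCompatibleRate({16000, 48000}, 44100, 6, 2, &picked) && picked == 48000);
        return 0;
    }
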
+
+status_t AudioPort::checkExactChannelMask(audio_channel_mask_t channelMask) const
+{
+    if (mChannelMasks.isEmpty()) {
+        return NO_ERROR;
+    }
+
+    for (size_t i = 0; i < mChannelMasks.size(); i++) {
+        if (mChannelMasks[i] == channelMask) {
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+status_t AudioPort::checkCompatibleChannelMask(audio_channel_mask_t channelMask,
+        audio_channel_mask_t *updatedChannelMask) const
+{
+    if (mChannelMasks.isEmpty()) {
+        if (updatedChannelMask != NULL) {
+            *updatedChannelMask = channelMask;
+        }
+        return NO_ERROR;
+    }
+
+    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
+    const bool isIndex = audio_channel_mask_get_representation(channelMask)
+            == AUDIO_CHANNEL_REPRESENTATION_INDEX;
+    int bestMatch = 0;
+    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+        audio_channel_mask_t supported = mChannelMasks[i];
+        if (supported == channelMask) {
+            // Exact matches always taken.
+            if (updatedChannelMask != NULL) {
+                *updatedChannelMask = channelMask;
+            }
+            return NO_ERROR;
+        }
+
+        // AUDIO_CHANNEL_NONE (value: 0) is used for dynamic channel support
+        if (isRecordThread && supported != AUDIO_CHANNEL_NONE) {
+            // Approximate (best) match:
+            // The match score measures how well the supported channel mask matches the
+            // desired mask, where increasing-is-better.
+            //
+            // TODO: Some tweaks may be needed.
+            // Should be a static function of the data processing library.
+            //
+            // In priority:
+            // match score = 1000 if legacy channel conversion equivalent (always prefer this)
+            // OR
+            // match score += 100 if the channel mask representations match
+            // match score += number of channels matched.
+            //
+            // If there are no matched channels, the mask may still be accepted
+            // but the playback or record will be silent.
+            const bool isSupportedIndex = (audio_channel_mask_get_representation(supported)
+                    == AUDIO_CHANNEL_REPRESENTATION_INDEX);
+            int match;
+            if (isIndex && isSupportedIndex) {
+                // index equivalence
+                match = 100 + __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask)
+                            & audio_channel_mask_get_bits(supported));
+            } else if (isIndex && !isSupportedIndex) {
+                const uint32_t equivalentBits =
+                        (1 << audio_channel_count_from_in_mask(supported)) - 1 ;
+                match = __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask) & equivalentBits);
+            } else if (!isIndex && isSupportedIndex) {
+                const uint32_t equivalentBits =
+                        (1 << audio_channel_count_from_in_mask(channelMask)) - 1;
+                match = __builtin_popcount(
+                        equivalentBits & audio_channel_mask_get_bits(supported));
+            } else {
+                // positional equivalence
+                match = 100 + __builtin_popcount(
+                        audio_channel_mask_get_bits(channelMask)
+                            & audio_channel_mask_get_bits(supported));
+                switch (supported) {
+                case AUDIO_CHANNEL_IN_FRONT_BACK:
+                case AUDIO_CHANNEL_IN_STEREO:
+                    if (channelMask == AUDIO_CHANNEL_IN_MONO) {
+                        match = 1000;
+                    }
+                    break;
+                case AUDIO_CHANNEL_IN_MONO:
+                    if (channelMask == AUDIO_CHANNEL_IN_FRONT_BACK
+                            || channelMask == AUDIO_CHANNEL_IN_STEREO) {
+                        match = 1000;
+                    }
+                    break;
+                default:
+                    break;
+                }
+            }
+            if (match > bestMatch) {
+                bestMatch = match;
+                if (updatedChannelMask != NULL) {
+                    *updatedChannelMask = supported;
+                } else {
+                    return NO_ERROR; // any match will do in this case.
+                }
+            }
+        }
+    }
+    return bestMatch > 0 ? NO_ERROR : BAD_VALUE;
+}
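
For the inexact record-thread path, the scoring above boils down to: legacy mono/stereo conversion always wins (score 1000), a matching representation adds 100, and every channel in common adds 1. A simplified standalone mirror of the positional-mask case only; the bit patterns are placeholders, not real audio_channel_mask_t values:

    #include <cassert>
    #include <cstdint>

    static const uint32_t kMonoBits   = 0x1; // placeholder for AUDIO_CHANNEL_IN_MONO
    static const uint32_t kStereoBits = 0x3; // placeholder for AUDIO_CHANNEL_IN_STEREO

    // Score a supported positional mask against a desired positional mask.
    static int scorePositional(uint32_t desired, uint32_t supported) {
        // same representation: 100 plus the number of channels in common
        int match = 100 + __builtin_popcount(desired & supported);
        // legacy mono <-> stereo conversion is always preferred
        if ((desired == kMonoBits && supported == kStereoBits) ||
            (desired == kStereoBits && supported == kMonoBits)) {
            match = 1000;
        }
        return match;
    }

    int main() {
        assert(scorePositional(kMonoBits, kStereoBits) == 1000);        // legacy conversion
        assert(scorePositional(kStereoBits, kStereoBits | 0x4) == 102); // 2 channels in common
        return 0;
    }
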
+
+status_t AudioPort::checkExactFormat(audio_format_t format) const
+{
+    if (mFormats.isEmpty()) {
+        return NO_ERROR;
+    }
+
+    for (size_t i = 0; i < mFormats.size(); i ++) {
+        if (mFormats[i] == format) {
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+status_t AudioPort::checkCompatibleFormat(audio_format_t format, audio_format_t *updatedFormat)
+        const
+{
+    if (mFormats.isEmpty()) {
+        if (updatedFormat != NULL) {
+            *updatedFormat = format;
+        }
+        return NO_ERROR;
+    }
+
+    const bool checkInexact = // when port is input and format is linear pcm
+            mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK
+            && audio_is_linear_pcm(format);
+
+    // iterate from best format to worst format (reverse order)
+    for (ssize_t i = mFormats.size() - 1; i >= 0 ; --i) {
+        if (mFormats[i] == format ||
+                (checkInexact
+                        && mFormats[i] != AUDIO_FORMAT_DEFAULT
+                        && audio_is_linear_pcm(mFormats[i]))) {
+            // for inexact checks we take the first linear pcm format due to sorting.
+            if (updatedFormat != NULL) {
+                *updatedFormat = mFormats[i];
+            }
+            return NO_ERROR;
+        }
+    }
+    return BAD_VALUE;
+}
+
+uint32_t AudioPort::pickSamplingRate() const
+{
+    // special case for uninitialized dynamic profile
+    if (mSamplingRates.size() == 1 && mSamplingRates[0] == 0) {
+        return 0;
+    }
+
+    // For direct outputs, pick the minimum sampling rate: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the connected
+    // sink.
+    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+        uint32_t samplingRate = UINT_MAX;
+        for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+            if ((mSamplingRates[i] < samplingRate) && (mSamplingRates[i] > 0)) {
+                samplingRate = mSamplingRates[i];
+            }
+        }
+        return (samplingRate == UINT_MAX) ? 0 : samplingRate;
+    }
+
+    uint32_t samplingRate = 0;
+    uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
+
+    // For mixed output and inputs, use max mixer sampling rates. Do not
+    // limit sampling rate otherwise
+    // For inputs, also see checkCompatibleSamplingRate().
+    if (mType != AUDIO_PORT_TYPE_MIX) {
+        maxRate = UINT_MAX;
+    }
+    // TODO: should mSamplingRates[] be ordered in terms of our preference
+    // and we return the first (and hence most preferred) match?  This is of concern if
+    // we want to choose 96kHz over 192kHz for USB driver stability or resource constraints.
+    for (size_t i = 0; i < mSamplingRates.size(); i ++) {
+        if ((mSamplingRates[i] > samplingRate) && (mSamplingRates[i] <= maxRate)) {
+            samplingRate = mSamplingRates[i];
+        }
+    }
+    return samplingRate;
+}
+
+audio_channel_mask_t AudioPort::pickChannelMask() const
+{
+    // special case for uninitialized dynamic profile
+    if (mChannelMasks.size() == 1 && mChannelMasks[0] == 0) {
+        return AUDIO_CHANNEL_NONE;
+    }
+    audio_channel_mask_t channelMask = AUDIO_CHANNEL_NONE;
+
+    // For direct outputs, pick the minimum channel count: this helps ensure that the
+    // channel count / sampling rate combination chosen will be supported by the connected
+    // sink.
+    if ((mType == AUDIO_PORT_TYPE_MIX) && (mRole == AUDIO_PORT_ROLE_SOURCE) &&
+            (mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD))) {
+        uint32_t channelCount = UINT_MAX;
+        for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+            uint32_t cnlCount;
+            if (mUseInChannelMask) {
+                cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+            } else {
+                cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+            }
+            if ((cnlCount < channelCount) && (cnlCount > 0)) {
+                channelMask = mChannelMasks[i];
+                channelCount = cnlCount;
+            }
+        }
+        return channelMask;
+    }
+
+    uint32_t channelCount = 0;
+    uint32_t maxCount = MAX_MIXER_CHANNEL_COUNT;
+
+    // For mixed output and inputs, use max mixer channel count. Do not
+    // limit channel count otherwise
+    if (mType != AUDIO_PORT_TYPE_MIX) {
+        maxCount = UINT_MAX;
+    }
+    for (size_t i = 0; i < mChannelMasks.size(); i ++) {
+        uint32_t cnlCount;
+        if (mUseInChannelMask) {
+            cnlCount = audio_channel_count_from_in_mask(mChannelMasks[i]);
+        } else {
+            cnlCount = audio_channel_count_from_out_mask(mChannelMasks[i]);
+        }
+        if ((cnlCount > channelCount) && (cnlCount <= maxCount)) {
+            channelMask = mChannelMasks[i];
+            channelCount = cnlCount;
+        }
+    }
+    return channelMask;
+}
+
+/* format in order of increasing preference */
+const audio_format_t AudioPort::sPcmFormatCompareTable[] = {
+        AUDIO_FORMAT_DEFAULT,
+        AUDIO_FORMAT_PCM_16_BIT,
+        AUDIO_FORMAT_PCM_8_24_BIT,
+        AUDIO_FORMAT_PCM_24_BIT_PACKED,
+        AUDIO_FORMAT_PCM_32_BIT,
+        AUDIO_FORMAT_PCM_FLOAT,
+};
+
+int AudioPort::compareFormats(audio_format_t format1,
+                                                  audio_format_t format2)
+{
+    // NOTE: AUDIO_FORMAT_INVALID is also considered not PCM and will be compared equal to any
+    // compressed format and better than any PCM format. This is by design of pickFormat()
+    if (!audio_is_linear_pcm(format1)) {
+        if (!audio_is_linear_pcm(format2)) {
+            return 0;
+        }
+        return 1;
+    }
+    if (!audio_is_linear_pcm(format2)) {
+        return -1;
+    }
+
+    int index1 = -1, index2 = -1;
+    for (size_t i = 0;
+            (i < ARRAY_SIZE(sPcmFormatCompareTable)) && ((index1 == -1) || (index2 == -1));
+            i ++) {
+        if (sPcmFormatCompareTable[i] == format1) {
+            index1 = i;
+        }
+        if (sPcmFormatCompareTable[i] == format2) {
+            index2 = i;
+        }
+    }
+    // format1 not found => index1 < 0 => format2 > format1
+    // format2 not found => index2 < 0 => format2 < format1
+    return index1 - index2;
+}
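
Because sPcmFormatCompareTable is ordered worst to best, compareFormats() effectively ranks PCM formats by their table index and ranks any compressed (non linear PCM) format above every PCM format. A small standalone mirror of the PCM ordering only:

    #include <cassert>

    // Worst-to-best PCM order, mirroring sPcmFormatCompareTable above.
    enum PcmRank { DEFAULT = 0, PCM_16, PCM_8_24, PCM_24_PACKED, PCM_32, PCM_FLOAT };

    static int compareRank(PcmRank a, PcmRank b) { return (int)a - (int)b; }

    int main() {
        assert(compareRank(PCM_16, PCM_FLOAT) < 0);  // float is the most preferred PCM format
        assert(compareRank(PCM_32, PCM_8_24) > 0);   // 32-bit integer outranks 8.24
        return 0;
    }
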
+
+audio_format_t AudioPort::pickFormat() const
+{
+    // special case for uninitialized dynamic profile
+    if (mFormats.size() == 1 && mFormats[0] == 0) {
+        return AUDIO_FORMAT_DEFAULT;
+    }
+
+    audio_format_t format = AUDIO_FORMAT_DEFAULT;
+    audio_format_t bestFormat =
+            AudioPort::sPcmFormatCompareTable[
+                ARRAY_SIZE(AudioPort::sPcmFormatCompareTable) - 1];
+    // For mixed output and inputs, use best mixer output format. Do not
+    // limit format otherwise
+    if ((mType != AUDIO_PORT_TYPE_MIX) ||
+            ((mRole == AUDIO_PORT_ROLE_SOURCE) &&
+             (((mFlags & (AUDIO_OUTPUT_FLAG_DIRECT | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)) != 0)))) {
+        bestFormat = AUDIO_FORMAT_INVALID;
+    }
+
+    for (size_t i = 0; i < mFormats.size(); i ++) {
+        if ((compareFormats(mFormats[i], format) > 0) &&
+                (compareFormats(mFormats[i], bestFormat) <= 0)) {
+            format = mFormats[i];
+        }
+    }
+    return format;
+}
+
+status_t AudioPort::checkGain(const struct audio_gain_config *gainConfig,
+                                                  int index) const
+{
+    if (index < 0 || (size_t)index >= mGains.size()) {
+        return BAD_VALUE;
+    }
+    return mGains[index]->checkConfig(gainConfig);
+}
+
+void AudioPort::dump(int fd, int spaces) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    if (mName.length() != 0) {
+        snprintf(buffer, SIZE, "%*s- name: %s\n", spaces, "", mName.string());
+        result.append(buffer);
+    }
+
+    if (mSamplingRates.size() != 0) {
+        snprintf(buffer, SIZE, "%*s- sampling rates: ", spaces, "");
+        result.append(buffer);
+        for (size_t i = 0; i < mSamplingRates.size(); i++) {
+            if (i == 0 && mSamplingRates[i] == 0) {
+                snprintf(buffer, SIZE, "Dynamic");
+            } else {
+                snprintf(buffer, SIZE, "%d", mSamplingRates[i]);
+            }
+            result.append(buffer);
+            result.append(i == (mSamplingRates.size() - 1) ? "" : ", ");
+        }
+        result.append("\n");
+    }
+
+    if (mChannelMasks.size() != 0) {
+        snprintf(buffer, SIZE, "%*s- channel masks: ", spaces, "");
+        result.append(buffer);
+        for (size_t i = 0; i < mChannelMasks.size(); i++) {
+            ALOGV("AudioPort::dump mChannelMasks %zu %08x", i, mChannelMasks[i]);
+
+            if (i == 0 && mChannelMasks[i] == 0) {
+                snprintf(buffer, SIZE, "Dynamic");
+            } else {
+                snprintf(buffer, SIZE, "0x%04x", mChannelMasks[i]);
+            }
+            result.append(buffer);
+            result.append(i == (mChannelMasks.size() - 1) ? "" : ", ");
+        }
+        result.append("\n");
+    }
+
+    if (mFormats.size() != 0) {
+        snprintf(buffer, SIZE, "%*s- formats: ", spaces, "");
+        result.append(buffer);
+        for (size_t i = 0; i < mFormats.size(); i++) {
+            const char *formatStr = ConfigParsingUtils::enumToString(sFormatNameToEnumTable,
+                                                 ARRAY_SIZE(sFormatNameToEnumTable),
+                                                 mFormats[i]);
+            const bool isEmptyStr = formatStr[0] == 0;
+            if (i == 0 && isEmptyStr) {
+                snprintf(buffer, SIZE, "Dynamic");
+            } else {
+                if (isEmptyStr) {
+                    snprintf(buffer, SIZE, "%#x", mFormats[i]);
+                } else {
+                    snprintf(buffer, SIZE, "%s", formatStr);
+                }
+            }
+            result.append(buffer);
+            result.append(i == (mFormats.size() - 1) ? "" : ", ");
+        }
+        result.append("\n");
+    }
+    write(fd, result.string(), result.size());
+    if (mGains.size() != 0) {
+        snprintf(buffer, SIZE, "%*s- gains:\n", spaces, "");
+        write(fd, buffer, strlen(buffer));
+        for (size_t i = 0; i < mGains.size(); i++) {
+            mGains[i]->dump(fd, spaces + 2, i);
+        }
+    }
+}
+
+void AudioPort::log(const char* indent) const
+{
+    ALOGI("%s Port[nm:%s, type:%d, role:%d]", indent, mName.string(), mType, mRole);
+}
+
+// --- AudioPortConfig class implementation
+
+AudioPortConfig::AudioPortConfig()
+{
+    mSamplingRate = 0;
+    mChannelMask = AUDIO_CHANNEL_NONE;
+    mFormat = AUDIO_FORMAT_INVALID;
+    mGain.index = -1;
+}
+
+status_t AudioPortConfig::applyAudioPortConfig(
+                                                        const struct audio_port_config *config,
+                                                        struct audio_port_config *backupConfig)
+{
+    struct audio_port_config localBackupConfig;
+    status_t status = NO_ERROR;
+
+    localBackupConfig.config_mask = config->config_mask;
+    toAudioPortConfig(&localBackupConfig);
+
+    sp<AudioPort> audioport = getAudioPort();
+    if (audioport == 0) {
+        status = NO_INIT;
+        goto exit;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+        status = audioport->checkExactSamplingRate(config->sample_rate);
+        if (status != NO_ERROR) {
+            goto exit;
+        }
+        mSamplingRate = config->sample_rate;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+        status = audioport->checkExactChannelMask(config->channel_mask);
+        if (status != NO_ERROR) {
+            goto exit;
+        }
+        mChannelMask = config->channel_mask;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+        status = audioport->checkExactFormat(config->format);
+        if (status != NO_ERROR) {
+            goto exit;
+        }
+        mFormat = config->format;
+    }
+    if (config->config_mask & AUDIO_PORT_CONFIG_GAIN) {
+        status = audioport->checkGain(&config->gain, config->gain.index);
+        if (status != NO_ERROR) {
+            goto exit;
+        }
+        mGain = config->gain;
+    }
+
+exit:
+    if (status != NO_ERROR) {
+        applyAudioPortConfig(&localBackupConfig);
+    }
+    if (backupConfig != NULL) {
+        *backupConfig = localBackupConfig;
+    }
+    return status;
+}
+
+void AudioPortConfig::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                        const struct audio_port_config *srcConfig) const
+{
+    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+        dstConfig->sample_rate = mSamplingRate;
+        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE)) {
+            dstConfig->sample_rate = srcConfig->sample_rate;
+        }
+    } else {
+        dstConfig->sample_rate = 0;
+    }
+    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+        dstConfig->channel_mask = mChannelMask;
+        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK)) {
+            dstConfig->channel_mask = srcConfig->channel_mask;
+        }
+    } else {
+        dstConfig->channel_mask = AUDIO_CHANNEL_NONE;
+    }
+    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+        dstConfig->format = mFormat;
+        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_FORMAT)) {
+            dstConfig->format = srcConfig->format;
+        }
+    } else {
+        dstConfig->format = AUDIO_FORMAT_INVALID;
+    }
+    if (dstConfig->config_mask & AUDIO_PORT_CONFIG_GAIN) {
+        dstConfig->gain = mGain;
+        if ((srcConfig != NULL) && (srcConfig->config_mask & AUDIO_PORT_CONFIG_GAIN)) {
+            dstConfig->gain = srcConfig->gain;
+        }
+    } else {
+        dstConfig->gain.index = -1;
+    }
+    if (dstConfig->gain.index != -1) {
+        dstConfig->config_mask |= AUDIO_PORT_CONFIG_GAIN;
+    } else {
+        dstConfig->config_mask &= ~AUDIO_PORT_CONFIG_GAIN;
+    }
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
new file mode 100644
index 0000000..89ef045
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/ConfigParsingUtils.cpp
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::ConfigParsingUtils"
+//#define LOG_NDEBUG 0
+
+#include "ConfigParsingUtils.h"
+#include "AudioGain.h"
+#include <hardware/audio.h>
+#include <utils/Log.h>
+#include <cutils/misc.h>
+
+namespace android {
+
+//static
+uint32_t ConfigParsingUtils::stringToEnum(const struct StringToEnum *table,
+                                              size_t size,
+                                              const char *name)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (strcmp(table[i].name, name) == 0) {
+            ALOGV("stringToEnum() found %s", table[i].name);
+            return table[i].value;
+        }
+    }
+    return 0;
+}
+
+//static
+const char *ConfigParsingUtils::enumToString(const struct StringToEnum *table,
+                                              size_t size,
+                                              uint32_t value)
+{
+    for (size_t i = 0; i < size; i++) {
+        if (table[i].value == value) {
+            return table[i].name;
+        }
+    }
+    return "";
+}
+
+//static
+bool ConfigParsingUtils::stringToBool(const char *value)
+{
+    return ((strcasecmp("true", value) == 0) || (strcmp("1", value) == 0));
+}
+
+
+// --- audio_policy.conf file parsing
+//static
+uint32_t ConfigParsingUtils::parseOutputFlagNames(char *name)
+{
+    uint32_t flag = 0;
+
+    // it is OK to cast name to non const here as we are not going to use it after
+    // strtok() modifies it
+    char *flagName = strtok(name, "|");
+    while (flagName != NULL) {
+        if (strlen(flagName) != 0) {
+            flag |= ConfigParsingUtils::stringToEnum(sOutputFlagNameToEnumTable,
+                               ARRAY_SIZE(sOutputFlagNameToEnumTable),
+                               flagName);
+        }
+        flagName = strtok(NULL, "|");
+    }
+    // force direct flag if offload flag is set: offloading implies a direct output stream
+    // and all common behaviors are driven by checking only the direct flag
+    // this should normally be set appropriately in the policy configuration file
+    if ((flag & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flag |= AUDIO_OUTPUT_FLAG_DIRECT;
+    }
+
+    return flag;
+}
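
An in-tree usage sketch of the parser above; the flag string is illustrative and assumes both names appear in sOutputFlagNameToEnumTable. The buffer must be writable because strtok() modifies it in place, and a COMPRESS_OFFLOAD entry implicitly adds AUDIO_OUTPUT_FLAG_DIRECT:

    char flagNames[] = "AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING";
    uint32_t flags = ConfigParsingUtils::parseOutputFlagNames(flagNames);
    // flags == COMPRESS_OFFLOAD | NON_BLOCKING | DIRECT (DIRECT forced by the offload flag)
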
+
+//static
+uint32_t ConfigParsingUtils::parseInputFlagNames(char *name)
+{
+    uint32_t flag = 0;
+
+    // it is OK to cast name to non const here as we are not going to use it after
+    // strtok() modifies it
+    char *flagName = strtok(name, "|");
+    while (flagName != NULL) {
+        if (strlen(flagName) != 0) {
+            flag |= stringToEnum(sInputFlagNameToEnumTable,
+                               ARRAY_SIZE(sInputFlagNameToEnumTable),
+                               flagName);
+        }
+        flagName = strtok(NULL, "|");
+    }
+    return flag;
+}
+
+//static
+audio_devices_t ConfigParsingUtils::parseDeviceNames(char *name)
+{
+    uint32_t device = 0;
+
+    char *devName = strtok(name, "|");
+    while (devName != NULL) {
+        if (strlen(devName) != 0) {
+            device |= stringToEnum(sDeviceTypeToEnumTable,
+                                 ARRAY_SIZE(sDeviceTypeToEnumTable),
+                                 devName);
+        }
+        devName = strtok(NULL, "|");
+    }
+    return device;
+}
+
+//static
+void ConfigParsingUtils::loadHwModule(cnode *root, HwModuleCollection &hwModules,
+                                      DeviceVector &availableInputDevices,
+                                      DeviceVector &availableOutputDevices,
+                                      sp<DeviceDescriptor> &defaultOutputDevices,
+                                      bool &isSpeakerDrcEnabled)
+{
+    status_t status = NAME_NOT_FOUND;
+    cnode *node;
+    sp<HwModule> module = new HwModule(root->name);
+
+    node = config_find(root, DEVICES_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading device %s", node->name);
+            status_t tmpStatus = module->loadDevice(node);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    node = config_find(root, OUTPUTS_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading output %s", node->name);
+            status_t tmpStatus = module->loadOutput(node);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    node = config_find(root, INPUTS_TAG);
+    if (node != NULL) {
+        node = node->first_child;
+        while (node) {
+            ALOGV("loadHwModule() loading input %s", node->name);
+            status_t tmpStatus = module->loadInput(node);
+            if (status == NAME_NOT_FOUND || status == NO_ERROR) {
+                status = tmpStatus;
+            }
+            node = node->next;
+        }
+    }
+    loadGlobalConfig(root, module, availableInputDevices, availableOutputDevices,
+                     defaultOutputDevices, isSpeakerDrcEnabled);
+
+    if (status == NO_ERROR) {
+        hwModules.add(module);
+    }
+}
+
+//static
+void ConfigParsingUtils::loadHwModules(cnode *root, HwModuleCollection &hwModules,
+                                       DeviceVector &availableInputDevices,
+                                       DeviceVector &availableOutputDevices,
+                                       sp<DeviceDescriptor> &defaultOutputDevices,
+                                       bool &isSpeakerDrcEnabled)
+{
+    cnode *node = config_find(root, AUDIO_HW_MODULE_TAG);
+    if (node == NULL) {
+        return;
+    }
+
+    node = node->first_child;
+    while (node) {
+        ALOGV("loadHwModules() loading module %s", node->name);
+        loadHwModule(node, hwModules, availableInputDevices, availableOutputDevices,
+                     defaultOutputDevices, isSpeakerDrcEnabled);
+        node = node->next;
+    }
+}
+
+//static
+void ConfigParsingUtils::loadGlobalConfig(cnode *root, const sp<HwModule>& module,
+                                          DeviceVector &availableInputDevices,
+                                          DeviceVector &availableOutputDevices,
+                                          sp<DeviceDescriptor> &defaultOutputDevice,
+                                          bool &speakerDrcEnabled)
+{
+    cnode *node = config_find(root, GLOBAL_CONFIG_TAG);
+
+    if (node == NULL) {
+        return;
+    }
+    DeviceVector declaredDevices;
+    if (module != NULL) {
+        declaredDevices = module->mDeclaredDevices;
+    }
+
+    node = node->first_child;
+    while (node) {
+        if (strcmp(ATTACHED_OUTPUT_DEVICES_TAG, node->name) == 0) {
+            availableOutputDevices.loadDevicesFromTag((char *)node->value,
+                                                        declaredDevices);
+            ALOGV("loadGlobalConfig() Attached Output Devices %08x",
+                  availableOutputDevices.types());
+        } else if (strcmp(DEFAULT_OUTPUT_DEVICE_TAG, node->name) == 0) {
+            audio_devices_t device = (audio_devices_t)stringToEnum(
+                    sDeviceTypeToEnumTable,
+                    ARRAY_SIZE(sDeviceTypeToEnumTable),
+                    (char *)node->value);
+            if (device != AUDIO_DEVICE_NONE) {
+                defaultOutputDevice = new DeviceDescriptor(device);
+            } else {
+                ALOGW("loadGlobalConfig() default device not specified");
+            }
+            ALOGV("loadGlobalConfig() mDefaultOutputDevice %08x", defaultOutputDevice->type());
+        } else if (strcmp(ATTACHED_INPUT_DEVICES_TAG, node->name) == 0) {
+            availableInputDevices.loadDevicesFromTag((char *)node->value,
+                                                       declaredDevices);
+            ALOGV("loadGlobalConfig() Available InputDevices %08x", availableInputDevices.types());
+        } else if (strcmp(SPEAKER_DRC_ENABLED_TAG, node->name) == 0) {
+            speakerDrcEnabled = stringToBool((char *)node->value);
+            ALOGV("loadGlobalConfig() mSpeakerDrcEnabled = %d", speakerDrcEnabled);
+        } else if (strcmp(AUDIO_HAL_VERSION_TAG, node->name) == 0) {
+            uint32_t major, minor;
+            sscanf((char *)node->value, "%u.%u", &major, &minor);
+            module->mHalVersion = HARDWARE_DEVICE_API_VERSION(major, minor);
+            ALOGV("loadGlobalConfig() mHalVersion = %04x major %u minor %u",
+                  module->mHalVersion, major, minor);
+        }
+        node = node->next;
+    }
+}
+
+//static
+status_t ConfigParsingUtils::loadAudioPolicyConfig(const char *path,
+                                                   HwModuleCollection &hwModules,
+                                                   DeviceVector &availableInputDevices,
+                                                   DeviceVector &availableOutputDevices,
+                                                   sp<DeviceDescriptor> &defaultOutputDevices,
+                                                   bool &isSpeakerDrcEnabled)
+{
+    cnode *root;
+    char *data;
+
+    data = (char *)load_file(path, NULL);
+    if (data == NULL) {
+        return -ENODEV;
+    }
+    root = config_node("", "");
+    config_load(root, data);
+
+    loadHwModules(root, hwModules,
+                  availableInputDevices, availableOutputDevices,
+                  defaultOutputDevices, isSpeakerDrcEnabled);
+    // legacy audio_policy.conf files have one global_configuration section
+    loadGlobalConfig(root, hwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY),
+                     availableInputDevices, availableOutputDevices,
+                     defaultOutputDevices, isSpeakerDrcEnabled);
+    config_free(root);
+    free(root);
+    free(data);
+
+    ALOGI("loadAudioPolicyConfig() loaded %s\n", path);
+
+    return NO_ERROR;
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
new file mode 100644
index 0000000..1f1fca3
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -0,0 +1,350 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Devices"
+//#define LOG_NDEBUG 0
+
+#include "DeviceDescriptor.h"
+#include "AudioGain.h"
+#include "HwModule.h"
+#include "ConfigParsingUtils.h"
+
+namespace android {
+
+DeviceDescriptor::DeviceDescriptor(audio_devices_t type) :
+    AudioPort(String8(""), AUDIO_PORT_TYPE_DEVICE,
+              audio_is_output_device(type) ? AUDIO_PORT_ROLE_SINK :
+                                             AUDIO_PORT_ROLE_SOURCE),
+    mTag(""), mAddress(""), mDeviceType(type), mId(0)
+{
+
+}
+
+audio_port_handle_t DeviceDescriptor::getId() const
+{
+    return mId;
+}
+
+void DeviceDescriptor::attach(const sp<HwModule>& module)
+{
+    AudioPort::attach(module);
+    mId = getNextUniqueId();
+}
+
+bool DeviceDescriptor::equals(const sp<DeviceDescriptor>& other) const
+{
+    // Devices are considered equal if they:
+    // - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
+    // - have the same address or one device does not specify the address
+    // - have the same channel mask or one device does not specify the channel mask
+    if (other == 0) {
+        return false;
+    }
+    return (mDeviceType == other->mDeviceType) &&
+           (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
+           (mChannelMask == 0 || other->mChannelMask == 0 ||
+                mChannelMask == other->mChannelMask);
+}
+
+void DeviceDescriptor::loadGains(cnode *root)
+{
+    AudioPort::loadGains(root);
+    if (mGains.size() > 0) {
+        mGains[0]->getDefaultConfig(&mGain);
+    }
+}
+
+void DeviceVector::refreshTypes()
+{
+    mDeviceTypes = AUDIO_DEVICE_NONE;
+    for(size_t i = 0; i < size(); i++) {
+        mDeviceTypes |= itemAt(i)->type();
+    }
+    ALOGV("DeviceVector::refreshTypes() mDeviceTypes %08x", mDeviceTypes);
+}
+
+ssize_t DeviceVector::indexOf(const sp<DeviceDescriptor>& item) const
+{
+    for(size_t i = 0; i < size(); i++) {
+        if (item->equals(itemAt(i))) {
+            return i;
+        }
+    }
+    return -1;
+}
+
+ssize_t DeviceVector::add(const sp<DeviceDescriptor>& item)
+{
+    ssize_t ret = indexOf(item);
+
+    if (ret < 0) {
+        ret = SortedVector::add(item);
+        if (ret >= 0) {
+            refreshTypes();
+        }
+    } else {
+        ALOGW("DeviceVector::add device %08x already in", item->type());
+        ret = -1;
+    }
+    return ret;
+}
+
+ssize_t DeviceVector::remove(const sp<DeviceDescriptor>& item)
+{
+    size_t i;
+    ssize_t ret = indexOf(item);
+
+    if (ret < 0) {
+        ALOGW("DeviceVector::remove device %08x not in", item->type());
+    } else {
+        ret = SortedVector::removeAt(ret);
+        if (ret >= 0) {
+            refreshTypes();
+        }
+    }
+    return ret;
+}
+
+audio_devices_t DeviceVector::getDevicesFromHwModule(audio_module_handle_t moduleHandle) const
+{
+    audio_devices_t devices = AUDIO_DEVICE_NONE;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->getModuleHandle() == moduleHandle) {
+            devices |= itemAt(i)->type();
+        }
+    }
+    return devices;
+}
+
+void DeviceVector::loadDevicesFromType(audio_devices_t types)
+{
+    DeviceVector deviceList;
+
+    uint32_t role_bit = AUDIO_DEVICE_BIT_IN & types;
+    types &= ~role_bit;
+
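+    // Walk the remaining type bits from highest to lowest: __builtin_clz() returns the
+    // number of leading zero bits, so 31 - clz is the index of the highest bit set.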
+    while (types) {
+        uint32_t i = 31 - __builtin_clz(types);
+        uint32_t type = 1 << i;
+        types &= ~type;
+        add(new DeviceDescriptor(type | role_bit));
+    }
+}
+
+void DeviceVector::loadDevicesFromTag(char *tag,
+                                       const DeviceVector& declaredDevices)
+{
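+    // "tag" is a "|"-separated list mixing standard device type names
+    // (e.g. AUDIO_DEVICE_OUT_SPEAKER) and custom tags declared in the module's devices section.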
+    char *devTag = strtok(tag, "|");
+    while (devTag != NULL) {
+        if (strlen(devTag) != 0) {
+            audio_devices_t type = ConfigParsingUtils::stringToEnum(sDeviceTypeToEnumTable,
+                                 ARRAY_SIZE(sDeviceTypeToEnumTable),
+                                 devTag);
+            if (type != AUDIO_DEVICE_NONE) {
+                sp<DeviceDescriptor> dev = new DeviceDescriptor(type);
+                if (type == AUDIO_DEVICE_IN_REMOTE_SUBMIX ||
+                        type == AUDIO_DEVICE_OUT_REMOTE_SUBMIX ) {
+                    dev->mAddress = String8("0");
+                }
+                add(dev);
+            } else {
+                sp<DeviceDescriptor> deviceDesc =
+                        declaredDevices.getDeviceFromTag(String8(devTag));
+                if (deviceDesc != 0) {
+                    add(deviceDesc);
+                }
+            }
+        }
+        devTag = strtok(NULL, "|");
+    }
+}
+
+sp<DeviceDescriptor> DeviceVector::getDevice(audio_devices_t type, String8 address) const
+{
+    sp<DeviceDescriptor> device;
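+    // Keep the first device whose type matches; an exact address match is preferred and
+    // ends the search.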
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->type() == type) {
+            if (address == "" || itemAt(i)->mAddress == address) {
+                device = itemAt(i);
+                if (itemAt(i)->mAddress == address) {
+                    break;
+                }
+            }
+        }
+    }
+    ALOGV("DeviceVector::getDevice() for type %08x address %s found %p",
+          type, address.string(), device.get());
+    return device;
+}
+
+sp<DeviceDescriptor> DeviceVector::getDeviceFromId(audio_port_handle_t id) const
+{
+    sp<DeviceDescriptor> device;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->getId() == id) {
+            device = itemAt(i);
+            break;
+        }
+    }
+    return device;
+}
+
+DeviceVector DeviceVector::getDevicesFromType(audio_devices_t type) const
+{
+    DeviceVector devices;
+    bool isOutput = audio_is_output_devices(type);
+    type &= ~AUDIO_DEVICE_BIT_IN;
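+    // Only devices of the requested direction are matched; the direction bit
+    // (AUDIO_DEVICE_BIT_IN) is stripped so the remaining type bits can be compared directly.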
+    for (size_t i = 0; (i < size()) && (type != AUDIO_DEVICE_NONE); i++) {
+        bool curIsOutput = audio_is_output_devices(itemAt(i)->mDeviceType);
+        audio_devices_t curType = itemAt(i)->mDeviceType & ~AUDIO_DEVICE_BIT_IN;
+        if ((isOutput == curIsOutput) && ((type & curType) != 0)) {
+            devices.add(itemAt(i));
+            type &= ~curType;
+            ALOGV("DeviceVector::getDevicesFromType() for type %x found %p",
+                  itemAt(i)->type(), itemAt(i).get());
+        }
+    }
+    return devices;
+}
+
+DeviceVector DeviceVector::getDevicesFromTypeAddr(
+        audio_devices_t type, String8 address) const
+{
+    DeviceVector devices;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->type() == type) {
+            if (itemAt(i)->mAddress == address) {
+                devices.add(itemAt(i));
+            }
+        }
+    }
+    return devices;
+}
+
+sp<DeviceDescriptor> DeviceVector::getDeviceFromTag(const String8& tag) const
+{
+    sp<DeviceDescriptor> device;
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->mTag == tag) {
+            device = itemAt(i);
+            break;
+        }
+    }
+    return device;
+}
+
+
+status_t DeviceVector::dump(int fd, const String8 &direction) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\n Available %s devices:\n", direction.string());
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        itemAt(i)->dump(fd, 2, i);
+    }
+    return NO_ERROR;
+}
+
+audio_policy_dev_state_t DeviceVector::getDeviceConnectionState(const sp<DeviceDescriptor> &devDesc) const
+{
+    ssize_t index = indexOf(devDesc);
+    return index >= 0 ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE : AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+}
+
+void DeviceDescriptor::toAudioPortConfig(struct audio_port_config *dstConfig,
+                                         const struct audio_port_config *srcConfig) const
+{
+    dstConfig->config_mask = AUDIO_PORT_CONFIG_CHANNEL_MASK|AUDIO_PORT_CONFIG_GAIN;
+    if (srcConfig != NULL) {
+        dstConfig->config_mask |= srcConfig->config_mask;
+    }
+
+    AudioPortConfig::toAudioPortConfig(dstConfig, srcConfig);
+
+    dstConfig->id = mId;
+    dstConfig->role = audio_is_output_device(mDeviceType) ?
+                        AUDIO_PORT_ROLE_SINK : AUDIO_PORT_ROLE_SOURCE;
+    dstConfig->type = AUDIO_PORT_TYPE_DEVICE;
+    dstConfig->ext.device.type = mDeviceType;
+
+    // TODO: understand why this check is necessary, i.e. why removing it causes a crash
+    // at boot time. The assertion below has been shown NOT to hold at start-up:
+    // ALOG_ASSERT(mModule != NULL);
+    dstConfig->ext.device.hw_module = mModule != 0 ? mModule->mHandle : AUDIO_IO_HANDLE_NONE;
+    strncpy(dstConfig->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+void DeviceDescriptor::toAudioPort(struct audio_port *port) const
+{
+    ALOGV("DeviceDescriptor::toAudioPort() handle %d type %x", mId, mDeviceType);
+    AudioPort::toAudioPort(port);
+    port->id = mId;
+    toAudioPortConfig(&port->active_config);
+    port->ext.device.type = mDeviceType;
+    port->ext.device.hw_module = mModule->mHandle;
+    strncpy(port->ext.device.address, mAddress.string(), AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+void DeviceDescriptor::importAudioPort(const sp<AudioPort> port) {
+    AudioPort::importAudioPort(port);
+    mSamplingRate = port->pickSamplingRate();
+    mFormat = port->pickFormat();
+    mChannelMask = port->pickChannelMask();
+}
+
+status_t DeviceDescriptor::dump(int fd, int spaces, int index) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%*sDevice %d:\n", spaces, "", index+1);
+    result.append(buffer);
+    if (mId != 0) {
+        snprintf(buffer, SIZE, "%*s- id: %2d\n", spaces, "", mId);
+        result.append(buffer);
+    }
+    snprintf(buffer, SIZE, "%*s- type: %-48s\n", spaces, "",
+            ConfigParsingUtils::enumToString(sDeviceTypeToEnumTable,
+                    ARRAY_SIZE(sDeviceTypeToEnumTable),
+                    mDeviceType));
+    result.append(buffer);
+    if (mAddress.size() != 0) {
+        snprintf(buffer, SIZE, "%*s- address: %-32s\n", spaces, "", mAddress.string());
+        result.append(buffer);
+    }
+    write(fd, result.string(), result.size());
+    AudioPort::dump(fd, spaces);
+
+    return NO_ERROR;
+}
+
+void DeviceDescriptor::log() const
+{
+    ALOGI("Device id:%d type:0x%X:%s, addr:%s",
+          mId,
+          mDeviceType,
+          ConfigParsingUtils::enumToString(
+             sDeviceNameToEnumTable, ARRAY_SIZE(sDeviceNameToEnumTable), mDeviceType),
+          mAddress.string());
+
+    AudioPort::log("  ");
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
new file mode 100644
index 0000000..33d838d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/EffectDescriptor.cpp
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::EffectDescriptor"
+//#define LOG_NDEBUG 0
+
+#include "EffectDescriptor.h"
+#include <utils/String8.h>
+
+namespace android {
+
+status_t EffectDescriptor::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, " I/O: %d\n", mIo);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Strategy: %d\n", mStrategy);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Session: %d\n", mSession);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Name: %s\n",  mDesc.name);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " %s\n",  mEnabled ? "Enabled" : "Disabled");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+
+    return NO_ERROR;
+}
+
+EffectDescriptorCollection::EffectDescriptorCollection() :
+    mTotalEffectsCpuLoad(0),
+    mTotalEffectsMemory(0)
+{
+
+}
+
+status_t EffectDescriptorCollection::registerEffect(const effect_descriptor_t *desc,
+                                                    audio_io_handle_t io,
+                                                    uint32_t strategy,
+                                                    int session,
+                                                    int id)
+{
+    if (mTotalEffectsMemory + desc->memoryUsage > getMaxEffectsMemory()) {
+        ALOGW("registerEffect() memory limit exceeded for Fx %s, Memory %d KB",
+                desc->name, desc->memoryUsage);
+        return INVALID_OPERATION;
+    }
+    mTotalEffectsMemory += desc->memoryUsage;
+    ALOGV("registerEffect() effect %s, io %d, strategy %d session %d id %d",
+            desc->name, io, strategy, session, id);
+    ALOGV("registerEffect() memory %d, total memory %d", desc->memoryUsage, mTotalEffectsMemory);
+
+    sp<EffectDescriptor> effectDesc = new EffectDescriptor();
+    memcpy (&effectDesc->mDesc, desc, sizeof(effect_descriptor_t));
+    effectDesc->mIo = io;
+    effectDesc->mStrategy = static_cast<routing_strategy>(strategy);
+    effectDesc->mSession = session;
+    effectDesc->mEnabled = false;
+
+    add(id, effectDesc);
+
+    return NO_ERROR;
+}
+
+status_t EffectDescriptorCollection::unregisterEffect(int id)
+{
+    ssize_t index = indexOfKey(id);
+    if (index < 0) {
+        ALOGW("unregisterEffect() unknown effect ID %d", id);
+        return INVALID_OPERATION;
+    }
+
+    sp<EffectDescriptor> effectDesc = valueAt(index);
+
+    setEffectEnabled(effectDesc, false);
+
+    if (mTotalEffectsMemory < effectDesc->mDesc.memoryUsage) {
+        ALOGW("unregisterEffect() memory %d too big for total %d",
+                effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+        effectDesc->mDesc.memoryUsage = mTotalEffectsMemory;
+    }
+    mTotalEffectsMemory -= effectDesc->mDesc.memoryUsage;
+    ALOGV("unregisterEffect() effect %s, ID %d, memory %d total memory %d",
+            effectDesc->mDesc.name, id, effectDesc->mDesc.memoryUsage, mTotalEffectsMemory);
+
+    removeItem(id);
+
+    return NO_ERROR;
+}
+
+status_t EffectDescriptorCollection::setEffectEnabled(int id, bool enabled)
+{
+    ssize_t index = indexOfKey(id);
+    if (index < 0) {
+        ALOGW("unregisterEffect() unknown effect ID %d", id);
+        return INVALID_OPERATION;
+    }
+
+    return setEffectEnabled(valueAt(index), enabled);
+}
+
+
+status_t EffectDescriptorCollection::setEffectEnabled(const sp<EffectDescriptor> &effectDesc,
+                                                      bool enabled)
+{
+    if (enabled == effectDesc->mEnabled) {
+        ALOGV("setEffectEnabled(%s) effect already %s",
+             enabled?"true":"false", enabled?"enabled":"disabled");
+        return INVALID_OPERATION;
+    }
+
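+    // Maintain an aggregate CPU budget across enabled effects: enabling fails when the new
+    // total would exceed getMaxEffectsCpuLoad() (loads are expressed in 0.1 MIPS units).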
+    if (enabled) {
+        if (mTotalEffectsCpuLoad + effectDesc->mDesc.cpuLoad > getMaxEffectsCpuLoad()) {
+            ALOGW("setEffectEnabled(true) CPU Load limit exceeded for Fx %s, CPU %f MIPS",
+                 effectDesc->mDesc.name, (float)effectDesc->mDesc.cpuLoad/10);
+            return INVALID_OPERATION;
+        }
+        mTotalEffectsCpuLoad += effectDesc->mDesc.cpuLoad;
+        ALOGV("setEffectEnabled(true) total CPU %d", mTotalEffectsCpuLoad);
+    } else {
+        if (mTotalEffectsCpuLoad < effectDesc->mDesc.cpuLoad) {
+            ALOGW("setEffectEnabled(false) CPU load %d too high for total %d",
+                    effectDesc->mDesc.cpuLoad, mTotalEffectsCpuLoad);
+            effectDesc->mDesc.cpuLoad = mTotalEffectsCpuLoad;
+        }
+        mTotalEffectsCpuLoad -= effectDesc->mDesc.cpuLoad;
+        ALOGV("setEffectEnabled(false) total CPU %d", mTotalEffectsCpuLoad);
+    }
+    effectDesc->mEnabled = enabled;
+    return NO_ERROR;
+}
+
+bool EffectDescriptorCollection::isNonOffloadableEffectEnabled()
+{
+    for (size_t i = 0; i < size(); i++) {
+        sp<EffectDescriptor> effectDesc = valueAt(i);
+        if (effectDesc->mEnabled && (effectDesc->mStrategy == STRATEGY_MEDIA) &&
+                ((effectDesc->mDesc.flags & EFFECT_FLAG_OFFLOAD_SUPPORTED) == 0)) {
+            ALOGV("isNonOffloadableEffectEnabled() non offloadable effect %s enabled on session %d",
+                  effectDesc->mDesc.name, effectDesc->mSession);
+            return true;
+        }
+    }
+    return false;
+}
+
+uint32_t EffectDescriptorCollection::getMaxEffectsCpuLoad() const
+{
+    return MAX_EFFECTS_CPU_LOAD;
+}
+
+uint32_t EffectDescriptorCollection::getMaxEffectsMemory() const
+{
+    return MAX_EFFECTS_MEMORY;
+}
+
+status_t EffectDescriptorCollection::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nTotal Effects CPU: %f MIPS, Total Effects memory: %d KB\n",
+             (float)mTotalEffectsCpuLoad/10, mTotalEffectsMemory);
+    write(fd, buffer, strlen(buffer));
+
+    snprintf(buffer, SIZE, "Registered effects:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- Effect %d dump:\n", keyAt(i));
+        write(fd, buffer, strlen(buffer));
+        valueAt(i)->dump(fd);
+    }
+    return NO_ERROR;
+}
+
+}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
new file mode 100644
index 0000000..7e2050b
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -0,0 +1,375 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::HwModule"
+//#define LOG_NDEBUG 0
+
+#include "HwModule.h"
+#include "IOProfile.h"
+#include "AudioGain.h"
+#include "ConfigParsingUtils.h"
+#include "audio_policy_conf.h"
+#include <hardware/audio.h>
+#include <policy.h>
+
+namespace android {
+
+HwModule::HwModule(const char *name)
+    : mName(strndup(name, AUDIO_HARDWARE_MODULE_ID_MAX_LEN)),
+      mHalVersion(AUDIO_DEVICE_API_VERSION_MIN), mHandle(0)
+{
+}
+
+HwModule::~HwModule()
+{
+    for (size_t i = 0; i < mOutputProfiles.size(); i++) {
+        mOutputProfiles[i]->mSupportedDevices.clear();
+    }
+    for (size_t i = 0; i < mInputProfiles.size(); i++) {
+        mInputProfiles[i]->mSupportedDevices.clear();
+    }
+    free((void *)mName);
+}
+
+status_t HwModule::loadInput(cnode *root)
+{
+    cnode *node = root->first_child;
+
+    sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SINK);
+
+    while (node) {
+        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
+            profile->loadSamplingRates((char *)node->value);
+        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
+            profile->loadFormats((char *)node->value);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
+            profile->loadInChannels((char *)node->value);
+        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
+            profile->mSupportedDevices.loadDevicesFromTag((char *)node->value,
+                                                           mDeclaredDevices);
+        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
+            profile->mFlags = ConfigParsingUtils::parseInputFlagNames((char *)node->value);
+        } else if (strcmp(node->name, GAINS_TAG) == 0) {
+            profile->loadGains(node);
+        }
+        node = node->next;
+    }
+    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
+            "loadInput() invalid supported devices");
+    ALOGW_IF(profile->mChannelMasks.size() == 0,
+            "loadInput() invalid supported channel masks");
+    ALOGW_IF(profile->mSamplingRates.size() == 0,
+            "loadInput() invalid supported sampling rates");
+    ALOGW_IF(profile->mFormats.size() == 0,
+            "loadInput() invalid supported formats");
+    if (!profile->mSupportedDevices.isEmpty() &&
+            (profile->mChannelMasks.size() != 0) &&
+            (profile->mSamplingRates.size() != 0) &&
+            (profile->mFormats.size() != 0)) {
+
+        ALOGV("loadInput() adding input Supported Devices %04x",
+              profile->mSupportedDevices.types());
+
+        profile->attach(this);
+        mInputProfiles.add(profile);
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+status_t HwModule::loadOutput(cnode *root)
+{
+    cnode *node = root->first_child;
+
+    sp<IOProfile> profile = new IOProfile(String8(root->name), AUDIO_PORT_ROLE_SOURCE);
+
+    while (node) {
+        if (strcmp(node->name, SAMPLING_RATES_TAG) == 0) {
+            profile->loadSamplingRates((char *)node->value);
+        } else if (strcmp(node->name, FORMATS_TAG) == 0) {
+            profile->loadFormats((char *)node->value);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
+            profile->loadOutChannels((char *)node->value);
+        } else if (strcmp(node->name, DEVICES_TAG) == 0) {
+            profile->mSupportedDevices.loadDevicesFromTag((char *)node->value,
+                                                           mDeclaredDevices);
+        } else if (strcmp(node->name, FLAGS_TAG) == 0) {
+            profile->mFlags = ConfigParsingUtils::parseOutputFlagNames((char *)node->value);
+        } else if (strcmp(node->name, GAINS_TAG) == 0) {
+            profile->loadGains(node);
+        }
+        node = node->next;
+    }
+    ALOGW_IF(profile->mSupportedDevices.isEmpty(),
+            "loadOutput() invalid supported devices");
+    ALOGW_IF(profile->mChannelMasks.size() == 0,
+            "loadOutput() invalid supported channel masks");
+    ALOGW_IF(profile->mSamplingRates.size() == 0,
+            "loadOutput() invalid supported sampling rates");
+    ALOGW_IF(profile->mFormats.size() == 0,
+            "loadOutput() invalid supported formats");
+    if (!profile->mSupportedDevices.isEmpty() &&
+            (profile->mChannelMasks.size() != 0) &&
+            (profile->mSamplingRates.size() != 0) &&
+            (profile->mFormats.size() != 0)) {
+
+        ALOGV("loadOutput() adding output Supported Devices %04x, mFlags %04x",
+              profile->mSupportedDevices.types(), profile->mFlags);
+        profile->attach(this);
+        mOutputProfiles.add(profile);
+        return NO_ERROR;
+    } else {
+        return BAD_VALUE;
+    }
+}
+
+status_t HwModule::loadDevice(cnode *root)
+{
+    cnode *node = root->first_child;
+
+    audio_devices_t type = AUDIO_DEVICE_NONE;
+    while (node) {
+        if (strcmp(node->name, APM_DEVICE_TYPE) == 0) {
+            type = ConfigParsingUtils::parseDeviceNames((char *)node->value);
+            break;
+        }
+        node = node->next;
+    }
+    if (type == AUDIO_DEVICE_NONE ||
+            (!audio_is_input_device(type) && !audio_is_output_device(type))) {
+        ALOGW("loadDevice() bad type %08x", type);
+        return BAD_VALUE;
+    }
+    sp<DeviceDescriptor> deviceDesc = new DeviceDescriptor(type);
+    deviceDesc->mTag = String8(root->name);
+
+    node = root->first_child;
+    while (node) {
+        if (strcmp(node->name, APM_DEVICE_ADDRESS) == 0) {
+            deviceDesc->mAddress = String8((char *)node->value);
+        } else if (strcmp(node->name, CHANNELS_TAG) == 0) {
+            if (audio_is_input_device(type)) {
+                deviceDesc->loadInChannels((char *)node->value);
+            } else {
+                deviceDesc->loadOutChannels((char *)node->value);
+            }
+        } else if (strcmp(node->name, GAINS_TAG) == 0) {
+            deviceDesc->loadGains(node);
+        }
+        node = node->next;
+    }
+
+    ALOGV("loadDevice() adding device tag %s type %08x address %s",
+          deviceDesc->mTag.string(), type, deviceDesc->mAddress.string());
+
+    mDeclaredDevices.add(deviceDesc);
+
+    return NO_ERROR;
+}
+
+status_t HwModule::addOutputProfile(String8 name, const audio_config_t *config,
+                                                  audio_devices_t device, String8 address)
+{
+    sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SOURCE);
+
+    profile->mSamplingRates.add(config->sample_rate);
+    profile->mChannelMasks.add(config->channel_mask);
+    profile->mFormats.add(config->format);
+
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
+    devDesc->mAddress = address;
+    profile->mSupportedDevices.add(devDesc);
+
+    profile->attach(this);
+    mOutputProfiles.add(profile);
+
+    return NO_ERROR;
+}
+
+status_t HwModule::removeOutputProfile(String8 name)
+{
+    for (size_t i = 0; i < mOutputProfiles.size(); i++) {
+        if (mOutputProfiles[i]->mName == name) {
+            mOutputProfiles.removeAt(i);
+            break;
+        }
+    }
+
+    return NO_ERROR;
+}
+
+status_t HwModule::addInputProfile(String8 name, const audio_config_t *config,
+                                                  audio_devices_t device, String8 address)
+{
+    sp<IOProfile> profile = new IOProfile(name, AUDIO_PORT_ROLE_SINK);
+
+    profile->mSamplingRates.add(config->sample_rate);
+    profile->mChannelMasks.add(config->channel_mask);
+    profile->mFormats.add(config->format);
+
+    sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
+    devDesc->mAddress = address;
+    profile->mSupportedDevices.add(devDesc);
+
+    ALOGV("addInputProfile() name %s rate %d mask 0x08", name.string(), config->sample_rate, config->channel_mask);
+
+    profile->attach(this);
+    mInputProfiles.add(profile);
+
+    return NO_ERROR;
+}
+
+status_t HwModule::removeInputProfile(String8 name)
+{
+    for (size_t i = 0; i < mInputProfiles.size(); i++) {
+        if (mInputProfiles[i]->mName == name) {
+            mInputProfiles.removeAt(i);
+            break;
+        }
+    }
+
+    return NO_ERROR;
+}
+
+
+void HwModule::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "  - name: %s\n", mName);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "  - handle: %d\n", mHandle);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "  - version: %u.%u\n", mHalVersion >> 8, mHalVersion & 0xFF);
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    if (mOutputProfiles.size()) {
+        write(fd, "  - outputs:\n", strlen("  - outputs:\n"));
+        for (size_t i = 0; i < mOutputProfiles.size(); i++) {
+            snprintf(buffer, SIZE, "    output %zu:\n", i);
+            write(fd, buffer, strlen(buffer));
+            mOutputProfiles[i]->dump(fd);
+        }
+    }
+    if (mInputProfiles.size()) {
+        write(fd, "  - inputs:\n", strlen("  - inputs:\n"));
+        for (size_t i = 0; i < mInputProfiles.size(); i++) {
+            snprintf(buffer, SIZE, "    input %zu:\n", i);
+            write(fd, buffer, strlen(buffer));
+            mInputProfiles[i]->dump(fd);
+        }
+    }
+    if (mDeclaredDevices.size()) {
+        write(fd, "  - devices:\n", strlen("  - devices:\n"));
+        for (size_t i = 0; i < mDeclaredDevices.size(); i++) {
+            mDeclaredDevices[i]->dump(fd, 4, i);
+        }
+    }
+}
+
+sp <HwModule> HwModuleCollection::getModuleFromName(const char *name) const
+{
+    sp <HwModule> module;
+
+    for (size_t i = 0; i < size(); i++)
+    {
+        if (strcmp(itemAt(i)->mName, name) == 0) {
+            return itemAt(i);
+        }
+    }
+    return module;
+}
+
+
+sp <HwModule> HwModuleCollection::getModuleForDevice(audio_devices_t device) const
+{
+    sp <HwModule> module;
+
+    for (size_t i = 0; i < size(); i++) {
+        if (itemAt(i)->mHandle == 0) {
+            continue;
+        }
+        if (audio_is_output_device(device)) {
+            for (size_t j = 0; j < itemAt(i)->mOutputProfiles.size(); j++)
+            {
+                if (itemAt(i)->mOutputProfiles[j]->mSupportedDevices.types() & device) {
+                    return itemAt(i);
+                }
+            }
+        } else {
+            for (size_t j = 0; j < itemAt(i)->mInputProfiles.size(); j++) {
+                if (itemAt(i)->mInputProfiles[j]->mSupportedDevices.types() &
+                        device & ~AUDIO_DEVICE_BIT_IN) {
+                    return itemAt(i);
+                }
+            }
+        }
+    }
+    return module;
+}
+
+sp<DeviceDescriptor>  HwModuleCollection::getDeviceDescriptor(const audio_devices_t device,
+                                                              const char *device_address,
+                                                              const char *device_name) const
+{
+    String8 address = (device_address == NULL) ? String8("") : String8(device_address);
+    // handle legacy remote submix case where the address was not always specified
+    if (device_distinguishes_on_address(device) && (address.length() == 0)) {
+        address = String8("0");
+    }
+
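+    // Search the declared devices of every opened HW module: first by type and address,
+    // then by type only; if nothing matches, synthesize a descriptor for the request.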
+    for (size_t i = 0; i < size(); i++) {
+        const sp<HwModule> hwModule = itemAt(i);
+        if (hwModule->mHandle == 0) {
+            continue;
+        }
+        DeviceVector deviceList =
+                hwModule->mDeclaredDevices.getDevicesFromTypeAddr(device, address);
+        if (!deviceList.isEmpty()) {
+            return deviceList.itemAt(0);
+        }
+        deviceList = hwModule->mDeclaredDevices.getDevicesFromType(device);
+        if (!deviceList.isEmpty()) {
+            return deviceList.itemAt(0);
+        }
+    }
+
+    sp<DeviceDescriptor> devDesc =
+            new DeviceDescriptor(device);
+    devDesc->mName = device_name;
+    devDesc->mAddress = address;
+    return devDesc;
+}
+
+status_t HwModuleCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nHW Modules dump:\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, "- HW Module %zu:\n", i + 1);
+        write(fd, buffer, strlen(buffer));
+        itemAt(i)->dump(fd);
+    }
+    return NO_ERROR;
+}
+
+} //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
new file mode 100644
index 0000000..7b6d51d
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::IOProfile"
+//#define LOG_NDEBUG 0
+
+#include "IOProfile.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+
+namespace android {
+
+IOProfile::IOProfile(const String8& name, audio_port_role_t role)
+    : AudioPort(name, AUDIO_PORT_TYPE_MIX, role)
+{
+}
+
+IOProfile::~IOProfile()
+{
+}
+
+// checks if the IO profile is compatible with specified parameters.
+// Sampling rate, format and channel mask must be specified in order to
+// get a valid match.
+bool IOProfile::isCompatibleProfile(audio_devices_t device,
+                                    String8 address,
+                                    uint32_t samplingRate,
+                                    uint32_t *updatedSamplingRate,
+                                    audio_format_t format,
+                                    audio_format_t *updatedFormat,
+                                    audio_channel_mask_t channelMask,
+                                    audio_channel_mask_t *updatedChannelMask,
+                                    uint32_t flags) const
+{
+    const bool isPlaybackThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SOURCE;
+    const bool isRecordThread = mType == AUDIO_PORT_TYPE_MIX && mRole == AUDIO_PORT_ROLE_SINK;
+    ALOG_ASSERT(isPlaybackThread != isRecordThread);
+
+
+    if (device != AUDIO_DEVICE_NONE) {
+        // just check types if multiple devices are selected
+        if (popcount(device & ~AUDIO_DEVICE_BIT_IN) > 1) {
+            if ((mSupportedDevices.types() & device) != device) {
+                return false;
+            }
+        } else if (mSupportedDevices.getDevice(device, address) == 0) {
+            return false;
+        }
+    }
+
+    if (samplingRate == 0) {
+         return false;
+    }
+    uint32_t myUpdatedSamplingRate = samplingRate;
+    if (isPlaybackThread && checkExactSamplingRate(samplingRate) != NO_ERROR) {
+         return false;
+    }
+    if (isRecordThread && checkCompatibleSamplingRate(samplingRate, &myUpdatedSamplingRate) !=
+            NO_ERROR) {
+         return false;
+    }
+
+    if (!audio_is_valid_format(format)) {
+        return false;
+    }
+    if (isPlaybackThread && checkExactFormat(format) != NO_ERROR) {
+        return false;
+    }
+    audio_format_t myUpdatedFormat = format;
+    if (isRecordThread && checkCompatibleFormat(format, &myUpdatedFormat) != NO_ERROR) {
+        return false;
+    }
+
+    if (isPlaybackThread && (!audio_is_output_channel(channelMask) ||
+            checkExactChannelMask(channelMask) != NO_ERROR)) {
+        return false;
+    }
+    audio_channel_mask_t myUpdatedChannelMask = channelMask;
+    if (isRecordThread && (!audio_is_input_channel(channelMask) ||
+            checkCompatibleChannelMask(channelMask, &myUpdatedChannelMask) != NO_ERROR)) {
+        return false;
+    }
+
+    if (isPlaybackThread && (mFlags & flags) != flags) {
+        return false;
+    }
+    // The only input flag that is allowed to be different is the fast flag.
+    // An existing fast stream is compatible with a normal track request.
+    // An existing normal stream is compatible with a fast track request,
+    // but the fast request will be denied by AudioFlinger and converted to normal track.
+    if (isRecordThread && ((mFlags ^ flags) &
+            ~AUDIO_INPUT_FLAG_FAST)) {
+        return false;
+    }
+
+    if (updatedSamplingRate != NULL) {
+        *updatedSamplingRate = myUpdatedSamplingRate;
+    }
+    if (updatedFormat != NULL) {
+        *updatedFormat = myUpdatedFormat;
+    }
+    if (updatedChannelMask != NULL) {
+        *updatedChannelMask = myUpdatedChannelMask;
+    }
+    return true;
+}
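+
+// Illustrative sketch with hypothetical values: a caller such as the policy manager could
+// probe an output profile along these lines:
+//   audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
+//   if (profile->isCompatibleProfile(AUDIO_DEVICE_OUT_SPEAKER, String8(""),
+//           48000 /*Hz*/, NULL, AUDIO_FORMAT_PCM_16_BIT, &updatedFormat,
+//           AUDIO_CHANNEL_OUT_STEREO, NULL, AUDIO_OUTPUT_FLAG_NONE)) {
+//       // the profile can host the requested stream
+//   }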
+
+void IOProfile::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    AudioPort::dump(fd, 4);
+
+    snprintf(buffer, SIZE, "    - flags: 0x%04x\n", mFlags);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "    - devices:\n");
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+    for (size_t i = 0; i < mSupportedDevices.size(); i++) {
+        mSupportedDevices[i]->dump(fd, 6, i);
+    }
+}
+
+void IOProfile::log()
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    ALOGV("    - sampling rates: ");
+    for (size_t i = 0; i < mSamplingRates.size(); i++) {
+        ALOGV("  %d", mSamplingRates[i]);
+    }
+
+    ALOGV("    - channel masks: ");
+    for (size_t i = 0; i < mChannelMasks.size(); i++) {
+        ALOGV("  0x%04x", mChannelMasks[i]);
+    }
+
+    ALOGV("    - formats: ");
+    for (size_t i = 0; i < mFormats.size(); i++) {
+        ALOGV("  0x%08x", mFormats[i]);
+    }
+
+    ALOGV("    - devices: 0x%04x\n", mSupportedDevices.types());
+    ALOGV("    - flags: 0x%04x\n", mFlags);
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
new file mode 100644
index 0000000..7ecfa44
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/SessionRoute.cpp
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::SessionRoute"
+//#define LOG_NDEBUG 0
+
+#include "SessionRoute.h"
+#include "HwModule.h"
+#include "AudioGain.h"
+#include "DeviceDescriptor.h"
+#include <utils/Log.h>
+
+namespace android {
+
+// --- SessionRoute class implementation
+void SessionRoute::log(const char* prefix)
+{
+    ALOGI("%s[SessionRoute strm:0x%X, src:%d, sess:0x%X, dev:0x%X refs:%d act:%d",
+          prefix, mStreamType, mSource, mSession,
+          mDeviceDescriptor != 0 ? mDeviceDescriptor->type() : AUDIO_DEVICE_NONE,
+          mRefCount, mActivityCount);
+}
+
+// --- SessionRouteMap class implementation
+bool SessionRouteMap::hasRoute(audio_session_t session)
+{
+    return indexOfKey(session) >= 0 && valueFor(session)->mDeviceDescriptor != 0;
+}
+
+bool SessionRouteMap::hasRouteChanged(audio_session_t session)
+{
+    if (indexOfKey(session) >= 0) {
+        if (valueFor(session)->mChanged) {
+            valueFor(session)->mChanged = false;
+            return true;
+        }
+    }
+    return false;
+}
+
+void SessionRouteMap::removeRoute(audio_session_t session)
+{
+    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
+    if (route != 0) {
+        ALOG_ASSERT(route->mRefCount > 0);
+        --route->mRefCount;
+        if (route->mRefCount <= 0) {
+            removeItem(session);
+        }
+    }
+}
+
+int SessionRouteMap::incRouteActivity(audio_session_t session)
+{
+    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
+    return route != 0 ? ++(route->mActivityCount) : -1;
+}
+
+int SessionRouteMap::decRouteActivity(audio_session_t session)
+{
+    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
+    if (route != 0 && route->mActivityCount > 0) {
+        return --(route->mActivityCount);
+    } else {
+        return -1;
+    }
+}
+
+void SessionRouteMap::log(const char* caption)
+{
+    ALOGI("%s ----", caption);
+    for(size_t index = 0; index < size(); index++) {
+        valueAt(index)->log("  ");
+    }
+}
+
+void SessionRouteMap::addRoute(audio_session_t session,
+                               audio_stream_type_t streamType,
+                               audio_source_t source,
+                               sp<DeviceDescriptor> descriptor,
+                               uid_t uid)
+{
+    if (mMapType == MAPTYPE_INPUT && streamType != SessionRoute::STREAM_TYPE_NA) {
+        ALOGE("Adding Output Route to InputRouteMap");
+        return;
+    } else if (mMapType == MAPTYPE_OUTPUT && source != SessionRoute::SOURCE_TYPE_NA) {
+        ALOGE("Adding Input Route to OutputRouteMap");
+        return;
+    }
+
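+    // If a route already exists for this session, take another reference and flag a change
+    // when the selected device differs; otherwise create a new route entry.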
+    sp<SessionRoute> route = indexOfKey(session) >= 0 ? valueFor(session) : 0;
+
+    if (route != 0) {
+        if (((route->mDeviceDescriptor == 0) && (descriptor != 0)) ||
+                ((route->mDeviceDescriptor != 0) &&
+                 ((descriptor == 0) || (!route->mDeviceDescriptor->equals(descriptor))))) {
+            route->mChanged = true;
+        }
+        route->mRefCount++;
+        route->mDeviceDescriptor = descriptor;
+    } else {
+        route = new SessionRoute(session, streamType, source, descriptor, uid);
+        route->mRefCount++;
+        add(session, route);
+        if (descriptor != 0) {
+            route->mChanged = true;
+        }
+    }
+}
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/SoundTriggerSession.cpp b/services/audiopolicy/common/managerdefinitions/src/SoundTriggerSession.cpp
new file mode 100644
index 0000000..8ca3ae0
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/SoundTriggerSession.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::SoundTriggerSession"
+//#define LOG_NDEBUG 0
+
+#include "SoundTriggerSession.h"
+
+
+namespace android {
+
+status_t SoundTriggerSessionCollection::acquireSession(audio_session_t session,
+                                                                   audio_io_handle_t ioHandle)
+{
+    add(session, ioHandle);
+
+    return NO_ERROR;
+}
+
+status_t SoundTriggerSessionCollection::releaseSession(audio_session_t session)
+{
+    ssize_t index = indexOfKey(session);
+    if (index < 0) {
+        ALOGW("acquireSoundTriggerSession() session %d not registered", session);
+        return BAD_VALUE;
+    }
+
+    removeItem(session);
+    return NO_ERROR;
+}
+
+}; //namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
new file mode 100644
index 0000000..b682e2c
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/src/StreamDescriptor.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Volumes"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "StreamDescriptor.h"
+#include <utils/Log.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// --- StreamDescriptor class implementation
+
+StreamDescriptor::StreamDescriptor()
+    :   mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
+{
+    mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT, 0);
+}
+
+int StreamDescriptor::getVolumeIndex(audio_devices_t device) const
+{
+    device = Volume::getDeviceForVolume(device);
+    // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT
+    if (mIndexCur.indexOfKey(device) < 0) {
+        device = AUDIO_DEVICE_OUT_DEFAULT;
+    }
+    return mIndexCur.valueFor(device);
+}
+
+void StreamDescriptor::clearCurrentVolumeIndex()
+{
+    mIndexCur.clear();
+}
+
+void StreamDescriptor::addCurrentVolumeIndex(audio_devices_t device, int index)
+{
+    mIndexCur.add(device, index);
+}
+
+void StreamDescriptor::setVolumeIndexMin(int volIndexMin)
+{
+    mIndexMin = volIndexMin;
+}
+
+void StreamDescriptor::setVolumeIndexMax(int volIndexMax)
+{
+    mIndexMax = volIndexMax;
+}
+
+void StreamDescriptor::setVolumeCurvePoint(Volume::device_category deviceCategory,
+                                           const VolumeCurvePoint *point)
+{
+    mVolumeCurve[deviceCategory] = point;
+}
+
+void StreamDescriptor::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "%s         %02d         %02d         ",
+             mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+    result.append(buffer);
+    for (size_t i = 0; i < mIndexCur.size(); i++) {
+        snprintf(buffer, SIZE, "%04x : %02d, ",
+                 mIndexCur.keyAt(i),
+                 mIndexCur.valueAt(i));
+        result.append(buffer);
+    }
+    result.append("\n");
+
+    write(fd, result.string(), result.size());
+}
+
+StreamDescriptorCollection::StreamDescriptorCollection()
+{
+    for (size_t stream = 0 ; stream < AUDIO_STREAM_CNT; stream++) {
+        add(static_cast<audio_stream_type_t>(stream), StreamDescriptor());
+    }
+}
+
+bool StreamDescriptorCollection::canBeMuted(audio_stream_type_t stream)
+{
+    return valueAt(stream).canBeMuted();
+}
+
+void StreamDescriptorCollection::clearCurrentVolumeIndex(audio_stream_type_t stream)
+{
+    editValueAt(stream).clearCurrentVolumeIndex();
+}
+
+void StreamDescriptorCollection::addCurrentVolumeIndex(audio_stream_type_t stream,
+                                                       audio_devices_t device, int index)
+{
+    editValueAt(stream).addCurrentVolumeIndex(device, index);
+}
+
+void StreamDescriptorCollection::setVolumeCurvePoint(audio_stream_type_t stream,
+                                                     Volume::device_category deviceCategory,
+                                                     const VolumeCurvePoint *point)
+{
+    editValueAt(stream).setVolumeCurvePoint(deviceCategory, point);
+}
+
+const VolumeCurvePoint *StreamDescriptorCollection::getVolumeCurvePoint(audio_stream_type_t stream,
+                                                                        Volume::device_category deviceCategory) const
+{
+    return valueAt(stream).getVolumeCurvePoint(deviceCategory);
+}
+
+void StreamDescriptorCollection::setVolumeIndexMin(audio_stream_type_t stream,int volIndexMin)
+{
+    return editValueAt(stream).setVolumeIndexMin(volIndexMin);
+}
+
+void StreamDescriptorCollection::setVolumeIndexMax(audio_stream_type_t stream,int volIndexMax)
+{
+    return editValueAt(stream).setVolumeIndexMax(volIndexMax);
+}
+
+status_t StreamDescriptorCollection::dump(int fd) const
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "\nStreams dump:\n");
+    write(fd, buffer, strlen(buffer));
+    snprintf(buffer, SIZE,
+             " Stream  Can be muted  Index Min  Index Max  Index Cur [device : index]...\n");
+    write(fd, buffer, strlen(buffer));
+    for (size_t i = 0; i < size(); i++) {
+        snprintf(buffer, SIZE, " %02zu      ", i);
+        write(fd, buffer, strlen(buffer));
+        valueAt(i).dump(fd);
+    }
+
+    return NO_ERROR;
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
new file mode 100755
index 0000000..e73e543
--- /dev/null
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioPolicyManagerObserver.h>
+#include <RoutingStrategy.h>
+#include <Volume.h>
+#include <HwModule.h>
+#include <DeviceDescriptor.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <utils/Errors.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+/**
+ * This interface is dedicated to the policy manager that a Policy Engine shall implement.
+ */
+class AudioPolicyManagerInterface
+{
+public:
+    /**
+     * Checks if the engine was correctly initialized.
+     *
+     * @return NO_ERROR if initialization has been done correctly, error code otherwise.
+     */
+    virtual status_t initCheck() = 0;
+
+    /**
+     * Sets the Manager observer that allows the engine to retrieve information on collection
+     * of devices, streams, HwModules, ...
+     *
+     * @param[in] observer handle on the manager.
+     */
+    virtual void setObserver(AudioPolicyManagerObserver *observer) = 0;
+
+    /**
+     * Get the input device selected for a given input source.
+     *
+     * @param[in] inputSource for which the selected input device is requested.
+     *
+     * @return selected input device for the given input source, may be none if error.
+     */
+    virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const = 0;
+
+    /**
+     * Get the output device associated to a given strategy.
+     *
+     * @param[in] strategy for which the selected output device is requested.
+     *
+     * @return selected output device for the given strategy, may be none if error.
+     */
+    virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const = 0;
+
+    /**
+     * Get the strategy selected for a given stream type.
+     *
+     * @param[in] stream for which the followed strategy is requested.
+     *
+     * @return strategy to be followed.
+     */
+    virtual routing_strategy getStrategyForStream(audio_stream_type_t stream) = 0;
+
+    /**
+     * Get the strategy selected for a given usage.
+     *
+     * @param[in] usage for which the followed strategy is requested.
+     *
+     * @return strategy to be followed.
+     */
+    virtual routing_strategy getStrategyForUsage(audio_usage_t usage) = 0;
+
+    /**
+     * Set the Telephony Mode.
+     *
+     * @param[in] mode: Android Phone state (normal, ringtone, csv, in communication)
+     *
+     * @return NO_ERROR if Telephony Mode set correctly, error code otherwise.
+     */
+    virtual status_t setPhoneState(audio_mode_t mode) = 0;
+
+    /**
+     * Get the telephony Mode
+     *
+     * @return the current telephony mode
+     */
+    virtual audio_mode_t getPhoneState() const = 0;
+
+    /**
+     * Set Force Use config for a given usage.
+     *
+     * @param[in] usage for which a configuration shall be forced.
+     * @param[in] config wished to be forced for the given usage.
+     *
+     * @return NO_ERROR if the Force Use config was set correctly, error code otherwise
+     * (e.g. config not allowed for the given usage).
+     */
+    virtual status_t setForceUse(audio_policy_force_use_t usage,
+                                 audio_policy_forced_cfg_t config) = 0;
+
+    /**
+     * Get Force Use config for a given usage.
+     *
+     * @param[in] usage for which a configuration shall be forced.
+     *
+     * @return the configuration currently forced for the given usage.
+     */
+    virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const = 0;
+
+    /**
+     * Set the connection state of device(s).
+     *
+     * @param[in] devDesc for which the state has changed.
+     * @param[in] state the new availability state of the device(s).
+     *
+     * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
+     */
+    virtual status_t setDeviceConnectionState(const android::sp<android::DeviceDescriptor> devDesc,
+                                              audio_policy_dev_state_t state) = 0;
+
+    /**
+     * Translate a volume index given by the UI to an amplification value in dB for a stream type
+     * and a device category.
+     *
+     * @param[in] deviceCategory for which the conversion is requested.
+     * @param[in] stream type for which the conversion is requested.
+     * @param[in] indexInUi index received from the UI to be translated.
+     *
+     * @return amplification value in dB matching the UI index for this given device and stream.
+     */
+    virtual float volIndexToDb(Volume::device_category deviceCategory, audio_stream_type_t stream,
+                                 int indexInUi) = 0;
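+    // Illustrative only (hypothetical values): converting UI index 10 for music on a
+    // speaker-category device could look like
+    //   float volDb = volIndexToDb(Volume::DEVICE_CATEGORY_SPEAKER, AUDIO_STREAM_MUSIC, 10);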
+
+    /**
+     * Initialize the min / max index of volume applicable for a given stream type. These indexes
+     * will be used upon conversion of UI index to volume amplification.
+     *
+     * @param[in] stream type for which the indexes need to be set
+     * @param[in] indexMin Minimum index allowed for this stream.
+     * @param[in] indexMax Maximum index allowed for this stream.
+     */
+    virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
+
+    /**
+     * Initialize volume curves for each strategy and device category
+     *
+     * @param[in] isSpeakerDrcEnabled true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER
+     *            path to boost soft sounds, used to adjust volume curves accordingly.
+     */
+    virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled) = 0;
+
+protected:
+    virtual ~AudioPolicyManagerInterface() {}
+};
+
+}; // namespace android
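For readers unfamiliar with the interface above, here is a minimal, hypothetical C++ sketch of an engine stub overriding a few of its pure virtual methods. The class name TrivialEngine, its fixed routing policy, and the assumption that AudioPolicyManagerInterface.h pulls in RoutingStrategy.h and utils/Errors.h are illustrative only; only a subset of the interface is shown, so the class remains abstract.

    // Hypothetical, partial engine stub (illustrative only).
    #include <AudioPolicyManagerInterface.h>   // the interface added above

    namespace android {

    class TrivialEngine : public AudioPolicyManagerInterface {
    public:
        TrivialEngine() : mPhoneState(AUDIO_MODE_NORMAL) {}

        // Route every usage with the media strategy in this sketch
        // (STRATEGY_MEDIA is assumed to come from RoutingStrategy.h).
        virtual routing_strategy getStrategyForUsage(audio_usage_t /*usage*/)
        {
            return STRATEGY_MEDIA;
        }

        // Remember the telephony mode so getPhoneState() can report it back.
        virtual status_t setPhoneState(audio_mode_t mode)
        {
            mPhoneState = mode;
            return NO_ERROR;
        }

        virtual audio_mode_t getPhoneState() const { return mPhoneState; }

        // The remaining pure virtual methods are intentionally left
        // unimplemented here, so this class stays abstract.

    private:
        audio_mode_t mPhoneState;
    };

    } // namespace android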
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
new file mode 100755
index 0000000..6d43df2
--- /dev/null
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioGain.h>
+#include <AudioPort.h>
+#include <AudioPatch.h>
+#include <IOProfile.h>
+#include <DeviceDescriptor.h>
+#include <AudioInputDescriptor.h>
+#include <AudioOutputDescriptor.h>
+#include <AudioPolicyMix.h>
+#include <SoundTriggerSession.h>
+#include <StreamDescriptor.h>
+
+namespace android {
+
+/**
+ * This interface is an observer that the manager shall implement to allow e.g. the engine
+ * to access the policy pillar elements (like output / input descriptor collections,
+ * HwModule collections, AudioMix, ...).
+ */
+class AudioPolicyManagerObserver
+{
+public:
+    virtual const AudioPatchCollection &getAudioPatches() const = 0;
+
+    virtual const SoundTriggerSessionCollection &getSoundTriggerSessionCollection() const = 0;
+
+    virtual const AudioPolicyMixCollection &getAudioPolicyMixCollection() const = 0;
+
+    virtual const SwAudioOutputCollection &getOutputs() const = 0;
+
+    virtual const AudioInputCollection &getInputs() const = 0;
+
+    virtual const DeviceVector &getAvailableOutputDevices() const = 0;
+
+    virtual const DeviceVector &getAvailableInputDevices() const = 0;
+
+    virtual StreamDescriptorCollection &getStreamDescriptors() = 0;
+
+    virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
+
+protected:
+    virtual ~AudioPolicyManagerObserver() {}
+};
+
+}; // namespace android
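A short, hedged illustration of how an engine implementation might consult this observer: the helper name isWiredHeadsetAvailable is an assumption, as is the DeviceVector::types() accessor used to aggregate the available devices into an audio_devices_t mask.

    // Hypothetical helper built on the observer interface above.
    #include <AudioPolicyManagerObserver.h>
    #include <system/audio.h>

    namespace android {

    static bool isWiredHeadsetAvailable(const AudioPolicyManagerObserver &observer)
    {
        // Collapse the available output devices into a single device mask
        // (assumes DeviceVector exposes a types() accessor).
        audio_devices_t devices = observer.getAvailableOutputDevices().types();
        return (devices & AUDIO_DEVICE_OUT_WIRED_HEADSET) != 0;
    }

    } // namespace android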
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
new file mode 100755
index 0000000..b18c520
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -0,0 +1,59 @@
+ifeq ($(USE_CONFIGURABLE_AUDIO_POLICY), 1)
+
+LOCAL_PATH := $(call my-dir)
+
+# Component build
+#######################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    src/Engine.cpp \
+    src/EngineInstance.cpp \
+    src/Stream.cpp \
+    src/Strategy.cpp \
+    src/Usage.cpp \
+    src/InputSource.cpp \
+
+audio_policy_engine_includes_common := \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/interface \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+
+LOCAL_CFLAGS += \
+    -Wall \
+    -Werror \
+    -Wextra \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    $(audio_policy_engine_includes_common)
+
+LOCAL_C_INCLUDES := \
+    $(audio_policy_engine_includes_common) \
+    $(TARGET_OUT_HEADERS)/hw \
+    $(call include-path-for, frameworks-av) \
+    $(call include-path-for, audio-utils) \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include
+
+
+LOCAL_MODULE := libaudiopolicyengineconfigurable
+LOCAL_MODULE_TAGS := optional
+LOCAL_STATIC_LIBRARIES := \
+    libmedia_helper \
+    libaudiopolicypfwwrapper \
+    libaudiopolicycomponents
+
+LOCAL_SHARED_LIBRARIES := \
+    libcutils \
+    libutils \
+    libaudioutils \
+    libparameter
+
+include $(BUILD_SHARED_LIBRARY)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#
+include $(call all-makefiles-under,$(LOCAL_PATH))
+
+endif
diff --git a/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
new file mode 100755
index 0000000..6c4be2c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/include/AudioPolicyEngineInstance.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+class AudioPolicyManagerInterface;
+class AudioPolicyPluginInterface;
+
+namespace android
+{
+namespace audio_policy
+{
+
+class Engine;
+
+class EngineInstance
+{
+protected:
+    EngineInstance();
+
+public:
+    virtual ~EngineInstance();
+
+    /**
+     * Get Audio Policy Engine instance.
+     *
+     * @return pointer to the Audio Policy Engine instance object.
+     */
+    static EngineInstance *getInstance();
+
+    /**
+     * Interface query.
+     * The first client of an interface of the policy engine will start the singleton.
+     *
+     * @tparam RequestedInterface: interface that the client wishes to retrieve.
+     *
+     * @return interface handle.
+     */
+    template <class RequestedInterface>
+    RequestedInterface *queryInterface() const;
+
+protected:
+    /**
+     * Get Audio Policy Engine instance.
+     *
+     * @return Audio Policy Engine singleton.
+     */
+    Engine *getEngine() const;
+
+private:
+    /* Copy facilities are made private to disable copying. */
+    EngineInstance(const EngineInstance &object);
+    EngineInstance &operator=(const EngineInstance &object);
+};
+
+/**
+ * Limit template instantiation to the supported interface types.
+ * A compile-time error will be raised if an invalid interface is requested.
+ */
+template <>
+AudioPolicyManagerInterface *EngineInstance::queryInterface() const;
+
+template <>
+AudioPolicyPluginInterface *EngineInstance::queryInterface() const;
+
+}; // namespace audio_policy
+
+}; // namespace android
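A hedged usage sketch of the singleton and its interface query: the free function attachEngine() and its body are assumptions; the queryInterface<>() calls rely only on the two specializations declared above.

    // Hypothetical client-side sketch (illustrative only).
    #include <AudioPolicyEngineInstance.h>

    using android::audio_policy::EngineInstance;

    void attachEngine()
    {
        // The first interface query starts the engine singleton.
        EngineInstance *instance = EngineInstance::getInstance();

        AudioPolicyManagerInterface *manager =
                instance->queryInterface<AudioPolicyManagerInterface>();
        AudioPolicyPluginInterface *plugin =
                instance->queryInterface<AudioPolicyPluginInterface>();

        // Hand the pointers over to the policy manager / parameter-framework
        // plugin wrapper (omitted here).
        (void) manager;
        (void) plugin;
    }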
diff --git a/include/media/nbaio/roundup.h b/services/audiopolicy/engineconfigurable/include/EngineDefinition.h
old mode 100644
new mode 100755
similarity index 68%
rename from include/media/nbaio/roundup.h
rename to services/audiopolicy/engineconfigurable/include/EngineDefinition.h
index 4c3cc25..54d8db5
--- a/include/media/nbaio/roundup.h
+++ b/services/audiopolicy/engineconfigurable/include/EngineDefinition.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,18 +14,9 @@
  * limitations under the License.
  */
 
-#ifndef ROUNDUP_H
-#define ROUNDUP_H
+#pragma once
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+#include <Volume.h>
+#include <vector>
 
-// Round up to the next highest power of 2
-unsigned roundup(unsigned v);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // ROUNDUP_H
+typedef std::vector<VolumeCurvePoint> VolumeCurvePoints;
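A small, hedged example of the typedef above: it assumes VolumeCurvePoint (from Volume.h) is an aggregate holding a UI index and a dB attenuation; the helper name and the curve values are illustrative only.

    // Hypothetical helper returning a simple four-point volume curve.
    #include <EngineDefinition.h>

    static VolumeCurvePoints makeExampleCurve()
    {
        // Assumes VolumeCurvePoint is {index, dB attenuation}.
        static const VolumeCurvePoint kPoints[] = {
            {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
        };
        return VolumeCurvePoints(kPoints, kPoints + 4);
    }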
diff --git a/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
new file mode 100755
index 0000000..74daba5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/interface/AudioPolicyPluginInterface.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <RoutingStrategy.h>
+#include <EngineDefinition.h>
+#include <Volume.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <string>
+#include <vector>
+
+namespace android {
+
+/**
+ * This interface allows the parameter plugin to:
+ *  - instantiate all the members of the policy engine (strategies, input sources, usages, profiles)
+ *  - keep the attributes of these policy members up to date (i.e. devices to be used for a
+ *    strategy, strategy to be followed by a usage or a stream, ...)
+ */
+class AudioPolicyPluginInterface
+{
+public:
+    /**
+     * Add a strategy to the engine
+     *
+     * @param[in] name of the strategy to add
+     * @param[in] identifier: the numerical value associated with this member. It MUST match the
+     *            corresponding system/audio.h or system/audio_policy.h enumeration value in order
+     *            to link the parameter controlled by the PFW and the policy manager component.
+     *
+     * @return NO_ERROR if the strategy has been added successfully, error code otherwise.
+     *
+     */
+    virtual android::status_t addStrategy(const std::string &name, routing_strategy id) = 0;
+
+    /**
+     * Add a stream to the engine.
+     *
+     * @param[in] name of the stream to add
+     * @param[in] identifier: the numerical value associated with this member. It MUST match the
+     *            corresponding system/audio.h or system/audio_policy.h enumeration value in order
+     *            to link the parameter controlled by the PFW and the policy manager component.
+     *
+     * @return NO_ERROR if the stream has been added successfully, error code otherwise.
+     *
+     */
+    virtual android::status_t addStream(const std::string &name, audio_stream_type_t id) = 0;
+
+    /**
+     * Add a usage to the engine
+     *
+     * @param[in] name of the usage to add
+     * @param[in] identifier: the numerical value associated with this member. It MUST match the
+     *            corresponding system/audio.h or system/audio_policy.h enumeration value in order
+     *            to link the parameter controlled by the PFW and the policy manager component.
+     *
+     * @return NO_ERROR if the usage has been added successfully, error code otherwise.
+     *
+     */
+    virtual android::status_t addUsage(const std::string &name, audio_usage_t id) = 0;
+
+    /**
+     * Add an input source to the engine
+     *
+     * @param[in] name of the input source to add
+     * @param[in] identifier: the numerical value associated with this member. It MUST match the
+     *            corresponding system/audio.h or system/audio_policy.h enumeration value in order
+     *            to link the parameter controlled by the PFW and the policy manager component.
+     *
+     * @return NO_ERROR if the input source has been added successfully, error code otherwise.
+     *
+     */
+    virtual android::status_t addInputSource(const std::string &name, audio_source_t id) = 0;
+
+    /**
+     * Set the device to be used by a strategy.
+     *
+     * @param[in] strategy: the strategy for which the devices to use have to be set.
+     * @param[in] devices: mask of devices to be used for the given strategy.
+     *
+     * @return true if the devices were set correctly for this strategy, false otherwise.
+     */
+    virtual bool setDeviceForStrategy(const routing_strategy &strategy, audio_devices_t devices) = 0;
+
+    /**
+     * Set the strategy to be followed by a stream.
+     *
+     * @param[in] stream: the stream for which the strategy to follow has to be set.
+     * @param[in] strategy to follow for the given stream.
+     *
+     * @return true if the strategy was set correctly for this stream, false otherwise.
+     */
+    virtual bool setStrategyForStream(const audio_stream_type_t &stream, routing_strategy strategy) = 0;
+
+    /**
+     * Set the volume profile of a stream for a given device category.
+     *
+     * @param[in] stream for which the volume profile has to be set.
+     * @param[in] category of device the volume profile applies to.
+     * @param[in] points volume curve points defining the profile.
+     *
+     * @return true if the volume profile was set correctly for this stream, false otherwise.
+     */
+    virtual bool setVolumeProfileForStream(const audio_stream_type_t &stream,
+                                           Volume::device_category category,
+                                           const VolumeCurvePoints &points) = 0;
+
+    /**
+     * Set the strategy to be followed by a usage.
+     *
+     * @param[in] usage: the usage for which the strategy to follow has to be set.
+     * @param[in] strategy to follow for the given usage.
+     *
+     * @return true if the strategy was set correctly for this usage, false otherwise.
+     */
+    virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy) = 0;
+
+    /**
+     * Set the input device to be used by an input source.
+     *
+     * @param[in] inputSource: the input source for which the device to use has to be set.
+     * @param[in] device: mask of devices to be used for the given input source.
+     *
+     * @return true if the device was set correctly for this input source, false otherwise.
+     */
+    virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
+                                         audio_devices_t device) = 0;
+
+protected:
+    virtual ~AudioPolicyPluginInterface() {}
+};
+
+}; // namespace android
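To make the plugin contract above concrete, here is a hedged sketch of how a parameter-framework plugin might declare and update policy members; the function populateEngine() and the particular identifiers are assumptions, chosen from the standard system/audio.h enumerations.

    // Hypothetical plugin-side sketch (illustrative only).
    #include <AudioPolicyPluginInterface.h>
    #include <RoutingStrategy.h>
    #include <system/audio.h>

    namespace android {

    static status_t populateEngine(AudioPolicyPluginInterface &plugin)
    {
        // Declare one policy member of each kind; identifiers must match the
        // system/audio.h / system/audio_policy.h enumeration values.
        status_t status = plugin.addStrategy("media", STRATEGY_MEDIA);
        if (status != NO_ERROR) return status;
        status = plugin.addStream("music", AUDIO_STREAM_MUSIC);
        if (status != NO_ERROR) return status;
        status = plugin.addUsage("media", AUDIO_USAGE_MEDIA);
        if (status != NO_ERROR) return status;
        status = plugin.addInputSource("mic", AUDIO_SOURCE_MIC);
        if (status != NO_ERROR) return status;

        // Keep the routing attributes of those members up to date.
        plugin.setDeviceForStrategy(STRATEGY_MEDIA, AUDIO_DEVICE_OUT_SPEAKER);
        plugin.setStrategyForStream(AUDIO_STREAM_MUSIC, STRATEGY_MEDIA);
        plugin.setStrategyForUsage(AUDIO_USAGE_MEDIA, STRATEGY_MEDIA);
        plugin.setDeviceForInputSource(AUDIO_SOURCE_MIC, AUDIO_DEVICE_IN_BUILTIN_MIC);
        return NO_ERROR;
    }

    } // namespace android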
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk
new file mode 100644
index 0000000..c402fd5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/Android.mk
@@ -0,0 +1,7 @@
+LOCAL_PATH := $(call my-dir)
+
+#######################################################################
+# Recursive call sub-folder Android.mk
+#######################################################################
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
new file mode 100644
index 0000000..e9b1902
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
@@ -0,0 +1,105 @@
+################################################################################################
+#
+# @NOTE:
+# Audio Policy Engine configurable example for generic device build
+#
+# Any vendor shall have its own configuration within the corresponding device folder
+#
+################################################################################################
+
+
+LOCAL_PATH := $(call my-dir)
+
+PFW_CORE := external/parameter-framework
+BUILD_PFW_SETTINGS := $(PFW_CORE)/support/android/build_pfw_settings.mk
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/Schemas
+PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
+
+##################################################################
+# CONFIGURATION FILES
+##################################################################
+######### Policy PFW top level file #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := ParameterFrameworkConfigurationPolicy.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework
+LOCAL_SRC_FILES := $(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+
+########## Policy PFW Structures #########
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicyClass.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_REQUIRED_MODULES := \
+    PolicySubsystem-CommonTypes.xml \
+    PolicySubsystem-Volume.xml \
+    libpolicy-subsystem \
+
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem-CommonTypes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := PolicySubsystem-Volume.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework/Structure/Policy
+LOCAL_SRC_FILES := Structure/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+######### Policy PFW Settings #########
+include $(CLEAR_VARS)
+LOCAL_MODULE := parameter-framework.policy
+LOCAL_MODULE_STEM := PolicyConfigurableDomains.xml
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_RELATIVE_PATH := parameter-framework/Settings/Policy
+LOCAL_ADDITIONAL_DEPENDENCIES := \
+        PolicyClass.xml \
+        PolicySubsystem.xml \
+        ParameterFrameworkConfigurationPolicy.xml
+
+ifeq ($(pfw_rebuild_settings),true)
+PFW_TOPLEVEL_FILE := $(TARGET_OUT_ETC)/parameter-framework/ParameterFrameworkConfigurationPolicy.xml
+PFW_CRITERIA_FILE := $(LOCAL_PATH)/policy_criteria.txt
+PFW_EDD_FILES := \
+        $(LOCAL_PATH)/Settings/device_for_strategy_media.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_phone.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_sonification.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_sonification_respectful.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_dtmf.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_enforced_audible.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_transmitted_through_speaker.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_accessibility.pfw \
+        $(LOCAL_PATH)/Settings/device_for_strategy_rerouting.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_stream.pfw \
+        $(LOCAL_PATH)/Settings/strategy_for_usage.pfw \
+        $(LOCAL_PATH)/Settings/device_for_input_source.pfw \
+        $(LOCAL_PATH)/Settings/volumes.pfw
+
+include $(BUILD_PFW_SETTINGS)
+else
+# Use the existing file
+LOCAL_SRC_FILES := Settings/$(LOCAL_MODULE_STEM)
+include $(BUILD_PREBUILT)
+endif # pfw_rebuild_settings
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml
new file mode 100755
index 0000000..6905201
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml
@@ -0,0 +1,15 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ParameterFrameworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:noNamespaceSchemaLocation="Schemas/ParameterFrameworkConfiguration.xsd"
+    SystemClassName="Policy" ServerPort="5019" TuningAllowed="true">
+
+    <SubsystemPlugins>
+        <Location Folder="">
+            <Plugin Name="libpolicy-subsystem.so"/>
+        </Location>
+    </SubsystemPlugins>
+    <StructureDescriptionFileLocation Path="Structure/Policy/PolicyClass.xml"/>
+    <SettingsConfiguration>
+        <ConfigurableDomainsFileLocation Path="Settings/Policy/PolicyConfigurableDomains.xml"/>
+    </SettingsConfiguration>
+</ParameterFrameworkConfiguration>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/README.md b/services/audiopolicy/engineconfigurable/parameter-framework/example/README.md
new file mode 100644
index 0000000..92668e1
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/README.md
@@ -0,0 +1,11 @@
+Configurable Policy Engine Example
+================================
+
+This folder exposes a generic, functional set of configuration files for the configurable
+policy engine, provided so that a product can follow the Nexus experience.
+
+A vendor wishing to customize the behavior shall provide its own set of configuration files
+within the device folder of the product to customize.
+
+For any question about the parameter framework and its configuration files,
+see [the wiki on github](https://github.com/01org/parameter-framework/wiki).
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
new file mode 100644
index 0000000..8cb0723
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
@@ -0,0 +1,11298 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- #### DO NOT EDIT THIS FILE #### -->
+<ConfigurableDomains xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="Schemas/ConfigurableDomains.xsd" SystemClassName="Policy">
+  <ConfigurableDomain Name="DeviceForStrategy.Media.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/telephony_tx"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Media.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="RemoteSubmix">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="ForceSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForHdmiSystemAudio" MatchesWhen="IsNot" Value="ForceHdmiSystemEnforced"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForHdmiSystemAudio" MatchesWhen="IsNot" Value="ForceHdmiSystemEnforced"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="RemoteSubmix">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="ForceSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Media.Arc" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="HdmiArc"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Media.Spdif" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Spdif"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Media.AuxLine" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AuxLine"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Phone.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker_safe"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Phone.Device" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="ScoCarkit">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoCarkit"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothSco"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Hdmi">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Earpiece"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="ScoCarkit">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Hdmi">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/phone/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
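+  <!-- Output devices that the sonification strategy can never select on this
+       platform (remote submix, HDMI ARC, S/PDIF, FM, speaker-safe, aux line,
+       HDMI): a single always-applicable "Calibration" configuration keeps
+       their selected_output_devices mask bits cleared. -->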
+  <ConfigurableDomain Name="DeviceForStrategy.Sonification.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/hdmi"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
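+  <!-- Adds the speaker to the sonification routing mask when it is available
+       and either telephony is idle, or speaker output is forced for
+       communication during a call/communication session and no Line output
+       is attached; otherwise the speaker bit stays cleared. -->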
+  <ConfigurableDomain Name="DeviceForStrategy.Sonification.Speaker" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="Line"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/speaker"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
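+  <!-- Second device-selection step ("Device2") for the sonification strategy:
+       the configurations below choose an A2DP output outside of calls (unless
+       A2DP is force-disabled), a Bluetooth SCO device when SCO is forced for
+       communication during a call, or a wired, USB, dock or earpiece output
+       according to device availability, telephony mode and force-use settings,
+       falling back to "None" when nothing matches. -->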
+  <ConfigurableDomain Name="DeviceForStrategy.Sonification.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="ScoCarkit">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoCarkit"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="ScoHeadset">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Sco">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Earpiece"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="None">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="ScoCarkit">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="ScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Sco">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="None">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.SonificationRespectful.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/telephony_tx"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.SonificationRespectful.Speakers" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="SpeakerSafe">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="SpeakerSafe"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="Line"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="None">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="SpeakerSafe">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="None">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.SonificationRespectful.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothScoCarkit">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoCarkit"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="LineWhenFollowMediaStrategy">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Earpiece"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+        </CompoundRule>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi"/>
+    </ConfigurableElements>
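Each path in the element list above is one bit of the sonification_respectful strategy's selected_output_devices mask; the names appear to track the audio_devices_t output flags from <system/audio.h>, although the change itself does not spell that mapping out. A hedged sketch of the assumed correspondence:

    // Assumed mapping between the bit parameters listed above and the
    // audio_devices_t output flags from <system/audio.h>; illustrative only,
    // not taken from this change.
    #include <system/audio.h>
    #include <map>
    #include <string>

    static const std::map<std::string, audio_devices_t> kAssumedDeviceBits = {
        {"earpiece",                  AUDIO_DEVICE_OUT_EARPIECE},
        {"bluetooth_sco",             AUDIO_DEVICE_OUT_BLUETOOTH_SCO},
        {"bluetooth_sco_headset",     AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET},
        {"bluetooth_sco_carkit",      AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT},
        {"bluetooth_a2dp_headphones", AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES},
        {"bluetooth_a2dp_speaker",    AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER},
        {"bluetooth_a2dp",            AUDIO_DEVICE_OUT_BLUETOOTH_A2DP},
        {"wired_headset",             AUDIO_DEVICE_OUT_WIRED_HEADSET},
        {"wired_headphone",           AUDIO_DEVICE_OUT_WIRED_HEADPHONE},
        {"line",                      AUDIO_DEVICE_OUT_LINE},
        {"angl_dock_headset",         AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET},
        {"dgtl_dock_headset",         AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET},
        {"usb_accessory",             AUDIO_DEVICE_OUT_USB_ACCESSORY},
        {"usb_device",                AUDIO_DEVICE_OUT_USB_DEVICE},
        {"hdmi",                      AUDIO_DEVICE_OUT_AUX_DIGITAL},  // HDMI
    };

In each Settings configuration below, exactly one of these bits is set to 1 and the rest to 0, so selecting a configuration amounts to routing the strategy to a single output device.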
+    <Settings>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothScoCarkit">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="LineWhenFollowMediaStrategy">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/sonification_respectful/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Dtmf.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_carkit"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Dtmf.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="RemoteSubmix">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="ForceSpeakerWhenNotInCall">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForHdmiSystemAudio" MatchesWhen="IsNot" Value="ForceHdmiSystemEnforced"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothSco"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="LineWhenFollowingMedia">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Hdmi">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Earpiece"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="LineWhenFallThroughPhone">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+            <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <CompoundRule Type="All">
+          <CompoundRule Type="Any">
+            <CompoundRule Type="All">
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+                <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+              </CompoundRule>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+              <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+              <SelectionCriterionRule SelectionCriterion="ForceUseForHdmiSystemAudio" MatchesWhen="IsNot" Value="ForceHdmiSystemEnforced"/>
+            </CompoundRule>
+          </CompoundRule>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="RemoteSubmix">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="ForceSpeakerWhenNotInCall">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothSco">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="LineWhenFollowingMedia">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Hdmi">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Earpiece">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="LineWhenFallThroughPhone">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Dtmf.Arc" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="HdmiArc"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Dtmf.Spdif" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Spdif"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Dtmf.AuxLine" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AuxLine"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.EnforcedAudible.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.EnforcedAudible.Speaker" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="ForceUseForSystem" MatchesWhen="Is" Value="ForceSystemEnforced"/>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="RemoteSubmix"/>
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+                <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="BluetoothA2dp"/>
+                <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="BluetoothA2dpHeadphones"/>
+                <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="BluetoothA2dpSpeaker"/>
+              </CompoundRule>
+            </CompoundRule>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="RemoteSubmix"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="WiredHeadphone"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="Line"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="WiredHeadset"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbDevice"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="DgtlDockHeadset"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="Hdmi"/>
+              <CompoundRule Type="Any">
+                <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="AnlgDockHeadset"/>
+                <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="IsNot" Value="ForceAnalogDock"/>
+              </CompoundRule>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.EnforcedAudible.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="RemoteSubmix">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Hdmi">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="RemoteSubmix">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphones">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Hdmi">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.TransmittedThroughSpeaker.UnreacheableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/telephony_tx"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/line"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.TransmittedThroughSpeaker.Speaker" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Selected">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/speaker"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Selected">
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="NotSelected">
+        <ConfigurableElement Path="/Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Accessibility.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/telephony_tx"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Accessibility.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="RemoteSubmix">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="ForceSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="RemoteSubmix">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="ForceSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/accessibility/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Rerouting.UnreachableDevices" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi_arc"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/aux_line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/fm"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker_safe"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/earpiece"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco_carkit"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/telephony_tx"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi_arc">
+          <BitParameter Name="hdmi_arc">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/aux_line">
+          <BitParameter Name="aux_line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/fm">
+          <BitParameter Name="fm">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker_safe">
+          <BitParameter Name="speaker_safe">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/earpiece">
+          <BitParameter Name="earpiece">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco">
+          <BitParameter Name="bluetooth_sco">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_sco_carkit">
+          <BitParameter Name="bluetooth_sco_carkit">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/telephony_tx">
+          <BitParameter Name="telephony_tx">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForStrategy.Rerouting.Device2" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="RemoteSubmix">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="ForceSpeaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadphone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Line">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Line"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbAccessory"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="DgtlDockHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Hdmi"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="AnlgDockHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForDock" MatchesWhen="Is" Value="ForceAnalogDock"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="Speaker"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="RemoteSubmix">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dp">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BluetoothA2dpSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="ForceSpeaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadphone">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Line">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbAccessory">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="DgtlDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AuxDigital">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="AnlgDockHeadset">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Speaker">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_headphones">
+          <BitParameter Name="bluetooth_a2dp_headphones">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/bluetooth_a2dp_speaker">
+          <BitParameter Name="bluetooth_a2dp_speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/speaker">
+          <BitParameter Name="speaker">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/wired_headphone">
+          <BitParameter Name="wired_headphone">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/angl_dock_headset">
+          <BitParameter Name="angl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/strategies/rerouting/selected_output_devices/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="StrategyForStream" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/applicable_strategy/strategy"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">phone</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification_respectful</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">phone</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">enforced_audible</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">dtmf</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">transmitted_through_speaker</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">accessibility</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">rerouting</EnumParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="SelectedStrategyForUsages.Calibration" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/usages/unknown/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/media/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/voice_communication/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/voice_communication_signalling/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/alarm/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/notification/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/notification_telephony_ringtone/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/notification_communication_request/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/notification_communication_instant/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/notification_communication_delayed/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/notification_event/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy"/>
+      <ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/usages/unknown/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/media/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/voice_communication/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">phone</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/voice_communication_signalling/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">dtmf</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/alarm/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/notification/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification_respectful</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/notification_telephony_ringtone/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/notification_communication_request/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification_respectful</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/notification_communication_instant/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification_respectful</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/notification_communication_delayed/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification_respectful</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/notification_event/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification_respectful</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">media</EnumParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="SelectedStrategyForUsages.AssistanceAccessibility" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Sonification">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="RingTone"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Phone">
+        <CompoundRule Type="Any">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Accessibility">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Sonification">
+        <ConfigurableElement Path="/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">sonification</EnumParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Phone">
+        <ConfigurableElement Path="/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">phone</EnumParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Accessibility">
+        <ConfigurableElement Path="/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy">
+          <EnumParameter Name="strategy">accessibility</EnumParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.Calibration" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/loopback"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/in"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/communication"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ambient"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/hdmi"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/remote_submix"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/anlg_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/dgtl_dock_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_accessory"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/tv_tuner"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/line"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/spdif"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/in">
+          <BitParameter Name="in">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/communication">
+          <BitParameter Name="communication">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ambient">
+          <BitParameter Name="ambient">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/hdmi">
+          <BitParameter Name="hdmi">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/anlg_dock_headset">
+          <BitParameter Name="anlg_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/dgtl_dock_headset">
+          <BitParameter Name="dgtl_dock_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_accessory">
+          <BitParameter Name="usb_accessory">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/tv_tuner">
+          <BitParameter Name="tv_tuner">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/line">
+          <BitParameter Name="line">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/spdif">
+          <BitParameter Name="spdif">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback">
+          <BitParameter Name="loopback">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.DefaultAndMic" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="A2dp">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Sco">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+          <SelectionCriterionRule SelectionCriterion="ForceUseForRecord" MatchesWhen="Is" Value="ForceBtSco"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BuiltinMic"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="A2dp">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Sco">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/default/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_a2dp">
+          <BitParameter Name="bluetooth_a2dp">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/mic/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.VoiceUplinkAndVoiceDownlinkAndVoiceCall" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="VoiceCall">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="TelephonyRx"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/telephony_rx"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/telephony_rx"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="VoiceCall">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_downlink/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_call/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_uplink/applicable_input_device/mask/telephony_rx">
+          <BitParameter Name="telephony_rx">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.Camcorder" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="BackMic">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BackMic"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BuiltinMic"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/back_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/builtin_mic"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="BackMic">
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/camcorder/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndHotword" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="ScoHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForRecord" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BuiltinMic"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/builtin_mic"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="ScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.VoiceCommunication" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="ScoHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceBtSco"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BluetoothScoHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="WiredHeadset"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BuiltinMic"/>
+          <CompoundRule Type="Any">
+            <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceNone"/>
+            <CompoundRule Type="All">
+              <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+              <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Excludes" Value="BackMic"/>
+            </CompoundRule>
+          </CompoundRule>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="BackMic">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="BackMic"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic"/>
+      <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="ScoHeadset">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="WiredHeadset">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="UsbDevice">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BuiltinMic">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">1</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="BackMic">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/bluetooth_sco_headset">
+          <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/wired_headset">
+          <BitParameter Name="wired_headset">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/usb_device">
+          <BitParameter Name="usb_device">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/builtin_mic">
+          <BitParameter Name="builtin_mic">0</BitParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/input_sources/voice_communication/applicable_input_device/mask/back_mic">
+          <BitParameter Name="back_mic">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.RemoteSubmix" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="RemoteSubmix">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="RemoteSubmix"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/remote_submix"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="RemoteSubmix">
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/remote_submix/applicable_input_device/mask/remote_submix">
+          <BitParameter Name="remote_submix">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="DeviceForInputSource.FmTuner" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="FmTuner">
+        <CompoundRule Type="All">
+          <SelectionCriterionRule SelectionCriterion="AvailableInputDevices" MatchesWhen="Includes" Value="FmTuner"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="Default">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="FmTuner">
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">1</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="Default">
+        <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner">
+          <BitParameter Name="fm_tuner">0</BitParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="VolumeProfilesForStream.Calibration" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="Calibration">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="Calibration">
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-16.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-8.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/voice_call/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-30.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-26.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-22.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-21.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/system/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-35.69921875</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-26.10156250</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-13.19921875</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/ring/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-56.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-34.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-11.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/music/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-35.69921875</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-26.10156250</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-13.19921875</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/alarm/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-35.69921875</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-26.10156250</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-13.19921875</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-49.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-33.50000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/notification/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-16.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-8.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/bluetooth_sco/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-30.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-26.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-22.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/enforced_audible/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-68.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-34.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/tts/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-96.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-56.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-34.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-11.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/accessibility/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/rerouting/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">2</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/patch/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+  <ConfigurableDomain Name="VolumeProfilesForStream.Dtmf" SequenceAware="false">
+    <Configurations>
+      <Configuration Name="InCall">
+        <CompoundRule Type="Any">
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCall"/>
+          <SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="Is" Value="InCommunication"/>
+        </CompoundRule>
+      </Configuration>
+      <Configuration Name="OutOfCall">
+        <CompoundRule Type="All"/>
+      </Configuration>
+    </Configurations>
+    <ConfigurableElements>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/index"/>
+      <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation"/>
+    </ConfigurableElements>
+    <Settings>
+      <Configuration Name="InCall">
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-30.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-26.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-22.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-27.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-10.00000000</FixedPointParameter>
+        </ConfigurableElement>
+      </Configuration>
+      <Configuration Name="OutOfCall">
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/headset_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-16.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-8.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/speaker_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">0</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-24.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-18.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-12.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/earpiece_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-6.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/index">
+          <IntegerParameter Name="index">1</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/0/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-58.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/index">
+          <IntegerParameter Name="index">33</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/1/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-40.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/index">
+          <IntegerParameter Name="index">66</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/2/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">-17.00000000</FixedPointParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/index">
+          <IntegerParameter Name="index">100</IntegerParameter>
+        </ConfigurableElement>
+        <ConfigurableElement Path="/Policy/policy/streams/dtmf/volume_profiles/extmedia_device_category/curve_points/3/db_attenuation">
+          <FixedPointParameter Name="db_attenuation">0.00000000</FixedPointParameter>
+        </ConfigurableElement>
+      </Configuration>
+    </Settings>
+  </ConfigurableDomain>
+</ConfigurableDomains>
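The curve points serialized above pair a normalized volume index segment (0..100) with a dB attenuation for each stream and device category. Below is a minimal, self-contained sketch of how such a piecewise-linear curve is typically evaluated: locate the normalized index between two adjacent points, interpolate the attenuation linearly in dB, and convert to a linear gain with 10^(dB/20). The names CurvePoint, volIndexToDb, and dbToAmpl are illustrative only, not the actual AOSP symbols; the example data is the dtmf / speaker_device_category / OutOfCall curve from the settings above.

// Hedged sketch: piecewise-linear volume curve evaluation (illustrative names).
#include <cmath>
#include <cstdio>
#include <vector>

struct CurvePoint {
    int   index;          // normalized volume index segment, 0..100
    float dbAttenuation;  // attenuation in dB at that index (0 or negative)
};

// Linearly interpolate the dB attenuation for a normalized index.
static float volIndexToDb(const std::vector<CurvePoint>& curve, int normIndex) {
    if (curve.empty()) return 0.0f;
    if (normIndex <= curve.front().index) return curve.front().dbAttenuation;
    if (normIndex >= curve.back().index)  return curve.back().dbAttenuation;
    for (size_t i = 1; i < curve.size(); ++i) {
        if (normIndex <= curve[i].index) {
            const CurvePoint& lo = curve[i - 1];
            const CurvePoint& hi = curve[i];
            const float t = static_cast<float>(normIndex - lo.index) /
                            static_cast<float>(hi.index - lo.index);
            return lo.dbAttenuation + t * (hi.dbAttenuation - lo.dbAttenuation);
        }
    }
    return curve.back().dbAttenuation;
}

// Convert a dB attenuation to a linear gain factor.
static float dbToAmpl(float db) { return std::pow(10.0f, db / 20.0f); }

int main() {
    // The dtmf / speaker_device_category / OutOfCall curve from the settings above:
    // (0, -24 dB), (33, -16 dB), (66, -8 dB), (100, 0 dB).
    const std::vector<CurvePoint> dtmfSpeakerOutOfCall = {
        {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f},
    };
    const int normIndex = 50;  // halfway on the normalized 0..100 scale
    const float db = volIndexToDb(dtmfSpeakerOutOfCall, normIndex);
    std::printf("index %d -> %.2f dB -> gain %.4f\n", normIndex, db, dbToAmpl(db));
    return 0;
}

With the sample curve, index 50 falls between the (33, -16 dB) and (66, -8 dB) points and interpolates to roughly -11.9 dB before conversion to a linear gain.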
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
new file mode 100644
index 0000000..d4bc370
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
@@ -0,0 +1,515 @@
+supDomain: DeviceForInputSource
+	domain: Calibration
+		conf: Calibration
+			#
+			# Note that ALL input devices must have the sign bit set to 1.
+			# As the device field is a mask, the "in" bit is used as a direction indicator
+			# (a short sketch of this direction-bit convention follows this Calibration domain).
+			#
+			component: /Policy/policy/input_sources/default/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/mic/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/voice_downlink/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/voice_call/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/voice_uplink/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/voice_recognition/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				hdmi = 0
+				telephony_rx = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				builtin_mic = 0
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/hotword/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				hdmi = 0
+				telephony_rx = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+			component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
+				in = 1
+				communication = 0
+				ambient = 0
+				bluetooth_sco_headset = 0
+				hdmi = 0
+				telephony_rx = 0
+				builtin_mic = 0
+				wired_headset = 0
+				back_mic = 0
+				remote_submix = 0
+				anlg_dock_headset = 0
+				dgtl_dock_headset = 0
+				usb_accessory = 0
+				usb_device = 0
+				fm_tuner = 0
+				tv_tuner = 0
+				line = 0
+				spdif = 0
+				bluetooth_a2dp = 0
+				loopback = 0
+
+	domain: DefaultAndMic
+		conf: A2dp
+			AvailableInputDevices Includes BluetoothA2dp
+
+			component: /Policy/policy/input_sources
+				component: default/applicable_input_device/mask/
+					bluetooth_a2dp = 1
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+				component: mic/applicable_input_device/mask/
+					bluetooth_a2dp = 1
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+
+		conf: Sco
+			AvailableInputDevices Includes BluetoothScoHeadset
+			ForceUseForRecord Is ForceBtSco
+
+			component: /Policy/policy/input_sources
+				component: default/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 1
+				component: mic/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 1
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 1
+
+		conf: WiredHeadset
+			AvailableInputDevices Includes WiredHeadset
+
+			component: /Policy/policy/input_sources
+				component: default/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 1
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+				component: mic/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 1
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+
+		conf: UsbDevice
+			AvailableInputDevices Includes UsbDevice
+
+			component: /Policy/policy/input_sources
+				component: default/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 1
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+				component: mic/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 1
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+
+		conf: BuiltinMic
+			AvailableInputDevices Includes BuiltinMic
+
+			component: /Policy/policy/input_sources
+				component: default/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 1
+					bluetooth_sco_headset = 0
+				component: mic/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 1
+					bluetooth_sco_headset = 0
+
+		conf: Default
+			component: /Policy/policy/input_sources
+				component: default/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+				component: mic/applicable_input_device/mask/
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+					bluetooth_sco_headset = 0
+
+	domain: VoiceUplinkAndVoiceDownlinkAndVoiceCall
+		conf: VoiceCall
+			AvailableInputDevices Includes TelephonyRx
+
+			component: /Policy/policy/input_sources
+				voice_downlink/applicable_input_device/mask/telephony_rx = 1
+				voice_call/applicable_input_device/mask/telephony_rx = 1
+				voice_uplink/applicable_input_device/mask/telephony_rx = 1
+
+		conf: Default
+			component: /Policy/policy/input_sources
+				voice_downlink/applicable_input_device/mask/telephony_rx = 0
+				voice_call/applicable_input_device/mask/telephony_rx = 0
+				voice_uplink/applicable_input_device/mask/telephony_rx = 0
+
+	domain: Camcorder
+		conf: BackMic
+			AvailableInputDevices Includes BackMic
+
+			component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
+				back_mic = 1
+				builtin_mic = 0
+
+		conf: BuiltinMic
+			AvailableInputDevices Includes BuiltinMic
+
+			component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
+				back_mic = 0
+				builtin_mic = 1
+
+		conf: Default
+			component: /Policy/policy/input_sources/camcorder/applicable_input_device/mask
+				back_mic = 0
+				builtin_mic = 0
+
+	domain: VoiceRecognitionAndHotword
+		conf: ScoHeadset
+			ForceUseForRecord Is ForceBtSco
+			AvailableInputDevices Includes BluetoothScoHeadset
+
+			component: /Policy/policy/input_sources
+				component: voice_recognition/applicable_input_device/mask
+					bluetooth_sco_headset = 1
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+				component: hotword/applicable_input_device/mask
+					bluetooth_sco_headset = 1
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+
+		conf: WiredHeadset
+			AvailableInputDevices Includes WiredHeadset
+
+			component: /Policy/policy/input_sources
+				component: voice_recognition/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 1
+					usb_device = 0
+					builtin_mic = 0
+				component: hotword/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 1
+					usb_device = 0
+					builtin_mic = 0
+
+		conf: UsbDevice
+			AvailableInputDevices Includes UsbDevice
+
+			component: /Policy/policy/input_sources
+				component: voice_recognition/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 1
+					builtin_mic = 0
+				component: hotword/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 1
+					builtin_mic = 0
+
+		conf: BuiltinMic
+			AvailableInputDevices Includes BuiltinMic
+
+			component: /Policy/policy/input_sources
+				component: voice_recognition/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 1
+				component: hotword/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 1
+
+		conf: Default
+			component: /Policy/policy/input_sources
+				component: voice_recognition/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+				component: hotword/applicable_input_device/mask
+					bluetooth_sco_headset = 0
+					wired_headset = 0
+					usb_device = 0
+					builtin_mic = 0
+
+	domain: VoiceCommunication
+		conf: ScoHeadset
+			#
+			# SCO device requested for communication; only selected if a SCO headset is actually available
+			#
+			ForceUseForCommunication Is ForceBtSco
+			AvailableInputDevices Includes BluetoothScoHeadset
+
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				bluetooth_sco_headset = 1
+				wired_headset = 0
+				usb_device = 0
+				builtin_mic = 0
+				back_mic = 0
+
+		conf: WiredHeadset
+			ForceUseForCommunication Is ForceNone
+			AvailableInputDevices Includes WiredHeadset
+
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				bluetooth_sco_headset = 0
+				wired_headset = 1
+				usb_device = 0
+				builtin_mic = 0
+				back_mic = 0
+
+		conf: UsbDevice
+			ForceUseForCommunication Is ForceNone
+			AvailableInputDevices Includes UsbDevice
+
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				usb_device = 1
+				builtin_mic = 0
+				back_mic = 0
+
+		conf: BuiltinMic
+			AvailableInputDevices Includes BuiltinMic
+			ANY
+				ForceUseForCommunication Is ForceNone
+				ALL
+					ForceUseForCommunication Is ForceSpeaker
+					AvailableInputDevices Excludes BackMic
+
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				usb_device = 0
+				builtin_mic = 1
+				back_mic = 0
+
+		conf: BackMic
+			ForceUseForCommunication Is ForceSpeaker
+			AvailableInputDevices Includes BackMic
+
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				usb_device = 0
+				builtin_mic = 0
+				back_mic = 1
+
+		conf: Default
+			component: /Policy/policy/input_sources/voice_communication/applicable_input_device/mask
+				bluetooth_sco_headset = 0
+				wired_headset = 0
+				usb_device = 0
+				builtin_mic = 0
+				back_mic = 0
+
+	domain: RemoteSubmix
+		conf: RemoteSubmix
+			AvailableInputDevices Includes RemoteSubmix
+
+			component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
+				remote_submix = 1
+
+		conf: Default
+			component: /Policy/policy/input_sources/remote_submix/applicable_input_device/mask
+				remote_submix = 0
+
+	domain: FmTuner
+		conf: FmTuner
+			AvailableInputDevices Includes FmTuner
+
+			component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
+				fm_tuner = 1
+
+		conf: Default
+			component: /Policy/policy/input_sources/fm_tuner/applicable_input_device/mask
+				fm_tuner = 0
+
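The Calibration configuration above pins in = 1 on every applicable_input_device mask, matching the note at the top of this file that all input device masks carry the sign bit and that the "in" bit is only a direction indicator. A minimal C++ sketch of that convention, assuming the audio_devices_t layout from system/audio.h where AUDIO_DEVICE_BIT_IN is the 0x80000000 sign bit (the 0x4 mic offset below is purely illustrative):

    #include <cstdint>
    #include <cstdio>

    // Assumed value of AUDIO_DEVICE_BIT_IN from system/audio.h: the sign bit of a
    // 32-bit audio_devices_t marks the mask as describing input devices.
    static const uint32_t kAudioDeviceBitIn = 0x80000000u;

    // The "in" bit only indicates direction; the remaining bits select devices.
    static bool isInputDevice(uint32_t deviceMask) {
        return (deviceMask & kAudioDeviceBitIn) != 0;
    }

    int main() {
        // Hypothetical built-in mic mask: direction bit plus one device bit.
        const uint32_t builtinMic = kAudioDeviceBitIn | 0x4;
        std::printf("builtin_mic is an input device: %d\n", isInputDevice(builtinMic));
        return 0;
    }

Within the domains that follow, configuration order also acts as a priority: the parameter-framework applies the first conf whose rule matches, which is how DefaultAndMic encodes the A2DP > SCO > wired headset > USB > built-in mic preference.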
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_accessibility.pfw
new file mode 100644
index 0000000..e8ab33b
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_accessibility.pfw
@@ -0,0 +1,302 @@
+supDomain: DeviceForStrategy
+
+	supDomain: Accessibility
+		#
+		# @FIXME: STRATEGY_ACCESSIBILITY follows STRATEGY_MEDIA for now
+		#
+		# @FIXME: How to disable HDMI if !audio_is_linear_pcm other than programmatically???
+		#
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					hdmi_arc = 0
+					spdif = 0
+					aux_line = 0
+					fm = 0
+					speaker_safe = 0
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					telephony_tx = 0
+
+		domain: Device2
+			conf: RemoteSubmix
+				AvailableOutputDevices Includes RemoteSubmix
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 1
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dp
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dpHeadphone
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dpSpeaker
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: ForceSpeaker
+				ForceUseForMedia Is ForceSpeaker
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: WiredHeadphone
+				AvailableOutputDevices Includes WiredHeadphone
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 1
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Line
+				AvailableOutputDevices Includes Line
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: WiredHeadset
+				AvailableOutputDevices Includes WiredHeadset
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 1
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: UsbAccessory
+				AvailableOutputDevices Includes UsbAccessory
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					hdmi = 0
+
+			conf: UsbDevice
+				AvailableOutputDevices Includes UsbDevice
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					hdmi = 0
+
+			conf: DgtlDockHeadset
+				AvailableOutputDevices Includes DgtlDockHeadset
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: AuxDigital
+				#
+				# Do not route accessibility prompts to a digital output currently configured with a
+				# compressed format, as they would likely not be mixed and would be dropped.
+				#
+				# @TODO How to translate the following condition(???)
+				# desc->isActive() && !audio_is_linear_pcm(desc->mFormat) && devices != AUDIO_DEVICE_NONE
+				#
+				AvailableOutputDevices Includes Hdmi
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 1
+
+			conf: AnlgDockHeadset
+				AvailableOutputDevices Includes AnlgDockHeadset
+				ForceUseForDock Is ForceAnalogDock
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Default
+				component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
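The AuxDigital rule above carries a @TODO because the quoted condition, desc->isActive() && !audio_is_linear_pcm(desc->mFormat) && devices != AUDIO_DEVICE_NONE, depends on the runtime state of the output, which these declarative criteria cannot observe. A hedged C++ sketch of the programmatic check the comment refers to (OutputDesc is an illustrative placeholder for the policy manager's output descriptor; only audio_is_linear_pcm() and AUDIO_DEVICE_NONE are taken from the quoted condition):

    #include <system/audio.h>

    // Illustrative placeholder for the policy manager's output descriptor.
    struct OutputDesc {
        bool active;
        audio_format_t format;
        bool isActive() const { return active; }
    };

    // True when accessibility prompts should NOT be routed to this digital
    // output: it is active with a compressed (non linear-PCM) format and some
    // device is already selected, mirroring the condition quoted in the @TODO.
    static bool skipCompressedDigitalOutput(const OutputDesc& desc,
                                            audio_devices_t devices) {
        return desc.isActive()
                && !audio_is_linear_pcm(desc.format)
                && devices != AUDIO_DEVICE_NONE;
    }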
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
new file mode 100644
index 0000000..85273b2
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
@@ -0,0 +1,637 @@
+supDomain: DeviceForStrategy
+
+	supDomain: Dtmf
+
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					fm = 0
+					speaker_safe = 0
+					bluetooth_sco_carkit = 0
+
+		domain: Device2
+			conf: RemoteSubmix
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes RemoteSubmix
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 1
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothA2dp
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dp
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothA2dpHeadphones
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothA2dpSpeaker
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: ForceSpeakerWhenNotInCall
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia Is ForceSpeaker
+				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 1
+
+			conf: BluetoothScoHeadset
+				#
+				# DTMF falls through Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				AvailableOutputDevices Includes BluetoothScoHeadset
+				ForceUseForCommunication Is ForceBtSco
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 1
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothSco
+				#
+				# DTMF falls through Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				AvailableOutputDevices Includes BluetoothSco
+				ForceUseForCommunication Is ForceBtSco
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 1
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: WiredHeadphone
+				ANY
+					#
+					# DTMF falls through Phone strategy if in call
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+					#
+					# DTMF follows Media strategy if not in call
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes WiredHeadphone
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 1
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: LineWhenFollowingMedia
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes Line
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 1
+					speaker = 0
+
+			conf: WiredHeadset
+				ANY
+					#
+					# DTMF falls through Phone strategy if in call
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+					#
+					# DTMF follows Media strategy if not in call
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes WiredHeadset
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 1
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: UsbDevice
+				ANY
+					#
+					# DTMF falls through Phone strategy if in call (broadly speaking)
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+					#
+					# DTMF follows Media strategy if not in call
+					# Media strategy inverts the priority of USB device vs accessory
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						AvailableOutputDevices Excludes UsbAccessory
+						ForceUseForCommunication Is ForceSpeaker
+				AvailableOutputDevices Includes UsbDevice
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: UsbAccessory
+				#
+				# DTMF falls through Phone strategy if in call (broadly speaking)
+				# but USB accessory not reachable in call
+				#
+				# DTMF follows Media strategy if not in call
+				# Media strategy inverts the priority of USB device vs accessory
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes UsbAccessory
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: DgtlDockHeadset
+				#
+				# DTMF falls through Phone strategy if in call (broadly speaking)
+				# but DgtlDockHeadset not reachable in call
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes DgtlDockHeadset
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: Hdmi
+				#
+				# DTMF falls through Phone strategy if in call (broadly speaking)
+				# but Hdmi not reachable in call
+				#
+				# DTMF follows Media strategy if not in call
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes Hdmi
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: AnlgDockHeadset
+				#
+				# DTMF falls through Phone strategy if in call (broadly speaking)
+				# but AnlgDockHeadset not reachable in call
+				#
+				# DTMF follows Media strategy if not in call
+				# Media strategy inverts the priority of USB device vs accessory
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForDock Is ForceAnalogDock
+				AvailableOutputDevices Includes AnlgDockHeadset
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: Earpiece
+				#
+				# DTMF falls through Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				AvailableOutputDevices Includes Earpiece
+				ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 1
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: LineWhenFallThroughPhone
+				#
+				# DTMF falls through Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				AvailableOutputDevices Includes Line
+				ForceUseForCommunication Is ForceSpeaker
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 1
+					speaker = 0
+
+			conf: Speaker
+				ANY
+					#
+					# DTMF falls through Phone strategy if in call
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication Is ForceSpeaker
+					#
+					# DTMF follows Media strategy if not in call
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 1
+
+			conf: Default
+				component: /Policy/policy/strategies/dtmf/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+		domain: Arc
+			#
+			# DTMF strategy follows media strategy if not in call
+			# the following domains consist of device(s) that can co-exist with others
+			# e.g. ARC, SPDIF, AUX_LINE
+			#
+			conf: Selected
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes HdmiArc
+
+				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
+
+			conf: NotSelected
+				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
+
+		domain: Spdif
+			#
+			# DTMF strategy follows media strategy if not in call
+			# the following domains consist of device(s) that can co-exist with others
+			# e.g. ARC, SPDIF, AUX_LINE
+			#
+			conf: Selected
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes Spdif
+
+				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
+
+			conf: NotSelected
+				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
+
+		domain: AuxLine
+			#
+			# DTMF strategy follows media strategy if not in call
+			# the following domains consist of device(s) that can co-exist with others
+			# e.g. ARC, SPDIF, AUX_LINE
+			#
+			conf: Selected
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes AuxLine
+
+				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
+
+			conf: NotSelected
+				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
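Taken together, the Device2 configurations above encode the rule repeated in their comments: DTMF follows the Media strategy when the telephony mode is neither InCall nor InCommunication, and falls through to the Phone strategy otherwise. A compact C++ sketch of that selection (the enums are illustrative placeholders, not the engine's actual types):

    // Illustrative placeholders for the telephony mode and strategy criteria.
    enum class TelephonyMode { Normal, Ringtone, InCall, InCommunication };
    enum class Strategy { Media, Phone };

    // DTMF mirrors another strategy depending on the telephony mode, as stated
    // in the comments of the rules above.
    static Strategy strategyFollowedByDtmf(TelephonyMode mode) {
        const bool inCall = (mode == TelephonyMode::InCall)
                         || (mode == TelephonyMode::InCommunication);
        return inCall ? Strategy::Phone : Strategy::Media;
    }

The Arc, Spdif and AuxLine domains at the end of this file apply the same not-in-call guard before toggling their hdmi_arc, spdif and aux_line bits.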
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
new file mode 100644
index 0000000..d714743
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
@@ -0,0 +1,358 @@
+supDomain: DeviceForStrategy
+
+	supDomain: EnforcedAudible
+
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					# no enforced_audible on remote submix (e.g. WFD)
+					remote_submix = 0
+					hdmi_arc = 0
+					spdif = 0
+					aux_line = 0
+					speaker_safe = 0
+
+		domain: Speaker
+			conf: Selected
+				#
+				# strategy STRATEGY_ENFORCED_AUDIBLE uses the same routing policy as STRATEGY_SONIFICATION
+				# except:
+				#    - when in call, where it does not default to STRATEGY_PHONE behavior
+				#    - in countries where it is not enforced, in which case it follows STRATEGY_MEDIA
+				#
+				AvailableOutputDevices Includes Speaker
+				ANY
+					ForceUseForSystem Is ForceSystemEnforced
+					ALL
+						ForceUseForMedia Is ForceSpeaker
+						AvailableOutputDevices Excludes RemoteSubmix
+						ANY
+							ForceUseForMedia IsNot ForceNoBtA2dp
+							AvailableOutputDevices Excludes BluetoothA2dp
+							AvailableOutputDevices Excludes BluetoothA2dpHeadphones
+							AvailableOutputDevices Excludes BluetoothA2dpSpeaker
+					#
+					# Speaker is also the fallback device when none of the devices from the Device2
+					# domain can be selected.
+					#
+					ALL
+						AvailableOutputDevices Excludes RemoteSubmix
+						AvailableOutputDevices Excludes WiredHeadphone
+						AvailableOutputDevices Excludes Line
+						AvailableOutputDevices Excludes WiredHeadset
+						AvailableOutputDevices Excludes UsbAccessory
+						AvailableOutputDevices Excludes UsbDevice
+						AvailableOutputDevices Excludes DgtlDockHeadset
+						AvailableOutputDevices Excludes Hdmi
+						ANY
+							AvailableOutputDevices Excludes AnlgDockHeadset
+							ForceUseForDock IsNot ForceAnalogDock
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					speaker = 1
+
+			conf: NotSelected
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					speaker = 0
+
+		domain: Device2
+			conf: RemoteSubmix
+				AvailableOutputDevices Includes RemoteSubmix
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 1
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: BluetoothA2dp
+				AvailableOutputDevices Includes BluetoothA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: BluetoothA2dpHeadphones
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+				ForceUseForMedia IsNot ForceNoBtA2dp
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: BluetoothA2dpSpeaker
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+				ForceUseForMedia IsNot ForceNoBtA2dp
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: WiredHeadphone
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes WiredHeadphone
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 1
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: Line
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes Line
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 1
+					fm = 0
+					speaker_safe = 0
+
+			conf: WiredHeadset
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes WiredHeadset
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 1
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: UsbAccessory
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes UsbAccessory
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: UsbDevice
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes UsbDevice
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: DgtlDockHeadset
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes DgtlDockHeadset
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: Hdmi
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes Hdmi
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
+			conf: AnlgDockHeadset
+				ForceUseForMedia IsNot ForceSpeaker
+				ForceUseForDock Is ForceAnalogDock
+				AvailableOutputDevices Includes AnlgDockHeadset
+
+				component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
+					remote_submix = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					fm = 0
+					speaker_safe = 0
+
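The Speaker/Selected rule earlier in this file is the most deeply nested condition in these settings, and it reads more easily as a boolean expression. A hedged C++ sketch of the equivalent logic (the has() helper and the bool parameters are illustrative placeholders for the "AvailableOutputDevices Includes/Excludes" and "ForceUseFor... Is" criteria above):

    #include <set>
    #include <string>

    // Illustrative helper standing in for "AvailableOutputDevices Includes <dev>".
    static bool has(const std::set<std::string>& avail, const std::string& dev) {
        return avail.count(dev) != 0;
    }

    // Mirrors the ANY/ALL tree of the EnforcedAudible Speaker/Selected rule.
    static bool enforcedAudibleUsesSpeaker(const std::set<std::string>& avail,
                                           bool forceSystemEnforced,
                                           bool forceMediaSpeaker,
                                           bool forceMediaNoBtA2dp,
                                           bool forceDockAnalog) {
        if (!has(avail, "Speaker")) {
            return false;
        }
        // Branch 1: speaker forced for media, no remote submix, and A2DP either
        // not suppressed or not present at all.
        const bool forcedSpeaker =
            forceMediaSpeaker
            && !has(avail, "RemoteSubmix")
            && (!forceMediaNoBtA2dp
                || !has(avail, "BluetoothA2dp")
                || !has(avail, "BluetoothA2dpHeadphones")
                || !has(avail, "BluetoothA2dpSpeaker"));
        // Branch 2: speaker as the fallback when none of the Device2 devices
        // can be selected.
        const bool fallback =
            !has(avail, "RemoteSubmix") && !has(avail, "WiredHeadphone")
            && !has(avail, "Line") && !has(avail, "WiredHeadset")
            && !has(avail, "UsbAccessory") && !has(avail, "UsbDevice")
            && !has(avail, "DgtlDockHeadset") && !has(avail, "Hdmi")
            && (!has(avail, "AnlgDockHeadset") || !forceDockAnalog);
        return forceSystemEnforced || forcedSpeaker || fallback;
    }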
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_media.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_media.pfw
new file mode 100644
index 0000000..38bede5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_media.pfw
@@ -0,0 +1,331 @@
+domainGroup: DeviceForStrategy
+
+	domainGroup: Media
+
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					fm = 0
+					speaker_safe = 0
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					telephony_tx = 0
+
+		domain: Device2
+			conf: RemoteSubmix
+				AvailableOutputDevices Includes RemoteSubmix
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 1
+					line = 0
+
+			conf: BluetoothA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dp
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 1
+					remote_submix = 0
+					line = 0
+
+			conf: BluetoothA2dpHeadphone
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: BluetoothA2dpSpeaker
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: ForceSpeaker
+				ForceUseForMedia Is ForceSpeaker
+				AvailableOutputDevices Includes Speaker
+				#
+				# If hdmi system audio mode is on, remove speaker from the output list.
+				#
+				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 1
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: WiredHeadphone
+				AvailableOutputDevices Includes WiredHeadphone
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 1
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: Line
+				AvailableOutputDevices Includes Line
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 1
+
+			conf: WiredHeadset
+				AvailableOutputDevices Includes WiredHeadset
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 1
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: UsbAccessory
+				AvailableOutputDevices Includes UsbAccessory
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 1
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: UsbDevice
+				AvailableOutputDevices Includes UsbDevice
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 1
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: DgtlDockHeadset
+				AvailableOutputDevices Includes DgtlDockHeadset
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 1
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: AuxDigital
+				AvailableOutputDevices Includes Hdmi
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 1
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: AnlgDockHeadset
+				AvailableOutputDevices Includes AnlgDockHeadset
+				ForceUseForDock Is ForceAnalogDock
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 1
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				#
+				# If hdmi system audio mode is on, remove speaker from the output list.
+				#
+				ForceUseForHdmiSystemAudio IsNot ForceHdmiSystemEnforced
+
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 1
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+			conf: Default
+				component: /Policy/policy/strategies/media/selected_output_devices/mask
+					speaker = 0
+					hdmi = 0
+					dgtl_dock_headset = 0
+					angl_dock_headset = 0
+					usb_device = 0
+					usb_accessory = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp = 0
+					remote_submix = 0
+					line = 0
+
+		domain: Arc
+			#
+			# the following domains consist of device(s) that can co-exist with others
+			# e.g. ARC, SPDIF, AUX_LINE
+			#
+			conf: Selected
+				AvailableOutputDevices Includes HdmiArc
+
+				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
+
+			conf: NotSelected
+				/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
+
+		domain: Spdif
+			#
+			# the following domains consist of device(s) that can co-exist with others
+			# e.g. ARC, SPDIF, AUX_LINE
+			#
+			conf: Selected
+				AvailableOutputDevices Includes Spdif
+
+				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
+
+			conf: NotSelected
+				/Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
+
+		domain: AuxLine
+			conf: Selected
+				AvailableOutputDevices Includes AuxLine
+
+				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
+
+			conf: NotSelected
+				/Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
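Unlike the mutually exclusive Device2 configurations, the Arc, Spdif and AuxLine domains each drive a single bit, so HDMI ARC, S/PDIF and AUX line outputs are OR'ed into the media routing mask on top of whatever Device2 selected. A minimal C++ sketch of that composition (the bit values are illustrative placeholders, not the real audio_devices_t constants):

    #include <cstdint>

    // Illustrative bit positions only; real values come from audio_devices_t.
    enum : uint32_t {
        kHdmiArc = 1u << 0,
        kSpdif   = 1u << 1,
        kAuxLine = 1u << 2,
    };

    // Device2 picks one "primary" route; each co-existing domain then ORs its
    // own bit in when its device is available (the NotSelected confs clear it
    // otherwise).
    static uint32_t mediaOutputMask(uint32_t primaryDeviceBits,
                                    bool hdmiArcAvailable,
                                    bool spdifAvailable,
                                    bool auxLineAvailable) {
        uint32_t mask = primaryDeviceBits;
        if (hdmiArcAvailable) mask |= kHdmiArc;
        if (spdifAvailable)   mask |= kSpdif;
        if (auxLineAvailable) mask |= kAuxLine;
        return mask;
    }

The NotSelected configurations clear the same bit, so a previous selection does not survive the device becoming unavailable.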
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_phone.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_phone.pfw
new file mode 100644
index 0000000..7b01491
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_phone.pfw
@@ -0,0 +1,485 @@
+supDomain: DeviceForStrategy
+
+	supDomain: Phone
+
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					# the phone strategy is not reachable on remote submix (e.g. WFD)
+					remote_submix = 0
+					hdmi_arc = 0
+					aux_line = 0
+					spdif = 0
+					fm = 0
+					speaker_safe = 0
+
+		domain: Device
+			conf: ScoCarkit
+				AvailableOutputDevices Includes BluetoothScoCarkit
+				ForceUseForCommunication Is ForceBtSco
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 1
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothScoHeadset
+				AvailableOutputDevices Includes BluetoothScoHeadset
+				ForceUseForCommunication Is ForceBtSco
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 1
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothSco
+				AvailableOutputDevices Includes BluetoothSco
+				ForceUseForCommunication Is ForceBtSco
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 1
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothA2dp
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes BluetoothA2dp
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				ANY
+					ForceUseForCommunication Is ForceBtSco
+					ForceUseForCommunication Is ForceNone
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothA2dpHeadphones
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				ANY
+					ForceUseForCommunication Is ForceBtSco
+					ForceUseForCommunication Is ForceNone
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: BluetoothA2dpSpeaker
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				ForceUseForCommunication Is ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: WiredHeadphone
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes WiredHeadphone
+				ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 1
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: WiredHeadset
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes WiredHeadset
+				ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 1
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: UsbDevice
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes UsbDevice
+				ANY
+					ForceUseForCommunication Is ForceBtSco
+					ForceUseForCommunication Is ForceNone
+					ALL
+						ForceUseForCommunication Is ForceSpeaker
+						#
+						# In case of Force Speaker, the priority between device and accessory is
+						# inverted compared to Force None or Bt Sco
+						#
+						AvailableOutputDevices Excludes UsbAccessory
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: UsbAccessory
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes UsbAccessory
+				TelephonyMode IsNot InCommunication
+				TelephonyMode IsNot InCall
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: DgtlDockHeadset
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes DgtlDockHeadset
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: Hdmi
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes Hdmi
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: AnlgDockHeadset
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes AnlgDockHeadset
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: Earpiece
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes Earpiece
+				ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 1
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+			conf: Line
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes Line
+				ForceUseForCommunication Is ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 1
+					speaker = 0
+
+			conf: Speaker
+				#
+				# Fallback BT Sco devices in case of FORCE_BT_SCO
+				# or FORCE_NONE
+				#
+				AvailableOutputDevices Includes Speaker
+				ForceUseForCommunication Is ForceSpeaker
+
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 1
+
+			conf: Default
+				component: /Policy/policy/strategies/phone/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+					speaker = 0
+
+
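Within the Device domain above, the confs are evaluated in the order they are written and the first one whose rules hold programs the mask, so the textual order (ScoCarkit, BluetoothScoHeadset, ..., Speaker, Default) acts as the device priority list for the phone strategy. A minimal C++ sketch of that first-match-wins behaviour, with hypothetical type and function names (the actual evaluation is performed by the parameter-framework runtime):

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    // One entry per conf, in the order they appear in the domain above.
    struct Conf {
        std::string name;                // e.g. "ScoCarkit", "BluetoothScoHeadset", ... "Default"
        std::function<bool()> applies;   // the conf's selection rules
        uint32_t mask;                   // the device mask it programs
    };

    // Within a domain, the first conf whose rules hold is applied, so the
    // textual order above is effectively a device priority list.
    uint32_t selectDeviceMask(const std::vector<Conf>& orderedConfs) {
        for (const Conf& conf : orderedConfs) {
            if (conf.applies()) {
                return conf.mask;  // first match wins
            }
        }
        return 0;  // not reached when a rule-less "Default" conf terminates the list
    }
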
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_rerouting.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_rerouting.pfw
new file mode 100644
index 0000000..d390a33
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_rerouting.pfw
@@ -0,0 +1,297 @@
+domainGroup: DeviceForStrategy
+
+	domainGroup: Rerouting
+		#
+		# Falls through media strategy
+		#
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					hdmi_arc = 0
+					spdif = 0
+					aux_line = 0
+					fm = 0
+					speaker_safe = 0
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					telephony_tx = 0
+
+		domain: Device2
+			conf: RemoteSubmix
+				AvailableOutputDevices Includes RemoteSubmix
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 1
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dp
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dp
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dpHeadphone
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dpSpeaker
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: ForceSpeaker
+				ForceUseForMedia Is ForceSpeaker
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: WiredHeadphone
+				AvailableOutputDevices Includes WiredHeadphone
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 1
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Line
+				AvailableOutputDevices Includes Line
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: WiredHeadset
+				AvailableOutputDevices Includes WiredHeadset
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 1
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: UsbAccessory
+				AvailableOutputDevices Includes UsbAccessory
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					hdmi = 0
+
+			conf: UsbDevice
+				AvailableOutputDevices Includes UsbDevice
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					hdmi = 0
+
+			conf: DgtlDockHeadset
+				AvailableOutputDevices Includes DgtlDockHeadset
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: AuxDigital
+				#
+				# Rerouting is similar to media and sonification (except that sonification is not allowed on HDMI here)
+				#
+				AvailableOutputDevices Includes Hdmi
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 1
+
+			conf: AnlgDockHeadset
+				AvailableOutputDevices Includes AnlgDockHeadset
+				ForceUseForDock Is ForceAnalogDock
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Default
+				component: /Policy/policy/strategies/rerouting/selected_output_devices/mask
+					remote_submix = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					speaker = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification.pfw
new file mode 100644
index 0000000..71101f8
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification.pfw
@@ -0,0 +1,485 @@
+supDomain: DeviceForStrategy
+
+	supDomain: Sonification
+
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					# no sonification on remote submix (e.g. WFD)
+					remote_submix = 0
+					hdmi_arc = 0
+					spdif = 0
+					fm = 0
+					speaker_safe = 0
+					aux_line = 0
+					#
+					# Sonification follows phone strategy if in call but HDMI is not reachable
+					#
+					hdmi = 0
+
+		domain: Speaker
+
+			conf: Selected
+				AvailableOutputDevices Includes Speaker
+				ANY
+					#
+					# Sonification falls through ENFORCED_AUDIBLE if not in call (widely speaking)
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+					ALL
+						#
+						# Sonification follows phone strategy if in call (widely speaking)
+						#
+						ForceUseForCommunication Is ForceSpeaker
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						AvailableOutputDevices Excludes Line
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					speaker = 1
+
+			conf: NotSelected
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					speaker = 0
+
+		domain: Device2
+
+			conf: BluetoothA2dp
+				#
+				# Sonification falls through media strategy if not in call (widely speaking)
+				#
+				AvailableOutputDevices Includes BluetoothA2dp
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: BluetoothA2dpHeadphones
+				#
+				# Sonification falls through media strategy if not in call (widely speaking)
+				#
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: BluetoothA2dpSpeaker
+				#
+				# Sonification falls through media strategy if not in call (widely speaking)
+				#
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: ScoCarkit
+				#
+				# Sonification follows phone strategy if in call (widely speaking)
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceBtSco
+				AvailableOutputDevices Includes BluetoothScoCarkit
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 1
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: ScoHeadset
+				#
+				# Sonification follows phone strategy if in call (widely speaking)
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceBtSco
+				AvailableOutputDevices Includes BluetoothScoHeadset
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 1
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: Sco
+				#
+				# Sonification follows phone strategy if in call (widely speaking)
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceBtSco
+				AvailableOutputDevices Includes BluetoothSco
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 1
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: WiredHeadphone
+				AvailableOutputDevices Includes WiredHeadphone
+				ANY
+					#
+					# Sonification falls through media strategy if not in call (widely speaking)
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+					#
+					# Sonification follows Phone strategy if in call (widely speaking)
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 1
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: Line
+				AvailableOutputDevices Includes Line
+				ANY
+					#
+					# Sonification follows Phone strategy if in call (widely speaking)
+					#
+					ALL
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication Is ForceSpeaker
+					#
+					# Sonification falls through media strategy if not in call (widely speaking)
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 1
+
+			conf: WiredHeadset
+				AvailableOutputDevices Includes WiredHeadset
+				ANY
+					#
+					# Sonification falls through media strategy if not in call (widely speaking)
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+					ALL
+						#
+						# Sonification Follows Phone Strategy if in call (widely speaking)
+						#
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 1
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: UsbDevice
+				AvailableOutputDevices Includes UsbDevice
+				ANY
+					#
+					# Sonification falls through media strategy if not in call (widely speaking)
+					#
+					ALL
+						AvailableOutputDevices Excludes UsbAccessory
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+					ALL
+						#
+						# Sonification Follows Phone Strategy if in call (widely speaking)
+						#
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					telephony_tx = 0
+					line = 0
+
+			conf: UsbAccessory
+				AvailableOutputDevices Includes UsbAccessory
+				#
+				# Sonification falls through media strategy if not in call (widely speaking)
+				#
+				# Sonification Follows Phone Strategy if in call (widely speaking)
+				# but USB Accessory not reachable in call.
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: DgtlDockHeadset
+				AvailableOutputDevices Includes DgtlDockHeadset
+				#
+				# Sonification falls through media strategy if not in call
+				#
+				# Sonification Follows Phone Strategy if in call (widely speaking)
+				# but DgtlDockHeadset not reachable in call.
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: AnlgDockHeadset
+				AvailableOutputDevices Includes AnlgDockHeadset
+				#
+				# Sonification falls through media strategy if not in call
+				#
+				# Sonification Follows Phone Strategy if in call (widely speaking)
+				# but AnlgDockHeadset not reachable in call.
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+				ForceUseForDock Is ForceAnalogDock
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: Earpiece
+				#
+				# Sonification Follows Phone Strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication IsNot ForceSpeaker
+				AvailableOutputDevices Includes Earpiece
+
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 1
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+			conf: None
+				component: /Policy/policy/strategies/sonification/selected_output_devices/mask
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
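
The confs above combine criteria with nested ANY and ALL blocks: ANY is a logical OR over its children, ALL a logical AND, and criteria listed directly under a conf behave like an implicit ALL. A minimal C++ sketch of how such a rule tree evaluates, assuming the leaf criteria (e.g. "TelephonyMode Is InCall") have already been resolved to booleans:

    #include <memory>
    #include <vector>

    // A rule is either a leaf criterion, already evaluated to a bool here,
    // or a compound: ANY = logical OR, ALL = logical AND.
    struct Rule {
        enum class Kind { Leaf, Any, All };
        Kind kind = Kind::Leaf;
        bool value = false;                           // leaf result
        std::vector<std::shared_ptr<Rule>> children;  // for Any/All

        bool eval() const {
            switch (kind) {
            case Kind::Leaf:
                return value;
            case Kind::Any:
                for (const auto& child : children) {
                    if (child->eval()) return true;
                }
                return false;
            case Kind::All:
                for (const auto& child : children) {
                    if (!child->eval()) return false;
                }
                return true;
            }
            return false;
        }
    };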
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification_respectful.pfw
new file mode 100644
index 0000000..f66674c
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_sonification_respectful.pfw
@@ -0,0 +1,545 @@
+domainGroup: DeviceForStrategy
+
+	domainGroup: SonificationRespectful
+		#
+		# Sonification Respectful follows:
+		#	- If in call: strategy sonification (which itself follows the phone strategy in call)
+		#	- If not in call AND a music stream is active remotely: strategy sonification (which
+		#	  follows enforced audible, which follows media)
+		#	- If not in call, no music stream is active remotely and a music stream is active
+		#	  locally: strategy media
+		#	- Otherwise: follows sonification, replacing speaker with speaker safe whenever
+		#	  speaker is selected
+		#
+		# The cases that depend on active streams are handled programmatically
+
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					remote_submix = 0
+					hdmi_arc = 0
+					aux_line = 0
+					spdif = 0
+					fm = 0
+					telephony_tx = 0
+
+		domain: Speakers
+
+			conf: SpeakerSafe
+				AvailableOutputDevices Includes Speaker
+				AvailableOutputDevices Includes SpeakerSafe
+				#
+				# Follows the sonification strategy if not in call, replacing speaker with speaker
+				# safe if and only if speaker is the only device selected
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					speaker_safe = 1
+					speaker = 0
+
+			conf: Speaker
+				AvailableOutputDevices Includes Speaker
+				ANY
+					#
+					# Follows sonification strategy if not in call
+					#
+					ALL
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+					ALL
+						#
+						# Follows Phone Strategy if call
+						#
+						ForceUseForCommunication Is ForceSpeaker
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						AvailableOutputDevices Excludes Line
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					speaker_safe = 0
+					speaker = 1
+
+			conf: None
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					speaker_safe = 0
+					speaker = 0
+
+		domain: Device2
+			conf: BluetoothA2dp
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dp
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 1
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dpHeadphones
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpHeadphones
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 1
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothA2dpSpeaker
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceNoBtA2dp
+				AvailableOutputDevices Includes BluetoothA2dpSpeaker
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 1
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothScoCarkit
+				#
+				# SonificationRespectful Follows Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceBtSco
+				AvailableOutputDevices Includes BluetoothScoCarkit
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 1
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothScoHeadset
+				#
+				# SonificationRespectful Follows Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceBtSco
+				AvailableOutputDevices Includes BluetoothScoHeadset
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 1
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: BluetoothSco
+				#
+				# SonificationRespectful Follows Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceBtSco
+				AvailableOutputDevices Includes BluetoothSco
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 1
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: WiredHeadphone
+				ANY
+					ALL
+						#
+						# SonificationRespectful Follows Phone strategy if in call
+						#
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+					ALL
+						#
+						# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+						# SonificationRespectful follows media if music stream is active
+						#
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes WiredHeadphone
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 1
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: LineWhenFollowMediaStrategy
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				AvailableOutputDevices Includes Line
+				ForceUseForMedia IsNot ForceSpeaker
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: WiredHeadset
+				ANY
+					ALL
+						#
+						# SonificationRespectful Follows Phone strategy if in call
+						#
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+					ALL
+						#
+						# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+						# SonificationRespectful follows media if music stream is active
+						#
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes WiredHeadset
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 1
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: UsbDevice
+				ANY
+					ALL
+						#
+						# SonificationRespectful Follows Phone strategy if in call
+						#
+						ANY
+							TelephonyMode Is InCall
+							TelephonyMode Is InCommunication
+						ForceUseForCommunication IsNot ForceSpeaker
+					ALL
+						#
+						# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+						# SonificationRespectful follows media if music stream is active
+						#
+						TelephonyMode IsNot InCall
+						TelephonyMode IsNot InCommunication
+						ForceUseForMedia IsNot ForceSpeaker
+						AvailableOutputDevices Excludes UsbAccessory
+				AvailableOutputDevices Includes UsbDevice
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 1
+					hdmi = 0
+
+			conf: UsbAccessory
+				#
+				# SonificationRespectful Follows Phone strategy if in call (widely speaking)
+				# but UsbAccessory not reachable in call.
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes UsbAccessory
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 1
+					usb_device = 0
+					hdmi = 0
+
+			conf: DgtlDockHeadset
+				#
+				# SonificationRespectful Follows Phone strategy if in call (widely speaking)
+				# but DgtlDockHeadset not reachable in call.
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes DgtlDockHeadset
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 1
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: AuxDigital
+				#
+				# SonificationRespectful Follows Phone strategy if in call (widely speaking)
+				# but HDMI not reachable in call.
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+				AvailableOutputDevices Includes Hdmi
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 1
+
+			conf: AnlgDockHeadset
+				#
+				# SonificationRespectful Follows Phone strategy if in call (widely speaking)
+				# but AnlgDockHeadset not reachable in call.
+				#
+				# SonificationRespectful Follows Sonification that falls through Media strategy if not in call
+				# SonificationRespectful follows media if music stream is active
+				#
+				TelephonyMode IsNot InCall
+				TelephonyMode IsNot InCommunication
+				ForceUseForMedia IsNot ForceSpeaker
+				ForceUseForDock Is ForceAnalogDock
+				AvailableOutputDevices Includes AnlgDockHeadset
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 1
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Earpiece
+				#
+				# SonificationRespectful Follows Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication IsNot ForceSpeaker
+				AvailableOutputDevices Includes Earpiece
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 1
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
+
+			conf: Line
+				#
+				# SonificationRespectful Follows Phone strategy if in call
+				#
+				ANY
+					TelephonyMode Is InCall
+					TelephonyMode Is InCommunication
+				ForceUseForCommunication Is ForceSpeaker
+				AvailableOutputDevices Includes Line
+
+				component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
+					earpiece = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					bluetooth_a2dp = 0
+					wired_headset = 0
+					wired_headphone = 0
+					line = 1
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					hdmi = 0
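
The Speakers domain above swaps the speaker bit for speaker_safe when the device is not in a call and both Speaker and SpeakerSafe are available. A rough C++ restatement of that substitution, with hypothetical bit constants and a hypothetical helper name (only a sketch of the rule, not the engine's code):

    #include <cstdint>

    // Hypothetical bit values, for illustration only.
    constexpr uint32_t kSpeaker     = 1u << 0;
    constexpr uint32_t kSpeakerSafe = 1u << 1;

    // Mirrors the Speakers domain above: outside of a call, when both Speaker
    // and SpeakerSafe are available, the speaker bit is swapped for speaker_safe.
    uint32_t applySpeakerSafe(uint32_t selectedMask, uint32_t availableMask, bool inCall) {
        const bool bothAvailable =
                (availableMask & kSpeaker) && (availableMask & kSpeakerSafe);
        if (!inCall && bothAvailable && (selectedMask & kSpeaker)) {
            selectedMask &= ~kSpeaker;
            selectedMask |= kSpeakerSafe;
        }
        return selectedMask;
    }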
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_transmitted_through_speaker.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_transmitted_through_speaker.pfw
new file mode 100644
index 0000000..e5ae9d9
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_transmitted_through_speaker.pfw
@@ -0,0 +1,40 @@
+supDomain: DeviceForStrategy
+
+	supDomain: TransmittedThroughSpeaker
+		domain: UnreachableDevices
+			conf: Calibration
+				component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+					remote_submix = 0
+					hdmi_arc = 0
+					spdif = 0
+					aux_line = 0
+					fm = 0
+					speaker_safe = 0
+					earpiece = 0
+					wired_headset = 0
+					wired_headphone = 0
+					bluetooth_sco = 0
+					bluetooth_sco_headset = 0
+					bluetooth_sco_carkit = 0
+					bluetooth_a2dp = 0
+					bluetooth_a2dp_headphones = 0
+					bluetooth_a2dp_speaker = 0
+					hdmi = 0
+					angl_dock_headset = 0
+					dgtl_dock_headset = 0
+					usb_accessory = 0
+					usb_device = 0
+					telephony_tx = 0
+					line = 0
+
+		domain: Speaker
+			conf: Selected
+				AvailableOutputDevices Includes Speaker
+
+				component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+					speaker = 1
+
+			conf: NotSelected
+				component: /Policy/policy/strategies/transmitted_through_speaker/selected_output_devices/mask
+					speaker = 0
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_stream.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_stream.pfw
new file mode 100755
index 0000000..3940b9d
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_stream.pfw
@@ -0,0 +1,20 @@
+domain: StrategyForStream
+
+	conf: Calibration
+		/Policy/policy/streams/voice_call/applicable_strategy/strategy = phone
+		#
+		# NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
+		# while key clicks are played produces a poor result
+		#
+		/Policy/policy/streams/system/applicable_strategy/strategy = media
+		/Policy/policy/streams/ring/applicable_strategy/strategy = sonification
+		/Policy/policy/streams/music/applicable_strategy/strategy = media
+		/Policy/policy/streams/alarm/applicable_strategy/strategy = sonification
+		/Policy/policy/streams/notification/applicable_strategy/strategy = sonification_respectful
+		/Policy/policy/streams/bluetooth_sco/applicable_strategy/strategy = phone
+		/Policy/policy/streams/enforced_audible/applicable_strategy/strategy = enforced_audible
+		/Policy/policy/streams/dtmf/applicable_strategy/strategy = dtmf
+		/Policy/policy/streams/tts/applicable_strategy/strategy = transmitted_through_speaker
+		/Policy/policy/streams/accessibility/applicable_strategy/strategy = accessibility
+		/Policy/policy/streams/rerouting/applicable_strategy/strategy = rerouting
+
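The Calibration conf above is a static stream-to-strategy table. A minimal C++ mirror of it, using the identifiers as spelled in the .pfw file (the container and variable name are hypothetical):

    #include <map>
    #include <string>

    // Mirror of the Calibration conf above: each stream maps statically to one
    // routing strategy.
    const std::map<std::string, std::string> kStreamToStrategy = {
        {"voice_call",       "phone"},
        {"system",           "media"},
        {"ring",             "sonification"},
        {"music",            "media"},
        {"alarm",            "sonification"},
        {"notification",     "sonification_respectful"},
        {"bluetooth_sco",    "phone"},
        {"enforced_audible", "enforced_audible"},
        {"dtmf",             "dtmf"},
        {"tts",              "transmitted_through_speaker"},
        {"accessibility",    "accessibility"},
        {"rerouting",        "rerouting"},
    };
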
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_usage.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_usage.pfw
new file mode 100644
index 0000000..3f5da13
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/strategy_for_usage.pfw
@@ -0,0 +1,39 @@
+supDomain: SelectedStrategyForUsages
+
+	domain: Calibration
+		conf: Calibration
+			/Policy/policy/usages/unknown/applicable_strategy/strategy = media
+			/Policy/policy/usages/media/applicable_strategy/strategy = media
+			/Policy/policy/usages/voice_communication/applicable_strategy/strategy = phone
+			/Policy/policy/usages/voice_communication_signalling/applicable_strategy/strategy = dtmf
+			/Policy/policy/usages/alarm/applicable_strategy/strategy = sonification
+			/Policy/policy/usages/notification/applicable_strategy/strategy = sonification_respectful
+			/Policy/policy/usages/notification_telephony_ringtone/applicable_strategy/strategy = sonification
+			/Policy/policy/usages/notification_communication_request/applicable_strategy/strategy = sonification_respectful
+			/Policy/policy/usages/notification_communication_instant/applicable_strategy/strategy = sonification_respectful
+			/Policy/policy/usages/notification_communication_delayed/applicable_strategy/strategy = sonification_respectful
+			/Policy/policy/usages/notification_event/applicable_strategy/strategy = sonification_respectful
+			/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy = media
+			/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy = media
+			/Policy/policy/usages/game/applicable_strategy/strategy = media
+
+	domain: AssistanceAccessibility
+		conf: Sonification
+			#
+			# If a Ring or Alarm stream type is active, switch to sonification.
+			# @todo: handle this dynamic case properly. As a workaround, key off the RingTone mode.
+			#
+			TelephonyMode Is RingTone
+
+			/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy = sonification
+
+		conf: Phone
+			ANY
+				TelephonyMode Is InCall
+				TelephonyMode Is InCommunication
+
+			/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy = phone
+
+		conf: Accessibility
+			/Policy/policy/usages/assistance_accessibility/applicable_strategy/strategy = accessibility
+
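The AssistanceAccessibility domain is the only dynamic rule in this file: the strategy applied to the accessibility usage follows the telephony mode, with the RingTone case flagged above as a workaround. A hedged restatement in C++, assuming the audio_mode_t constants from <system/audio.h> and returning the strategy literal names used in these settings:

    #include <system/audio.h>

    // Illustrative restatement of the AssistanceAccessibility domain above.
    static const char *strategyForAccessibilityUsage(audio_mode_t telephonyMode)
    {
        switch (telephonyMode) {
        case AUDIO_MODE_RINGTONE:         return "sonification";  // workaround noted in the @todo above
        case AUDIO_MODE_IN_CALL:
        case AUDIO_MODE_IN_COMMUNICATION: return "phone";
        default:                          return "accessibility";
        }
    }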
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/volumes.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/volumes.pfw
new file mode 100644
index 0000000..1049564
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/volumes.pfw
@@ -0,0 +1,545 @@
+supDomain: VolumeProfilesForStream
+	domain: Calibration
+		conf: Calibration
+			component: /Policy/policy/streams
+				component: voice_call/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: speaker_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -16.0
+						2/index = 66
+						2/db_attenuation = -8.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
+				component: system/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -30.0
+						1/index = 33
+						1/db_attenuation = -26.0
+						2/index = 66
+						2/db_attenuation = -22.0
+						3/index = 100
+						3/db_attenuation = -18.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -21.0
+						3/index = 100
+						3/db_attenuation = -10.0
+
+				component: ring/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -49.5
+						1/index = 33
+						1/db_attenuation = -33.5
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -35.7
+						1/index = 33
+						1/db_attenuation = -26.1
+						2/index = 66
+						2/db_attenuation = -13.2
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -49.5
+						1/index = 33
+						1/db_attenuation = -33.5
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -27.0
+						3/index = 100
+						3/db_attenuation = -10.0
+
+				component: music/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -56.0
+						1/index = 33
+						1/db_attenuation = -34.0
+						2/index = 66
+						2/db_attenuation = -11.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
+				component: alarm/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -49.5
+						1/index = 33
+						1/db_attenuation = -33.5
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -35.7
+						1/index = 33
+						1/db_attenuation = -26.1
+						2/index = 66
+						2/db_attenuation = -13.2
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -49.5
+						1/index = 33
+						1/db_attenuation = -33.5
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -27.0
+						3/index = 100
+						3/db_attenuation = -10.0
+
+				component: notification/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -49.5
+						1/index = 33
+						1/db_attenuation = -33.5
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -35.7
+						1/index = 33
+						1/db_attenuation = -26.1
+						2/index = 66
+						2/db_attenuation = -13.2
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -49.5
+						1/index = 33
+						1/db_attenuation = -33.5
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -27.0
+						3/index = 100
+						3/db_attenuation = -10.0
+
+				component: bluetooth_sco/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: speaker_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -16.0
+						2/index = 66
+						2/db_attenuation = -8.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
+				component: enforced_audible/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -30.0
+						1/index = 33
+						1/db_attenuation = -26.0
+						2/index = 66
+						2/db_attenuation = -22.0
+						3/index = 100
+						3/db_attenuation = -18.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -27.0
+						3/index = 100
+						3/db_attenuation = -10.0
+
+				component: tts/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -96.0
+						1/index = 1
+						1/db_attenuation = -96.0
+						2/index = 2
+						2/db_attenuation = -96.0
+						3/index = 100
+						3/db_attenuation = -96.0
+					component: speaker_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -96.0
+						1/index = 33
+						1/db_attenuation = -68.0
+						2/index = 66
+						2/db_attenuation = -34.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -96.0
+						1/index = 1
+						1/db_attenuation = -96.0
+						2/index = 2
+						2/db_attenuation = -96.0
+						3/index = 100
+						3/db_attenuation = -96.0
+					component: extmedia_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -96.0
+						1/index = 1
+						1/db_attenuation = -96.0
+						2/index = 2
+						2/db_attenuation = -96.0
+						3/index = 100
+						3/db_attenuation = -96.0
+
+				component: accessibility/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -56.0
+						1/index = 33
+						1/db_attenuation = -34.0
+						2/index = 66
+						2/db_attenuation = -11.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
+				component: rerouting/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
+				component: patch/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: speaker_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: extmedia_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = 0.0
+						1/index = 1
+						1/db_attenuation = 0.0
+						2/index = 2
+						2/db_attenuation = 0.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
+	domain: Dtmf
+		conf: InCall
+			ANY
+				TelephonyMode Is InCall
+				TelephonyMode Is InCommunication
+
+			component: /Policy/policy/streams
+				component: dtmf/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -30.0
+						1/index = 33
+						1/db_attenuation = -26.0
+						2/index = 66
+						2/db_attenuation = -22.0
+						3/index = 100
+						3/db_attenuation = -18.0
+					component: speaker_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: earpiece_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -27.0
+						3/index = 100
+						3/db_attenuation = -10.0
+
+		conf: OutOfCall
+			component: /Policy/policy/streams
+				component: dtmf/volume_profiles
+					component: headset_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: speaker_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -16.0
+						2/index = 66
+						2/db_attenuation = -8.0
+						3/index = 100
+						3/db_attenuation = 0.0
+					component: earpiece_device_category/curve_points
+						0/index = 0
+						0/db_attenuation = -24.0
+						1/index = 33
+						1/db_attenuation = -18.0
+						2/index = 66
+						2/db_attenuation = -12.0
+						3/index = 100
+						3/db_attenuation = -6.0
+					component: extmedia_device_category/curve_points
+						0/index = 1
+						0/db_attenuation = -58.0
+						1/index = 33
+						1/db_attenuation = -40.0
+						2/index = 66
+						2/db_attenuation = -17.0
+						3/index = 100
+						3/db_attenuation = 0.0
+
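Each stream/device-category block above is a four-point piecewise-linear curve: index is the volume index rescaled to the 0..100 range and db_attenuation is the gain applied at that point. A minimal evaluation sketch using linear interpolation between points (the engine's actual curve handling may differ):

    // Evaluate a 4-point volume curve such as the ones defined above.
    struct CurvePoint { int index; float dbAttenuation; };

    static float volumeDbForIndex(const CurvePoint (&curve)[4], int volumeIndex)
    {
        if (volumeIndex <= curve[0].index) return curve[0].dbAttenuation;
        if (volumeIndex >= curve[3].index) return curve[3].dbAttenuation;
        for (int i = 0; i < 3; i++) {
            if (volumeIndex <= curve[i + 1].index) {
                const float span = static_cast<float>(curve[i + 1].index - curve[i].index);
                const float frac = (volumeIndex - curve[i].index) / span;
                return curve[i].dbAttenuation
                        + frac * (curve[i + 1].dbAttenuation - curve[i].dbAttenuation);
            }
        }
        return curve[3].dbAttenuation;
    }

    // Example: the music/speaker_device_category curve above, {1,-56} {33,-34} {66,-11} {100,0},
    // yields roughly -22 dB at volume index 50.
    static const CurvePoint kMusicSpeaker[4] = { {1, -56.0f}, {33, -34.0f}, {66, -11.0f}, {100, 0.0f} };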
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicyClass.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicyClass.xml
new file mode 100755
index 0000000..296879f
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicyClass.xml
@@ -0,0 +1,5 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<SystemClass xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:noNamespaceSchemaLocation="../../Schemas/SystemClass.xsd" Name="Policy">
+    <SubsystemInclude Path="PolicySubsystem.xml"/>
+</SystemClass>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-CommonTypes.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-CommonTypes.xml
new file mode 100755
index 0000000..821d6ad
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-CommonTypes.xml
@@ -0,0 +1,170 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ComponentTypeSet xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+              xmlns:xi="http://www.w3.org/2001/XInclude"
+              xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
+    <!-- Output devices defined as a bitfield of the supported devices per output
+    profile. It must match the output device enum parameter.
+    -->
+     <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
+     <!-- Common Types definition -->
+     <xi:include href="PolicySubsystem-Volume.xml"/>
+
+     <!--#################### GLOBAL COMPONENTS END ####################-->
+
+    <ComponentType Name="OutputDevicesMask" Description="Bit 31 is reserved as the input
+                                                     device flag and must not be used here">
+        <BitParameterBlock Name="mask" Size="32">
+            <BitParameter Name="earpiece" Size="1" Pos="0"/>
+            <BitParameter Name="speaker" Size="1" Pos="1"/>
+            <BitParameter Name="wired_headset" Size="1" Pos="2"/>
+            <BitParameter Name="wired_headphone" Size="1" Pos="3"/>
+            <BitParameter Name="bluetooth_sco" Size="1" Pos="4"/>
+            <BitParameter Name="bluetooth_sco_headset" Size="1" Pos="5"/>
+            <BitParameter Name="bluetooth_sco_carkit" Size="1" Pos="6"/>
+            <BitParameter Name="bluetooth_a2dp" Size="1" Pos="7"/>
+            <BitParameter Name="bluetooth_a2dp_headphones" Size="1" Pos="8"/>
+            <BitParameter Name="bluetooth_a2dp_speaker" Size="1" Pos="9"/>
+            <BitParameter Name="hdmi" Size="1" Pos="10"/>
+            <BitParameter Name="angl_dock_headset" Size="1" Pos="11"/>
+            <BitParameter Name="dgtl_dock_headset" Size="1" Pos="12"/>
+            <BitParameter Name="usb_accessory" Size="1" Pos="13"/>
+            <BitParameter Name="usb_device" Size="1" Pos="14"/>
+            <BitParameter Name="remote_submix" Size="1" Pos="15"/>
+            <BitParameter Name="telephony_tx" Size="1" Pos="16"/>
+            <BitParameter Name="line" Size="1" Pos="17"/>
+            <BitParameter Name="hdmi_arc" Size="1" Pos="18"/>
+            <BitParameter Name="spdif" Size="1" Pos="19"/>
+            <BitParameter Name="fm" Size="1" Pos="20"/>
+            <BitParameter Name="aux_line" Size="1" Pos="21"/>
+            <BitParameter Name="speaker_safe" Size="1" Pos="22"/>
+        </BitParameterBlock>
+    </ComponentType>
+
+    <!-- Input devices defined as a bitfield of the supported devices per input
+    profile. It must match the input device enum parameter.
+    -->
+    <ComponentType Name="InputDevicesMask">
+        <BitParameterBlock Name="mask" Size="32">
+            <BitParameter Name="communication" Size="1" Pos="0"/>
+            <BitParameter Name="ambient" Size="1" Pos="1"/>
+            <BitParameter Name="builtin_mic" Size="1" Pos="2"/>
+            <BitParameter Name="bluetooth_sco_headset" Size="1" Pos="3"/>
+            <BitParameter Name="wired_headset" Size="1" Pos="4"/>
+            <BitParameter Name="hdmi" Size="1" Pos="5"/>
+            <BitParameter Name="telephony_rx" Size="1" Pos="6"/>
+            <BitParameter Name="back_mic" Size="1" Pos="7"/>
+            <BitParameter Name="remote_submix" Size="1" Pos="8"/>
+            <BitParameter Name="anlg_dock_headset" Size="1" Pos="9"/>
+            <BitParameter Name="dgtl_dock_headset" Size="1" Pos="10"/>
+            <BitParameter Name="usb_accessory" Size="1" Pos="11"/>
+            <BitParameter Name="usb_device" Size="1" Pos="12"/>
+            <BitParameter Name="fm_tuner" Size="1" Pos="13"/>
+            <BitParameter Name="tv_tuner" Size="1" Pos="14"/>
+            <BitParameter Name="line" Size="1" Pos="15"/>
+            <BitParameter Name="spdif" Size="1" Pos="16"/>
+            <BitParameter Name="bluetooth_a2dp" Size="1" Pos="17"/>
+            <BitParameter Name="loopback" Size="1" Pos="18"/>
+            <BitParameter Name="in" Size="1" Pos="31"/>
+        </BitParameterBlock>
+    </ComponentType>
+
+    <ComponentType Name="OutputFlags"
+                   Description="the audio output flags serve two purposes:
+                    - when an AudioTrack is created they indicate a wish to be connected to an
+                      output stream with attributes corresponding to the specified flags.
+                    - when present in an output profile descriptor listed for a particular audio
+                      hardware module, they indicate that an output stream can be opened that
+                      supports the attributes indicated by the flags.
+                    The audio policy manager will try to match the flags in the request
+                    (when getOutput() is called) to an available output stream.">
+        <BitParameterBlock Name="mask" Size="32">
+            <BitParameter Name="direct" Size="1" Pos="0"/>
+            <BitParameter Name="primary" Size="1" Pos="1"/>
+            <BitParameter Name="fast" Size="1" Pos="2"/>
+            <BitParameter Name="deep_buffer" Size="1" Pos="3"/>
+            <BitParameter Name="compress_offload" Size="1" Pos="4"/>
+            <BitParameter Name="non_blocking" Size="1" Pos="5"/>
+            <BitParameter Name="hw_av_sync" Size="1" Pos="6"/>
+        </BitParameterBlock>
+    </ComponentType>
+
+    <ComponentType Name="InputFlags"
+                   Description="The audio input flags are analogous to audio output flags.
+                                Currently they are used only when an AudioRecord is created,
+                                to indicate a preference to be connected to an input stream with
+                                attributes corresponding to the specified flags.">
+        <BitParameterBlock Name="mask" Size="32">
+            <BitParameter Name="fast" Size="1" Pos="0"/>
+            <BitParameter Name="hw_hotword" Size="1" Pos="2"/>
+        </BitParameterBlock>
+    </ComponentType>
+
+    <ComponentType Name="InputSourcesMask" Description="The audio input source is also known
+                                                        as the use case.">
+        <BitParameterBlock Name="mask" Size="32">
+            <BitParameter Name="default" Size="1" Pos="0"/>
+            <BitParameter Name="mic" Size="1" Pos="1"/>
+            <BitParameter Name="voice_uplink" Size="1" Pos="2"/>
+            <BitParameter Name="voice_downlink" Size="1" Pos="3"/>
+            <BitParameter Name="voice_call" Size="1" Pos="4"/>
+            <BitParameter Name="camcorder" Size="1" Pos="5"/>
+            <BitParameter Name="voice_recognition" Size="1" Pos="6"/>
+            <BitParameter Name="voice_communication" Size="1" Pos="7"/>
+            <BitParameter Name="remote_submix" Size="1" Pos="8"/>
+            <BitParameter Name="fm_tuner" Size="1" Pos="9"/>
+            <BitParameter Name="hotword" Size="1" Pos="10"/>
+        </BitParameterBlock>
+    </ComponentType>
+
+    <!-- Routing Strategy definition as an enumeration. Numerical value must match the value
+         of the routing strategy in policy header file. -->
+    <ComponentType Name="Strategy">
+        <EnumParameter Name="strategy" Size="32">
+            <ValuePair Literal="media" Numerical="0"/>
+            <ValuePair Literal="phone" Numerical="1"/>
+            <ValuePair Literal="sonification" Numerical="2"/>
+            <ValuePair Literal="sonification_respectful" Numerical="3"/>
+            <ValuePair Literal="dtmf" Numerical="4"/>
+            <ValuePair Literal="enforced_audible" Numerical="5"/>
+            <ValuePair Literal="transmitted_through_speaker" Numerical="6"/>
+            <ValuePair Literal="accessibility" Numerical="7"/>
+            <ValuePair Literal="rerouting" Numerical="8"/>
+        </EnumParameter>
+    </ComponentType>
+
+    <!--#################### STRATEGY COMMON TYPES BEGIN ####################-->
+
+    <ComponentType Name="StrategyConfig" Mapping="Strategy:'%1'">
+        <Component Name="selected_output_devices" Type="OutputDevicesMask"/>
+    </ComponentType>
+
+    <!--#################### STRATEGY COMMON TYPES END ####################-->
+
+    <!--#################### STREAM COMMON TYPES BEGIN ####################-->
+
+    <ComponentType Name="Stream">
+        <Component Name="applicable_strategy" Type="Strategy" Mapping="Stream:'%1'"/>
+        <Component Name="volume_profiles" Type="VolumeCurvesCategories"
+                   Description="A volume profile is referred to by the stream type."/>
+    </ComponentType>
+
+    <!--#################### STREAM COMMON TYPES END ####################-->
+
+    <!--#################### USAGE COMMON TYPES BEGIN ####################-->
+
+    <ComponentType Name="Usage">
+        <Component Name="applicable_strategy" Type="Strategy" Mapping="Usage:'%1'"/>
+    </ComponentType>
+
+    <!--#################### USAGE COMMON TYPES END ####################-->
+
+    <!--#################### INPUT SOURCE COMMON TYPES BEGIN ####################-->
+
+    <ComponentType Name="InputSource">
+        <Component Name="applicable_input_device" Type="InputDevicesMask"
+                   Mapping="InputSource:'%1'" Description="Selected Input device"/>
+    </ComponentType>
+
+    <!--#################### INPUT SOURCE COMMON TYPES END ####################-->
+
+</ComponentTypeSet>
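The Pos attributes in OutputDevicesMask are meant to line up with the bit positions of the audio_devices_t output device constants, so the engine can hand the raw mask straight back to the policy manager. A few compile-time spot checks, assuming the audio.h values shipped with this release (illustrative only, not part of the build):

    #include <system/audio.h>

    static_assert(AUDIO_DEVICE_OUT_EARPIECE      == (1u << 0),  "earpiece is Pos 0");
    static_assert(AUDIO_DEVICE_OUT_SPEAKER       == (1u << 1),  "speaker is Pos 1");
    static_assert(AUDIO_DEVICE_OUT_WIRED_HEADSET == (1u << 2),  "wired_headset is Pos 2");
    static_assert(AUDIO_DEVICE_OUT_TELEPHONY_TX  == (1u << 16), "telephony_tx is Pos 16");
    static_assert(AUDIO_DEVICE_OUT_LINE          == (1u << 17), "line is Pos 17");
    static_assert(AUDIO_DEVICE_OUT_SPEAKER_SAFE  == (1u << 22), "speaker_safe is Pos 22");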
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-Volume.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-Volume.xml
new file mode 100755
index 0000000..cf39cc2
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem-Volume.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<ComponentTypeSet xmlns:xi="http://www.w3.org/2001/XInclude"
+                  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+                  xsi:noNamespaceSchemaLocation="Schemas/ComponentTypeSet.xsd">
+  <ComponentType Name="VolumeCurvePoints">
+    <ParameterBlock Name="curve_points" ArrayLength="4" Mapping="VolumeProfile:'%1'"
+        Description="4 points defining the volume attenuation curve, each
+                     characterized by the volume index (from 0 to 100) at which
+                     it applies and by the attenuation in dB at that index.
+                     100 steps are used to avoid rounding errors when computing
+                     the volume">
+        <IntegerParameter Name="index" Size="32"/>
+        <FixedPointParameter Name="db_attenuation" Size="16" Integral="7" Fractional="8"/>
+     </ParameterBlock>
+    </ComponentType>
+
+    <ComponentType Name="VolumeCurvesCategories">
+        <Component Name="headset_device_category" Type="VolumeCurvePoints" Mapping="Category:0"/>
+        <Component Name="speaker_device_category" Type="VolumeCurvePoints" Mapping="Category:1"/>
+        <Component Name="earpiece_device_category" Type="VolumeCurvePoints" Mapping="Category:2"/>
+        <Component Name="extmedia_device_category" Type="VolumeCurvePoints" Mapping="Category:3"/>
+    </ComponentType>
+
+</ComponentTypeSet>
+
+
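db_attenuation above is a 16-bit fixed-point value with 7 integral and 8 fractional bits, i.e. a signed Q7.8 number covering roughly -128.0 dB to +127.996 dB in 1/256 dB steps. A small encode/decode sketch, assuming the usual two's-complement Q-format convention:

    #include <cmath>
    #include <cstdint>

    // Q7.8 as implied by Integral="7" Fractional="8" on the db_attenuation parameter.
    static int16_t encodeQ7_8(float db)      { return static_cast<int16_t>(std::lroundf(db * 256.0f)); }
    static float   decodeQ7_8(int16_t fixed) { return static_cast<float>(fixed) / 256.0f; }

    // Example: -24.0 dB encodes to -6144, i.e. 0xE800 as a raw 16-bit pattern.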
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
new file mode 100755
index 0000000..b21f6ae
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
@@ -0,0 +1,137 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+           xmlns:xi="http://www.w3.org/2001/XInclude"
+           xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
+           Name="policy" Type="Policy" Endianness="Little">
+
+    <ComponentLibrary>
+        <!--#################### GLOBAL COMPONENTS BEGIN ####################-->
+        <!-- Common Types definition -->
+        <xi:include href="PolicySubsystem-CommonTypes.xml"/>
+
+        <!--#################### GLOBAL COMPONENTS END ####################-->
+
+        <!--#################### STRATEGY BEGIN ####################-->
+
+        <ComponentType Name="Strategies" Description="Identifier must match the enum value to make
+                             the link between the PolicyManager and PFW">
+            <Component Name="media" Type="StrategyConfig" Mapping="Amend1:Media,Identifier:0"/>
+            <Component Name="phone" Type="StrategyConfig" Mapping="Amend1:Phone,Identifier:1"/>
+            <Component Name="sonification" Type="StrategyConfig"
+                                           Mapping="Amend1:Sonification,Identifier:2"/>
+            <Component Name="sonification_respectful" Type="StrategyConfig"
+                       Mapping="Amend1:SonificationRespectful,Identifier:3"/>
+            <Component Name="dtmf" Type="StrategyConfig" Mapping="Amend1:Dtmf,Identifier:4"/>
+            <Component Name="enforced_audible" Type="StrategyConfig"
+                                               Mapping="Amend1:EnforcedAudible,Identifier:5"/>
+            <Component Name="transmitted_through_speaker" Type="StrategyConfig"
+                       Mapping="Amend1:TransmittedThroughSpeaker,Identifier:6"/>
+            <Component Name="accessibility" Type="StrategyConfig"
+                                            Mapping="Amend1:Accessibility,Identifier:7"/>
+            <Component Name="rerouting" Type="StrategyConfig"
+                                        Mapping="Amend1:Rerouting,Identifier:8"/>
+        </ComponentType>
+
+        <!--#################### STRATEGY END ####################-->
+
+        <!--#################### STREAM BEGIN ####################-->
+
+        <ComponentType Name="Streams" Description="associated with the audio_stream_type_t definition,
+                             identifier mapping must match the value of the enum">
+            <Component Name="voice_call" Type="Stream" Mapping="Amend1:VoiceCall,Identifier:0"/>
+            <Component Name="system" Type="Stream" Mapping="Amend1:System,Identifier:1"/>
+            <Component Name="ring" Type="Stream" Mapping="Amend1:Ring,Identifier:2"/>
+            <Component Name="music" Type="Stream" Mapping="Amend1:Music,Identifier:3"/>
+            <Component Name="alarm" Type="Stream" Mapping="Amend1:Alarm,Identifier:4"/>
+            <Component Name="notification" Type="Stream"
+                                           Mapping="Amend1:Notification,Identifier:5"/>
+            <Component Name="bluetooth_sco" Type="Stream"
+                                            Mapping="Amend1:BluetoothSco,Identifier:6"/>
+            <Component Name="enforced_audible" Type="Stream"
+                                               Mapping="Amend1:EnforceAudible,Identifier:7"
+                       Description="Sounds that cannot be muted by the user and must
+                                    be routed to the speaker"/>
+            <Component Name="dtmf" Type="Stream" Mapping="Amend1:Dtmf,Identifier:8"/>
+            <Component Name="tts" Type="Stream" Mapping="Amend1:Tts,Identifier:9"
+                             Description="Transmitted Through Speaker.
+                                          Plays over speaker only, silent on other devices"/>
+            <Component Name="accessibility" Type="Stream"
+                                            Mapping="Amend1:Accessibility,Identifier:10"
+                             Description="For accessibility talk back prompts"/>
+            <Component Name="rerouting" Type="Stream" Mapping="Amend1:Rerouting,Identifier:11"
+                             Description="For dynamic policy output mixes"/>
+            <Component Name="patch" Type="Stream" Mapping="Amend1:Patch,Identifier:12"
+                             Description="For internal audio flinger tracks. Fixed volume"/>
+        </ComponentType>
+
+        <!--#################### STREAM END ####################-->
+
+        <!--#################### USAGE BEGIN ####################-->
+
+        <ComponentType Name="Usages" Description="associated with the audio_usage_t definition,
+                             identifier mapping must match the value of the enum">
+            <Component Name="unknown" Type="Usage" Mapping="Amend1:Unknown,Identifier:0"/>
+            <Component Name="media" Type="Usage" Mapping="Amend1:Media,Identifier:1"/>
+            <Component Name="voice_communication" Type="Usage"
+                                                  Mapping="Amend1:VoiceCommunication,Identifier:2"/>
+            <Component Name="voice_communication_signalling" Type="Usage"
+                       Mapping="Amend1:VoiceCommunicationSignalling,Identifier:3"/>
+            <Component Name="alarm" Type="Usage" Mapping="Amend1:Alarm,Identifier:4"/>
+            <Component Name="notification" Type="Usage" Mapping="Amend1:Notification,Identifier:5"/>
+            <Component Name="notification_telephony_ringtone" Type="Usage"
+                       Mapping="Amend1:NotificationTelephonyRingtone,Identifier:6"/>
+            <Component Name="notification_communication_request" Type="Usage"
+                       Mapping="Amend1:NotificationCommunicationRequest,Identifier:7"/>
+            <Component Name="notification_communication_instant" Type="Usage"
+                       Mapping="Amend1:NotificationCommunicationInstant,Identifier:8"/>
+            <Component Name="notification_communication_delayed" Type="Usage"
+                       Mapping="Amend1:NotificationCommunicationDelayed,Identifier:9"/>
+            <Component Name="notification_event" Type="Usage"
+                                                 Mapping="Amend1:NotificationEvent,Identifier:10"/>
+            <Component Name="assistance_accessibility" Type="Usage"
+                       Mapping="Amend1:AssistanceAccessibility,Identifier:11"/>
+            <Component Name="assistance_navigation_guidance" Type="Usage"
+                       Mapping="Amend1:AssistanceNavigationGuidance,Identifier:12"/>
+            <Component Name="assistance_sonification" Type="Usage"
+                       Mapping="Amend1:AssistanceSonification,Identifier:13"/>
+            <Component Name="game" Type="Usage" Mapping="Amend1:Game,Identifier:14"/>
+            <Component Name="virtual_source" Type="Usage"
+                                             Mapping="Amend1:VirtualSource,Identifier:15"/>
+        </ComponentType>
+
+        <!--#################### USAGE END ####################-->
+
+        <!--#################### INPUT SOURCE BEGIN ####################-->
+
+        <ComponentType Name="InputSources" Description="associated with the audio_source_t definition,
+                             identifier mapping must match the value of the enum">
+            <Component Name="default" Type="InputSource" Mapping="Amend1:Default,Identifier:0"/>
+            <Component Name="mic" Type="InputSource" Mapping="Amend1:Mic,Identifier:1"/>
+            <Component Name="voice_uplink" Type="InputSource"
+                                           Mapping="Amend1:VoiceUplink,Identifier:2"/>
+            <Component Name="voice_downlink" Type="InputSource"
+                                             Mapping="Amend1:VoiceDownlink,Identifier:3"/>
+            <Component Name="voice_call" Type="InputSource"
+                                         Mapping="Amend1:VoiceCall,Identifier:4"/>
+            <Component Name="camcorder" Type="InputSource" Mapping="Amend1:Camcorder,Identifier:5"/>
+            <Component Name="voice_recognition" Type="InputSource"
+                                                Mapping="Amend1:VoiceRecognition,Identifier:6"/>
+            <Component Name="voice_communication" Type="InputSource"
+                                                  Mapping="Amend1:VoiceCommunication,Identifier:7"/>
+            <Component Name="remote_submix" Type="InputSource"
+                                            Mapping="Amend1:RemoteSubmix,Identifier:8"/>
+            <Component Name="fm_tuner" Type="InputSource" Mapping="Amend1:FmTuner,Identifier:1998"/>
+            <Component Name="hotword" Type="InputSource" Mapping="Amend1:Hotword,Identifier:1999"/>
+        </ComponentType>
+
+        <!--#################### INPUT SOURCE END ####################-->
+
+    </ComponentLibrary>
+
+    <InstanceDefinition>
+        <Component Name="streams" Type="Streams"/>
+        <Component Name="strategies" Type="Strategies"/>
+        <Component Name="input_sources" Type="InputSources"/>
+        <Component Name="usages" Type="Usages"/>
+    </InstanceDefinition>
+</Subsystem>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/policy_criteria.txt b/services/audiopolicy/engineconfigurable/parameter-framework/example/policy_criteria.txt
new file mode 100755
index 0000000..ef06498
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/policy_criteria.txt
@@ -0,0 +1,9 @@
+ExclusiveCriterion TelephonyMode                :   Normal          RingTone                InCall              InCommunication
+InclusiveCriterion AvailableInputDevices        :   Communication Ambient BuiltinMic BluetoothScoHeadset WiredHeadset Hdmi TelephonyRx BackMic RemoteSubmix AnlgDockHeadset DgtlDockHeadset UsbAccessory UsbDevice FmTuner TvTuner Line Spdif BluetoothA2dp Loopback
+InclusiveCriterion AvailableOutputDevices       :   Earpiece Speaker WiredSpeaker WiredHeadset WiredHeadphone BluetoothSco BluetoothScoHeadset BluetoothScoCarkit BluetoothA2dp BluetoothA2dpHeadphones BluetoothA2dpSpeaker Hdmi AnlgDockHeadset DgtlDockHeadset UsbAccessory UsbDevice RemoteSubmix TelephonyTx Line HdmiArc Spdif Fm AuxLine SpeakerSafe
+ExclusiveCriterion ForceUseForCommunication     :   ForceNone       ForceSpeaker            ForceBtSco
+ExclusiveCriterion ForceUseForMedia             :   ForceNone       ForceSpeaker        ForceHeadphones         ForceBtA2dp         ForceWiredAccessory ForceAnalogDock ForceDigitalDock    ForceNoBtA2dp       ForceSystemEnforced
+ExclusiveCriterion ForceUseForRecord            :   ForceNone       ForceBtSco              ForceWiredAccessory
+ExclusiveCriterion ForceUseForDock              :   ForceNone       ForceWiredAccessory     ForceBtCarDock      ForceBtDeskDock     ForceAnalogDock ForceDigitalDock
+ExclusiveCriterion ForceUseForSystem            :   ForceNone       ForceSystemEnforced
+ExclusiveCriterion ForceUseForHdmiSystemAudio   :   ForceNone       ForceHdmiSystemEnforced
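The Exclusive/Inclusive distinction above drives how the policy manager feeds these criteria: an ExclusiveCriterion holds exactly one literal at a time (TelephonyMode is Normal or InCall, never both), while an InclusiveCriterion is a set in which each literal occupies one bit, so several devices can be available at once. A hypothetical helper showing the inclusive case (not a parameter-framework API):

    #include <algorithm>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Hypothetical illustration: each literal of an inclusive criterion maps to one bit of its state.
    static uint32_t inclusiveCriterionState(const std::vector<std::string> &orderedLiterals,
                                            const std::vector<std::string> &currentlyTrue)
    {
        uint32_t state = 0;
        for (size_t bit = 0; bit < orderedLiterals.size() && bit < 32; ++bit) {
            if (std::find(currentlyTrue.begin(), currentlyTrue.end(),
                          orderedLiterals[bit]) != currentlyTrue.end()) {
                state |= 1u << bit;  // e.g. Earpiece -> bit 0, Speaker -> bit 1, ...
            }
        }
        return state;
    }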
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
new file mode 100755
index 0000000..a523656
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -0,0 +1,36 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_SRC_FILES := \
+    PolicySubsystemBuilder.cpp \
+    PolicySubsystem.cpp \
+    Strategy.cpp \
+    InputSource.cpp \
+    VolumeProfile.cpp \
+    Stream.cpp \
+    Usage.cpp
+
+LOCAL_CFLAGS += \
+    -Wall \
+    -Werror \
+    -Wextra \
+
+LOCAL_C_INCLUDES := \
+    $(TOPDIR)external/parameter-framework/parameter \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/interface \
+
+LOCAL_SHARED_LIBRARIES := \
+    libaudiopolicyengineconfigurable  \
+    libparameter \
+    libxmlserializer \
+    liblog \
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE := libpolicy-subsystem
+
+include $(BUILD_SHARED_LIBRARY)
+
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
new file mode 100755
index 0000000..497d555
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "InputSource.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+
+using std::string;
+
+InputSource::InputSource(const string &mappingValue,
+                   CInstanceConfigurableElement *instanceConfigurableElement,
+                   const CMappingContext &context)
+    : CFormattedSubsystemObject(instanceConfigurableElement,
+                                mappingValue,
+                                MappingKeyAmend1,
+                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+                                context),
+      mPolicySubsystem(static_cast<const PolicySubsystem *>(
+                           instanceConfigurableElement->getBelongingSubsystem())),
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
+      mApplicableInputDevice(mDefaultApplicableInputDevice)
+{
+    mId = static_cast<audio_source_t>(context.getItemAsInteger(MappingKeyIdentifier));
+    // Declare the input source to the audio policy engine
+    mPolicyPluginInterface->addInputSource(getFormattedMappingValue(), mId);
+}
+
+bool InputSource::receiveFromHW(string & /*error*/)
+{
+    blackboardWrite(&mApplicableInputDevice, sizeof(mApplicableInputDevice));
+    return true;
+}
+
+bool InputSource::sendToHW(string & /*error*/)
+{
+    uint32_t applicableInputDevice;
+    blackboardRead(&applicableInputDevice, sizeof(applicableInputDevice));
+    mApplicableInputDevice = applicableInputDevice;
+    return mPolicyPluginInterface->setDeviceForInputSource(mId, mApplicableInputDevice);
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
new file mode 100755
index 0000000..67c5b50
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "FormattedSubsystemObject.h"
+#include "InstanceConfigurableElement.h"
+#include "MappingContext.h"
+#include <AudioPolicyPluginInterface.h>
+#include <string>
+
+class PolicySubsystem;
+
+class InputSource : public CFormattedSubsystemObject
+{
+public:
+    InputSource(const std::string &mappingValue,
+             CInstanceConfigurableElement *instanceConfigurableElement,
+             const CMappingContext &context);
+
+protected:
+    virtual bool receiveFromHW(std::string &error);
+    virtual bool sendToHW(std::string &error);
+
+private:
+    const PolicySubsystem *mPolicySubsystem; /**< Policy subsystem plugin. */
+
+    /**
+     * Interface to communicate with Audio Policy Engine.
+     */
+    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
+
+    audio_source_t mId; /**< input source identifier to link with audio.h. */
+    uint32_t mApplicableInputDevice; /**< applicable input device for this input source. */
+    static const uint32_t mDefaultApplicableInputDevice = 0; /**< default input device. */
+};
diff --git a/include/media/nbaio/roundup.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h
old mode 100644
new mode 100755
similarity index 63%
copy from include/media/nbaio/roundup.h
copy to services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h
index 4c3cc25..53944e9
--- a/include/media/nbaio/roundup.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicyMappingKeys.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2012 The Android Open Source Project
+ * Copyright (C) 2015 The Android Open Source Project
  *
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
@@ -14,18 +14,19 @@
  * limitations under the License.
  */
 
-#ifndef ROUNDUP_H
-#define ROUNDUP_H
+#pragma once
 
-#ifdef __cplusplus
-extern "C" {
-#endif
+/**
+ * Mapping item types
+ */
+enum PolicyItemType
+{
+    MappingKeyName,
+    MappingKeyCategory,
+    MappingKeyIdentifier,
+    MappingKeyAmend1,
+    MappingKeyAmend2,
+    MappingKeyAmend3
+};
 
-// Round up to the next highest power of 2
-unsigned roundup(unsigned v);
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif  // ROUNDUP_H
+static const uint8_t MappingKeyAmendEnd = MappingKeyAmend3;
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
new file mode 100755
index 0000000..bf3906d
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PolicySubsystem.h"
+#include "SubsystemObjectFactory.h"
+#include "PolicyMappingKeys.h"
+#include "Strategy.h"
+#include "Stream.h"
+#include "InputSource.h"
+#include "VolumeProfile.h"
+#include "Usage.h"
+#include <AudioPolicyPluginInterface.h>
+#include <AudioPolicyEngineInstance.h>
+#include <utils/Log.h>
+
+using android::audio_policy::EngineInstance;
+
+const char *const PolicySubsystem::mKeyName = "Name";
+const char *const PolicySubsystem::mKeyIdentifier = "Identifier";
+const char *const PolicySubsystem::mKeyCategory = "Category";
+const char *const PolicySubsystem::mKeyAmend1 = "Amend1";
+const char *const PolicySubsystem::mKeyAmend2 = "Amend2";
+const char *const PolicySubsystem::mKeyAmend3 = "Amend3";
+
+
+const char *const PolicySubsystem::mStreamComponentName = "Stream";
+const char *const PolicySubsystem::mStrategyComponentName = "Strategy";
+const char *const PolicySubsystem::mInputSourceComponentName = "InputSource";
+const char *const PolicySubsystem::mUsageComponentName = "Usage";
+const char *const PolicySubsystem::mVolumeProfileComponentName = "VolumeProfile";
+
+PolicySubsystem::PolicySubsystem(const std::string &name)
+    : CSubsystem(name),
+      mPluginInterface(NULL)
+{
+    // Retrieve the plugin interface from the Audio Policy Engine instance
+    EngineInstance *engineInstance = EngineInstance::getInstance();
+
+    ALOG_ASSERT(engineInstance != NULL, "Could not retrieve the Audio Policy Engine instance");
+
+    // Retrieve the Audio Policy plugin interface
+    mPluginInterface = engineInstance->queryInterface<android::AudioPolicyPluginInterface>();
+    ALOG_ASSERT(mPluginInterface != NULL, "NULL Plugin Interface");
+
+    // Provide mapping keys to the core, necessary when parsing the XML Structure files.
+    addContextMappingKey(mKeyName);
+    addContextMappingKey(mKeyCategory);
+    addContextMappingKey(mKeyIdentifier);
+    addContextMappingKey(mKeyAmend1);
+    addContextMappingKey(mKeyAmend2);
+    addContextMappingKey(mKeyAmend3);
+
+    // Provide creators to upper layer
+    addSubsystemObjectFactory(
+        new TSubsystemObjectFactory<Stream>(
+            mStreamComponentName,
+            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
+        );
+    addSubsystemObjectFactory(
+        new TSubsystemObjectFactory<Strategy>(
+            mStrategyComponentName,
+            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
+        );
+    addSubsystemObjectFactory(
+        new TSubsystemObjectFactory<Usage>(
+            mUsageComponentName,
+            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
+        );
+    addSubsystemObjectFactory(
+        new TSubsystemObjectFactory<InputSource>(
+            mInputSourceComponentName,
+            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
+        );
+    addSubsystemObjectFactory(
+        new TSubsystemObjectFactory<VolumeProfile>(
+            mVolumeProfileComponentName,
+            (1 << MappingKeyAmend1) | (1 << MappingKeyIdentifier))
+        );
+}
+
+// Retrieve Route interface
+android::AudioPolicyPluginInterface *PolicySubsystem::getPolicyPluginInterface() const
+{
+    ALOG_ASSERT(mPluginInterface != NULL, "NULL Plugin Interface");
+    return mPluginInterface;
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
new file mode 100755
index 0000000..3c26fe1
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Subsystem.h"
+#include <string>
+
+namespace android {
+
+class AudioPolicyPluginInterface;
+
+}
+
+class PolicySubsystem : public CSubsystem
+{
+public:
+    PolicySubsystem(const std::string &strName);
+
+    /**
+     * Retrieve the Audio Policy plugin interface.
+     *
+     * @return AudioPolicyPluginInterface handled by this subsystem plugin.
+     */
+    android::AudioPolicyPluginInterface *getPolicyPluginInterface() const;
+
+private:
+    /* Copy constructor and assignment operator are private to prevent copying. */
+    PolicySubsystem(const PolicySubsystem &object);
+    PolicySubsystem &operator=(const PolicySubsystem &object);
+
+    android::AudioPolicyPluginInterface *mPluginInterface; /**< Audio Policy Plugin Interface. */
+
+    static const char *const mKeyName; /**< name key mapping string. */
+    static const char *const mKeyIdentifier;
+    static const char *const mKeyCategory;
+
+    static const char *const mKeyAmend1; /**< amend1 key mapping string. */
+    static const char *const mKeyAmend2; /**< amend2 key mapping string. */
+    static const char *const mKeyAmend3; /**< amend3 key mapping string. */
+
+    static const char *const mStreamComponentName;
+    static const char *const mStrategyComponentName;
+    static const char *const mInputSourceComponentName;
+    static const char *const mUsageComponentName;
+    static const char *const mVolumeProfileComponentName;
+};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
new file mode 100755
index 0000000..b14d446
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "SubsystemLibrary.h"
+#include "NamedElementBuilderTemplate.h"
+#include "PolicySubsystem.h"
+
+static const char *const POLICY_SUBSYSTEM_NAME = "Policy";
+extern "C"
+{
+void getPOLICYSubsystemBuilder(CSubsystemLibrary *subsystemLibrary)
+{
+    subsystemLibrary->addElementBuilder(POLICY_SUBSYSTEM_NAME,
+                                        new TNamedElementBuilderTemplate<PolicySubsystem>());
+}
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
new file mode 100755
index 0000000..1848813
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Strategy.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+
+using std::string;
+using android::routing_strategy;
+
+Strategy::Strategy(const string &mappingValue,
+                   CInstanceConfigurableElement *instanceConfigurableElement,
+                   const CMappingContext &context)
+    : CFormattedSubsystemObject(instanceConfigurableElement,
+                                mappingValue,
+                                MappingKeyAmend1,
+                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+                                context),
+      mPolicySubsystem(static_cast<const PolicySubsystem *>(
+                           instanceConfigurableElement->getBelongingSubsystem())),
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
+      mApplicableOutputDevice(mDefaultApplicableOutputDevice)
+{
+    mId = static_cast<routing_strategy>(context.getItemAsInteger(MappingKeyIdentifier));
+
+    // Declares the strategy to the audio policy engine
+    mPolicyPluginInterface->addStrategy(getFormattedMappingValue(), mId);
+}
+
+bool Strategy::receiveFromHW(string & /*error*/)
+{
+    blackboardWrite(&mApplicableOutputDevice, sizeof(mApplicableOutputDevice));
+    return true;
+}
+
+bool Strategy::sendToHW(string & /*error*/)
+{
+    uint32_t applicableOutputDevice;
+    blackboardRead(&applicableOutputDevice, sizeof(applicableOutputDevice));
+    return mPolicyPluginInterface->setDeviceForStrategy(mId, applicableOutputDevice);
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
new file mode 100755
index 0000000..9a9b3e4
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "FormattedSubsystemObject.h"
+#include "InstanceConfigurableElement.h"
+#include "MappingContext.h"
+#include <AudioPolicyPluginInterface.h>
+#include <string>
+
+class PolicySubsystem;
+
+class Strategy : public CFormattedSubsystemObject
+{
+public:
+    Strategy(const std::string &mappingValue,
+             CInstanceConfigurableElement *instanceConfigurableElement,
+             const CMappingContext &context);
+
+protected:
+    virtual bool receiveFromHW(std::string &error);
+    virtual bool sendToHW(std::string &error);
+
+private:
+    const PolicySubsystem *mPolicySubsystem; /**< Policy subsystem plugin. */
+
+    /**
+     * Interface to communicate with Audio Policy Engine.
+     */
+    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
+
+    android::routing_strategy mId; /**< strategy identifier to link with audio.h.*/
+    uint32_t mApplicableOutputDevice; /**< applicable output device for this strategy. */
+    static const uint32_t mDefaultApplicableOutputDevice = 0; /**< default output device. */
+};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
new file mode 100755
index 0000000..575b0bb
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Stream.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+
+using std::string;
+using android::routing_strategy;
+
+Stream::Stream(const string &mappingValue,
+                   CInstanceConfigurableElement *instanceConfigurableElement,
+                   const CMappingContext &context)
+    : CFormattedSubsystemObject(instanceConfigurableElement,
+                                mappingValue,
+                                MappingKeyAmend1,
+                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+                                context),
+      mPolicySubsystem(static_cast<const PolicySubsystem *>(
+                           instanceConfigurableElement->getBelongingSubsystem())),
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
+      mApplicableStrategy(mDefaultApplicableStrategy)
+{
+    mId = static_cast<audio_stream_type_t>(context.getItemAsInteger(MappingKeyIdentifier));
+
+    // Declares the stream to the audio policy engine
+    mPolicyPluginInterface->addStream(getFormattedMappingValue(), mId);
+}
+
+bool Stream::receiveFromHW(string & /*error*/)
+{
+    blackboardWrite(&mApplicableStrategy, sizeof(mApplicableStrategy));
+    return true;
+}
+
+bool Stream::sendToHW(string & /*error*/)
+{
+    uint32_t applicableStrategy;
+    blackboardRead(&applicableStrategy, sizeof(applicableStrategy));
+    mApplicableStrategy = applicableStrategy;
+    return mPolicyPluginInterface->setStrategyForStream(mId,
+                                              static_cast<routing_strategy>(mApplicableStrategy));
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
new file mode 100755
index 0000000..7d90c36
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "FormattedSubsystemObject.h"
+#include "InstanceConfigurableElement.h"
+#include "MappingContext.h"
+#include <AudioPolicyPluginInterface.h>
+#include <string>
+
+class PolicySubsystem;
+
+class Stream : public CFormattedSubsystemObject
+{
+public:
+    Stream(const std::string &mappingValue,
+             CInstanceConfigurableElement *instanceConfigurableElement,
+             const CMappingContext &context);
+
+protected:
+    virtual bool receiveFromHW(std::string &error);
+    virtual bool sendToHW(std::string &error);
+
+private:
+    const PolicySubsystem *mPolicySubsystem; /**< Policy subsystem plugin. */
+
+    /**
+     * Interface to communicate with Audio Policy Engine.
+     */
+    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
+
+    audio_stream_type_t mId; /**< stream type identifier to link with audio.h. */
+    uint32_t mApplicableStrategy; /**< applicable strategy for this stream. */
+    static const uint32_t mDefaultApplicableStrategy = 0; /**< default strategy. */
+};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
new file mode 100755
index 0000000..1916b9b
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Usage.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+
+using std::string;
+using android::routing_strategy;
+
+Usage::Usage(const string &mappingValue,
+                   CInstanceConfigurableElement *instanceConfigurableElement,
+                   const CMappingContext &context)
+    : CFormattedSubsystemObject(instanceConfigurableElement,
+                                mappingValue,
+                                MappingKeyAmend1,
+                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+                                context),
+      mPolicySubsystem(static_cast<const PolicySubsystem *>(
+                           instanceConfigurableElement->getBelongingSubsystem())),
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface()),
+      mApplicableStrategy(mDefaultApplicableStrategy)
+{
+    mId = static_cast<audio_usage_t>(context.getItemAsInteger(MappingKeyIdentifier));
+
+    // Declares the usage to the audio policy engine
+    mPolicyPluginInterface->addUsage(getFormattedMappingValue(), mId);
+}
+
+bool Usage::receiveFromHW(string & /*error*/)
+{
+    blackboardWrite(&mApplicableStrategy, sizeof(mApplicableStrategy));
+    return true;
+}
+
+bool Usage::sendToHW(string & /*error*/)
+{
+    uint32_t applicableStrategy;
+    blackboardRead(&applicableStrategy, sizeof(applicableStrategy));
+    mApplicableStrategy = applicableStrategy;
+    return mPolicyPluginInterface->setStrategyForUsage(mId,
+                                              static_cast<routing_strategy>(mApplicableStrategy));
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
new file mode 100755
index 0000000..8e9b638
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "FormattedSubsystemObject.h"
+#include "InstanceConfigurableElement.h"
+#include "MappingContext.h"
+#include <AudioPolicyPluginInterface.h>
+#include <string>
+
+class PolicySubsystem;
+
+class Usage : public CFormattedSubsystemObject
+{
+public:
+    Usage(const std::string &mappingValue,
+             CInstanceConfigurableElement *instanceConfigurableElement,
+             const CMappingContext &context);
+
+protected:
+    virtual bool receiveFromHW(std::string &error);
+    virtual bool sendToHW(std::string &error);
+
+private:
+    const PolicySubsystem *mPolicySubsystem; /**< Policy subsystem plugin. */
+
+    /**
+     * Interface to communicate with Audio Policy Engine.
+     */
+    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
+
+    audio_usage_t mId; /**< usage identifier to link with audio.h. */
+    uint32_t mApplicableStrategy; /**< applicable strategy for this usage. */
+    static const uint32_t mDefaultApplicableStrategy = 0; /**< default strategy. */
+};
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.cpp
new file mode 100755
index 0000000..5c155c8
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "VolumeProfile.h"
+#include "PolicyMappingKeys.h"
+#include "PolicySubsystem.h"
+#include "ParameterBlockType.h"
+#include <Volume.h>
+#include <math.h>
+
+using std::string;
+
+VolumeProfile::VolumeProfile(const string &mappingValue,
+                             CInstanceConfigurableElement *instanceConfigurableElement,
+                             const CMappingContext &context)
+    : CFormattedSubsystemObject(instanceConfigurableElement,
+                                mappingValue,
+                                MappingKeyAmend1,
+                                (MappingKeyAmendEnd - MappingKeyAmend1 + 1),
+                                context),
+      mPolicySubsystem(static_cast<const PolicySubsystem *>(
+                           instanceConfigurableElement->getBelongingSubsystem())),
+      mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
+{
+    uint32_t categoryKey = context.getItemAsInteger(MappingKeyCategory);
+    if (categoryKey >= Volume::DEVICE_CATEGORY_CNT) {
+        mCategory = Volume::DEVICE_CATEGORY_SPEAKER;
+    } else {
+        mCategory = static_cast<Volume::device_category>(categoryKey);
+    }
+    mId = static_cast<audio_stream_type_t>(context.getItemAsInteger(MappingKeyIdentifier));
+
+    // (no exception support, defer the error)
+    if (instanceConfigurableElement->getType() != CInstanceConfigurableElement::EParameterBlock) {
+        return;
+    }
+    // Get actual element type
+    const CParameterBlockType *parameterType = static_cast<const CParameterBlockType *>(
+                instanceConfigurableElement->getTypeElement());
+    mPoints = parameterType->getArrayLength();
+}
+
+bool VolumeProfile::receiveFromHW(string & /*error*/)
+{
+    return true;
+}
+
+bool VolumeProfile::sendToHW(string & /*error*/)
+{
+    Point points[mPoints];
+    blackboardRead(&points, sizeof(Point) * mPoints);
+
+    VolumeCurvePoints pointsVector;
+    for (size_t i = 0; i < mPoints; i++) {
+        VolumeCurvePoint curvePoint;
+        curvePoint.mIndex = points[i].index;
+        curvePoint.mDBAttenuation = static_cast<float>(points[i].dbAttenuation) /
+                (1UL << gFractional);
+        pointsVector.push_back(curvePoint);
+    }
+    return mPolicyPluginInterface->setVolumeProfileForStream(mId, mCategory, pointsVector);
+}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.h
new file mode 100755
index 0000000..a00ae84
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/VolumeProfile.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "FormattedSubsystemObject.h"
+#include "InstanceConfigurableElement.h"
+#include "MappingContext.h"
+#include <Volume.h>
+#include <AudioPolicyPluginInterface.h>
+#include <string>
+
+class PolicySubsystem;
+
+class VolumeProfile : public CFormattedSubsystemObject
+{
+private:
+    struct Point
+    {
+        int index;
+        /** Volume is using FixedPointParameter until float parameters are available. */
+        int16_t dbAttenuation;
+    } __attribute__((packed));
+
+public:
+    VolumeProfile(const std::string &mappingValue,
+                  CInstanceConfigurableElement *instanceConfigurableElement,
+                  const CMappingContext &context);
+
+protected:
+    virtual bool receiveFromHW(std::string &error);
+    virtual bool sendToHW(std::string &error);
+
+private:
+    const PolicySubsystem *mPolicySubsystem; /**< Policy subsystem plugin. */
+
+    /**
+     * Interface to communicate with Audio Policy Engine.
+     */
+    android::AudioPolicyPluginInterface *mPolicyPluginInterface;
+
+    /**
+     * Volume profile identifier, which is in fact a stream type to link with audio.h.
+     */
+    audio_stream_type_t mId;
+
+    size_t mPoints;
+    Volume::device_category mCategory;
+
+    static const uint32_t gFractional = 8; /**< Fractional bits; must match the Point structure. */
+};
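
The sendToHW() conversion in VolumeProfile.cpp above treats dbAttenuation as a fixed-point value with gFractional (= 8) fractional bits, so dividing by 1 << 8 recovers the attenuation in dB. A self-contained worked example (the raw value is illustrative):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint32_t kFractionalBits = 8;   // same value as gFractional above
        const int16_t raw = -2560;            // hypothetical dbAttenuation read from the blackboard
        const float db = static_cast<float>(raw) / (1UL << kFractionalBits);
        std::printf("raw %d -> %.1f dB\n", raw, db);  // prints: raw -2560 -> -10.0 dB
        return 0;
    }
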
diff --git a/services/audiopolicy/engineconfigurable/src/Collection.h b/services/audiopolicy/engineconfigurable/src/Collection.h
new file mode 100755
index 0000000..8f17b15
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Collection.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Element.h"
+#include "Stream.h"
+#include "Strategy.h"
+#include "Usage.h"
+#include "InputSource.h"
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <utils/Log.h>
+#include <map>
+#include <stdint.h>
+#include <string>
+
+namespace android
+{
+namespace audio_policy
+{
+
+/**
+ * Collection of policy elements stored as a map indexed by their unique identifier type.
+ *
+ * @tparam Key type of the policy element indexing the collection.
+ *         Supported policy elements are:
+ *                      - Strategy
+ *                      - Stream
+ *                      - InputSource
+ *                      - Usage.
+ */
+template <typename Key>
+class Collection : public std::map<Key, Element<Key> *>
+{
+private:
+    typedef Element<Key> T;
+    typedef typename std::map<Key, T *>::iterator CollectionIterator;
+    typedef typename std::map<Key, T *>::const_iterator CollectionConstIterator;
+
+public:
+    Collection()
+    {
+        collectionSupported();
+    }
+
+    /**
+     * Add a policy element to the collection. Policy elements are streams, strategies, input
+     * sources, ... A compile-time error is generated if called on an unsupported collection
+     * type. It also sets the key as the unique identifier of the policy element.
+     *
+     * @tparam Key type indexing the collection of policy elements.
+     * @param[in] name of the policy element to add.
+     * @param[in] key to be used to index this new policy element.
+     *
+     * @return NO_ERROR if the policy element has been successfully added to the collection.
+     */
+    status_t add(const std::string &name, Key key)
+    {
+        if ((*this).find(key) != (*this).end()) {
+            ALOGW("%s: element %s already added", __FUNCTION__, name.c_str());
+            return BAD_VALUE;
+        }
+        (*this)[key] = new T(name);
+        ALOGD("%s: adding element %s to collection", __FUNCTION__, name.c_str());
+        return (*this)[key]->setIdentifier(key);
+    }
+
+    /**
+     * Get a policy element from the collection by its key. Policy elements are streams, strategies,
+     * input sources, ... A compile-time error is generated if called on an unsupported collection type.
+     *
+     * @tparam Key type indexing the collection of policy elements.
+     * @param[in] key of the policy element to find.
+     *
+     * @return valid pointer on policy element if found, NULL otherwise.
+     */
+    T *get(Key key) const
+    {
+        CollectionConstIterator it = (*this).find(key);
+        return (it == (*this).end()) ? NULL : it->second;
+    }
+
+    /**
+     * Find a policy element from the collection by its name. Policy elements are streams,
+     * strategies, input sources, ...
+     * A compile-time error is generated if called on an unsupported collection type.
+     *
+     * @tparam Key type indexing the collection of policy elements.
+     *
+     * @param[in] name of the policy element to find.
+     *
+     * @return valid pointer on element if found, NULL otherwise.
+     */
+    T *findByName(const std::string &name) const
+    {
+
+        CollectionConstIterator it;
+        for (it = (*this).begin(); it != (*this).end(); ++it) {
+            T *element = it->second;
+            if (element->getName() == name) {
+                return element;
+            }
+        }
+        return NULL;
+    }
+
+    /**
+     * Removes all the elements from the collection and destroys them.
+     */
+    void clear()
+    {
+        CollectionIterator it;
+        for (it = (*this).begin(); it != (*this).end(); ++it) {
+            delete it->second;
+        }
+        std::map<Key, T *>::clear();
+    }
+
+private:
+    /**
+     * Provides a compile-time error if no specialization is provided for a given type.
+     *
+     * @tparam T type of the policy element. Supported policy elements are:
+     *                      - Strategy
+     *                      - Stream
+     *                      - InputSource
+     *                      - Usage.
+     */
+    struct collectionSupported;
+};
+
+template <>
+struct Collection<audio_stream_type_t>::collectionSupported {};
+template <>
+struct Collection<std::string>::collectionSupported {};
+template <>
+struct Collection<audio_usage_t>::collectionSupported {};
+template <>
+struct Collection<audio_source_t>::collectionSupported {};
+template <>
+struct Collection<routing_strategy>::collectionSupported {};
+
+typedef Collection<routing_strategy> StrategyCollection;
+typedef Collection<audio_stream_type_t> StreamCollection;
+typedef Collection<audio_usage_t> UsageCollection;
+typedef Collection<audio_source_t> InputSourceCollection;
+
+} // namespace audio_policy
+} // namespace android
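
A minimal usage sketch of the Collection template above, assuming the engine headers are on the include path (the element name is illustrative):

    #include "Collection.h"

    using android::audio_policy::StreamCollection;
    using android::audio_policy::Stream;

    void collectionSketch() {
        StreamCollection streams;
        streams.add("music", AUDIO_STREAM_MUSIC);   // element is keyed by its enum identifier
        streams.add("music", AUDIO_STREAM_MUSIC);   // rejected: key already present (BAD_VALUE)

        Stream *byKey = streams.get(AUDIO_STREAM_MUSIC);   // NULL when the key is absent
        Stream *byName = streams.findByName("music");      // linear search on element names
        (void)byKey;
        (void)byName;

        streams.clear();                            // deletes the owned elements
    }
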
diff --git a/services/audiopolicy/engineconfigurable/src/Element.h b/services/audiopolicy/engineconfigurable/src/Element.h
new file mode 100755
index 0000000..52e77e5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Element.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <string>
+#include <utils/Errors.h>
+#include <system/audio.h>
+#include <utils/Log.h>
+
+namespace android
+{
+namespace audio_policy
+{
+
+template <typename Key>
+class Element
+{
+public:
+    Element(const std::string &name)
+        : mName(name)
+    {}
+    ~Element() {}
+
+    /**
+     * Returns identifier of this policy element
+     *
+     * @returns string representing the name of this policy element
+     */
+    const std::string &getName() const { return mName; }
+
+    /**
+    * Set the unique identifier for this policy element.
+    *
+    * @tparam Key type of the unique identifier.
+    * @param[in] identifier to be set.
+    *
+    * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
+    */
+    status_t setIdentifier(Key identifier)
+    {
+        mIdentifier = identifier;
+        return NO_ERROR;
+    }
+
+    /**
+     * @return the unique identifier of this policy element.
+     */
+    const Key &getIdentifier() const { return mIdentifier; }
+
+    /**
+     * A Policy element may implement getter/setter function for a given property.
+     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * or a string.
+     *
+     * @tparam Property for which this policy element has setter / getter.
+     * @return the property kept track by this policy base element.
+     */
+    template <typename Property>
+    Property get() const;
+
+    /**
+     * A Policy element may implement getter/setter function for a given property.
+     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * or a string.
+     *
+     * @tparam Property for which this policy element has setter / getter.
+     *
+     * @param[in] property value to be assigned for this policy base element.
+     *
+     * @return NO_ERROR if the property was set correctly, error code otherwise.
+     */
+    template <typename Property>
+    status_t set(Property property);
+
+private:
+    /* Copy facilities are made private to disable copying. */
+    Element(const Element &object);
+    Element &operator=(const Element &object);
+
+    std::string mName; /**< Unique literal Identifier of a policy base element*/
+    Key mIdentifier; /**< Unique numerical Identifier of a policy base element*/
+};
+} // namespace audio_policy
+} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
new file mode 100755
index 0000000..733cdf6
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "Engine.h"
+#include "Strategy.h"
+#include "Stream.h"
+#include "InputSource.h"
+#include "Usage.h"
+#include <policy.h>
+#include <ParameterManagerWrapper.h>
+
+using std::string;
+using std::map;
+
+namespace android
+{
+namespace audio_policy
+{
+template <>
+StrategyCollection &Engine::getCollection<routing_strategy>()
+{
+    return mStrategyCollection;
+}
+template <>
+StreamCollection &Engine::getCollection<audio_stream_type_t>()
+{
+    return mStreamCollection;
+}
+template <>
+UsageCollection &Engine::getCollection<audio_usage_t>()
+{
+    return mUsageCollection;
+}
+template <>
+InputSourceCollection &Engine::getCollection<audio_source_t>()
+{
+    return mInputSourceCollection;
+}
+
+template <>
+const StrategyCollection &Engine::getCollection<routing_strategy>() const
+{
+    return mStrategyCollection;
+}
+template <>
+const StreamCollection &Engine::getCollection<audio_stream_type_t>() const
+{
+    return mStreamCollection;
+}
+template <>
+const UsageCollection &Engine::getCollection<audio_usage_t>() const
+{
+    return mUsageCollection;
+}
+template <>
+const InputSourceCollection &Engine::getCollection<audio_source_t>() const
+{
+    return mInputSourceCollection;
+}
+
+Engine::Engine()
+    : mManagerInterface(this),
+      mPluginInterface(this),
+      mPolicyParameterMgr(new ParameterManagerWrapper()),
+      mApmObserver(NULL)
+{
+}
+
+Engine::~Engine()
+{
+    mStrategyCollection.clear();
+    mStreamCollection.clear();
+    mInputSourceCollection.clear();
+    mUsageCollection.clear();
+}
+
+
+void Engine::setObserver(AudioPolicyManagerObserver *observer)
+{
+    ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
+    mApmObserver = observer;
+}
+
+status_t Engine::initCheck()
+{
+    if (mPolicyParameterMgr != NULL && mPolicyParameterMgr->start() != NO_ERROR) {
+        ALOGE("%s: could not start Policy PFW", __FUNCTION__);
+        delete mPolicyParameterMgr;
+        mPolicyParameterMgr = NULL;
+        return NO_INIT;
+    }
+    return (mApmObserver != NULL)? NO_ERROR : NO_INIT;
+}
+
+bool Engine::setVolumeProfileForStream(const audio_stream_type_t &streamType,
+                                       Volume::device_category deviceCategory,
+                                       const VolumeCurvePoints &points)
+{
+    Stream *stream = getFromCollection<audio_stream_type_t>(streamType);
+    if (stream == NULL) {
+        ALOGE("%s: stream %d not found", __FUNCTION__, streamType);
+        return false;
+    }
+    return stream->setVolumeProfile(deviceCategory, points) == NO_ERROR;
+}
+
+template <typename Key>
+Element<Key> *Engine::getFromCollection(const Key &key) const
+{
+    const Collection<Key> &collection = getCollection<Key>();
+    return collection.get(key);
+}
+
+template <typename Key>
+status_t Engine::add(const std::string &name, const Key &key)
+{
+    Collection<Key> &collection = getCollection<Key>();
+    return collection.add(name, key);
+}
+
+template <typename Property, typename Key>
+Property Engine::getPropertyForKey(Key key) const
+{
+    Element<Key> *element = getFromCollection<Key>(key);
+    if (element == NULL) {
+        ALOGE("%s: Element not found within collection", __FUNCTION__);
+        return static_cast<Property>(0);
+    }
+    return element->template get<Property>();
+}
+
+routing_strategy Engine::ManagerInterfaceImpl::getStrategyForUsage(audio_usage_t usage)
+{
+    const SwAudioOutputCollection &outputs = mPolicyEngine->mApmObserver->getOutputs();
+
+    if (usage == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY &&
+            (outputs.isStreamActive(AUDIO_STREAM_RING) ||
+             outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
+        return STRATEGY_SONIFICATION;
+    }
+    return mPolicyEngine->getPropertyForKey<routing_strategy, audio_usage_t>(usage);
+}
+
+audio_devices_t Engine::ManagerInterfaceImpl::getDeviceForStrategy(routing_strategy strategy) const
+{
+    const SwAudioOutputCollection &outputs = mPolicyEngine->mApmObserver->getOutputs();
+
+    /** This is the only case handled programmatically because the PFW is unable to know the
+     * activity of streams.
+     *
+     * - While media is playing on a remote device, use the sonification behavior.
+     *   Note that we test this use case before testing whether media is playing, because
+     *   the isStreamActive() method only reports the activity of a stream, not whether
+     *   it is played locally. Note also that we use the same delay for both tests.
+     *
+     * - When media is not playing anymore, fall back on the sonification behavior.
+     */
+    if (strategy == STRATEGY_SONIFICATION_RESPECTFUL &&
+            !is_state_in_call(getPhoneState()) &&
+            !outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
+                                    SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY) &&
+            outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+        return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(STRATEGY_MEDIA);
+    }
+    return mPolicyEngine->getPropertyForKey<audio_devices_t, routing_strategy>(strategy);
+}
+
+template <typename Property, typename Key>
+bool Engine::setPropertyForKey(const Property &property, const Key &key)
+{
+    Element<Key> *element = getFromCollection<Key>(key);
+    if (element == NULL) {
+        ALOGE("%s: Element not found within collection", __FUNCTION__);
+        return false;
+    }
+    return element->template set<Property>(property) == NO_ERROR;
+}
+
+float Engine::volIndexToDb(Volume::device_category category,
+                             audio_stream_type_t streamType,
+                             int indexInUi)
+{
+    Stream *stream = getFromCollection<audio_stream_type_t>(streamType);
+    if (stream == NULL) {
+        ALOGE("%s: Element indexed by key=%d not found", __FUNCTION__, streamType);
+        return 1.0f;
+    }
+    return stream->volIndexToDb(category, indexInUi);
+}
+
+status_t Engine::initStreamVolume(audio_stream_type_t streamType,
+                                           int indexMin, int indexMax)
+{
+    Stream *stream = getFromCollection<audio_stream_type_t>(streamType);
+    if (stream == NULL) {
+        ALOGE("%s: Stream Type %d not found", __FUNCTION__, streamType);
+        return BAD_TYPE;
+    }
+    mApmObserver->getStreamDescriptors().setVolumeIndexMin(streamType, indexMin);
+    mApmObserver->getStreamDescriptors().setVolumeIndexMax(streamType, indexMax);
+
+    return stream->initVolume(indexMin, indexMax);
+}
+
+status_t Engine::setPhoneState(audio_mode_t mode)
+{
+    return mPolicyParameterMgr->setPhoneState(mode);
+}
+
+audio_mode_t Engine::getPhoneState() const
+{
+    return mPolicyParameterMgr->getPhoneState();
+}
+
+status_t Engine::setForceUse(audio_policy_force_use_t usage,
+                                      audio_policy_forced_cfg_t config)
+{
+    return mPolicyParameterMgr->setForceUse(usage, config);
+}
+
+audio_policy_forced_cfg_t Engine::getForceUse(audio_policy_force_use_t usage) const
+{
+    return mPolicyParameterMgr->getForceUse(usage);
+}
+
+status_t Engine::setDeviceConnectionState(audio_devices_t devices, audio_policy_dev_state_t state,
+                                          const char *deviceAddress)
+{
+    return mPolicyParameterMgr->setDeviceConnectionState(devices, state, deviceAddress);
+}
+
+template <>
+AudioPolicyManagerInterface *Engine::queryInterface()
+{
+    return &mManagerInterface;
+}
+
+template <>
+AudioPolicyPluginInterface *Engine::queryInterface()
+{
+    return &mPluginInterface;
+}
+
+} // namespace audio_policy
+} // namespace android
+
+
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
new file mode 100755
index 0000000..6fa7a13
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+
+#include <AudioPolicyManagerInterface.h>
+#include <AudioPolicyPluginInterface.h>
+#include "Collection.h"
+
+namespace android
+{
+class AudioPolicyManagerObserver;
+
+namespace audio_policy
+{
+
+class ParameterManagerWrapper;
+class VolumeProfile;
+
+class Engine
+{
+public:
+    Engine();
+    virtual ~Engine();
+
+    template <class RequestedInterface>
+    RequestedInterface *queryInterface();
+
+private:
+    /// Interface members
+    class ManagerInterfaceImpl : public AudioPolicyManagerInterface
+    {
+    public:
+        ManagerInterfaceImpl(Engine *policyEngine)
+            : mPolicyEngine(policyEngine) {}
+
+        virtual android::status_t initCheck()
+        {
+            return mPolicyEngine->initCheck();
+        }
+        virtual void setObserver(AudioPolicyManagerObserver *observer)
+        {
+            mPolicyEngine->setObserver(observer);
+        }
+        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
+        {
+            return mPolicyEngine->getPropertyForKey<audio_devices_t, audio_source_t>(inputSource);
+        }
+        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
+        virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
+        {
+            return mPolicyEngine->getPropertyForKey<routing_strategy, audio_stream_type_t>(stream);
+        }
+        virtual routing_strategy getStrategyForUsage(audio_usage_t usage);
+        virtual status_t setPhoneState(audio_mode_t mode)
+        {
+            return mPolicyEngine->setPhoneState(mode);
+        }
+        virtual audio_mode_t getPhoneState() const
+        {
+            return mPolicyEngine->getPhoneState();
+        }
+        virtual status_t setForceUse(audio_policy_force_use_t usage,
+                                              audio_policy_forced_cfg_t config)
+        {
+            return mPolicyEngine->setForceUse(usage, config);
+        }
+        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
+        {
+            return mPolicyEngine->getForceUse(usage);
+        }
+        virtual android::status_t setDeviceConnectionState(const sp<DeviceDescriptor> devDesc,
+                                                           audio_policy_dev_state_t state)
+        {
+            return mPolicyEngine->setDeviceConnectionState(devDesc->type(), state,
+                                                           devDesc->mAddress);
+        }
+        virtual status_t initStreamVolume(audio_stream_type_t stream,
+                                                   int indexMin, int indexMax)
+        {
+            return mPolicyEngine->initStreamVolume(stream, indexMin, indexMax);
+        }
+
+        virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
+
+        virtual float volIndexToDb(Volume::device_category deviceCategory,
+                                     audio_stream_type_t stream,
+                                     int indexInUi)
+        {
+            return mPolicyEngine->volIndexToDb(deviceCategory, stream, indexInUi);
+        }
+
+    private:
+        Engine *mPolicyEngine;
+    } mManagerInterface;
+
+    class PluginInterfaceImpl : public AudioPolicyPluginInterface
+    {
+    public:
+        PluginInterfaceImpl(Engine *policyEngine)
+            : mPolicyEngine(policyEngine) {}
+
+        virtual status_t addStrategy(const std::string &name, routing_strategy strategy)
+        {
+            return mPolicyEngine->add<routing_strategy>(name, strategy);
+        }
+        virtual status_t addStream(const std::string &name, audio_stream_type_t stream)
+        {
+            return mPolicyEngine->add<audio_stream_type_t>(name, stream);
+        }
+        virtual status_t addUsage(const std::string &name, audio_usage_t usage)
+        {
+            return mPolicyEngine->add<audio_usage_t>(name, usage);
+        }
+        virtual status_t addInputSource(const std::string &name, audio_source_t source)
+        {
+            return mPolicyEngine->add<audio_source_t>(name, source);
+        }
+        virtual bool setDeviceForStrategy(const routing_strategy &strategy, audio_devices_t devices)
+        {
+            return mPolicyEngine->setPropertyForKey<audio_devices_t, routing_strategy>(devices,
+                                                                                       strategy);
+        }
+        virtual bool setStrategyForStream(const audio_stream_type_t &stream,
+                                          routing_strategy strategy)
+        {
+            return mPolicyEngine->setPropertyForKey<routing_strategy, audio_stream_type_t>(strategy,
+                                                                                           stream);
+        }
+        virtual bool setVolumeProfileForStream(const audio_stream_type_t &stream,
+                                               Volume::device_category deviceCategory,
+                                               const VolumeCurvePoints &points)
+        {
+            return mPolicyEngine->setVolumeProfileForStream(stream, deviceCategory, points);
+        }
+
+        virtual bool setStrategyForUsage(const audio_usage_t &usage, routing_strategy strategy)
+        {
+            return mPolicyEngine->setPropertyForKey<routing_strategy, audio_usage_t>(strategy,
+                                                                                     usage);
+        }
+        virtual bool setDeviceForInputSource(const audio_source_t &inputSource,
+                                             audio_devices_t device)
+        {
+            return mPolicyEngine->setPropertyForKey<audio_devices_t, audio_source_t>(device,
+                                                                                     inputSource);
+        }
+
+    private:
+        Engine *mPolicyEngine;
+    } mPluginInterface;
+
+private:
+    /* Copy facilities are made private to disable copying. */
+    Engine(const Engine &object);
+    Engine &operator=(const Engine &object);
+
+    void setObserver(AudioPolicyManagerObserver *observer);
+
+    bool setVolumeProfileForStream(const audio_stream_type_t &stream,
+                                   Volume::device_category deviceCategory,
+                                   const VolumeCurvePoints &points);
+
+    status_t initCheck();
+    status_t setPhoneState(audio_mode_t mode);
+    audio_mode_t getPhoneState() const;
+    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const;
+    status_t setDeviceConnectionState(audio_devices_t devices, audio_policy_dev_state_t state,
+                                      const char *deviceAddress);
+
+    float volIndexToDb(Volume::device_category category,
+                       audio_stream_type_t stream,
+                       int indexInUi);
+    status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
+
+    StrategyCollection mStrategyCollection; /**< Strategies indexed by their enum id. */
+    StreamCollection mStreamCollection; /**< Streams indexed by their enum id.  */
+    UsageCollection mUsageCollection; /**< Usages indexed by their enum id. */
+    InputSourceCollection mInputSourceCollection; /**< Input sources indexed by their enum id. */
+
+    template <typename Key>
+    status_t add(const std::string &name, const Key &key);
+
+    template <typename Key>
+    Element<Key> *getFromCollection(const Key &key) const;
+
+    template <typename Key>
+    const Collection<Key> &getCollection() const;
+
+    template <typename Key>
+    Collection<Key> &getCollection();
+
+    template <typename Property, typename Key>
+    Property getPropertyForKey(Key key) const;
+
+    template <typename Property, typename Key>
+    bool setPropertyForKey(const Property &property, const Key &key);
+
+    /**
+     * Policy Parameter Manager hidden through a wrapper.
+     */
+    ParameterManagerWrapper *mPolicyParameterMgr;
+
+    AudioPolicyManagerObserver *mApmObserver;
+};
+
+}; // namespace audio_policy
+
+}; // namespace android
+
diff --git a/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp b/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
new file mode 100755
index 0000000..9aa89b2
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/EngineInstance.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <AudioPolicyManagerInterface.h>
+#include <AudioPolicyPluginInterface.h>
+#include "AudioPolicyEngineInstance.h"
+#include "Engine.h"
+
+using std::string;
+
+namespace android
+{
+namespace audio_policy
+{
+
+EngineInstance::EngineInstance()
+{
+}
+
+EngineInstance *EngineInstance::getInstance()
+{
+    static EngineInstance instance;
+    return &instance;
+}
+
+EngineInstance::~EngineInstance()
+{
+}
+
+Engine *EngineInstance::getEngine() const
+{
+    static Engine engine;
+    return &engine;
+}
+
+template <>
+AudioPolicyManagerInterface *EngineInstance::queryInterface() const
+{
+    return getEngine()->queryInterface<AudioPolicyManagerInterface>();
+}
+
+template <>
+AudioPolicyPluginInterface *EngineInstance::queryInterface() const
+{
+    return getEngine()->queryInterface<AudioPolicyPluginInterface>();
+}
+
+} // namespace audio_policy
+} // namespace android
+
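
A hedged sketch of how a caller obtains the engine's interfaces through the singleton above (the include style and the namespace resolution via using-directives are assumptions):

    #include "AudioPolicyEngineInstance.h"
    #include <AudioPolicyManagerInterface.h>

    using namespace android;
    using namespace android::audio_policy;

    void engineInstanceSketch() {
        EngineInstance *engineInstance = EngineInstance::getInstance();
        AudioPolicyManagerInterface *managerItf =
                engineInstance->queryInterface<AudioPolicyManagerInterface>();
        if (managerItf == NULL || managerItf->initCheck() != NO_ERROR) {
            // The engine is not usable until an observer is set and the Policy PFW has started.
            return;
        }
    }
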
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.cpp b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
new file mode 100755
index 0000000..9ff1538
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/InputSource"
+
+#include "InputSource.h"
+
+using std::string;
+
+namespace android
+{
+namespace audio_policy
+{
+status_t Element<audio_source_t>::setIdentifier(audio_source_t identifier)
+{
+    if (identifier > AUDIO_SOURCE_MAX && identifier != AUDIO_SOURCE_HOTWORD) {
+        return BAD_VALUE;
+    }
+    mIdentifier = identifier;
+    ALOGD("%s: InputSource %s identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
+    return NO_ERROR;
+}
+
+/**
+* Set the device associated with this input source.
+* It checks that the input device is valid, but allows setting a NONE device
+* (i.e. only the IN bit is set).
+*
+* @param[in] devices selected for the given input source.
+* @tparam audio_devices_t: Applicable input device for this input source.
+*
+* @return NO_ERROR if the device is either valid or none, error code otherwise.
+*/
+template <>
+status_t Element<audio_source_t>::set(audio_devices_t devices)
+{
+    if (!audio_is_input_device(devices) && devices != AUDIO_DEVICE_BIT_IN) {
+        ALOGE("%s: trying to set an invalid device 0x%X for input source %s",
+              __FUNCTION__, devices, getName().c_str());
+        return BAD_VALUE;
+    }
+    ALOGD("%s: 0x%X for input source %s", __FUNCTION__, devices, getName().c_str());
+    mApplicableDevices = devices;
+    return NO_ERROR;
+}
+
+template <>
+audio_devices_t Element<audio_source_t>::get<audio_devices_t>() const
+{
+    ALOGV("%s: 0x%X for inputSource %s", __FUNCTION__, mApplicableDevices, getName().c_str());
+    return mApplicableDevices;
+}
+} // namespace audio_policy
+} // namespace android
+
+
diff --git a/services/audiopolicy/engineconfigurable/src/InputSource.h b/services/audiopolicy/engineconfigurable/src/InputSource.h
new file mode 100755
index 0000000..6c498dc
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/InputSource.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Element.h"
+
+namespace android
+{
+namespace audio_policy
+{
+
+/**
+ * Specialization of policy base class element for audio_source_t
+ * @tparam audio_source_t Policy Base Element identified by the audio_source_t definition.
+ */
+template <>
+class Element<audio_source_t>
+{
+public:
+    Element(const std::string &name)
+        : mName(name),
+          mApplicableDevices(AUDIO_DEVICE_NONE)
+    {}
+    ~Element() {}
+
+    /**
+     * Returns identifier of this policy element
+     *
+     * @returns string representing the name of this policy element
+     */
+    const std::string &getName() const { return mName; }
+
+    /**
+    * Set the unique identifier for this policy element.
+    *
+    * @tparam Key type of the unique identifier.
+    * @param[in] identifier to be set.
+    *
+    * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
+    */
+    status_t setIdentifier(audio_source_t identifier);
+
+    /**
+     * @return the unique identifier of this policy element.
+     */
+    audio_source_t getIdentifier() const { return mIdentifier; }
+
+    /**
+     * A Policy element may implement getter/setter function for a given property.
+     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * or a string.
+     */
+    template <typename Property>
+    Property get() const;
+
+    template <typename Property>
+    status_t set(Property property);
+
+private:
+    /* Copy facilities are made private to disable copying. */
+    Element(const Element &object);
+    Element &operator=(const Element &object);
+
+    std::string mName; /**< Unique literal Identifier of a policy base element*/
+    audio_source_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
+
+    audio_devices_t mApplicableDevices; /**< Applicable input device for this input source. */
+};
+
+typedef Element<audio_source_t> InputSource;
+
+} // namespace audio_policy
+} // namespace android
+
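
A short, hedged sketch of the audio_source_t specialization above: only input devices (or the bare AUDIO_DEVICE_BIT_IN) are accepted when setting the applicable device (the element name is illustrative):

    #include "InputSource.h"

    void inputSourceSketch() {
        android::audio_policy::InputSource mic("builtin_mic");
        mic.setIdentifier(AUDIO_SOURCE_MIC);
        mic.set<audio_devices_t>(AUDIO_DEVICE_IN_BUILTIN_MIC);  // accepted: valid input device
        mic.set<audio_devices_t>(AUDIO_DEVICE_OUT_SPEAKER);     // rejected with BAD_VALUE
        audio_devices_t device = mic.get<audio_devices_t>();    // still AUDIO_DEVICE_IN_BUILTIN_MIC
        (void)device;
    }
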
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.cpp b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
new file mode 100755
index 0000000..847443a
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Strategy.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Strategy"
+
+#include "Strategy.h"
+
+using std::string;
+
+namespace android
+{
+namespace audio_policy
+{
+
+status_t Element<routing_strategy>::setIdentifier(routing_strategy identifier)
+{
+    if (identifier >= NUM_STRATEGIES) {
+        return BAD_VALUE;
+    }
+    mIdentifier = identifier;
+    ALOGD("%s: Strategy %s identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
+    return NO_ERROR;
+}
+
+/**
+ * Set the device associated with this strategy.
+ * It checks that the output device is valid, but allows setting a NONE device.
+ *
+ * @param[in] devices selected for the given strategy.
+ *
+ * @return NO_ERROR if the device is either valid or none, error code otherwise.
+ */
+template <>
+status_t Element<routing_strategy>::set<audio_devices_t>(audio_devices_t devices)
+{
+    if (!audio_is_output_devices(devices) && devices != AUDIO_DEVICE_NONE) {
+        ALOGE("%s: trying to set an invalid device 0x%X for strategy %s",
+              __FUNCTION__, devices, getName().c_str());
+        return BAD_VALUE;
+    }
+    ALOGD("%s: 0x%X for strategy %s", __FUNCTION__, devices, getName().c_str());
+    mApplicableDevices = devices;
+    return NO_ERROR;
+}
+
+template <>
+audio_devices_t Element<routing_strategy>::get<audio_devices_t>() const
+{
+    ALOGV("%s: 0x%X for strategy %s", __FUNCTION__, mApplicableDevices, getName().c_str());
+    return mApplicableDevices;
+}
+
+} // namespace audio_policy
+} // namespace android
+
diff --git a/services/audiopolicy/engineconfigurable/src/Strategy.h b/services/audiopolicy/engineconfigurable/src/Strategy.h
new file mode 100755
index 0000000..1157d55
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Strategy.h
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Element.h"
+#include <RoutingStrategy.h>
+
+namespace android
+{
+namespace audio_policy
+{
+
+/**
+ * @tparam audio_devices_t: Applicable output device(s) for this strategy.
+ */
+template <>
+class Element<routing_strategy>
+{
+public:
+    Element(const std::string &name)
+        : mName(name),
+          mApplicableDevices(AUDIO_DEVICE_NONE)
+    {}
+    ~Element() {}
+
+    /**
+     * Returns identifier of this policy element
+     *
+     * @returns string representing the name of this policy element
+     */
+    const std::string &getName() const { return mName; }
+
+    /**
+     * Set the unique identifier for this policy element.
+     *
+     * @param[in] identifier to be set.
+     *
+     * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
+     */
+    status_t setIdentifier(routing_strategy identifier);
+
+    /**
+     * @return the unique identifier of this policy element.
+     */
+    routing_strategy getIdentifier() const { return mIdentifier; }
+
+    /**
+     * A Policy element may implement getter/setter function for a given property.
+     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * or a string.
+     */
+    template <typename Property>
+    Property get() const;
+
+    template <typename Property>
+    status_t set(Property property);
+
+private:
+    /* Copy facilities are put private to disable copy. */
+    Element(const Element &object);
+    Element &operator=(const Element &object);
+
+    std::string mName; /**< Unique literal Identifier of a policy base element*/
+    routing_strategy mIdentifier; /**< Unique numerical Identifier of a policy base element*/
+
+    audio_devices_t mApplicableDevices; /**< Applicable output device(s) for this strategy. */
+};
+
+typedef Element<routing_strategy> Strategy;
+
+} // namespace audio_policy
+} // namespace android
+
+
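A minimal usage sketch of the Element<routing_strategy> specialization above (illustration only, not part of the patch; it assumes only the declarations from Strategy.h plus the STRATEGY_MEDIA, AUDIO_DEVICE_OUT_SPEAKER and NO_ERROR constants already used elsewhere in this change):

    #include "Strategy.h"

    namespace android
    {
    namespace audio_policy
    {

    // Illustration only: exercises the accessors declared above.
    static void exampleStrategyUsage()
    {
        Strategy media("media");
        media.setIdentifier(STRATEGY_MEDIA);             // rejected if >= NUM_STRATEGIES
        // set<audio_devices_t>() accepts only output device masks or AUDIO_DEVICE_NONE.
        if (media.set<audio_devices_t>(AUDIO_DEVICE_OUT_SPEAKER) != NO_ERROR) {
            // invalid device mask for a strategy
        }
        audio_devices_t devices = media.get<audio_devices_t>();  // returns the stored mask
        (void)devices;
    }

    } // namespace audio_policy
    } // namespace android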
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.cpp b/services/audiopolicy/engineconfigurable/src/Stream.cpp
new file mode 100755
index 0000000..bea2c19
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Stream.cpp
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Stream"
+
+#include "Stream.h"
+#include <system/audio.h>
+
+using std::string;
+
+namespace android
+{
+namespace audio_policy
+{
+
+status_t Element<audio_stream_type_t>::setIdentifier(audio_stream_type_t identifier)
+{
+    if (identifier >= AUDIO_STREAM_CNT) {
+        return BAD_VALUE;
+    }
+    mIdentifier = identifier;
+    ALOGD("%s: Stream %s identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
+    return NO_ERROR;
+}
+
+/**
+* Set the strategy to follow for this stream.
+* It checks if the strategy is valid.
+*
+* @param[in] strategy to be followed.
+*
+* @return NO_ERROR if the strategy is set correctly, error code otherwise.
+*/
+template <>
+status_t Element<audio_stream_type_t>::set<routing_strategy>(routing_strategy strategy)
+{
+    if (strategy >= NUM_STRATEGIES) {
+        return BAD_VALUE;
+    }
+    mApplicableStrategy = strategy;
+    ALOGD("%s: 0x%X for Stream %s", __FUNCTION__, strategy, getName().c_str());
+    return NO_ERROR;
+}
+
+template <>
+routing_strategy Element<audio_stream_type_t>::get<routing_strategy>() const
+{
+    ALOGV("%s: 0x%X for Stream %s", __FUNCTION__, mApplicableStrategy, getName().c_str());
+    return mApplicableStrategy;
+}
+
+status_t Element<audio_stream_type_t>::setVolumeProfile(Volume::device_category category,
+                                                        const VolumeCurvePoints &points)
+{
+    ALOGD("%s: adding volume profile for %s for device category %d, points nb =%d", __FUNCTION__,
+          getName().c_str(), category, points.size());
+    mVolumeProfiles[category] = points;
+
+    for (size_t i = 0; i < points.size(); i++) {
+        ALOGV("%s: %s cat=%d curve index =%d Index=%d dBAttenuation=%f",
+              __FUNCTION__, getName().c_str(), category, i, points[i].mIndex,
+             points[i].mDBAttenuation);
+    }
+    return NO_ERROR;
+}
+
+status_t Element<audio_stream_type_t>::initVolume(int indexMin, int indexMax)
+{
+    ALOGV("initStreamVolume() stream %s, min %d, max %d", getName().c_str(), indexMin, indexMax);
+    if (indexMin < 0 || indexMin >= indexMax) {
+        ALOGW("initStreamVolume() invalid index limits for stream %s, min %d, max %d",
+              getName().c_str(), indexMin, indexMax);
+        return BAD_VALUE;
+    }
+    mIndexMin = indexMin;
+    mIndexMax = indexMax;
+
+    return NO_ERROR;
+}
+
+float Element<audio_stream_type_t>::volIndexToDb(Volume::device_category deviceCategory,
+                                                   int indexInUi)
+{
+    VolumeProfileConstIterator it = mVolumeProfiles.find(deviceCategory);
+    if (it == mVolumeProfiles.end()) {
+        ALOGE("%s: device category %d not found for stream %s", __FUNCTION__, deviceCategory,
+              getName().c_str());
+        return 1.0f;
+    }
+    const VolumeCurvePoints &curve = it->second;
+    if (curve.size() != Volume::VOLCNT) {
+        ALOGE("%s: invalid profile for category %d and for stream %s", __FUNCTION__, deviceCategory,
+              getName().c_str());
+        return 1.0f;
+    }
+
+    // the volume index in the UI is relative to the min and max volume indices for this stream type
+    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex -
+            curve[Volume::VOLMIN].mIndex;
+
+    if (mIndexMax - mIndexMin == 0) {
+        ALOGE("%s: Invalid volume indexes Min=Max=%d", __FUNCTION__, mIndexMin);
+        return 1.0f;
+    }
+    int volIdx = (nbSteps * (indexInUi - mIndexMin)) /
+            (mIndexMax - mIndexMin);
+
+    // find what part of the curve this index volume belongs to, or if it's out of bounds
+    int segment = 0;
+    if (volIdx < curve[Volume::VOLMIN].mIndex) {         // out of bounds
+        return 0.0f;
+    } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
+        segment = 0;
+    } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
+        segment = 1;
+    } else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
+        segment = 2;
+    } else {                                                               // out of bounds
+        return 1.0f;
+    }
+
+    // linear interpolation in the attenuation table in dB
+    float decibels = curve[segment].mDBAttenuation +
+            ((float)(volIdx - curve[segment].mIndex)) *
+                ( (curve[segment+1].mDBAttenuation -
+                        curve[segment].mDBAttenuation) /
+                    ((float)(curve[segment+1].mIndex -
+                            curve[segment].mIndex)) );
+
+    ALOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
+            curve[segment].mIndex, volIdx,
+            curve[segment+1].mIndex,
+            curve[segment].mDBAttenuation,
+            decibels,
+            curve[segment+1].mDBAttenuation);
+
+    return decibels;
+}
+
+} // namespace audio_policy
+} // namespace android
+
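The volIndexToDb() implementation above first rescales the UI index into the curve's own index range, finds the segment that the rescaled index falls into, and then linearly interpolates the attenuation in dB between the two surrounding curve points. A self-contained sketch of just the interpolation step (illustrative names, not part of the patch):

    // Given two curve points (index, dB), an index between them maps to a point on the
    // straight line joining them -- the same arithmetic as the end of volIndexToDb().
    struct CurvePoint { int index; float dB; };

    static float interpolateDb(const CurvePoint &lo, const CurvePoint &hi, int volIdx)
    {
        // Slope of the segment in dB per index step.
        float slope = (hi.dB - lo.dB) / static_cast<float>(hi.index - lo.index);
        return lo.dB + static_cast<float>(volIdx - lo.index) * slope;
    }

    // Example: lo = (1, -49.5), hi = (33, -33.5), volIdx = 17
    //   slope  = 16.0 / 32 = 0.5 dB per step
    //   result = -49.5 + 16 * 0.5 = -41.5 dB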
diff --git a/services/audiopolicy/engineconfigurable/src/Stream.h b/services/audiopolicy/engineconfigurable/src/Stream.h
new file mode 100755
index 0000000..8c39dc6
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Stream.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Element.h"
+#include "EngineDefinition.h"
+#include <Volume.h>
+#include <RoutingStrategy.h>
+#include <map>
+
+namespace android
+{
+namespace audio_policy
+{
+/**
+ * Policy Element specialization holding the applicable routing strategy for this stream.
+ */
+template <>
+class Element<audio_stream_type_t>
+{
+private:
+    typedef std::map<Volume::device_category, VolumeCurvePoints> VolumeProfiles;
+    typedef VolumeProfiles::iterator VolumeProfileIterator;
+    typedef VolumeProfiles::const_iterator VolumeProfileConstIterator;
+
+public:
+    Element(const std::string &name)
+        : mName(name),
+          mApplicableStrategy(STRATEGY_MEDIA),
+          mIndexMin(0),
+          mIndexMax(1)
+    {}
+    ~Element() {}
+
+    /**
+     * Returns identifier of this policy element
+     *
+     * @returns string representing the name of this policy element
+     */
+    const std::string &getName() const { return mName; }
+
+    /**
+     * Set the unique identifier for this policy element.
+     *
+     * @param[in] identifier to be set.
+     *
+     * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
+     */
+    status_t setIdentifier(audio_stream_type_t identifier);
+
+    /**
+     * @return the unique identifier of this policy element.
+     */
+    audio_stream_type_t getIdentifier() const { return mIdentifier; }
+
+    /**
+     * A Policy element may implement getter/setter function for a given property.
+     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * or a string.
+     */
+    template <typename Property>
+    Property get() const;
+
+    template <typename Property>
+    status_t set(Property property);
+
+    status_t setVolumeProfile(Volume::device_category category, const VolumeCurvePoints &points);
+
+    float volIndexToDb(Volume::device_category deviceCategory, int indexInUi);
+
+    status_t initVolume(int indexMin, int indexMax);
+
+private:
+    /* Copy facilities are put private to disable copy. */
+    Element(const Element &object);
+    Element &operator=(const Element &object);
+
+    std::string mName; /**< Unique literal Identifier of a policy base element*/
+    audio_stream_type_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
+
+    routing_strategy mApplicableStrategy; /**< Applicable strategy for this stream. */
+
+    /**
+     * Collection of volume profiles for this stream, indexed by device category.
+     * Volume is the only reason the stream profile was not removed from the policy when audio
+     * attributes were introduced.
+     */
+    VolumeProfiles mVolumeProfiles;
+
+    int mIndexMin;
+
+    int mIndexMax;
+};
+
+typedef Element<audio_stream_type_t> Stream;
+
+} // namespace audio_policy
+} // namespace android
+
+
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.cpp b/services/audiopolicy/engineconfigurable/src/Usage.cpp
new file mode 100755
index 0000000..5d20828
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Usage.cpp
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/Usage"
+
+#include "Usage.h"
+
+namespace android
+{
+namespace audio_policy
+{
+
+status_t Element<audio_usage_t>::setIdentifier(audio_usage_t identifier)
+{
+    if (identifier > AUDIO_USAGE_MAX) {
+        return BAD_VALUE;
+    }
+    mIdentifier = identifier;
+    ALOGD("%s: Usage %s has identifier 0x%X", __FUNCTION__, getName().c_str(), identifier);
+    return NO_ERROR;
+}
+
+template <>
+status_t Element<audio_usage_t>::set<routing_strategy>(routing_strategy strategy)
+{
+    if (strategy >= NUM_STRATEGIES) {
+        return BAD_VALUE;
+    }
+    ALOGD("%s: %d for Usage %s", __FUNCTION__, strategy, getName().c_str());
+    mApplicableStrategy = strategy;
+    return NO_ERROR;
+}
+
+template <>
+routing_strategy Element<audio_usage_t>::get<routing_strategy>() const
+{
+    ALOGD("%s: %d for Usage %s", __FUNCTION__, mApplicableStrategy, getName().c_str());
+    return mApplicableStrategy;
+}
+
+} // namespace audio_policy
+} // namespace android
+
+
diff --git a/services/audiopolicy/engineconfigurable/src/Usage.h b/services/audiopolicy/engineconfigurable/src/Usage.h
new file mode 100755
index 0000000..d69e0e0
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/src/Usage.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "Element.h"
+#include <RoutingStrategy.h>
+
+namespace android
+{
+namespace audio_policy
+{
+
+/**
+ * Policy Element specialization holding the applicable routing strategy for this usage.
+ */
+template <>
+class Element<audio_usage_t>
+{
+public:
+    Element(const std::string &name)
+        : mName(name),
+          mApplicableStrategy(STRATEGY_MEDIA)
+    {}
+    ~Element() {}
+
+    /**
+     * Returns identifier of this policy element
+     *
+     * @returns string representing the name of this policy element
+     */
+    const std::string &getName() const { return mName; }
+
+    /**
+     * Set the unique identifier for this policy element.
+     *
+     * @param[in] identifier to be set.
+     *
+     * @return NO_ERROR if the identifier is valid and set correctly, error code otherwise.
+     */
+    status_t setIdentifier(audio_usage_t identifier);
+
+    /**
+     * @return the unique identifier of this policy element.
+     */
+    audio_usage_t getIdentifier() const { return mIdentifier; }
+
+    /**
+     * A Policy element may implement getter/setter function for a given property.
+     * Property may be routing_strategy, audio_stream_type_t, audio_usage_t, audio_source_t
+     * or a string.
+     */
+    template <typename Property>
+    Property get() const;
+
+    template <typename Property>
+    status_t set(Property property);
+
+private:
+    /* Copy facilities are put private to disable copy. */
+    Element(const Element &object);
+    Element &operator=(const Element &object);
+
+    std::string mName; /**< Unique literal Identifier of a policy base element*/
+    audio_usage_t mIdentifier; /**< Unique numerical Identifier of a policy base element*/
+    routing_strategy mApplicableStrategy; /**< Applicable strategy for this usage. */
+};
+
+typedef Element<audio_usage_t> Usage;
+
+} // namespace audio_policy
+} // namespace android
+
+
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
new file mode 100644
index 0000000..096f913
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -0,0 +1,40 @@
+LOCAL_PATH:= $(call my-dir)
+
+##################################################################
+# WRAPPER LIBRARY
+##################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_C_INCLUDES := \
+    $(LOCAL_PATH)/include \
+    $(TARGET_OUT_HEADERS)/parameter \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/interface \
+    $(TOPDIR)frameworks/av/services/audiopolicy/utilities/convert \
+
+LOCAL_SRC_FILES:= ParameterManagerWrapper.cpp
+
+LOCAL_STATIC_LIBRARIES := \
+    libmedia_helper \
+
+LOCAL_MODULE:= libaudiopolicypfwwrapper
+LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
+
+LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS := -Wall -Werror -Wextra
+
+include $(BUILD_STATIC_LIBRARY)
+
+##################################################################
+# CONFIGURATION FILE
+##################################################################
+
+# specific management of audio_policy_criteria.conf
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_criteria.conf
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)
+LOCAL_SRC_FILES := config/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
new file mode 100755
index 0000000..cfe49d4
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -0,0 +1,435 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/PFWWrapper"
+
+#include "ParameterManagerWrapper.h"
+#include "audio_policy_criteria_conf.h"
+#include <ParameterMgrPlatformConnector.h>
+#include <SelectionCriterionTypeInterface.h>
+#include <SelectionCriterionInterface.h>
+#include <convert.h>
+#include <algorithm>
+#include <cutils/config_utils.h>
+#include <cutils/misc.h>
+#include <fstream>
+#include <limits>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <stdint.h>
+#include <cmath>
+#include <utils/Log.h>
+
+using std::string;
+using std::map;
+using std::vector;
+
+/// PFW related definitions
+// Logger
+class ParameterMgrPlatformConnectorLogger : public CParameterMgrPlatformConnector::ILogger
+{
+public:
+    ParameterMgrPlatformConnectorLogger() {}
+
+    virtual void log(bool isWarning, const string &log)
+    {
+        const static string format("policy-parameter-manager: ");
+
+        if (isWarning) {
+            ALOGW("%s %s", format.c_str(), log.c_str());
+        } else {
+            ALOGD("%s %s", format.c_str(), log.c_str());
+        }
+    }
+};
+
+namespace android
+{
+
+using utilities::convertTo;
+
+namespace audio_policy
+{
+const char *const ParameterManagerWrapper::mPolicyPfwDefaultConfFileName =
+    "/etc/parameter-framework/ParameterFrameworkConfigurationPolicy.xml";
+
+template <>
+struct ParameterManagerWrapper::parameterManagerElementSupported<ISelectionCriterionInterface> {};
+template <>
+struct ParameterManagerWrapper::parameterManagerElementSupported<ISelectionCriterionTypeInterface> {};
+
+ParameterManagerWrapper::ParameterManagerWrapper()
+    : mPfwConnectorLogger(new ParameterMgrPlatformConnectorLogger)
+{
+    // Connector
+    mPfwConnector = new CParameterMgrPlatformConnector(mPolicyPfwDefaultConfFileName);
+
+    // Logger
+    mPfwConnector->setLogger(mPfwConnectorLogger);
+
+    // Load criteria file
+    if ((loadAudioPolicyCriteriaConfig(gAudioPolicyCriteriaVendorConfFilePath) != NO_ERROR) &&
+        (loadAudioPolicyCriteriaConfig(gAudioPolicyCriteriaConfFilePath) != NO_ERROR)) {
+        ALOGE("%s: Neither vendor conf file (%s) nor system conf file (%s) could be found",
+              __FUNCTION__, gAudioPolicyCriteriaVendorConfFilePath,
+              gAudioPolicyCriteriaConfFilePath);
+    }
+    ALOGD("%s: ParameterManagerWrapper instantiated!", __FUNCTION__);
+}
+
+ParameterManagerWrapper::~ParameterManagerWrapper()
+{
+    // Unset logger
+    mPfwConnector->setLogger(NULL);
+    // Remove logger
+    delete mPfwConnectorLogger;
+    // Remove connector
+    delete mPfwConnector;
+}
+
+status_t ParameterManagerWrapper::start()
+{
+    ALOGD("%s: in", __FUNCTION__);
+    /// Start PFW
+    std::string error;
+    if (!mPfwConnector->start(error)) {
+        ALOGE("%s: Policy PFW start error: %s", __FUNCTION__, error.c_str());
+        return NO_INIT;
+    }
+    ALOGD("%s: Policy PFW successfully started!", __FUNCTION__);
+    return NO_ERROR;
+}
+
+
+void ParameterManagerWrapper::addCriterionType(const string &typeName, bool isInclusive)
+{
+    ALOG_ASSERT(mPolicyCriterionTypes.find(typeName) == mPolicyCriterionTypes.end(),
+                "CriterionType %s already added", typeName.c_str());
+    ALOGD("%s: Adding new criterionType %s", __FUNCTION__, typeName.c_str());
+
+    mPolicyCriterionTypes[typeName] = mPfwConnector->createSelectionCriterionType(isInclusive);
+}
+
+void ParameterManagerWrapper::addCriterionTypeValuePair(
+    const string &typeName,
+    uint32_t numericValue,
+    const string &literalValue)
+{
+    ALOG_ASSERT(mPolicyCriterionTypes.find(typeName) != mPolicyCriterionTypes.end(),
+                "CriterionType %s not found", typeName.c_str());
+    ALOGV("%s: Adding new value pair (%d,%s) for criterionType %s", __FUNCTION__,
+          numericValue, literalValue.c_str(), typeName.c_str());
+    ISelectionCriterionTypeInterface *criterionType = mPolicyCriterionTypes[typeName];
+    criterionType->addValuePair(numericValue, literalValue.c_str());
+}
+
+void ParameterManagerWrapper::loadCriterionType(cnode *root, bool isInclusive)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    cnode *node;
+    for (node = root->first_child; node != NULL; node = node->next) {
+
+        ALOG_ASSERT(node != NULL, "error in parsing file");
+        const char *typeName = node->name;
+        char *valueNames = strndup(node->value, strlen(node->value));
+
+        addCriterionType(typeName, isInclusive);
+
+        uint32_t index = 0;
+        char *ctx;
+        char *valueName = strtok_r(valueNames, ",", &ctx);
+        while (valueName != NULL) {
+            if (strlen(valueName) != 0) {
+
+                // The conf file may or may not provide numerical:literal pairs. If no
+                // numerical value is given, use an incremental index, else use the provided one.
+                if (strchr(valueName, ':') != NULL) {
+
+                    char *first = strtok(valueName, ":");
+                    char *second = strtok(NULL, ":");
+                    ALOG_ASSERT((first != NULL) && (strlen(first) != 0) &&
+                                      (second != NULL) && (strlen(second) != 0),
+                                      "invalid value pair");
+
+                    if (!convertTo<string, uint32_t>(first, index)) {
+                        ALOGE("%s: Invalid index(%s) found", __FUNCTION__, first);
+                    }
+                    addCriterionTypeValuePair(typeName, index, second);
+                } else {
+
+                    uint32_t pfwIndex = isInclusive ? 1 << index : index;
+                    addCriterionTypeValuePair(typeName, pfwIndex, valueName);
+                    index += 1;
+                }
+            }
+            valueName = strtok_r(NULL, ",", &ctx);
+        }
+        free(valueNames);
+    }
+}
+
+void ParameterManagerWrapper::loadInclusiveCriterionType(cnode *root)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    cnode *node = config_find(root, gInclusiveCriterionTypeTag.c_str());
+    if (node == NULL) {
+        return;
+    }
+    loadCriterionType(node, true);
+}
+
+void ParameterManagerWrapper::loadExclusiveCriterionType(cnode *root)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    cnode *node = config_find(root, gExclusiveCriterionTypeTag.c_str());
+    if (node == NULL) {
+        return;
+    }
+    loadCriterionType(node, false);
+}
+
+void ParameterManagerWrapper::parseChildren(cnode *root, string &defaultValue, string &type)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    cnode *node;
+    for (node = root->first_child; node != NULL; node = node->next) {
+        ALOG_ASSERT(node != NULL, "error in parsing file");
+
+        if (string(node->name) == gDefaultTag) {
+            defaultValue = node->value;
+        } else if (string(node->name) == gTypeTag) {
+            type = node->value;
+        } else {
+             ALOGE("%s: Unrecognized %s %s node", __FUNCTION__, node->name, node->value);
+        }
+    }
+}
+
+template <typename T>
+T *ParameterManagerWrapper::getElement(const string &name, std::map<string, T *> &elementsMap)
+{
+    parameterManagerElementSupported<T>();
+    typename std::map<string, T *>::iterator it = elementsMap.find(name);
+    ALOG_ASSERT(it != elementsMap.end(), "Element %s not found", name.c_str());
+    return it->second;
+}
+
+template <typename T>
+const T *ParameterManagerWrapper::getElement(const string &name, const std::map<string, T *> &elementsMap) const
+{
+    parameterManagerElementSupported<T>();
+    typename std::map<string, T *>::const_iterator it = elementsMap.find(name);
+    ALOG_ASSERT(it != elementsMap.end(), "Element %s not found", name.c_str());
+    return it->second;
+}
+
+void ParameterManagerWrapper::loadCriteria(cnode *root)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    cnode *node = config_find(root, gCriterionTag.c_str());
+
+    if (node == NULL) {
+        ALOGW("%s: no inclusive criteria found", __FUNCTION__);
+        return;
+    }
+    for (node = node->first_child; node != NULL; node = node->next) {
+        loadCriterion(node);
+    }
+}
+
+void ParameterManagerWrapper::addCriterion(const string &name, const string &typeName,
+                              const string &defaultLiteralValue)
+{
+    ALOG_ASSERT(mPolicyCriteria.find(name) == mPolicyCriteria.end(),
+                "Criterion %s already added", name.c_str());
+
+    ISelectionCriterionTypeInterface *criterionType =
+            getElement<ISelectionCriterionTypeInterface>(typeName, mPolicyCriterionTypes);
+
+    ISelectionCriterionInterface *criterion =
+            mPfwConnector->createSelectionCriterion(name, criterionType);
+
+    mPolicyCriteria[name] = criterion;
+    int numericalValue = 0;
+    if (!criterionType->getNumericalValue(defaultLiteralValue.c_str(), numericalValue)) {
+        ALOGE("%s: trying to apply invalid default literal value (%s)", __FUNCTION__,
+              defaultLiteralValue.c_str());
+    }
+    criterion->setCriterionState(numericalValue);
+}
+
+void ParameterManagerWrapper::loadCriterion(cnode *root)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    const char *criterionName = root->name;
+
+    ALOG_ASSERT(mPolicyCriteria.find(criterionName) == mPolicyCriteria.end(),
+                "Criterion %s already added", criterionName);
+
+    string typeName = "";
+    string defaultValue = "";
+
+    parseChildren(root, defaultValue, typeName);
+
+    addCriterion(criterionName, typeName, defaultValue);
+}
+
+void ParameterManagerWrapper::loadConfig(cnode *root)
+{
+    ALOG_ASSERT(root != NULL, "error in parsing file");
+    cnode *node = config_find(root, gPolicyConfTag.c_str());
+    if (node == NULL) {
+        ALOGW("%s: Could not find node for pfw", __FUNCTION__);
+        return;
+    }
+    ALOGD("%s: Loading conf for pfw", __FUNCTION__);
+    loadInclusiveCriterionType(node);
+    loadExclusiveCriterionType(node);
+    loadCriteria(node);
+}
+
+
+status_t ParameterManagerWrapper::loadAudioPolicyCriteriaConfig(const char *path)
+{
+    ALOG_ASSERT(path != NULL, "error in parsing file: empty path");
+    cnode *root;
+    char *data;
+    ALOGD("%s", __FUNCTION__);
+    data = (char *)load_file(path, NULL);
+    if (data == NULL) {
+        return -ENODEV;
+    }
+    root = config_node("", "");
+    ALOG_ASSERT(root != NULL, "Unable to allocate a configuration node");
+    config_load(root, data);
+
+    loadConfig(root);
+
+    config_free(root);
+    free(root);
+    free(data);
+    ALOGD("%s: loaded", __FUNCTION__);
+    return NO_ERROR;
+}
+
+bool ParameterManagerWrapper::isStarted()
+{
+    return mPfwConnector && mPfwConnector->isStarted();
+}
+
+status_t ParameterManagerWrapper::setPhoneState(audio_mode_t mode)
+{
+    ISelectionCriterionInterface *criterion = mPolicyCriteria[gPhoneStateCriterionTag];
+    if (!isValueValidForCriterion(criterion, static_cast<int>(mode))) {
+        return BAD_VALUE;
+    }
+    criterion->setCriterionState((int)(mode));
+    applyPlatformConfiguration();
+    return NO_ERROR;
+}
+
+audio_mode_t ParameterManagerWrapper::getPhoneState() const
+{
+    const ISelectionCriterionInterface *criterion =
+            getElement<ISelectionCriterionInterface>(gPhoneStateCriterionTag, mPolicyCriteria);
+    return static_cast<audio_mode_t>(criterion->getCriterionState());
+}
+
+status_t ParameterManagerWrapper::setForceUse(audio_policy_force_use_t usage,
+                                              audio_policy_forced_cfg_t config)
+{
+    // @todo: return an error on an unsupported value
+    if (usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return BAD_VALUE;
+    }
+
+    ISelectionCriterionInterface *criterion = mPolicyCriteria[gForceUseCriterionTag[usage]];
+    if (!isValueValidForCriterion(criterion, static_cast<int>(config))) {
+        return BAD_VALUE;
+    }
+    criterion->setCriterionState((int)config);
+    applyPlatformConfiguration();
+    return NO_ERROR;
+}
+
+audio_policy_forced_cfg_t ParameterManagerWrapper::getForceUse(audio_policy_force_use_t usage) const
+{
+    // @todo: return an error on an unsupported value
+    if (usage >= AUDIO_POLICY_FORCE_USE_CNT) {
+        return AUDIO_POLICY_FORCE_NONE;
+    }
+    const ISelectionCriterionInterface *criterion =
+            getElement<ISelectionCriterionInterface>(gForceUseCriterionTag[usage], mPolicyCriteria);
+    return static_cast<audio_policy_forced_cfg_t>(criterion->getCriterionState());
+}
+
+bool ParameterManagerWrapper::isValueValidForCriterion(ISelectionCriterionInterface *criterion,
+                                                       int valueToCheck)
+{
+    const ISelectionCriterionTypeInterface *interface = criterion->getCriterionType();
+    string literalValue;
+    return interface->getLiteralValue(valueToCheck, literalValue);
+}
+
+status_t ParameterManagerWrapper::setDeviceConnectionState(audio_devices_t devices,
+                                                           audio_policy_dev_state_t state,
+                                                           const char */*deviceAddress*/)
+{
+    ISelectionCriterionInterface *criterion = NULL;
+
+    if (audio_is_output_devices(devices)) {
+        criterion = mPolicyCriteria[gOutputDeviceCriterionTag];
+    } else if (devices & AUDIO_DEVICE_BIT_IN) {
+        criterion = mPolicyCriteria[gInputDeviceCriterionTag];
+    } else {
+        return BAD_TYPE;
+    }
+    if (criterion == NULL) {
+        ALOGE("%s: no criterion found for devices", __FUNCTION__);
+        return DEAD_OBJECT;
+    }
+
+    int32_t previousDevices = criterion->getCriterionState();
+    switch (state)
+    {
+    case AUDIO_POLICY_DEVICE_STATE_AVAILABLE:
+        criterion->setCriterionState(previousDevices |= devices);
+        break;
+
+    case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE:
+        if (devices & AUDIO_DEVICE_BIT_IN) {
+            devices &= ~AUDIO_DEVICE_BIT_IN;
+        }
+        criterion->setCriterionState(previousDevices &= ~devices);
+        break;
+
+    default:
+        return BAD_VALUE;
+    }
+    applyPlatformConfiguration();
+    return NO_ERROR;
+}
+
+void ParameterManagerWrapper::applyPlatformConfiguration()
+{
+    mPfwConnector->applyConfigurations();
+}
+
+} // namespace audio_policy
+} // namespace android
diff --git a/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
new file mode 100755
index 0000000..58e7135
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/wrapper/audio_policy_criteria_conf.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <system/audio_policy.h>
+
+//////////////////////////////////////////////////////////////////////////////////////////////////
+//      Definitions for audio policy criteria configuration file (audio_policy_criteria.conf)   //
+//                                                                                              //
+//      @TODO: scripted from audio.h & audio_policy.h                                           //
+//////////////////////////////////////////////////////////////////////////////////////////////////
+
+static const char *const gAudioPolicyCriteriaConfFilePath =
+    "/system/etc/audio_policy_criteria.conf";
+static const char *const gAudioPolicyCriteriaVendorConfFilePath =
+    "/vendor/etc/audio_policy_criteria.conf";
+
+/**
+ * PFW instances tags
+ */
+static const std::string &gPolicyConfTag = "Policy";
+static const std::string &gDefaultTag = "Default";
+static const std::string &gTypeTag = "Type";
+
+/**
+ * PFW elements tags
+ */
+static const std::string &gInclusiveCriterionTypeTag = "InclusiveCriterionType";
+static const std::string &gExclusiveCriterionTypeTag = "ExclusiveCriterionType";
+static const std::string &gCriterionTag = "Criterion";
+
+/**
+ * PFW known criterion tags
+ */
+static const std::string &gInputDeviceCriterionTag = "AvailableInputDevices";
+static const std::string &gOutputDeviceCriterionTag = "AvailableOutputDevices";
+static const std::string &gPhoneStateCriterionTag = "TelephonyMode";
+
+/**
+ * Order MUST be aligned with the definition of audio_policy_force_use_t within audio_policy.h
+ */
+static const std::string gForceUseCriterionTag[AUDIO_POLICY_FORCE_USE_CNT] =
+{
+    [AUDIO_POLICY_FORCE_FOR_COMMUNICATION] =        "ForceUseForCommunication",
+    [AUDIO_POLICY_FORCE_FOR_MEDIA] =                "ForceUseForMedia",
+    [AUDIO_POLICY_FORCE_FOR_RECORD] =               "ForceUseForRecord",
+    [AUDIO_POLICY_FORCE_FOR_DOCK] =                 "ForceUseForDock",
+    [AUDIO_POLICY_FORCE_FOR_SYSTEM] =               "ForceUseForSystem",
+    [AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] =    "ForceUseForHdmiSystemAudio"
+};
+
+
+
+
+
+
diff --git a/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf b/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
new file mode 100755
index 0000000..5b046a8
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/wrapper/config/audio_policy_criteria.conf
@@ -0,0 +1,137 @@
+################################################################################################
+#
+# @NOTE:
+# Audio Policy Criteria file example for generic device build
+#
+# Any vendor shall have its own configuration within the corresponding device folder
+#
+################################################################################################
+
+#########################################################
+# Criterion type Example:
+# For each criterion type, a set of numerical/literal value pairs must be provided to the PFW.
+# The numerical part is not mandatory. If not filled by the user, a default numerical value is
+# automatically assigned using the following logic:
+#   - Exclusive criterion:
+#          * 0 -> first literal value,
+#          * 1 -> second literal value,
+#               ...
+#          * N -> (N+1)th literal value.
+#   - Inclusive criterion:
+#          * 1 << 0 -> first literal value,
+#          * 1 << 1 -> second literal value,
+#               ...
+#          * 1 << N -> (N+1)th literal value,
+#
+#########################################################
+# Policy {
+#    InclusiveCriterionType|ExclusiveCriterionType {
+#        <Criterion Name>  [numerical value 1:]<literal value 1>,[numerical value 2:]<literal value 2>,<literal value 3>,...
+#    }
+# }
+
+#########################################################
+# Criterion:
+#########################################################
+# Policy {
+#    Criterion {
+#        <Criterion Name> {
+#            Type            <Criterion type name>
+#            Default         <default value of the criterion>
+#        }
+#    }
+# }
+
+Policy {
+    InclusiveCriterionType {
+        #
+        # DO NOT CHANGE ORDER. This definition must be aligned with the definition of
+        # AUDIO_DEVICE_OUT_* within <system/audio.h> file of android.
+        #
+        OutputDevicesMaskType   Earpiece,Speaker,WiredHeadset,WiredHeadphone,BluetoothSco,BluetoothScoHeadset,BluetoothScoCarkit,BluetoothA2dp,BluetoothA2dpHeadphones,BluetoothA2dpSpeaker,Hdmi,AnlgDockHeadset,DgtlDockHeadset,UsbAccessory,UsbDevice,RemoteSubmix,TelephonyTx,Line,HdmiArc,Spdif,Fm,AuxLine,SpeakerSafe
+        #
+        # DO NOT CHANGE ORDER. This definition must be aligned with the definition of
+        # AUDIO_DEVICE_IN_* within <system/audio.h> file of android.
+        # Note also that the direction bit is stripped before reaching the PFW, so that each bit
+        # of the mask maps to exactly one input device.
+        #
+        InputDevicesMaskType    Communication,Ambient,BuiltinMic,BluetoothScoHeadset,WiredHeadset,Hdmi,TelephonyRx,BackMic,RemoteSubmix,AnlgDockHeadset,DgtlDockHeadset,UsbAccessory,UsbDevice,FmTuner,TvTune,Line,Spdif,BluetoothA2dp,Loopback
+    }
+    ExclusiveCriterionType {
+        #
+        # The values of the mode MUST be aligned with the definition of the audio_mode_t
+        # from system/audio.h
+        #
+        AndroidModeType     0:Normal,1:RingTone,2:InCall,3:InCommunication
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForCommunicationType    0:ForceNone,1:ForceSpeaker,3:ForceBtSco
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForMediaType            0:ForceNone,1:ForceSpeaker,2:ForceHeadphones,4:ForceBtA2dp,5:ForceWiredAccessory,8:ForceAnalogDock,9:ForceDigitalDock,10:ForceNoBtA2dp
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForRecordType           0:ForceNone,3:ForceBtSco,5:ForceWiredAccessory
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForDockType             0:ForceNone,5:ForceWiredAccessory,6:ForceBtCarDock,7:ForceBtDeskDock,8:ForceAnalogDock,9:ForceDigitalDock
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForSystemType           0:ForceNone,11:ForceSystemEnforced
+        #
+        # The values MUST be aligned with the definition of
+        # audio_policy_forced_cfg_t from system/audio_policy.h
+        #
+        ForceUseForHdmiSystemAudioType  0:ForceNone,12:ForceHdmiSystemEnforced
+    }
+
+    Criterion {
+        AvailableInputDevices {
+            Type            InputDevicesMaskType
+            Default         none
+        }
+        AvailableOutputDevices {
+            Type            OutputDevicesMaskType
+            Default         none
+        }
+        TelephonyMode {
+            Type            AndroidModeType
+            Default         Normal
+        }
+        ForceUseForCommunication {
+            Type            ForceUseForCommunicationType
+            Default         ForceNone
+        }
+        ForceUseForMedia {
+            Type            ForceUseForMediaType
+            Default         ForceNone
+        }
+        ForceUseForRecord {
+            Type            ForceUseForRecordType
+            Default         ForceNone
+        }
+        ForceUseForDock {
+            Type            ForceUseForDockType
+            Default         ForceNone
+        }
+        ForceUseForSystem {
+            Type            ForceUseForSystemType
+            Default         ForceNone
+        }
+        ForceUseForHdmiSystemAudio {
+            Type            ForceUseForHdmiSystemAudioType
+            Default         ForceNone
+        }
+    }
+}
+
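When the numerical part of a value pair is omitted in this file, ParameterManagerWrapper::loadCriterionType() assigns the defaults described in the comment block at the top: sequential indices for exclusive criterion types and power-of-two bits for inclusive ones. A small illustrative helper mirroring that logic (not part of the patch):

    #include <stdint.h>

    // Default numbering: exclusive types count 0, 1, 2, ...; inclusive types use 1 << position,
    // which is why OutputDevicesMaskType lines up with the AUDIO_DEVICE_OUT_* bit masks
    // (Earpiece -> 0x1, Speaker -> 0x2, WiredHeadset -> 0x4, ...).
    static uint32_t defaultNumericalValue(bool isInclusive, uint32_t position)
    {
        return isInclusive ? (1u << position) : position;
    }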
diff --git a/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
new file mode 100755
index 0000000..3c5f2c0
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/wrapper/include/ParameterManagerWrapper.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <utils/Errors.h>
+#include <utils/RWLock.h>
+#include <list>
+#include <map>
+#include <string>
+#include <vector>
+
+class CParameterMgrPlatformConnector;
+class ISelectionCriterionInterface;
+class ISelectionCriterionTypeInterface;
+struct cnode;
+
+class ParameterMgrPlatformConnectorLogger;
+
+namespace android
+{
+namespace audio_policy
+{
+
+class ParameterManagerWrapper
+{
+private:
+    typedef std::pair<int, const char *> CriterionTypeValuePair;
+
+    typedef std::map<std::string, ISelectionCriterionInterface *> CriterionCollection;
+    typedef std::map<std::string, ISelectionCriterionTypeInterface *> CriterionTypeCollection;
+    typedef CriterionCollection::iterator CriterionMapIterator;
+    typedef CriterionCollection::const_iterator CriterionMapConstIterator;
+    typedef CriterionTypeCollection::iterator CriterionTypeMapIterator;
+    typedef CriterionTypeCollection::const_iterator CriteriaTypeMapConstIterator;
+
+public:
+    ParameterManagerWrapper();
+    ~ParameterManagerWrapper();
+
+    /**
+     * Starts the platform state service.
+     * It starts the parameter framework policy instance.
+     *
+     * @return NO_ERROR if success, error code otherwise.
+     */
+    status_t start();
+
+    /**
+     * The following APIs map policy actions onto criteria.
+     */
+
+    /**
+     * Checks if the platform state was correctly started (i.e. the policy parameter manager
+     * has been instantiated and started correctly).
+     *
+     * @todo: map on initCheck?
+     *
+     * @return true if platform state is started correctly, false otherwise.
+     */
+    bool isStarted();
+
+    /**
+     * Set Telephony Mode.
+     * It will set the telephony mode criterion accordingly and apply the configuration in order
+     * to select the right configuration on domains depending on this mode criterion.
+     *
+     * @param[in] mode: Android phone state (normal, ringtone, in call, in communication)
+     *
+     * @return NO_ERROR if criterion set correctly, error code otherwise.
+     */
+    status_t setPhoneState(audio_mode_t mode);
+
+    audio_mode_t getPhoneState() const;
+
+    /**
+     * Set Force Use config for a given usage.
+     * It will set the corresponding policy parameter framework criterion.
+     *
+     * @param[in] usage for which a configuration shall be forced.
+     * @param[in] config to be forced for the given usage.
+     *
+     * @return NO_ERROR if the criterion was set correctly, error code otherwise (e.g. config not
+     * allowed for the given usage).
+     */
+    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+
+    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const;
+
+    /**
+     * Set the connection state of device(s).
+     * It will set the associated policy parameter framework criterion.
+     *
+     * @param[in] devices mask of devices for which the state has changed.
+     * @param[in] state of availability of this(these) device(s).
+     * @param[in] deviceAddress: the mask might not be enough, as it may represent a type of
+     *            device, so the address of the device helps identify it precisely.
+     *
+     * @return NO_ERROR if devices criterion updated correctly, error code otherwise.
+     */
+    status_t setDeviceConnectionState(audio_devices_t devices, audio_policy_dev_state_t state,
+                                      const char *deviceAddress);
+
+private:
+    /**
+     * Apply the configuration of the platform on the policy parameter manager.
+     * Once all the criteria have been set, the client of the platform state must call
+     * this function in order to have the policy PFW take these criteria into account.
+     *
+     * OPENS: shall we expose this?
+     *      - Yes if atomic set operation.
+     *          In this case, abstract it behind the "STAGE AND COMMIT" pattern
+     *      - No if more than one criterion needs to be set before triggering an apply.
+     */
+    void applyPlatformConfiguration();
+
+    /**
+     * Load the criterion configuration file.
+     *
+     * @param[in] path Criterion conf file path.
+     *
+     * @return NO_ERROR if parsing is successful, error code otherwise.
+     */
+    status_t loadAudioPolicyCriteriaConfig(const char *path);
+
+    /**
+     * Add a criterion type to AudioPolicyPfw.
+     *
+     * @param[in] typeName of the PFW criterion type.
+     * @param[in] isInclusive attribute of the criterion type.
+     */
+    void addCriterionType(const std::string &typeName, bool isInclusive);
+
+    /**
+     * Add a criterion type value pair to AudioPolicyPfw.
+     *
+     * @param[in] typeName criterion type name to which this value pair is added.
+     * @param[in] numeric part of the value pair.
+     * @param[in] literal part of the value pair.
+     */
+    void addCriterionTypeValuePair(const std::string &typeName, uint32_t numeric,
+                                   const std::string &literal);
+
+    /**
+     * Add a criterion to AudioPolicyPfw.
+     *
+     * @param[in] name of the PFW criterion.
+     * @param[in] typeName criterion type name with which this criterion is associated.
+     * @param[in] defaultLiteralValue of the PFW criterion.
+     */
+    void addCriterion(const std::string &name,
+                      const std::string &typeName,
+                      const std::string &defaultLiteralValue);
+    /**
+     * Parse and load the inclusive criterion type from configuration file.
+     *
+     * @param[in] root node of the configuration file.
+     */
+    void loadInclusiveCriterionType(cnode *root);
+
+    /**
+     * Parse and load the exclusive criterion type from configuration file.
+     *
+     * @param[in] root node of the configuration file.
+     */
+    void loadExclusiveCriterionType(cnode *root);
+
+    /**
+     * Parse and load the criteria from configuration file.
+     *
+     * @param[in] root node of the configuration file.
+     */
+    void loadCriteria(cnode *root);
+
+    /**
+     * Parse and load a criterion from configuration file.
+     *
+     * @param[in] root node of the configuration file.
+     */
+    void loadCriterion(cnode *root);
+
+    /**
+     * Parse and load the criterion types from configuration file.
+     *
+     * @param[in] root node of the configuration file
+     * @param[in] isInclusive true if inclusive, false if exclusive.
+     */
+    void loadCriterionType(cnode *root, bool isInclusive);
+
+    /**
+     * Load the configuration file.
+     *
+     * @param[in] root node of the configuration file.
+     */
+    void loadConfig(cnode *root);
+
+    /**
+     * Parse and load the children nodes of a given root node.
+     *
+     * @param[in] root node of the configuration file
+     * @param[out] defaultValue of the parameter manager element to retrieve.
+     * @param[out] type of the parameter manager element to retrieve.
+    */
+    void parseChildren(cnode *root, std::string &defaultValue, std::string &type);
+
+    /**
+     * Retrieve an element from a map by its name.
+     *
+     * @tparam T type of element to search.
+     * @param[in] name name of the element to find.
+     * @param[in] elementsMap map of elements to search in.
+     *
+     * @return valid pointer on element if found, NULL otherwise.
+     */
+    template <typename T>
+    T *getElement(const std::string &name, std::map<std::string, T *> &elementsMap);
+
+    /**
+     * Retrieve an element from a map by its name. Const version.
+     *
+     * @tparam T type of element to search.
+     * @param[in] name name of the element to find.
+     * @param[in] elementsMap map of elements to search in.
+     *
+     * @return valid pointer on element if found, NULL otherwise.
+     */
+    template <typename T>
+    const T *getElement(const std::string &name,
+                        const std::map<std::string, T *> &elementsMap) const;
+
+    /**
+     * set the value of a component state.
+     *
+     * @param[in] value new value to set to the component state.
+     * @param[in] stateName of the component state.
+     */
+    void setValue(int value, const std::string &stateName);
+
+    /**
+     * get the value of a component state.
+     *
+     * @param[in] name of the component state.
+     *
+     * @return value of the component state
+     */
+    int getValue(const std::string &stateName) const;
+
+    bool isValueValidForCriterion(ISelectionCriterionInterface *criterion, int valueToCheck);
+
+    CriterionTypeCollection mPolicyCriterionTypes; /**< Policy Criterion Type map. */
+    CriterionCollection mPolicyCriteria; /**< Policy Criterion Map. */
+
+    CParameterMgrPlatformConnector *mPfwConnector; /**< Policy Parameter Manager connector. */
+    ParameterMgrPlatformConnectorLogger *mPfwConnectorLogger; /**< Policy PFW logger. */
+
+
+    /**
+     * provide a compile time error if no specialization is provided for a given type.
+     *
+     * @tparam T: type of the parameter manager element. Supported ones are:
+     *                      - Criterion
+     *                      - CriterionType.
+     */
+    template <typename T>
+    struct parameterManagerElementSupported;
+
+    static const char *const mPolicyPfwDefaultConfFileName; /**< Default Policy PFW top file name.*/
+};
+
+} // namespace audio_policy
+} // namespace android
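A hedged usage sketch of the wrapper API declared above (illustration only; the helper name is hypothetical, the constants are the standard ones from system/audio.h and system/audio_policy.h, and error handling is reduced to a minimum):

    #include <ParameterManagerWrapper.h>

    using android::audio_policy::ParameterManagerWrapper;

    static android::status_t startPolicyPfw(ParameterManagerWrapper &pfw)
    {
        // start() boots the policy parameter-framework instance; the criteria themselves
        // were loaded from audio_policy_criteria.conf in the constructor.
        android::status_t status = pfw.start();
        if (status != android::NO_ERROR) {
            return status;
        }
        // Each setter updates the matching criterion and applies the PFW configuration.
        pfw.setPhoneState(AUDIO_MODE_NORMAL);
        pfw.setForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA, AUDIO_POLICY_FORCE_NONE);
        pfw.setDeviceConnectionState(AUDIO_DEVICE_OUT_WIRED_HEADSET,
                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
                                     "" /* deviceAddress */);
        return android::NO_ERROR;
    }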
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
new file mode 100755
index 0000000..8d43b89
--- /dev/null
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -0,0 +1,46 @@
+LOCAL_PATH := $(call my-dir)
+
+# Component build
+#######################################################################
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := \
+    src/Engine.cpp \
+    src/EngineInstance.cpp \
+    src/Gains.cpp \
+
+
+audio_policy_engine_includes_common := \
+    $(LOCAL_PATH)/include \
+    $(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+
+LOCAL_CFLAGS += \
+    -Wall \
+    -Werror \
+    -Wextra \
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+    $(audio_policy_engine_includes_common)
+
+LOCAL_C_INCLUDES := \
+    $(audio_policy_engine_includes_common) \
+    $(TARGET_OUT_HEADERS)/hw \
+    $(call include-path-for, frameworks-av) \
+    $(call include-path-for, audio-utils) \
+    $(call include-path-for, bionic) \
+    $(TOPDIR)frameworks/av/services/audiopolicy/common/include
+
+
+LOCAL_MODULE := libaudiopolicyenginedefault
+LOCAL_MODULE_TAGS := optional
+LOCAL_STATIC_LIBRARIES := \
+    libmedia_helper \
+    libaudiopolicycomponents
+
+LOCAL_SHARED_LIBRARIES += \
+    libcutils \
+    libutils \
+    libaudioutils \
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
new file mode 100755
index 0000000..1e329f0
--- /dev/null
+++ b/services/audiopolicy/enginedefault/include/AudioPolicyEngineInstance.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+class AudioPolicyManagerInterface;
+
+namespace android
+{
+namespace audio_policy
+{
+
+class Engine;
+
+class EngineInstance
+{
+protected:
+    EngineInstance();
+
+public:
+    virtual ~EngineInstance();
+
+    /**
+     * Get Audio Policy Engine instance.
+     *
+     * @return pointer to the Audio Policy Engine instance.
+     */
+    static EngineInstance *getInstance();
+
+    /**
+     * Interface query.
+     * The first client of an interface of the policy engine will start the singleton.
+     *
+     * @tparam RequestedInterface: interface that the client wishes to retrieve.
+     *
+     * @return interface handle.
+     */
+    template <class RequestedInterface>
+    RequestedInterface *queryInterface() const;
+
+protected:
+    /**
+     * Get Audio Policy Engine instance.
+     *
+     * @return Audio Policy Engine singleton.
+     */
+    Engine *getEngine() const;
+
+private:
+    /* Copy facilities are put private to disable copy. */
+    EngineInstance(const EngineInstance &object);
+    EngineInstance &operator=(const EngineInstance &object);
+};
+
+/**
+ * Limit template instantiation to supported interface types.
+ * A compile-time error will be raised if an invalid interface is requested.
+ */
+template <>
+AudioPolicyManagerInterface *EngineInstance::queryInterface() const;
+
+} // namespace audio_policy
+} // namespace android
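As documented above, the first interface query starts the engine singleton. A hedged sketch of how a client such as the audio policy manager would retrieve the AudioPolicyManagerInterface (illustration only; the function name is hypothetical):

    #include <AudioPolicyEngineInstance.h>

    static AudioPolicyManagerInterface *getEngineManagerInterface()
    {
        android::audio_policy::EngineInstance *engineInstance =
                android::audio_policy::EngineInstance::getInstance();
        if (engineInstance == NULL) {
            return NULL;
        }
        // Only AudioPolicyManagerInterface is a supported specialization of queryInterface().
        return engineInstance->queryInterface<AudioPolicyManagerInterface>();
    }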
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
new file mode 100755
index 0000000..0686414
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -0,0 +1,718 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "Engine.h"
+#include "Gains.h"
+#include <AudioPolicyManagerObserver.h>
+#include <AudioPort.h>
+#include <IOProfile.h>
+#include <policy.h>
+#include <utils/String8.h>
+#include <utils/Log.h>
+
+namespace android
+{
+namespace audio_policy
+{
+
+Engine::Engine()
+    : mManagerInterface(this),
+      mPhoneState(AUDIO_MODE_NORMAL),
+      mApmObserver(NULL)
+{
+    for (int i = 0; i < AUDIO_POLICY_FORCE_USE_CNT; i++) {
+        mForceUse[i] = AUDIO_POLICY_FORCE_NONE;
+    }
+}
+
+Engine::~Engine()
+{
+}
+
+void Engine::setObserver(AudioPolicyManagerObserver *observer)
+{
+    ALOG_ASSERT(observer != NULL, "Invalid Audio Policy Manager observer");
+    mApmObserver = observer;
+}
+
+status_t Engine::initCheck()
+{
+    return (mApmObserver != NULL) ?  NO_ERROR : NO_INIT;
+}
+
+float Engine::volIndexToDb(Volume::device_category category, audio_stream_type_t streamType,
+                             int indexInUi)
+{
+    const StreamDescriptor &streamDesc = mApmObserver->getStreamDescriptors().valueAt(streamType);
+    return Gains::volIndexToDb(category, streamDesc, indexInUi);
+}
+
+
+status_t Engine::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
+{
+    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    if (indexMin < 0 || indexMin >= indexMax) {
+        ALOGW("initStreamVolume() invalid index limits for stream %d, min %d, max %d",
+              stream , indexMin, indexMax);
+        return BAD_VALUE;
+    }
+    mApmObserver->getStreamDescriptors().setVolumeIndexMin(stream, indexMin);
+    mApmObserver->getStreamDescriptors().setVolumeIndexMax(stream, indexMax);
+    return NO_ERROR;
+}
+
+void Engine::initializeVolumeCurves(bool isSpeakerDrcEnabled)
+{
+    StreamDescriptorCollection &streams = mApmObserver->getStreamDescriptors();
+
+    for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
+            streams.setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
+                                         static_cast<Volume::device_category>(j),
+                                         Gains::sVolumeProfiles[i][j]);
+        }
+    }
+
+    // Check availability of DRC on speaker path: if available, override some of the speaker curves
+    if (isSpeakerDrcEnabled) {
+        streams.setVolumeCurvePoint(AUDIO_STREAM_SYSTEM, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sDefaultSystemVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_RING, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerSonificationVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_ALARM, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerSonificationVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_NOTIFICATION, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerSonificationVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_MUSIC, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerMediaVolumeCurveDrc);
+        streams.setVolumeCurvePoint(AUDIO_STREAM_ACCESSIBILITY, Volume::DEVICE_CATEGORY_SPEAKER,
+                Gains::sSpeakerMediaVolumeCurveDrc);
+    }
+}
+
+
+status_t Engine::setPhoneState(audio_mode_t state)
+{
+    ALOGV("setPhoneState() state %d", state);
+
+    if (state < 0 || state >= AUDIO_MODE_CNT) {
+        ALOGW("setPhoneState() invalid state %d", state);
+        return BAD_VALUE;
+    }
+
+    if (state == mPhoneState ) {
+        ALOGW("setPhoneState() setting same state %d", state);
+        return BAD_VALUE;
+    }
+
+    // store previous phone state for management of sonification strategy below
+    int oldState = mPhoneState;
+    mPhoneState = state;
+    StreamDescriptorCollection &streams = mApmObserver->getStreamDescriptors();
+    // are we entering or starting a call
+    if (!is_state_in_call(oldState) && is_state_in_call(state)) {
+        ALOGV("  Entering call in setPhoneState()");
+        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
+            streams.setVolumeCurvePoint(AUDIO_STREAM_DTMF, static_cast<Volume::device_category>(j),
+                                         Gains::sVolumeProfiles[AUDIO_STREAM_VOICE_CALL][j]);
+        }
+    } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
+        ALOGV("  Exiting call in setPhoneState()");
+        for (int j = 0; j < Volume::DEVICE_CATEGORY_CNT; j++) {
+            streams.setVolumeCurvePoint(AUDIO_STREAM_DTMF, static_cast<Volume::device_category>(j),
+                                         Gains::sVolumeProfiles[AUDIO_STREAM_DTMF][j]);
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t Engine::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
+{
+    switch(usage) {
+    case AUDIO_POLICY_FORCE_FOR_COMMUNICATION:
+        if (config != AUDIO_POLICY_FORCE_SPEAKER && config != AUDIO_POLICY_FORCE_BT_SCO &&
+            config != AUDIO_POLICY_FORCE_NONE) {
+            ALOGW("setForceUse() invalid config %d for FOR_COMMUNICATION", config);
+            return BAD_VALUE;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_MEDIA:
+        if (config != AUDIO_POLICY_FORCE_HEADPHONES && config != AUDIO_POLICY_FORCE_BT_A2DP &&
+            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
+            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK && config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_NO_BT_A2DP && config != AUDIO_POLICY_FORCE_SPEAKER ) {
+            ALOGW("setForceUse() invalid config %d for FOR_MEDIA", config);
+            return BAD_VALUE;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_RECORD:
+        if (config != AUDIO_POLICY_FORCE_BT_SCO && config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_NONE) {
+            ALOGW("setForceUse() invalid config %d for FOR_RECORD", config);
+            return BAD_VALUE;
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_DOCK:
+        if (config != AUDIO_POLICY_FORCE_NONE && config != AUDIO_POLICY_FORCE_BT_CAR_DOCK &&
+            config != AUDIO_POLICY_FORCE_BT_DESK_DOCK &&
+            config != AUDIO_POLICY_FORCE_WIRED_ACCESSORY &&
+            config != AUDIO_POLICY_FORCE_ANALOG_DOCK &&
+            config != AUDIO_POLICY_FORCE_DIGITAL_DOCK) {
+            ALOGW("setForceUse() invalid config %d for FOR_DOCK", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_SYSTEM:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+            ALOGW("setForceUse() invalid config %d for FOR_SYSTEM", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
+        if (config != AUDIO_POLICY_FORCE_NONE &&
+            config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
+            ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
+        }
+        mForceUse[usage] = config;
+        break;
+    default:
+        ALOGW("setForceUse() invalid usage %d", usage);
+        break;
+    }
+    return NO_ERROR;
+}
+
+routing_strategy Engine::getStrategyForStream(audio_stream_type_t stream)
+{
+    // stream to strategy mapping
+    switch (stream) {
+    case AUDIO_STREAM_VOICE_CALL:
+    case AUDIO_STREAM_BLUETOOTH_SCO:
+        return STRATEGY_PHONE;
+    case AUDIO_STREAM_RING:
+    case AUDIO_STREAM_ALARM:
+        return STRATEGY_SONIFICATION;
+    case AUDIO_STREAM_NOTIFICATION:
+        return STRATEGY_SONIFICATION_RESPECTFUL;
+    case AUDIO_STREAM_DTMF:
+        return STRATEGY_DTMF;
+    default:
+        ALOGE("unknown stream type %d", stream);
+    case AUDIO_STREAM_SYSTEM:
+        // NOTE: SYSTEM stream uses MEDIA strategy because muting music and switching outputs
+        // while key clicks are played produces a poor result
+    case AUDIO_STREAM_MUSIC:
+        return STRATEGY_MEDIA;
+    case AUDIO_STREAM_ENFORCED_AUDIBLE:
+        return STRATEGY_ENFORCED_AUDIBLE;
+    case AUDIO_STREAM_TTS:
+        return STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
+    case AUDIO_STREAM_ACCESSIBILITY:
+        return STRATEGY_ACCESSIBILITY;
+    case AUDIO_STREAM_REROUTING:
+        return STRATEGY_REROUTING;
+    }
+}
+
+routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
+{
+    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+
+    // usage to strategy mapping
+    switch (usage) {
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+        if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
+                outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+            return STRATEGY_SONIFICATION;
+        }
+        if (isInCall()) {
+            return STRATEGY_PHONE;
+        }
+        return STRATEGY_ACCESSIBILITY;
+
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return STRATEGY_MEDIA;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return STRATEGY_PHONE;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return STRATEGY_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return STRATEGY_SONIFICATION;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return STRATEGY_SONIFICATION_RESPECTFUL;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return STRATEGY_MEDIA;
+    }
+}
+
+audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
+{
+    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+
+    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+
+    uint32_t device = AUDIO_DEVICE_NONE;
+    uint32_t availableOutputDevicesType = availableOutputDevices.types();
+
+    switch (strategy) {
+
+    case STRATEGY_TRANSMITTED_THROUGH_SPEAKER:
+        device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        if (!device) {
+            ALOGE("getDeviceForStrategy() no device found for "\
+                    "STRATEGY_TRANSMITTED_THROUGH_SPEAKER");
+        }
+        break;
+
+    case STRATEGY_SONIFICATION_RESPECTFUL:
+        if (isInCall()) {
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+        } else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
+                SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+            // while media is playing on a remote device, use the sonification behavior.
+            // Note that we test this use case before testing if media is playing because
+            //   the isStreamActive() method only informs about the activity of a stream, not
+            //   if it's for local playback. Note also that we use the same delay between both tests
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
+                    (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
+                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+            }
+        } else if (outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+            // while media is playing (or has recently played), use the same device
+            device = getDeviceForStrategy(STRATEGY_MEDIA);
+        } else {
+            // when media is not playing anymore, fall back on the sonification behavior
+            device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+            //user "safe" speaker if available instead of normal speaker to avoid triggering
+            //other acoustic safety mechanisms for notification
+            if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
+                    (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER_SAFE)) {
+                device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+                device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+            }
+        }
+        break;
+
+    case STRATEGY_DTMF:
+        if (!isInCall()) {
+            // when off call, DTMF strategy follows the same rules as MEDIA strategy
+            device = getDeviceForStrategy(STRATEGY_MEDIA);
+            break;
+        }
+        // when in call, DTMF and PHONE strategies follow the same rules
+        // FALL THROUGH
+
+    case STRATEGY_PHONE:
+        // Force use of only devices on primary output if:
+        // - in call AND
+        //   - cannot route from voice call RX OR
+        //   - audio HAL version is < 3.0 and TX device is on the primary HW module
+        if (getPhoneState() == AUDIO_MODE_IN_CALL) {
+            audio_devices_t txDevice = getDeviceForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
+            audio_devices_t availPrimaryInputDevices =
+                 availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle());
+            audio_devices_t availPrimaryOutputDevices =
+                    primaryOutput->supportedDevices() & availableOutputDevices.types();
+
+            if (((availableInputDevices.types() &
+                    AUDIO_DEVICE_IN_TELEPHONY_RX & ~AUDIO_DEVICE_BIT_IN) == 0) ||
+                    (((txDevice & availPrimaryInputDevices & ~AUDIO_DEVICE_BIT_IN) != 0) &&
+                         (primaryOutput->getAudioPort()->getModuleVersion() <
+                             AUDIO_DEVICE_API_VERSION_3_0))) {
+                availableOutputDevicesType = availPrimaryOutputDevices;
+            }
+        }
+        // for phone strategy, we first consider the forced use and then the available devices
+        // in order of priority
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            if (!isInCall() || strategy != STRATEGY_DTMF) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_SCO;
+            if (device) break;
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to A2DP
+            if (!isInCall() &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (outputs.getA2dpOutput() != 0)) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
+            if (device) break;
+            if (!isInCall()) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_EARPIECE;
+            if (device) break;
+            device = mApmObserver->getDefaultOutputDevice()->type();
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE");
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            // when not in a phone call, phone strategy should route STREAM_VOICE_CALL to
+            // A2DP speaker when forcing to speaker output
+            if (!isInCall() &&
+                    (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                    (outputs.getA2dpOutput() != 0)) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+                if (device) break;
+            }
+            if (!isInCall()) {
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+                if (device) break;
+                device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+                if (device) break;
+            }
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
+            if (device) break;
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            if (device) break;
+            device = mApmObserver->getDefaultOutputDevice()->type();
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() no device found for STRATEGY_PHONE, FORCE_SPEAKER");
+            }
+            break;
+        }
+    break;
+
+    case STRATEGY_SONIFICATION:
+
+        // If in call, just select the STRATEGY_PHONE device: the rest of the behavior is handled by
+        // handleIncallSonification().
+        if (isInCall()) {
+            device = getDeviceForStrategy(STRATEGY_PHONE);
+            break;
+        }
+        // FALL THROUGH
+
+    case STRATEGY_ENFORCED_AUDIBLE:
+        // strategy STRATEGY_ENFORCED_AUDIBLE uses same routing policy as STRATEGY_SONIFICATION
+        // except:
+        //   - when in call, where it does not default to STRATEGY_PHONE behavior
+        //   - in countries where it is not enforced, in which case it follows STRATEGY_MEDIA
+
+        if ((strategy == STRATEGY_SONIFICATION) ||
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_SYSTEM] == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
+            device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+            if (device == AUDIO_DEVICE_NONE) {
+                ALOGE("getDeviceForStrategy() speaker device not found for STRATEGY_SONIFICATION");
+            }
+        }
+        // The second device used for sonification is the same as the device used by media strategy
+        // FALL THROUGH
+
+    // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
+    case STRATEGY_ACCESSIBILITY:
+        if (strategy == STRATEGY_ACCESSIBILITY) {
+            // do not route accessibility prompts to a digital output currently configured with a
+            // compressed format, as they would likely not be mixed and would be dropped.
+            for (size_t i = 0; i < outputs.size(); i++) {
+                sp<AudioOutputDescriptor> desc = outputs.valueAt(i);
+                audio_devices_t devices = desc->device() &
+                    (AUDIO_DEVICE_OUT_HDMI | AUDIO_DEVICE_OUT_SPDIF | AUDIO_DEVICE_OUT_HDMI_ARC);
+                if (desc->isActive() && !audio_is_linear_pcm(desc->mFormat) &&
+                        devices != AUDIO_DEVICE_NONE) {
+                    availableOutputDevicesType = availableOutputDevices.types() & ~devices;
+                }
+            }
+        }
+        // FALL THROUGH
+
+    case STRATEGY_REROUTING:
+    case STRATEGY_MEDIA: {
+        uint32_t device2 = AUDIO_DEVICE_NONE;
+        if (strategy != STRATEGY_SONIFICATION) {
+            // no sonification on remote submix (e.g. WFD)
+            if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
+                device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+            }
+        }
+        if (isInCall() && (strategy == STRATEGY_MEDIA)) {
+            device = getDeviceForStrategy(STRATEGY_PHONE);
+            break;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] != AUDIO_POLICY_FORCE_NO_BT_A2DP) &&
+                (outputs.getA2dpOutput() != 0)) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP;
+            if (device2 == AUDIO_DEVICE_NONE) {
+                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES;
+            }
+            if (device2 == AUDIO_DEVICE_NONE) {
+                device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER;
+            }
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+            (mForceUse[AUDIO_POLICY_FORCE_FOR_MEDIA] == AUDIO_POLICY_FORCE_SPEAKER)) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADPHONE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_ACCESSORY;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) && (strategy != STRATEGY_SONIFICATION)) {
+            // no sonification on aux digital (e.g. HDMI)
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_DIGITAL;
+        }
+        if ((device2 == AUDIO_DEVICE_NONE) &&
+                (mForceUse[AUDIO_POLICY_FORCE_FOR_DOCK] == AUDIO_POLICY_FORCE_ANALOG_DOCK)) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
+        }
+        if (device2 == AUDIO_DEVICE_NONE) {
+            device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
+        }
+        int device3 = AUDIO_DEVICE_NONE;
+        if (strategy == STRATEGY_MEDIA) {
+            // ARC, SPDIF and AUX_LINE can co-exist with others.
+            device3 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HDMI_ARC;
+            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_SPDIF);
+            device3 |= (availableOutputDevicesType & AUDIO_DEVICE_OUT_AUX_LINE);
+        }
+
+        device2 |= device3;
+        // device is DEVICE_OUT_SPEAKER if we come from case STRATEGY_SONIFICATION or
+        // STRATEGY_ENFORCED_AUDIBLE, AUDIO_DEVICE_NONE otherwise
+        device |= device2;
+
+        // If hdmi system audio mode is on, remove speaker out of output list.
+        if ((strategy == STRATEGY_MEDIA) &&
+            (mForceUse[AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO] ==
+                AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED)) {
+            device &= ~AUDIO_DEVICE_OUT_SPEAKER;
+        }
+
+        if (device) break;
+        device = mApmObserver->getDefaultOutputDevice()->type();
+        if (device == AUDIO_DEVICE_NONE) {
+            ALOGE("getDeviceForStrategy() no device found for STRATEGY_MEDIA");
+        }
+        } break;
+
+    default:
+        ALOGW("getDeviceForStrategy() unknown strategy: %d", strategy);
+        break;
+    }
+
+    ALOGVV("getDeviceForStrategy() strategy %d, device %x", strategy, device);
+    return device;
+}
+
+
+audio_devices_t Engine::getDeviceForInputSource(audio_source_t inputSource) const
+{
+    const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+    const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+    const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+    audio_devices_t availableDeviceTypes = availableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+
+    uint32_t device = AUDIO_DEVICE_NONE;
+
+    switch (inputSource) {
+    case AUDIO_SOURCE_VOICE_UPLINK:
+      if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
+          device = AUDIO_DEVICE_IN_VOICE_CALL;
+          break;
+      }
+      break;
+
+    case AUDIO_SOURCE_DEFAULT:
+    case AUDIO_SOURCE_MIC:
+    if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_A2DP) {
+        device = AUDIO_DEVICE_IN_BLUETOOTH_A2DP;
+    } else if ((mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO) &&
+        (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET)) {
+        device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+        device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+        device = AUDIO_DEVICE_IN_USB_DEVICE;
+    } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+        device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+    }
+    break;
+
+    case AUDIO_SOURCE_VOICE_COMMUNICATION:
+        // Allow only use of devices on primary input if in call and HAL does not support routing
+        // to voice call path.
+        if ((getPhoneState() == AUDIO_MODE_IN_CALL) &&
+                (availableOutputDevices.types() & AUDIO_DEVICE_OUT_TELEPHONY_TX) == 0) {
+            sp<AudioOutputDescriptor> primaryOutput = outputs.getPrimaryOutput();
+            availableDeviceTypes =
+                    availableInputDevices.getDevicesFromHwModule(primaryOutput->getModuleHandle())
+                    & ~AUDIO_DEVICE_BIT_IN;
+        }
+
+        switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
+        case AUDIO_POLICY_FORCE_BT_SCO:
+            // if SCO device is requested but no SCO device is available, fall back to default case
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+                device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+                break;
+            }
+            // FALL THROUGH
+
+        default:    // FORCE_NONE
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+                device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+                device = AUDIO_DEVICE_IN_USB_DEVICE;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+
+        case AUDIO_POLICY_FORCE_SPEAKER:
+            if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+                device = AUDIO_DEVICE_IN_BACK_MIC;
+            } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+                device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+            }
+            break;
+        }
+        break;
+
+    case AUDIO_SOURCE_VOICE_RECOGNITION:
+    case AUDIO_SOURCE_HOTWORD:
+        if (mForceUse[AUDIO_POLICY_FORCE_FOR_RECORD] == AUDIO_POLICY_FORCE_BT_SCO &&
+                availableDeviceTypes & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET) {
+            device = AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_WIRED_HEADSET) {
+            device = AUDIO_DEVICE_IN_WIRED_HEADSET;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_USB_DEVICE) {
+            device = AUDIO_DEVICE_IN_USB_DEVICE;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        }
+        break;
+    case AUDIO_SOURCE_CAMCORDER:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_BACK_MIC) {
+            device = AUDIO_DEVICE_IN_BACK_MIC;
+        } else if (availableDeviceTypes & AUDIO_DEVICE_IN_BUILTIN_MIC) {
+            device = AUDIO_DEVICE_IN_BUILTIN_MIC;
+        }
+        break;
+    case AUDIO_SOURCE_VOICE_DOWNLINK:
+    case AUDIO_SOURCE_VOICE_CALL:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_VOICE_CALL) {
+            device = AUDIO_DEVICE_IN_VOICE_CALL;
+        }
+        break;
+    case AUDIO_SOURCE_REMOTE_SUBMIX:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_REMOTE_SUBMIX) {
+            device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+        }
+        break;
+     case AUDIO_SOURCE_FM_TUNER:
+        if (availableDeviceTypes & AUDIO_DEVICE_IN_FM_TUNER) {
+            device = AUDIO_DEVICE_IN_FM_TUNER;
+        }
+        break;
+    default:
+        ALOGW("getDeviceForInputSource() invalid input source %d", inputSource);
+        break;
+    }
+    ALOGV("getDeviceForInputSource()input source %d, device %08x", inputSource, device);
+    return device;
+}
+
+template <>
+AudioPolicyManagerInterface *Engine::queryInterface()
+{
+    return &mManagerInterface;
+}
+
+} // namespace audio_policy
+} // namespace android
+
+
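Most of getDeviceForStrategy() above is a prioritized cascade: each candidate AUDIO_DEVICE_OUT_* type is masked against the set of currently available device types and the first non-zero match wins, with the default output device as the final fallback. A standalone sketch of that pattern, using hypothetical bit values rather than the real flags:

// Illustration only (not part of the patch): the prioritized-cascade device selection
// used throughout getDeviceForStrategy().
#include <stdint.h>
#include <stdio.h>

static uint32_t pickFirstAvailable(uint32_t available, const uint32_t *candidates, size_t count)
{
    for (size_t i = 0; i < count; i++) {
        uint32_t match = available & candidates[i];
        if (match) {
            return match;   // highest-priority device type that is actually connected
        }
    }
    return 0;               // caller falls back to the default output device
}

int main()
{
    // Hypothetical bit values standing in for AUDIO_DEVICE_OUT_* flags.
    const uint32_t WIRED_HEADPHONE = 0x1, WIRED_HEADSET = 0x2, USB_DEVICE = 0x4, SPEAKER = 0x8;
    const uint32_t priority[] = { WIRED_HEADPHONE, WIRED_HEADSET, USB_DEVICE, SPEAKER };
    uint32_t available = USB_DEVICE | SPEAKER;
    printf("selected 0x%x\n", pickFirstAvailable(available, priority, 4));  // prints 0x4
    return 0;
}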
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
new file mode 100755
index 0000000..56a4748
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+
+#include "AudioPolicyManagerInterface.h"
+#include "Gains.h"
+#include <AudioGain.h>
+#include <policy.h>
+
+namespace android
+{
+
+class AudioPolicyManagerObserver;
+
+namespace audio_policy
+{
+
+class Engine
+{
+public:
+    Engine();
+    virtual ~Engine();
+
+    template <class RequestedInterface>
+    RequestedInterface *queryInterface();
+
+private:
+    /// Interface members
+    class ManagerInterfaceImpl : public AudioPolicyManagerInterface
+    {
+    public:
+        ManagerInterfaceImpl(Engine *policyEngine)
+            : mPolicyEngine(policyEngine) {}
+
+        virtual void setObserver(AudioPolicyManagerObserver *observer)
+        {
+            mPolicyEngine->setObserver(observer);
+        }
+        virtual status_t initCheck()
+        {
+            return mPolicyEngine->initCheck();
+        }
+        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const
+        {
+            return mPolicyEngine->getDeviceForInputSource(inputSource);
+        }
+        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy) const
+        {
+            return mPolicyEngine->getDeviceForStrategy(strategy);
+        }
+        virtual routing_strategy getStrategyForStream(audio_stream_type_t stream)
+        {
+            return mPolicyEngine->getStrategyForStream(stream);
+        }
+        virtual routing_strategy getStrategyForUsage(audio_usage_t usage)
+        {
+            return mPolicyEngine->getStrategyForUsage(usage);
+        }
+        virtual status_t setPhoneState(audio_mode_t mode)
+        {
+            return mPolicyEngine->setPhoneState(mode);
+        }
+        virtual audio_mode_t getPhoneState() const
+        {
+            return mPolicyEngine->getPhoneState();
+        }
+        virtual status_t setForceUse(audio_policy_force_use_t usage,
+                                     audio_policy_forced_cfg_t config)
+        {
+            return mPolicyEngine->setForceUse(usage, config);
+        }
+        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
+        {
+            return mPolicyEngine->getForceUse(usage);
+        }
+        virtual status_t setDeviceConnectionState(const sp<DeviceDescriptor> /*devDesc*/,
+                                                  audio_policy_dev_state_t /*state*/)
+        {
+            return NO_ERROR;
+        }
+        virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
+        {
+            return mPolicyEngine->initStreamVolume(stream, indexMin, indexMax);
+        }
+        virtual void initializeVolumeCurves(bool isSpeakerDrcEnabled)
+        {
+            return mPolicyEngine->initializeVolumeCurves(isSpeakerDrcEnabled);
+        }
+        virtual float volIndexToDb(Volume::device_category deviceCategory,
+                                     audio_stream_type_t stream,int indexInUi)
+        {
+            return mPolicyEngine->volIndexToDb(deviceCategory, stream, indexInUi);
+        }
+    private:
+        Engine *mPolicyEngine;
+    } mManagerInterface;
+
+private:
+    /* Copy facilities are made private to disable copying. */
+    Engine(const Engine &object);
+    Engine &operator=(const Engine &object);
+
+    void setObserver(AudioPolicyManagerObserver *observer);
+
+    status_t initCheck();
+
+    inline bool isInCall() const
+    {
+        return is_state_in_call(mPhoneState);
+    }
+
+    status_t setPhoneState(audio_mode_t mode);
+    audio_mode_t getPhoneState() const
+    {
+        return mPhoneState;
+    }
+    status_t setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config);
+    audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage) const
+    {
+        return mForceUse[usage];
+    }
+    status_t setDefaultDevice(audio_devices_t device);
+
+    routing_strategy getStrategyForStream(audio_stream_type_t stream);
+    routing_strategy getStrategyForUsage(audio_usage_t usage);
+    audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
+    audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
+
+    float volIndexToDb(Volume::device_category category,
+                         audio_stream_type_t stream, int indexInUi);
+    status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
+    void initializeVolumeCurves(bool isSpeakerDrcEnabled);
+
+    audio_mode_t mPhoneState;  /**< current phone state. */
+
+    /** current forced use configuration. */
+    audio_policy_forced_cfg_t mForceUse[AUDIO_POLICY_FORCE_USE_CNT];
+
+    AudioPolicyManagerObserver *mApmObserver;
+};
+} // namespace audio_policy
+} // namespace android
+
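Engine.h keeps every operation private and publishes it only through the nested ManagerInterfaceImpl member, which forwards each virtual call to the owning Engine. A minimal, self-contained sketch of that adapter idiom with made-up names (GreeterInterface and ConcreteEngine are illustrations, not the real classes):

// Sketch of the nested-adapter idiom: the concrete class keeps its logic private
// and exposes it only through a member that implements the abstract interface.
struct GreeterInterface {
    virtual ~GreeterInterface() {}
    virtual int greet() = 0;
};

class ConcreteEngine {
public:
    ConcreteEngine() : mInterface(this) {}
    GreeterInterface *interface() { return &mInterface; }
private:
    int greet() { return 42; }   // real logic, only reachable through the adapter

    class InterfaceImpl : public GreeterInterface {
    public:
        explicit InterfaceImpl(ConcreteEngine *engine) : mEngine(engine) {}
        virtual int greet() { return mEngine->greet(); }   // pure forwarding
    private:
        ConcreteEngine *mEngine;
    };

    InterfaceImpl mInterface;
};

int main()
{
    ConcreteEngine engine;
    return engine.interface()->greet() == 42 ? 0 : 1;
}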
diff --git a/services/audiopolicy/enginedefault/src/EngineInstance.cpp b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
new file mode 100755
index 0000000..17e9832
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/EngineInstance.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <AudioPolicyManagerInterface.h>
+#include "AudioPolicyEngineInstance.h"
+#include "Engine.h"
+
+namespace android
+{
+namespace audio_policy
+{
+
+EngineInstance::EngineInstance()
+{
+}
+
+EngineInstance *EngineInstance::getInstance()
+{
+    static EngineInstance instance;
+    return &instance;
+}
+
+EngineInstance::~EngineInstance()
+{
+}
+
+Engine *EngineInstance::getEngine() const
+{
+    static Engine engine;
+    return &engine;
+}
+
+template <>
+AudioPolicyManagerInterface *EngineInstance::queryInterface() const
+{
+    return getEngine()->queryInterface<AudioPolicyManagerInterface>();
+}
+
+} // namespace audio_policy
+} // namespace android
+
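Both getInstance() and getEngine() above return function-local statics, so the engine is constructed lazily on first use and lives for the remainder of the process. A tiny sketch of that pattern, assuming C++11 thread-safe static initialization (an assumption about the build settings, not something this patch states):

// Minimal illustration of the function-local-static pattern used by
// getInstance()/getEngine(): constructed once, on first call.
#include <cstdio>

struct Expensive {
    Expensive() { std::printf("constructed once\n"); }
};

Expensive *instance()
{
    static Expensive sInstance;   // initialized the first time control passes here
    return &sInstance;
}

int main()
{
    instance();   // prints "constructed once"
    instance();   // no further construction
    return 0;
}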
diff --git a/services/audiopolicy/enginedefault/src/Gains.cpp b/services/audiopolicy/enginedefault/src/Gains.cpp
new file mode 100644
index 0000000..78f2909
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Gains.cpp
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::Gains"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include "Gains.h"
+#include <Volume.h>
+#include <math.h>
+#include <utils/String8.h>
+
+namespace android {
+
+// Enginedefault
+const VolumeCurvePoint
+Gains::sDefaultVolumeCurve[Volume::VOLCNT] = {
+    {1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
+};
+
+
+const VolumeCurvePoint
+Gains::sDefaultMediaVolumeCurve[Volume::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sExtMediaSystemVolumeCurve[Volume::VOLCNT] = {
+    {1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerMediaVolumeCurve[Volume::VOLCNT] = {
+    {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT] = {
+    {1, -55.0f}, {20, -43.0f}, {86, -12.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerSonificationVolumeCurve[Volume::VOLCNT] = {
+    {1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT] = {
+    {1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
+};
+
+// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
+// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
+// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
+// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
+
+const VolumeCurvePoint
+Gains::sDefaultSystemVolumeCurve[Volume::VOLCNT] = {
+    {1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
+};
+
+const VolumeCurvePoint
+Gains::sDefaultSystemVolumeCurveDrc[Volume::VOLCNT] = {
+    {1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
+};
+
+const VolumeCurvePoint
+Gains::sHeadsetSystemVolumeCurve[Volume::VOLCNT] = {
+    {1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
+};
+
+const VolumeCurvePoint
+Gains::sDefaultVoiceVolumeCurve[Volume::VOLCNT] = {
+    {0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSpeakerVoiceVolumeCurve[Volume::VOLCNT] = {
+    {0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sLinearVolumeCurve[Volume::VOLCNT] = {
+    {0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint
+Gains::sSilentVolumeCurve[Volume::VOLCNT] = {
+    {0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
+};
+
+const VolumeCurvePoint
+Gains::sFullScaleVolumeCurve[Volume::VOLCNT] = {
+    {0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
+};
+
+const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
+                                                  [Volume::DEVICE_CATEGORY_CNT] = {
+    { // AUDIO_STREAM_VOICE_CALL
+        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_SYSTEM
+        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultSystemVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_RING
+        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_MUSIC
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_ALARM
+        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_NOTIFICATION
+        Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVolumeCurve,  // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_BLUETOOTH_SCO
+        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_ENFORCED_AUDIBLE
+        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    {  // AUDIO_STREAM_DTMF
+        Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sExtMediaSystemVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_TTS
+      // "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
+        Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sLinearVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sSilentVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_ACCESSIBILITY
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sDefaultMediaVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_REROUTING
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+    { // AUDIO_STREAM_PATCH
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
+        Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
+        Gains::sFullScaleVolumeCurve  // DEVICE_CATEGORY_EXT_MEDIA
+    },
+};
+
+//static
+float Gains::volIndexToDb(Volume::device_category deviceCategory,
+                          const StreamDescriptor& streamDesc,
+                          int indexInUi)
+{
+    const VolumeCurvePoint *curve = streamDesc.getVolumeCurvePoint(deviceCategory);
+
+    // the volume index in the UI is relative to the min and max volume indices for this stream type
+    int nbSteps = 1 + curve[Volume::VOLMAX].mIndex -
+            curve[Volume::VOLMIN].mIndex;
+    int volIdx = (nbSteps * (indexInUi - streamDesc.getVolumeIndexMin())) /
+            (streamDesc.getVolumeIndexMax() - streamDesc.getVolumeIndexMin());
+
+    // find what part of the curve this volume index belongs to, or if it's out of bounds
+    int segment = 0;
+    if (volIdx < curve[Volume::VOLMIN].mIndex) {         // out of bounds
+        return VOLUME_MIN_DB;
+    } else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
+        segment = 0;
+    } else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
+        segment = 1;
+    } else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
+        segment = 2;
+    } else {                                                               // out of bounds
+        return 0.0f;
+    }
+
+    // linear interpolation in the attenuation table in dB
+    float decibels = curve[segment].mDBAttenuation +
+            ((float)(volIdx - curve[segment].mIndex)) *
+                ( (curve[segment+1].mDBAttenuation -
+                        curve[segment].mDBAttenuation) /
+                    ((float)(curve[segment+1].mIndex -
+                            curve[segment].mIndex)) );
+
+    ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
+            curve[segment].mIndex, volIdx,
+            curve[segment+1].mIndex,
+            curve[segment].mDBAttenuation,
+            decibels,
+            curve[segment+1].mDBAttenuation);
+
+    return decibels;
+}
+
+
+//static
+float Gains::volIndexToAmpl(Volume::device_category deviceCategory,
+                            const StreamDescriptor& streamDesc,
+                            int indexInUi)
+{
+    return Volume::DbToAmpl(volIndexToDb(deviceCategory, streamDesc, indexInUi));
+}
+
+
+
+}; // namespace android
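To make the interpolation in Gains::volIndexToDb() concrete, here is a worked example using the sSpeakerMediaVolumeCurve points above with an assumed stream index range of 0..15 (the real limits are whatever initStreamVolume() installed for the stream):

// Worked example (illustration only) of the volIndexToDb() arithmetic.
#include <cstdio>
#include <cmath>

int main()
{
    // {UI-percentage index, dB attenuation} knee points, as in sSpeakerMediaVolumeCurve.
    const int   idx[4] = { 1, 20, 60, 100 };
    const float dB[4]  = { -56.0f, -34.0f, -11.0f, 0.0f };

    const int indexMin = 0, indexMax = 15;   // assumed UI range
    const int indexInUi = 8;

    const int nbSteps = 1 + idx[3] - idx[0];                                         // 100
    const int volIdx  = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);  // 53

    // volIdx = 53 falls between the second and third points -> segment 1.
    const int seg = 1;
    const float decibels = dB[seg] + (float)(volIdx - idx[seg]) *
            ((dB[seg + 1] - dB[seg]) / (float)(idx[seg + 1] - idx[seg]));            // about -15.0 dB

    std::printf("volIdx=%d -> %.3f dB -> linear gain %.4f\n",
                volIdx, decibels, std::pow(10.0f, decibels / 20.0f));                // ~0.177
    return 0;
}

A UI index of 8 therefore maps to volIdx 53, lands on the second segment, and interpolates to roughly -15 dB, i.e. a linear gain near 0.18 once converted by Volume::DbToAmpl().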
diff --git a/services/audiopolicy/enginedefault/src/Gains.h b/services/audiopolicy/enginedefault/src/Gains.h
new file mode 100644
index 0000000..7620b7d
--- /dev/null
+++ b/services/audiopolicy/enginedefault/src/Gains.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <StreamDescriptor.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class StreamDescriptor;
+
+class Gains
+{
+public :
+    static float volIndexToAmpl(Volume::device_category deviceCategory,
+                                const StreamDescriptor& streamDesc,
+                                int indexInUi);
+
+    static float volIndexToDb(Volume::device_category deviceCategory,
+                              const StreamDescriptor& streamDesc,
+                              int indexInUi);
+
+    // default volume curve
+    static const VolumeCurvePoint sDefaultVolumeCurve[Volume::VOLCNT];
+    // default volume curve for media strategy
+    static const VolumeCurvePoint sDefaultMediaVolumeCurve[Volume::VOLCNT];
+    // volume curve for non-media audio on ext media outputs (HDMI, Line, etc)
+    static const VolumeCurvePoint sExtMediaSystemVolumeCurve[Volume::VOLCNT];
+    // volume curve for media strategy on speakers
+    static const VolumeCurvePoint sSpeakerMediaVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT];
+    // volume curve for sonification strategy on speakers
+    static const VolumeCurvePoint sSpeakerSonificationVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT];
+    static const VolumeCurvePoint sDefaultSystemVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sDefaultSystemVolumeCurveDrc[Volume::VOLCNT];
+    static const VolumeCurvePoint sHeadsetSystemVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sDefaultVoiceVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSpeakerVoiceVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sLinearVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sSilentVolumeCurve[Volume::VOLCNT];
+    static const VolumeCurvePoint sFullScaleVolumeCurve[Volume::VOLCNT];
+    // default volume curves per stream and device category. See initializeVolumeCurves()
+    static const VolumeCurvePoint *sVolumeProfiles[AUDIO_STREAM_CNT][Volume::DEVICE_CATEGORY_CNT];
+};
+
+}; // namespace android
diff --git a/services/audiopolicy/AudioPolicyFactory.cpp b/services/audiopolicy/manager/AudioPolicyFactory.cpp
similarity index 94%
rename from services/audiopolicy/AudioPolicyFactory.cpp
rename to services/audiopolicy/manager/AudioPolicyFactory.cpp
index 2ae7bc1..9910a1f 100644
--- a/services/audiopolicy/AudioPolicyFactory.cpp
+++ b/services/audiopolicy/manager/AudioPolicyFactory.cpp
@@ -14,7 +14,7 @@
  * limitations under the License.
  */
 
-#include "AudioPolicyManager.h"
+#include "managerdefault/AudioPolicyManager.h"
 
 namespace android {
 
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
new file mode 100644
index 0000000..e7f6864
--- /dev/null
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -0,0 +1,4888 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyManager"
+//#define LOG_NDEBUG 0
+
+//#define VERY_VERBOSE_LOGGING
+#ifdef VERY_VERBOSE_LOGGING
+#define ALOGVV ALOGV
+#else
+#define ALOGVV(a...) do { } while(0)
+#endif
+
+#include <inttypes.h>
+#include <math.h>
+
+#include <AudioPolicyManagerInterface.h>
+#include <AudioPolicyEngineInstance.h>
+#include <cutils/properties.h>
+#include <utils/Log.h>
+#include <hardware/audio.h>
+#include <hardware/audio_effect.h>
+#include <media/AudioParameter.h>
+#include <media/AudioPolicyHelper.h>
+#include <soundtrigger/SoundTrigger.h>
+#include "AudioPolicyManager.h"
+#include "audio_policy_conf.h"
+#include <ConfigParsingUtils.h>
+#include <policy.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+// AudioPolicyInterface implementation
+// ----------------------------------------------------------------------------
+
+status_t AudioPolicyManager::setDeviceConnectionState(audio_devices_t device,
+                                                      audio_policy_dev_state_t state,
+                                                      const char *device_address,
+                                                      const char *device_name)
+{
+    return setDeviceConnectionStateInt(device, state, device_address, device_name);
+}
+
+status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t device,
+                                                         audio_policy_dev_state_t state,
+                                                         const char *device_address,
+                                                         const char *device_name)
+{
+    ALOGV("setDeviceConnectionStateInt() device: 0x%X, state %d, address %s name %s",
+            device, state, device_address, device_name);
+
+    // connect/disconnect only 1 device at a time
+    if (!audio_is_output_device(device) && !audio_is_input_device(device)) return BAD_VALUE;
+
+    sp<DeviceDescriptor> devDesc =
+            mHwModules.getDeviceDescriptor(device, device_address, device_name);
+
+    // handle output devices
+    if (audio_is_output_device(device)) {
+        SortedVector <audio_io_handle_t> outputs;
+
+        ssize_t index = mAvailableOutputDevices.indexOf(devDesc);
+
+        // save a copy of the opened output descriptors before any output is opened or closed
+        // by checkOutputsForDevice(). This will be needed by checkOutputForAllStrategies()
+        mPreviousOutputs = mOutputs;
+        switch (state)
+        {
+        // handle output device connection
+        case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
+            if (index >= 0) {
+                ALOGW("setDeviceConnectionState() device already connected: %x", device);
+                return INVALID_OPERATION;
+            }
+            ALOGV("setDeviceConnectionState() connecting device %x", device);
+
+            // register new device as available
+            index = mAvailableOutputDevices.add(devDesc);
+            if (index >= 0) {
+                sp<HwModule> module = mHwModules.getModuleForDevice(device);
+                if (module == 0) {
+                    ALOGD("setDeviceConnectionState() could not find HW module for device %08x",
+                          device);
+                    mAvailableOutputDevices.remove(devDesc);
+                    return INVALID_OPERATION;
+                }
+                mAvailableOutputDevices[index]->attach(module);
+            } else {
+                return NO_MEMORY;
+            }
+
+            if (checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress) != NO_ERROR) {
+                mAvailableOutputDevices.remove(devDesc);
+                return INVALID_OPERATION;
+            }
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
+
+            // outputs should never be empty here
+            ALOG_ASSERT(outputs.size() != 0, "setDeviceConnectionState():"
+                    "checkOutputsForDevice() returned no outputs but status OK");
+            ALOGV("setDeviceConnectionState() checkOutputsForDevice() returned %zu outputs",
+                  outputs.size());
+
+            // Send connect to HALs
+            AudioParameter param = AudioParameter(devDesc->mAddress);
+            param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
+            } break;
+        // handle output device disconnection
+        case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
+            if (index < 0) {
+                ALOGW("setDeviceConnectionState() device not connected: %x", device);
+                return INVALID_OPERATION;
+            }
+
+            ALOGV("setDeviceConnectionState() disconnecting output device %x", device);
+
+            // Send Disconnect to HALs
+            AudioParameter param = AudioParameter(devDesc->mAddress);
+            param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
+            // remove device from available output devices
+            mAvailableOutputDevices.remove(devDesc);
+
+            checkOutputsForDevice(devDesc, state, outputs, devDesc->mAddress);
+
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
+            } break;
+
+        default:
+            ALOGE("setDeviceConnectionState() invalid state: %x", state);
+            return BAD_VALUE;
+        }
+
+        // checkA2dpSuspend must run before checkOutputForAllStrategies so that A2DP
+        // output is suspended before any tracks are moved to it
+        checkA2dpSuspend();
+        checkOutputForAllStrategies();
+        // outputs must be closed after checkOutputForAllStrategies() is executed
+        if (!outputs.isEmpty()) {
+            for (size_t i = 0; i < outputs.size(); i++) {
+                sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+                // close unused outputs after device disconnection or direct outputs that have been
+                // opened by checkOutputsForDevice() to query dynamic parameters
+                if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
+                        (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+                         (desc->mDirectOpenCount == 0))) {
+                    closeOutput(outputs[i]);
+                }
+            }
+            // check again after closing A2DP output to reset mA2dpSuspended if needed
+            checkA2dpSuspend();
+        }
+
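+        // refresh the device selection cached for each strategy and re-apply routing on the
+        // open outputs below so that the new device availability takes effect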
+        updateDevicesAndOutputs();
+        if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
+            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevice);
+        }
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (desc != mPrimaryOutput)) {
+                audio_devices_t newDevice = getNewOutputDevice(desc, true /*fromCache*/);
+                // do not force device change on duplicated output because if device is 0, it will
+                // also force a device 0 for the two outputs it is duplicated to, which may override
+                // a valid device selection on those outputs.
+                bool force = !desc->isDuplicated()
+                        && (!device_distinguishes_on_address(device)
+                                // always force when disconnecting (a non-duplicated device)
+                                || (state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE));
+                setOutputDevice(desc, newDevice, force, 0);
+            }
+        }
+
+        mpClientInterface->onAudioPortListUpdate();
+        return NO_ERROR;
+    }  // end if is output device
+
+    // handle input devices
+    if (audio_is_input_device(device)) {
+        SortedVector <audio_io_handle_t> inputs;
+
+        ssize_t index = mAvailableInputDevices.indexOf(devDesc);
+        switch (state)
+        {
+        // handle input device connection
+        case AUDIO_POLICY_DEVICE_STATE_AVAILABLE: {
+            if (index >= 0) {
+                ALOGW("setDeviceConnectionState() device already connected: %d", device);
+                return INVALID_OPERATION;
+            }
+            sp<HwModule> module = mHwModules.getModuleForDevice(device);
+            if (module == NULL) {
+                ALOGW("setDeviceConnectionState(): could not find HW module for device %08x",
+                      device);
+                return INVALID_OPERATION;
+            }
+            if (checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress) != NO_ERROR) {
+                return INVALID_OPERATION;
+            }
+
+            index = mAvailableInputDevices.add(devDesc);
+            if (index >= 0) {
+                mAvailableInputDevices[index]->attach(module);
+            } else {
+                return NO_MEMORY;
+            }
+
+            // Set connect to HALs
+            AudioParameter param = AudioParameter(devDesc->mAddress);
+            param.addInt(String8(AUDIO_PARAMETER_DEVICE_CONNECT), device);
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
+        } break;
+
+        // handle input device disconnection
+        case AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE: {
+            if (index < 0) {
+                ALOGW("setDeviceConnectionState() device not connected: %d", device);
+                return INVALID_OPERATION;
+            }
+
+            ALOGV("setDeviceConnectionState() disconnecting input device %x", device);
+
+            // Set Disconnect to HALs
+            AudioParameter param = AudioParameter(devDesc->mAddress);
+            param.addInt(String8(AUDIO_PARAMETER_DEVICE_DISCONNECT), device);
+            mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+
+            checkInputsForDevice(devDesc, state, inputs, devDesc->mAddress);
+            mAvailableInputDevices.remove(devDesc);
+
+            // Propagate device availability to Engine
+            mEngine->setDeviceConnectionState(devDesc, state);
+        } break;
+
+        default:
+            ALOGE("setDeviceConnectionState() invalid state: %x", state);
+            return BAD_VALUE;
+        }
+
+        closeAllInputs();
+
+        if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
+            audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+            updateCallRouting(newDevice);
+        }
+
+        mpClientInterface->onAudioPortListUpdate();
+        return NO_ERROR;
+    } // end if is input device
+
+    ALOGW("setDeviceConnectionState() invalid device: %x", device);
+    return BAD_VALUE;
+}
+
+audio_policy_dev_state_t AudioPolicyManager::getDeviceConnectionState(audio_devices_t device,
+                                                                      const char *device_address)
+{
+    sp<DeviceDescriptor> devDesc = mHwModules.getDeviceDescriptor(device, device_address, "");
+
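+    // look the descriptor up in the available output or input device list, as appropriate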
+    DeviceVector *deviceVector;
+
+    if (audio_is_output_device(device)) {
+        deviceVector = &mAvailableOutputDevices;
+    } else if (audio_is_input_device(device)) {
+        deviceVector = &mAvailableInputDevices;
+    } else {
+        ALOGW("getDeviceConnectionState() invalid device type %08x", device);
+        return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
+    }
+    return deviceVector->getDeviceConnectionState(devDesc);
+}
+
+void AudioPolicyManager::updateCallRouting(audio_devices_t rxDevice, int delayMs)
+{
+    bool createTxPatch = false;
+    struct audio_patch patch;
+    patch.num_sources = 1;
+    patch.num_sinks = 1;
+    status_t status;
+    audio_patch_handle_t afPatchHandle;
+    DeviceVector deviceList;
+
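+    // voice call routing is anchored on the primary output; bail out if it is not available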
+    if (!hasPrimaryOutput()) {
+        return;
+    }
+    audio_devices_t txDevice = getDeviceAndMixForInputSource(AUDIO_SOURCE_VOICE_COMMUNICATION);
+    ALOGV("updateCallRouting device rxDevice %08x txDevice %08x", rxDevice, txDevice);
+
+    // release existing RX patch if any
+    if (mCallRxPatch != 0) {
+        mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+        mCallRxPatch.clear();
+    }
+    // release TX patch if any
+    if (mCallTxPatch != 0) {
+        mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+        mCallTxPatch.clear();
+    }
+
+    // If the RX device is on the primary HW module, then use legacy routing method for voice calls
+    // via setOutputDevice() on primary output.
+    // Otherwise, create two audio patches for TX and RX path.
+    if (availablePrimaryOutputDevices() & rxDevice) {
+        setOutputDevice(mPrimaryOutput, rxDevice, true, delayMs);
+        // If the TX device is also on the primary HW module, setOutputDevice() will take care
+        // of it due to legacy implementation. If not, create a patch.
+        if ((availablePrimaryInputDevices() & txDevice & ~AUDIO_DEVICE_BIT_IN)
+                == AUDIO_DEVICE_NONE) {
+            createTxPatch = true;
+        }
+    } else {
+        // create RX path audio patch
+        deviceList = mAvailableOutputDevices.getDevicesFromType(rxDevice);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() selected device not in output device list");
+        sp<DeviceDescriptor> rxSinkDeviceDesc = deviceList.itemAt(0);
+        deviceList = mAvailableInputDevices.getDevicesFromType(AUDIO_DEVICE_IN_TELEPHONY_RX);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() no telephony RX device");
+        sp<DeviceDescriptor> rxSourceDeviceDesc = deviceList.itemAt(0);
+
+        rxSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
+        rxSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+
+        // request to reuse existing output stream if one is already opened to reach the RX device
+        SortedVector<audio_io_handle_t> outputs =
+                                getOutputsForDevice(rxDevice, mOutputs);
+        audio_io_handle_t output = selectOutput(outputs,
+                                                AUDIO_OUTPUT_FLAG_NONE,
+                                                AUDIO_FORMAT_INVALID);
+        if (output != AUDIO_IO_HANDLE_NONE) {
+            sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "updateCallRouting() RX device output is duplicated");
+            outputDesc->toAudioPortConfig(&patch.sources[1]);
+            patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
+            patch.num_sources = 2;
+        }
+
+        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+        ALOGW_IF(status != NO_ERROR, "updateCallRouting() error %d creating RX audio patch",
+                                               status);
+        if (status == NO_ERROR) {
+            mCallRxPatch = new AudioPatch(&patch, mUidCached);
+            mCallRxPatch->mAfPatchHandle = afPatchHandle;
+            mCallRxPatch->mUid = mUidCached;
+        }
+        createTxPatch = true;
+    }
+    if (createTxPatch) {
+
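+        // build an explicit patch from the selected TX (uplink) source device to the telephony
+        // TX sink, optionally reusing an already opened output as a second source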
+        struct audio_patch patch;
+        patch.num_sources = 1;
+        patch.num_sinks = 1;
+        deviceList = mAvailableInputDevices.getDevicesFromType(txDevice);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() selected device not in input device list");
+        sp<DeviceDescriptor> txSourceDeviceDesc = deviceList.itemAt(0);
+        txSourceDeviceDesc->toAudioPortConfig(&patch.sources[0]);
+        deviceList = mAvailableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_TELEPHONY_TX);
+        ALOG_ASSERT(!deviceList.isEmpty(),
+                    "updateCallRouting() no telephony TX device");
+        sp<DeviceDescriptor> txSinkDeviceDesc = deviceList.itemAt(0);
+        txSinkDeviceDesc->toAudioPortConfig(&patch.sinks[0]);
+
+        SortedVector<audio_io_handle_t> outputs =
+                                getOutputsForDevice(AUDIO_DEVICE_OUT_TELEPHONY_TX, mOutputs);
+        audio_io_handle_t output = selectOutput(outputs,
+                                                AUDIO_OUTPUT_FLAG_NONE,
+                                                AUDIO_FORMAT_INVALID);
+        // request to reuse existing output stream if one is already opened to reach the TX
+        // path output device
+        if (output != AUDIO_IO_HANDLE_NONE) {
+            sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "updateCallRouting() RX device output is duplicated");
+            outputDesc->toAudioPortConfig(&patch.sources[1]);
+            patch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
+            patch.num_sources = 2;
+        }
+
+        afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+        status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+        ALOGW_IF(status != NO_ERROR, "setPhoneState() error %d creating TX audio patch",
+                                               status);
+        if (status == NO_ERROR) {
+            mCallTxPatch = new AudioPatch(&patch, mUidCached);
+            mCallTxPatch->mAfPatchHandle = afPatchHandle;
+            mCallTxPatch->mUid = mUidCached;
+        }
+    }
+}
+
+void AudioPolicyManager::setPhoneState(audio_mode_t state)
+{
+    ALOGV("setPhoneState() state %d", state);
+    // store previous phone state for management of sonification strategy below
+    int oldState = mEngine->getPhoneState();
+
+    if (mEngine->setPhoneState(state) != NO_ERROR) {
+        ALOGW("setPhoneState() invalid or same state %d", state);
+        return;
+    }
+    /// Open question: can these lines be executed after the switch of volume curves?
+    // if leaving call state, handle special case of active streams
+    // pertaining to sonification strategy see handleIncallSonification()
+    if (isStateInCall(oldState)) {
+        ALOGV("setPhoneState() in call state management: new state is %d", state);
+        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+            if (stream == AUDIO_STREAM_PATCH) {
+                continue;
+            }
+            handleIncallSonification((audio_stream_type_t)stream, false, true);
+        }
+
+        // force reevaluating accessibility routing when call stops
+        mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
+    }
+
+    /**
+     * Switching to or from incall state or switching between telephony and VoIP lead to force
+     * routing command.
+     */
+    bool force = ((is_state_in_call(oldState) != is_state_in_call(state))
+                  || (is_state_in_call(state) && (state != oldState)));
+
+    // check for device and output changes triggered by new phone state
+    checkA2dpSuspend();
+    checkOutputForAllStrategies();
+    updateDevicesAndOutputs();
+
+    int delayMs = 0;
+    if (isStateInCall(state)) {
+        nsecs_t sysTime = systemTime();
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            // mute media and sonification strategies and delay device switch by the largest
+            // latency of any output where either strategy is active.
+            // This avoids sending the ring tone or music tail into the earpiece or headset.
+            if ((isStrategyActive(desc, STRATEGY_MEDIA,
+                                  SONIFICATION_HEADSET_MUSIC_DELAY,
+                                  sysTime) ||
+                 isStrategyActive(desc, STRATEGY_SONIFICATION,
+                                  SONIFICATION_HEADSET_MUSIC_DELAY,
+                                  sysTime)) &&
+                    (delayMs < (int)desc->latency()*2)) {
+                delayMs = desc->latency()*2;
+            }
+            setStrategyMute(STRATEGY_MEDIA, true, desc);
+            setStrategyMute(STRATEGY_MEDIA, false, desc, MUTE_TIME_MS,
+                getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/));
+            setStrategyMute(STRATEGY_SONIFICATION, true, desc);
+            setStrategyMute(STRATEGY_SONIFICATION, false, desc, MUTE_TIME_MS,
+                getDeviceForStrategy(STRATEGY_SONIFICATION, true /*fromCache*/));
+        }
+    }
+
+    if (hasPrimaryOutput()) {
+        // Note that despite the fact that getNewOutputDevice() is called on the primary output,
+        // the device returned is not necessarily reachable via this output
+        audio_devices_t rxDevice = getNewOutputDevice(mPrimaryOutput, false /*fromCache*/);
+        // force routing command to audio hardware when ending call
+        // even if no device change is needed
+        if (isStateInCall(oldState) && rxDevice == AUDIO_DEVICE_NONE) {
+            rxDevice = mPrimaryOutput->device();
+        }
+
+        if (state == AUDIO_MODE_IN_CALL) {
+            updateCallRouting(rxDevice, delayMs);
+        } else if (oldState == AUDIO_MODE_IN_CALL) {
+            if (mCallRxPatch != 0) {
+                mpClientInterface->releaseAudioPatch(mCallRxPatch->mAfPatchHandle, 0);
+                mCallRxPatch.clear();
+            }
+            if (mCallTxPatch != 0) {
+                mpClientInterface->releaseAudioPatch(mCallTxPatch->mAfPatchHandle, 0);
+                mCallTxPatch.clear();
+            }
+            setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+        } else {
+            setOutputDevice(mPrimaryOutput, rxDevice, force, 0);
+        }
+    }
+    // if entering in call state, handle special case of active streams
+    // pertaining to sonification strategy see handleIncallSonification()
+    if (isStateInCall(state)) {
+        ALOGV("setPhoneState() in call state management: new state is %d", state);
+        for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+            if (stream == AUDIO_STREAM_PATCH) {
+                continue;
+            }
+            handleIncallSonification((audio_stream_type_t)stream, true, true);
+        }
+
+        // force reevaluating accessibility routing when call starts
+        mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
+    }
+
+    // Flag that ringtone volume must be limited to music volume until we exit MODE_RINGTONE
+    if (state == AUDIO_MODE_RINGTONE &&
+        isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY)) {
+        mLimitRingtoneVolume = true;
+    } else {
+        mLimitRingtoneVolume = false;
+    }
+}
+
+audio_mode_t AudioPolicyManager::getPhoneState() {
+    return mEngine->getPhoneState();
+}
+
+void AudioPolicyManager::setForceUse(audio_policy_force_use_t usage,
+                                         audio_policy_forced_cfg_t config)
+{
+    ALOGV("setForceUse() usage %d, config %d, mPhoneState %d", usage, config, mEngine->getPhoneState());
+
+    if (mEngine->setForceUse(usage, config) != NO_ERROR) {
+        ALOGW("setForceUse() could not set force cfg %d for usage %d", config, usage);
+        return;
+    }
+    bool forceVolumeReeval = (usage == AUDIO_POLICY_FORCE_FOR_COMMUNICATION) ||
+            (usage == AUDIO_POLICY_FORCE_FOR_DOCK) ||
+            (usage == AUDIO_POLICY_FORCE_FOR_SYSTEM);
+
+    // check for device and output changes triggered by new force usage
+    checkA2dpSuspend();
+    checkOutputForAllStrategies();
+    updateDevicesAndOutputs();
+    if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
+        audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
+        updateCallRouting(newDevice);
+    }
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(i);
+        audio_devices_t newDevice = getNewOutputDevice(outputDesc, true /*fromCache*/);
+        if ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) || (outputDesc != mPrimaryOutput)) {
+            setOutputDevice(outputDesc, newDevice, (newDevice != AUDIO_DEVICE_NONE));
+        }
+        if (forceVolumeReeval && (newDevice != AUDIO_DEVICE_NONE)) {
+            applyStreamVolumes(outputDesc, newDevice, 0, true);
+        }
+    }
+
+    audio_io_handle_t activeInput = mInputs.getActiveInput();
+    if (activeInput != 0) {
+        setInputDevice(activeInput, getNewInputDevice(activeInput));
+    }
+
+}
+
+void AudioPolicyManager::setSystemProperty(const char* property, const char* value)
+{
+    ALOGV("setSystemProperty() property %s, value %s", property, value);
+}
+
+// Find a direct output profile compatible with the parameters passed, even if the input flags do
+// not explicitly request a direct output
+sp<IOProfile> AudioPolicyManager::getProfileForDirectOutput(
+                                                               audio_devices_t device,
+                                                               uint32_t samplingRate,
+                                                               audio_format_t format,
+                                                               audio_channel_mask_t channelMask,
+                                                               audio_output_flags_t flags)
+{
+    // only retain flags that will drive the direct output profile selection
+    // if explicitly requested
+    static const uint32_t kRelevantFlags =
+            (AUDIO_OUTPUT_FLAG_HW_AV_SYNC | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+    flags =
+        (audio_output_flags_t)((flags & kRelevantFlags) | AUDIO_OUTPUT_FLAG_DIRECT);
+
+    sp<IOProfile> profile;
+
+    for (size_t i = 0; i < mHwModules.size(); i++) {
+        if (mHwModules[i]->mHandle == 0) {
+            continue;
+        }
+        for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++) {
+            sp<IOProfile> curProfile = mHwModules[i]->mOutputProfiles[j];
+            if (!curProfile->isCompatibleProfile(device, String8(""),
+                    samplingRate, NULL /*updatedSamplingRate*/,
+                    format, NULL /*updatedFormat*/,
+                    channelMask, NULL /*updatedChannelMask*/,
+                    flags)) {
+                continue;
+            }
+            // reject profiles not corresponding to a device currently available
+            if ((mAvailableOutputDevices.types() & curProfile->mSupportedDevices.types()) == 0) {
+                continue;
+            }
+            // if several profiles are compatible, give priority to one with offload capability
+            if (profile != 0 && ((curProfile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
+                continue;
+            }
+            profile = curProfile;
+            if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+                break;
+            }
+        }
+    }
+    return profile;
+}
+
+audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream,
+                                                uint32_t samplingRate,
+                                                audio_format_t format,
+                                                audio_channel_mask_t channelMask,
+                                                audio_output_flags_t flags,
+                                                const audio_offload_info_t *offloadInfo)
+{
+    routing_strategy strategy = getStrategy(stream);
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    ALOGV("getOutput() device %d, stream %d, samplingRate %d, format %x, channelMask %x, flags %x",
+          device, stream, samplingRate, format, channelMask, flags);
+
+    return getOutputForDevice(device, AUDIO_SESSION_ALLOCATE,
+                              stream, samplingRate, format, channelMask,
+                              flags, offloadInfo);
+}
+
+status_t AudioPolicyManager::getOutputForAttr(const audio_attributes_t *attr,
+                                              audio_io_handle_t *output,
+                                              audio_session_t session,
+                                              audio_stream_type_t *stream,
+                                              uid_t uid,
+                                              uint32_t samplingRate,
+                                              audio_format_t format,
+                                              audio_channel_mask_t channelMask,
+                                              audio_output_flags_t flags,
+                                              audio_port_handle_t selectedDeviceId,
+                                              const audio_offload_info_t *offloadInfo)
+{
+    audio_attributes_t attributes;
+    if (attr != NULL) {
+        if (!isValidAttributes(attr)) {
+            ALOGE("getOutputForAttr() invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
+                  attr->usage, attr->content_type, attr->flags,
+                  attr->tags);
+            return BAD_VALUE;
+        }
+        attributes = *attr;
+    } else {
+        if (*stream < AUDIO_STREAM_MIN || *stream >= AUDIO_STREAM_PUBLIC_CNT) {
+            ALOGE("getOutputForAttr():  invalid stream type");
+            return BAD_VALUE;
+        }
+        stream_type_to_audio_attributes(*stream, &attributes);
+    }
+    sp<SwAudioOutputDescriptor> desc;
+    if (mPolicyMixes.getOutputForAttr(attributes, desc) == NO_ERROR) {
+        ALOG_ASSERT(desc != 0, "Invalid desc returned by getOutputForAttr");
+        if (!audio_is_linear_pcm(format)) {
+            return BAD_VALUE;
+        }
+        *stream = streamTypefromAttributesInt(&attributes);
+        *output = desc->mIoHandle;
+        ALOGV("getOutputForAttr() returns output %d", *output);
+        return NO_ERROR;
+    }
+    if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE) {
+        ALOGW("getOutputForAttr() no policy mix found for usage AUDIO_USAGE_VIRTUAL_SOURCE");
+        return BAD_VALUE;
+    }
+
+    ALOGV("getOutputForAttr() usage=%d, content=%d, tag=%s flags=%08x"
+            " session %d selectedDeviceId %d",
+            attributes.usage, attributes.content_type, attributes.tags, attributes.flags,
+            session, selectedDeviceId);
+
+    *stream = streamTypefromAttributesInt(&attributes);
+
+    // Explicit routing?
+    sp<DeviceDescriptor> deviceDesc;
+    for (size_t i = 0; i < mAvailableOutputDevices.size(); i++) {
+        if (mAvailableOutputDevices[i]->getId() == selectedDeviceId) {
+            deviceDesc = mAvailableOutputDevices[i];
+            break;
+        }
+    }
+    mOutputRoutes.addRoute(session, *stream, SessionRoute::SOURCE_TYPE_NA, deviceDesc, uid);
+
+    routing_strategy strategy = (routing_strategy) getStrategyForAttr(&attributes);
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+
+    if ((attributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
+    }
+
+    ALOGV("getOutputForAttr() device 0x%x, samplingRate %d, format %x, channelMask %x, flags %x",
+          device, samplingRate, format, channelMask, flags);
+
+    *output = getOutputForDevice(device, session, *stream,
+                                 samplingRate, format, channelMask,
+                                 flags, offloadInfo);
+    if (*output == AUDIO_IO_HANDLE_NONE) {
+        mOutputRoutes.removeRoute(session);
+        return INVALID_OPERATION;
+    }
+
+    return NO_ERROR;
+}
+
+audio_io_handle_t AudioPolicyManager::getOutputForDevice(
+        audio_devices_t device,
+        audio_session_t session __unused,
+        audio_stream_type_t stream,
+        uint32_t samplingRate,
+        audio_format_t format,
+        audio_channel_mask_t channelMask,
+        audio_output_flags_t flags,
+        const audio_offload_info_t *offloadInfo)
+{
+    audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+    uint32_t latency = 0;
+    status_t status;
+
+#ifdef AUDIO_POLICY_TEST
+    if (mCurOutput != 0) {
+        ALOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channelMask %x, mDirectOutput %d",
+                mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
+
+        if (mTestOutputs[mCurOutput] == 0) {
+            ALOGV("getOutput() opening test output");
+            sp<AudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(NULL,
+                                                                               mpClientInterface);
+            outputDesc->mDevice = mTestDevice;
+            outputDesc->mLatency = mTestLatencyMs;
+            outputDesc->mFlags =
+                    (audio_output_flags_t)(mDirectOutput ? AUDIO_OUTPUT_FLAG_DIRECT : 0);
+            outputDesc->mRefCount[stream] = 0;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = mTestSamplingRate;
+            config.channel_mask = mTestChannels;
+            config.format = mTestFormat;
+            if (offloadInfo != NULL) {
+                config.offload_info = *offloadInfo;
+            }
+            status = mpClientInterface->openOutput(0,
+                                                  &mTestOutputs[mCurOutput],
+                                                  &config,
+                                                  &outputDesc->mDevice,
+                                                  String8(""),
+                                                  &outputDesc->mLatency,
+                                                  outputDesc->mFlags);
+            if (status == NO_ERROR) {
+                outputDesc->mSamplingRate = config.sample_rate;
+                outputDesc->mFormat = config.format;
+                outputDesc->mChannelMask = config.channel_mask;
+                AudioParameter outputCmd = AudioParameter();
+                outputCmd.addInt(String8("set_id"),mCurOutput);
+                mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
+                addOutput(mTestOutputs[mCurOutput], outputDesc);
+            }
+        }
+        return mTestOutputs[mCurOutput];
+    }
+#endif //AUDIO_POLICY_TEST
+
+    // open a direct output if required by the specified parameters
+    // force the direct flag if the offload flag is set: offloading implies a direct output
+    // stream and all common behaviors are driven by checking only the direct flag.
+    // This should normally be set appropriately in the policy configuration file.
+    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
+    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
+    }
+    // only allow deep buffering for music stream type
+    if (stream != AUDIO_STREAM_MUSIC) {
+        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
+    } else if (/* stream == AUDIO_STREAM_MUSIC && */
+            flags == AUDIO_OUTPUT_FLAG_NONE &&
+            property_get_bool("audio.deep_buffer.media", false /* default_value */)) {
+        // use DEEP_BUFFER as default output for music stream type
+        flags = (audio_output_flags_t)AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+    }
+    if (stream == AUDIO_STREAM_TTS) {
+        flags = AUDIO_OUTPUT_FLAG_TTS;
+    }
+
+    sp<IOProfile> profile;
+
+    // skip direct output selection if the request can obviously be attached to a mixed output
+    // and not explicitly requested
+    if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
+            audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE &&
+            audio_channel_count_from_out_mask(channelMask) <= 2) {
+        goto non_direct_output;
+    }
+
+    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
+    // creating an offloaded track and tearing it down immediately after start when audioflinger
+    // detects there is an active non offloadable effect.
+    // FIXME: We should check the audio session here but we do not have it in this context.
+    // This may prevent offloading in rare situations where effects are left active by apps
+    // in the background.
+
+    if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0) ||
+            !mEffects.isNonOffloadableEffectEnabled()) {
+        profile = getProfileForDirectOutput(device,
+                                           samplingRate,
+                                           format,
+                                           channelMask,
+                                           (audio_output_flags_t)flags);
+    }
+
+    if (profile != 0) {
+        sp<SwAudioOutputDescriptor> outputDesc = NULL;
+
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (profile == desc->mProfile)) {
+                outputDesc = desc;
+                // reuse direct output if currently open and configured with same parameters
+                if ((samplingRate == outputDesc->mSamplingRate) &&
+                        (format == outputDesc->mFormat) &&
+                        (channelMask == outputDesc->mChannelMask)) {
+                    outputDesc->mDirectOpenCount++;
+                    ALOGV("getOutput() reusing direct output %d", mOutputs.keyAt(i));
+                    return mOutputs.keyAt(i);
+                }
+            }
+        }
+        // close direct output if currently open and configured with different parameters
+        if (outputDesc != NULL) {
+            closeOutput(outputDesc->mIoHandle);
+        }
+
+        // if the selected profile is offloaded and no offload info was specified,
+        // create a default one
+        audio_offload_info_t defaultOffloadInfo = AUDIO_INFO_INITIALIZER;
+        if ((profile->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) && !offloadInfo) {
+            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+            defaultOffloadInfo.sample_rate = samplingRate;
+            defaultOffloadInfo.channel_mask = channelMask;
+            defaultOffloadInfo.format = format;
+            defaultOffloadInfo.stream_type = stream;
+            defaultOffloadInfo.bit_rate = 0;
+            defaultOffloadInfo.duration_us = -1;
+            defaultOffloadInfo.has_video = true; // conservative
+            defaultOffloadInfo.is_streaming = true; // likely
+            offloadInfo = &defaultOffloadInfo;
+        }
+
+        outputDesc = new SwAudioOutputDescriptor(profile, mpClientInterface);
+        outputDesc->mDevice = device;
+        outputDesc->mLatency = 0;
+        outputDesc->mFlags = (audio_output_flags_t)(outputDesc->mFlags | flags);
+        audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+        config.sample_rate = samplingRate;
+        config.channel_mask = channelMask;
+        config.format = format;
+        if (offloadInfo != NULL) {
+            config.offload_info = *offloadInfo;
+        }
+        status = mpClientInterface->openOutput(profile->getModuleHandle(),
+                                               &output,
+                                               &config,
+                                               &outputDesc->mDevice,
+                                               String8(""),
+                                               &outputDesc->mLatency,
+                                               outputDesc->mFlags);
+
+        // only accept an output with the requested parameters
+        if (status != NO_ERROR ||
+            (samplingRate != 0 && samplingRate != config.sample_rate) ||
+            (format != AUDIO_FORMAT_DEFAULT && format != config.format) ||
+            (channelMask != 0 && channelMask != config.channel_mask)) {
+            ALOGV("getOutput() failed opening direct output: output %d samplingRate %d %d,"
+                    "format %d %d, channelMask %04x %04x", output, samplingRate,
+                    outputDesc->mSamplingRate, format, outputDesc->mFormat, channelMask,
+                    outputDesc->mChannelMask);
+            if (output != AUDIO_IO_HANDLE_NONE) {
+                mpClientInterface->closeOutput(output);
+            }
+            // fall back to mixer output if possible when the direct output could not be open
+            if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
+                goto non_direct_output;
+            }
+            return AUDIO_IO_HANDLE_NONE;
+        }
+        outputDesc->mSamplingRate = config.sample_rate;
+        outputDesc->mChannelMask = config.channel_mask;
+        outputDesc->mFormat = config.format;
+        outputDesc->mRefCount[stream] = 0;
+        outputDesc->mStopTime[stream] = 0;
+        outputDesc->mDirectOpenCount = 1;
+
+        audio_io_handle_t srcOutput = getOutputForEffect();
+        addOutput(output, outputDesc);
+        audio_io_handle_t dstOutput = getOutputForEffect();
+        if (dstOutput == output) {
+            mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, srcOutput, dstOutput);
+        }
+        mPreviousOutputs = mOutputs;
+        ALOGV("getOutput() returns new direct output %d", output);
+        mpClientInterface->onAudioPortListUpdate();
+        return output;
+    }
+
+non_direct_output:
+    // ignoring channel mask due to downmix capability in mixer
+
+    // open a non direct output
+
+    // for non direct outputs, only PCM is supported
+    if (audio_is_linear_pcm(format)) {
+        // get which output is suitable for the specified stream. The actual
+        // routing change will happen when startOutput() is called
+        SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+
+        // at this stage we should ignore the DIRECT flag as no direct output could be found earlier
+        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
+        output = selectOutput(outputs, flags, format);
+    }
+    ALOGW_IF((output == 0), "getOutput() could not find output for stream %d, samplingRate %d,"
+            "format %d, channels %x, flags %x", stream, samplingRate, format, channelMask, flags);
+
+    ALOGV("  getOutputForDevice() returns output %d", output);
+
+    return output;
+}
+
+audio_io_handle_t AudioPolicyManager::selectOutput(const SortedVector<audio_io_handle_t>& outputs,
+                                                       audio_output_flags_t flags,
+                                                       audio_format_t format)
+{
+    // select one output among several that provide a path to a particular device or set of
+    // devices (the list was previously built by getOutputsForDevice()).
+    // The priority is as follows:
+    // 1: the output with the highest number of requested policy flags
+    // 2: the primary output
+    // 3: the first output in the list
+
+    if (outputs.size() == 0) {
+        return 0;
+    }
+    if (outputs.size() == 1) {
+        return outputs[0];
+    }
+
+    int maxCommonFlags = 0;
+    audio_io_handle_t outputFlags = 0;
+    audio_io_handle_t outputPrimary = 0;
+
+    for (size_t i = 0; i < outputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+        if (!outputDesc->isDuplicated()) {
+            // if a valid format is specified, skip output if not compatible
+            if (format != AUDIO_FORMAT_INVALID) {
+                if (outputDesc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+                    if (format != outputDesc->mFormat) {
+                        continue;
+                    }
+                } else if (!audio_is_linear_pcm(format)) {
+                    continue;
+                }
+            }
+
+            int commonFlags = popcount(outputDesc->mProfile->mFlags & flags);
+            if (commonFlags > maxCommonFlags) {
+                outputFlags = outputs[i];
+                maxCommonFlags = commonFlags;
+                ALOGV("selectOutput() commonFlags for output %d, %04x", outputs[i], commonFlags);
+            }
+            if (outputDesc->mProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY) {
+                outputPrimary = outputs[i];
+            }
+        }
+    }
+
+    if (outputFlags != 0) {
+        return outputFlags;
+    }
+    if (outputPrimary != 0) {
+        return outputPrimary;
+    }
+
+    return outputs[0];
+}
+
+status_t AudioPolicyManager::startOutput(audio_io_handle_t output,
+                                             audio_stream_type_t stream,
+                                             audio_session_t session)
+{
+    ALOGV("startOutput() output %d, stream %d, session %d",
+          output, stream, session);
+    ssize_t index = mOutputs.indexOfKey(output);
+    if (index < 0) {
+        ALOGW("startOutput() unknown output %d", output);
+        return BAD_VALUE;
+    }
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+
+    // Explicit routing: mark the session's requested route (if any) as active
+    mOutputRoutes.incRouteActivity(session);
+
+    audio_devices_t newDevice;
+    if (outputDesc->mPolicyMix != NULL) {
+        newDevice = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+    } else if (mOutputRoutes.hasRouteChanged(session)) {
+        newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
+        checkStrategyRoute(getStrategy(stream), output);
+    } else {
+        newDevice = AUDIO_DEVICE_NONE;
+    }
+
+    uint32_t delayMs = 0;
+
+    status_t status = startSource(outputDesc, stream, newDevice, &delayMs);
+
+    if (status != NO_ERROR) {
+        mOutputRoutes.decRouteActivity(session);
+        return status;
+    }
+    // Automatically enable the remote submix input when output is started on a re-routing mix
+    // of type MIX_TYPE_RECORDERS
+    if (audio_is_remote_submix_device(newDevice) && outputDesc->mPolicyMix != NULL &&
+            outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
+            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                    AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                    outputDesc->mPolicyMix->mRegistrationId,
+                    "remote-submix");
+    }
+
+    if (delayMs != 0) {
+        usleep(delayMs * 1000);
+    }
+
+    return status;
+}
+
+status_t AudioPolicyManager::startSource(sp<AudioOutputDescriptor> outputDesc,
+                                             audio_stream_type_t stream,
+                                             audio_devices_t device,
+                                             uint32_t *delayMs)
+{
+    // cannot start playback of STREAM_TTS if any other output is being used
+    uint32_t beaconMuteLatency = 0;
+
+    *delayMs = 0;
+    if (stream == AUDIO_STREAM_TTS) {
+        ALOGV("\t found BEACON stream");
+        if (mOutputs.isAnyOutputActive(AUDIO_STREAM_TTS /*streamToIgnore*/)) {
+            return INVALID_OPERATION;
+        } else {
+            beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
+        }
+    } else {
+        // some playback other than beacon starts
+        beaconMuteLatency = handleEventForBeacon(STARTING_OUTPUT);
+    }
+
+    // check active before incrementing usage count
+    bool force = !outputDesc->isActive();
+
+    // increment usage count for this stream on the requested output:
+    // NOTE that the usage count is the same for duplicated output and hardware output which is
+    // necessary for a correct control of hardware output routing by startOutput() and stopOutput()
+    outputDesc->changeRefCount(stream, 1);
+
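+    // apply routing and volume only when this stream becomes active on the output or when an
+    // explicit device change was requested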
+    if (outputDesc->mRefCount[stream] == 1 || device != AUDIO_DEVICE_NONE) {
+        // starting an output being rerouted?
+        if (device == AUDIO_DEVICE_NONE) {
+            device = getNewOutputDevice(outputDesc, false /*fromCache*/);
+        }
+        routing_strategy strategy = getStrategy(stream);
+        bool shouldWait = (strategy == STRATEGY_SONIFICATION) ||
+                            (strategy == STRATEGY_SONIFICATION_RESPECTFUL) ||
+                            (beaconMuteLatency > 0);
+        uint32_t waitMs = beaconMuteLatency;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            if (desc != outputDesc) {
+                // force a device change if any other output is managed by the same hw
+                // module and has a current device selection that differs from selected device.
+                // In this case, the audio HAL must receive the new device selection so that it can
+                // change the device currently selected by the other active output.
+                if (outputDesc->sharesHwModuleWith(desc) &&
+                    desc->device() != device) {
+                    force = true;
+                }
+                // wait for audio on other active outputs to be presented when starting
+                // a notification so that the audio focus effect can propagate, or so that a
+                // beacon mute/unmute event can take effect
+                uint32_t latency = desc->latency();
+                if (shouldWait && desc->isActive(latency * 2) && (waitMs < latency)) {
+                    waitMs = latency;
+                }
+            }
+        }
+        uint32_t muteWaitMs = setOutputDevice(outputDesc, device, force);
+
+        // handle special case for sonification while in call
+        if (isInCall()) {
+            handleIncallSonification(stream, true, false);
+        }
+
+        // apply volume rules for current stream and device if necessary
+        checkAndSetVolume(stream,
+                          mStreams.valueFor(stream).getVolumeIndex(device),
+                          outputDesc,
+                          device);
+
+        // update the outputs if starting an output with a stream that can affect notification
+        // routing
+        handleNotificationRoutingForStream(stream);
+
+        // force reevaluating accessibility routing when ringtone or alarm starts
+        if (strategy == STRATEGY_SONIFICATION) {
+            mpClientInterface->invalidateStream(AUDIO_STREAM_ACCESSIBILITY);
+        }
+    }
+    return NO_ERROR;
+}
+
+
+status_t AudioPolicyManager::stopOutput(audio_io_handle_t output,
+                                            audio_stream_type_t stream,
+                                            audio_session_t session)
+{
+    ALOGV("stopOutput() output %d, stream %d, session %d", output, stream, session);
+    ssize_t index = mOutputs.indexOfKey(output);
+    if (index < 0) {
+        ALOGW("stopOutput() unknown output %d", output);
+        return BAD_VALUE;
+    }
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+
+    if (outputDesc->mRefCount[stream] == 1) {
+        // Automatically disable the remote submix input when output is stopped on a
+                // re-routing mix of type MIX_TYPE_RECORDERS
+        if (audio_is_remote_submix_device(outputDesc->mDevice) &&
+                outputDesc->mPolicyMix != NULL &&
+                outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
+            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                    AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                    outputDesc->mPolicyMix->mRegistrationId,
+                    "remote-submix");
+        }
+    }
+
+    // Explicit routing: decrement route activity and force a device update when it reaches 0
+    bool forceDeviceUpdate = false;
+    if (outputDesc->mRefCount[stream] > 0) {
+        int activityCount = mOutputRoutes.decRouteActivity(session);
+        forceDeviceUpdate = (mOutputRoutes.hasRoute(session) && (activityCount == 0));
+
+        if (forceDeviceUpdate) {
+            checkStrategyRoute(getStrategy(stream), AUDIO_IO_HANDLE_NONE);
+        }
+    }
+
+    return stopSource(outputDesc, stream, forceDeviceUpdate);
+}
+
+status_t AudioPolicyManager::stopSource(sp<AudioOutputDescriptor> outputDesc,
+                                            audio_stream_type_t stream,
+                                            bool forceDeviceUpdate)
+{
+    // always handle stream stop, check which stream type is stopping
+    handleEventForBeacon(stream == AUDIO_STREAM_TTS ? STOPPING_BEACON : STOPPING_OUTPUT);
+
+    // handle special case for sonification while in call
+    if (isInCall()) {
+        handleIncallSonification(stream, false, false);
+    }
+
+    if (outputDesc->mRefCount[stream] > 0) {
+        // decrement usage count of this stream on the output
+        outputDesc->changeRefCount(stream, -1);
+
+        // store time at which the stream was stopped - see isStreamActive()
+        if (outputDesc->mRefCount[stream] == 0 || forceDeviceUpdate) {
+            outputDesc->mStopTime[stream] = systemTime();
+            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
+            // delay the device switch by twice the latency because stopOutput() is executed when
+            // the track stop() command is received and at that time the audio track buffer can
+            // still contain data that needs to be drained. The latency only covers the audio HAL
+            // and kernel buffers. Also the latency does not always include additional delay in the
+            // audio path (audio DSP, CODEC ...)
+            setOutputDevice(outputDesc, newDevice, false, outputDesc->latency()*2);
+
+            // force restoring the device selection on other active outputs if it differs from the
+            // one being selected for this output
+            for (size_t i = 0; i < mOutputs.size(); i++) {
+                audio_io_handle_t curOutput = mOutputs.keyAt(i);
+                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(i);
+                if (desc != outputDesc &&
+                        desc->isActive() &&
+                        outputDesc->sharesHwModuleWith(desc) &&
+                        (newDevice != desc->device())) {
+                    setOutputDevice(desc,
+                                    getNewOutputDevice(desc, false /*fromCache*/),
+                                    true,
+                                    outputDesc->latency()*2);
+                }
+            }
+            // update the outputs if stopping one with a stream that can affect notification routing
+            handleNotificationRoutingForStream(stream);
+        }
+        return NO_ERROR;
+    } else {
+        ALOGW("stopOutput() refcount is already 0");
+        return INVALID_OPERATION;
+    }
+}
+
+void AudioPolicyManager::releaseOutput(audio_io_handle_t output,
+                                       audio_stream_type_t stream __unused,
+                                       audio_session_t session __unused)
+{
+    ALOGV("releaseOutput() %d", output);
+    ssize_t index = mOutputs.indexOfKey(output);
+    if (index < 0) {
+        ALOGW("releaseOutput() releasing unknown output %d", output);
+        return;
+    }
+
+#ifdef AUDIO_POLICY_TEST
+    int testIndex = testOutputIndex(output);
+    if (testIndex != 0) {
+        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueAt(index);
+        if (outputDesc->isActive()) {
+            mpClientInterface->closeOutput(output);
+            removeOutput(output);
+            mTestOutputs[testIndex] = 0;
+        }
+        return;
+    }
+#endif //AUDIO_POLICY_TEST
+
+    // Explicit routing: remove any route associated with this session
+    mOutputRoutes.removeRoute(session);
+
+    sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(index);
+    if (desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
+        if (desc->mDirectOpenCount <= 0) {
+            ALOGW("releaseOutput() invalid open count %d for output %d",
+                                                              desc->mDirectOpenCount, output);
+            return;
+        }
+        if (--desc->mDirectOpenCount == 0) {
+            closeOutput(output);
+            // If effects were present on the output, audioflinger moved them to the primary
+            // output by default: move them back to the appropriate output.
+            audio_io_handle_t dstOutput = getOutputForEffect();
+            if (hasPrimaryOutput() && dstOutput != mPrimaryOutput->mIoHandle) {
+                mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX,
+                                               mPrimaryOutput->mIoHandle, dstOutput);
+            }
+            mpClientInterface->onAudioPortListUpdate();
+        }
+    }
+}
+
+
+status_t AudioPolicyManager::getInputForAttr(const audio_attributes_t *attr,
+                                             audio_io_handle_t *input,
+                                             audio_session_t session,
+                                             uid_t uid,
+                                             uint32_t samplingRate,
+                                             audio_format_t format,
+                                             audio_channel_mask_t channelMask,
+                                             audio_input_flags_t flags,
+                                             audio_port_handle_t selectedDeviceId,
+                                             input_type_t *inputType)
+{
+    ALOGV("getInputForAttr() source %d, samplingRate %d, format %d, channelMask %x,"
+            "session %d, flags %#x",
+          attr->source, samplingRate, format, channelMask, session, flags);
+
+    *input = AUDIO_IO_HANDLE_NONE;
+    *inputType = API_INPUT_INVALID;
+    audio_devices_t device;
+    // handle legacy remote submix case where the address was not always specified
+    String8 address = String8("");
+    bool isSoundTrigger = false;
+    audio_source_t inputSource = attr->source;
+    audio_source_t halInputSource;
+    AudioMix *policyMix = NULL;
+
+    if (inputSource == AUDIO_SOURCE_DEFAULT) {
+        inputSource = AUDIO_SOURCE_MIC;
+    }
+    halInputSource = inputSource;
+
+    // Explicit routing?
+    sp<DeviceDescriptor> deviceDesc;
+    for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
+        if (mAvailableInputDevices[i]->getId() == selectedDeviceId) {
+            deviceDesc = mAvailableInputDevices[i];
+            break;
+        }
+    }
+    mInputRoutes.addRoute(session, SessionRoute::STREAM_TYPE_NA, inputSource, deviceDesc, uid);
+
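+    // a REMOTE_SUBMIX source carrying an "addr=" tag requests capture from a registered
+    // dynamic policy mix identified by that address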
+    if (inputSource == AUDIO_SOURCE_REMOTE_SUBMIX &&
+            strncmp(attr->tags, "addr=", strlen("addr=")) == 0) {
+        status_t ret = mPolicyMixes.getInputMixForAttr(*attr, &policyMix);
+        if (ret != NO_ERROR) {
+            return ret;
+        }
+        *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
+        device = AUDIO_DEVICE_IN_REMOTE_SUBMIX;
+        address = String8(attr->tags + strlen("addr="));
+    } else {
+        device = getDeviceAndMixForInputSource(inputSource, &policyMix);
+        if (device == AUDIO_DEVICE_NONE) {
+            ALOGW("getInputForAttr() could not find device for source %d", inputSource);
+            return BAD_VALUE;
+        }
+        if (policyMix != NULL) {
+            address = policyMix->mRegistrationId;
+            if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
+                // there is an external policy, but this input is attached to a mix of recorders,
+                // meaning it receives audio injected into the framework, so the recorder doesn't
+                // know about it and is therefore considered "legacy"
+                *inputType = API_INPUT_LEGACY;
+            } else {
+                // recording a mix of players defined by an external policy, we're rerouting for
+                // an external policy
+                *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
+            }
+        } else if (audio_is_remote_submix_device(device)) {
+            address = String8("0");
+            *inputType = API_INPUT_MIX_CAPTURE;
+        } else if (device == AUDIO_DEVICE_IN_TELEPHONY_RX) {
+            *inputType = API_INPUT_TELEPHONY_RX;
+        } else {
+            *inputType = API_INPUT_LEGACY;
+        }
+        // adapt channel selection to input source
+        switch (inputSource) {
+        case AUDIO_SOURCE_VOICE_UPLINK:
+            channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK;
+            break;
+        case AUDIO_SOURCE_VOICE_DOWNLINK:
+            channelMask = AUDIO_CHANNEL_IN_VOICE_DNLINK;
+            break;
+        case AUDIO_SOURCE_VOICE_CALL:
+            channelMask = AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK;
+            break;
+        default:
+            break;
+        }
+        if (inputSource == AUDIO_SOURCE_HOTWORD) {
+            ssize_t index = mSoundTriggerSessions.indexOfKey(session);
+            if (index >= 0) {
+                *input = mSoundTriggerSessions.valueFor(session);
+                isSoundTrigger = true;
+                flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_HW_HOTWORD);
+                ALOGV("SoundTrigger capture on session %d input %d", session, *input);
+            } else {
+                halInputSource = AUDIO_SOURCE_VOICE_RECOGNITION;
+            }
+        }
+    }
+
+    // find a compatible input profile (not necessarily identical in parameters)
+    sp<IOProfile> profile;
+    // samplingRate and flags may be updated by getInputProfile
+    uint32_t profileSamplingRate = samplingRate;
+    audio_format_t profileFormat = format;
+    audio_channel_mask_t profileChannelMask = channelMask;
+    audio_input_flags_t profileFlags = flags;
+    for (;;) {
+        profile = getInputProfile(device, address,
+                                  profileSamplingRate, profileFormat, profileChannelMask,
+                                  profileFlags);
+        if (profile != 0) {
+            break; // success
+        } else if (profileFlags != AUDIO_INPUT_FLAG_NONE) {
+            profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
+        } else { // fail
+            ALOGW("getInputForAttr() could not find profile for device 0x%X, samplingRate %u,"
+                    " format %#x, channelMask 0x%X, flags %#x",
+                    device, samplingRate, format, channelMask, flags);
+            return BAD_VALUE;
+        }
+    }
+
+    if (profile->getModuleHandle() == 0) {
+        ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName());
+        return NO_INIT;
+    }
+
+    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+    config.sample_rate = profileSamplingRate;
+    config.channel_mask = profileChannelMask;
+    config.format = profileFormat;
+
+    status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
+                                                   input,
+                                                   &config,
+                                                   &device,
+                                                   address,
+                                                   halInputSource,
+                                                   profileFlags);
+
+    // only accept input with the exact requested set of parameters
+    if (status != NO_ERROR || *input == AUDIO_IO_HANDLE_NONE ||
+        (profileSamplingRate != config.sample_rate) ||
+        (profileFormat != config.format) ||
+        (profileChannelMask != config.channel_mask)) {
+        ALOGW("getInputForAttr() failed opening input: samplingRate %d, format %d,"
+                " channelMask %x",
+                samplingRate, format, channelMask);
+        if (*input != AUDIO_IO_HANDLE_NONE) {
+            mpClientInterface->closeInput(*input);
+        }
+        return BAD_VALUE;
+    }
+
+    sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(profile);
+    inputDesc->mInputSource = inputSource;
+    inputDesc->mRefCount = 0;
+    inputDesc->mOpenRefCount = 1;
+    inputDesc->mSamplingRate = profileSamplingRate;
+    inputDesc->mFormat = profileFormat;
+    inputDesc->mChannelMask = profileChannelMask;
+    inputDesc->mDevice = device;
+    inputDesc->mSessions.add(session);
+    inputDesc->mIsSoundTrigger = isSoundTrigger;
+    inputDesc->mPolicyMix = policyMix;
+
+    ALOGV("getInputForAttr() returns input type = %d", *inputType);
+
+    addInput(*input, inputDesc);
+    mpClientInterface->onAudioPortListUpdate();
+
+    return NO_ERROR;
+}
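+
+// Illustrative sketch (hypothetical caller code; argument list abbreviated with "..."):
+// how the out-parameters of getInputForAttr() are typically consumed by a caller such
+// as the audio policy service.
+//
+//   audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+//   input_type_t inputType = API_INPUT_INVALID;
+//   if (getInputForAttr(..., &input, ..., &inputType) == NO_ERROR) {
+//       switch (inputType) {
+//       case API_INPUT_MIX_EXT_POLICY_REROUTE: /* rerouted to a dynamic policy mix */  break;
+//       case API_INPUT_MIX_CAPTURE:            /* capture of a remote submix */        break;
+//       case API_INPUT_TELEPHONY_RX:           /* capture of the telephony downlink */ break;
+//       default:                               /* regular capture */                   break;
+//       }
+//   }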
+
+status_t AudioPolicyManager::startInput(audio_io_handle_t input,
+                                        audio_session_t session)
+{
+    ALOGV("startInput() input %d", input);
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        ALOGW("startInput() unknown input %d", input);
+        return BAD_VALUE;
+    }
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+
+    index = inputDesc->mSessions.indexOf(session);
+    if (index < 0) {
+        ALOGW("startInput() unknown session %d on input %d", session, input);
+        return BAD_VALUE;
+    }
+
+    // virtual input devices are compatible with other input devices
+    if (!is_virtual_input_device(inputDesc->mDevice)) {
+
+        // for a non-virtual input device, check if there is another (non-virtual) active input
+        audio_io_handle_t activeInput = mInputs.getActiveInput();
+        if (activeInput != 0 && activeInput != input) {
+
+            // If the already active input uses AUDIO_SOURCE_HOTWORD then it is closed,
+            // otherwise the active input continues and the new input cannot be started.
+            sp<AudioInputDescriptor> activeDesc = mInputs.valueFor(activeInput);
+            if (activeDesc->mInputSource == AUDIO_SOURCE_HOTWORD) {
+                ALOGW("startInput(%d) preempting low-priority input %d", input, activeInput);
+                stopInput(activeInput, activeDesc->mSessions.itemAt(0));
+                releaseInput(activeInput, activeDesc->mSessions.itemAt(0));
+            } else {
+                ALOGE("startInput(%d) failed: other input %d already started", input, activeInput);
+                return INVALID_OPERATION;
+            }
+        }
+    }
+
+    // Routing?
+    mInputRoutes.incRouteActivity(session);
+
+    if (inputDesc->mRefCount == 0 || mInputRoutes.hasRouteChanged(session)) {
+        // if input maps to a dynamic policy with an activity listener, notify of state change
+        if ((inputDesc->mPolicyMix != NULL)
+                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
+                    MIX_STATE_MIXING);
+        }
+
+        if (mInputs.activeInputsCount() == 0) {
+            SoundTrigger::setCaptureState(true);
+        }
+        setInputDevice(input, getNewInputDevice(input), true /* force */);
+
+        // automatically enable the remote submix output when input is started if not
+        // used by a policy mix of type MIX_TYPE_RECORDERS
+        // For remote submix (a virtual device), we open only one input per capture request.
+        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+            String8 address = String8("");
+            if (inputDesc->mPolicyMix == NULL) {
+                address = String8("0");
+            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+                address = inputDesc->mPolicyMix->mRegistrationId;
+            }
+            if (address != "") {
+                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                        AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                        address, "remote-submix");
+            }
+        }
+    }
+
+    ALOGV("AudioPolicyManager::startInput() input source = %d", inputDesc->mInputSource);
+
+    inputDesc->mRefCount++;
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::stopInput(audio_io_handle_t input,
+                                       audio_session_t session)
+{
+    ALOGV("stopInput() input %d", input);
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        ALOGW("stopInput() unknown input %d", input);
+        return BAD_VALUE;
+    }
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+
+    index = inputDesc->mSessions.indexOf(session);
+    if (index < 0) {
+        ALOGW("stopInput() unknown session %d on input %d", session, input);
+        return BAD_VALUE;
+    }
+
+    if (inputDesc->mRefCount == 0) {
+        ALOGW("stopInput() input %d already stopped", input);
+        return INVALID_OPERATION;
+    }
+
+    inputDesc->mRefCount--;
+
+    // Routing?
+    mInputRoutes.decRouteActivity(session);
+
+    if (inputDesc->mRefCount == 0) {
+        // if input maps to a dynamic policy with an activity listener, notify of state change
+        if ((inputDesc->mPolicyMix != NULL)
+                && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+            mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
+                    MIX_STATE_IDLE);
+        }
+
+        // automatically disable the remote submix output when input is stopped if not
+        // used by a policy mix of type MIX_TYPE_RECORDERS
+        if (audio_is_remote_submix_device(inputDesc->mDevice)) {
+            String8 address = String8("");
+            if (inputDesc->mPolicyMix == NULL) {
+                address = String8("0");
+            } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
+                address = inputDesc->mPolicyMix->mRegistrationId;
+            }
+            if (address != "") {
+                setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                                         AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                         address, "remote-submix");
+            }
+        }
+
+        resetInputDevice(input);
+
+        if (mInputs.activeInputsCount() == 0) {
+            SoundTrigger::setCaptureState(false);
+        }
+    }
+    return NO_ERROR;
+}
+
+void AudioPolicyManager::releaseInput(audio_io_handle_t input,
+                                      audio_session_t session)
+{
+    ALOGV("releaseInput() %d", input);
+    ssize_t index = mInputs.indexOfKey(input);
+    if (index < 0) {
+        ALOGW("releaseInput() releasing unknown input %d", input);
+        return;
+    }
+
+    // Routing
+    mInputRoutes.removeRoute(session);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(index);
+    ALOG_ASSERT(inputDesc != 0);
+
+    index = inputDesc->mSessions.indexOf(session);
+    if (index < 0) {
+        ALOGW("releaseInput() unknown session %d on input %d", session, input);
+        return;
+    }
+    inputDesc->mSessions.remove(session);
+    if (inputDesc->mOpenRefCount == 0) {
+        ALOGW("releaseInput() invalid open ref count %d", inputDesc->mOpenRefCount);
+        return;
+    }
+    inputDesc->mOpenRefCount--;
+    if (inputDesc->mOpenRefCount > 0) {
+        ALOGV("releaseInput() exit > 0");
+        return;
+    }
+
+    closeInput(input);
+    mpClientInterface->onAudioPortListUpdate();
+    ALOGV("releaseInput() exit");
+}
+
+void AudioPolicyManager::closeAllInputs() {
+    bool patchRemoved = false;
+
+    for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
+        ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+        if (patch_index >= 0) {
+            sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
+            mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+            mAudioPatches.removeItemsAt(patch_index);
+            patchRemoved = true;
+        }
+        mpClientInterface->closeInput(mInputs.keyAt(input_index));
+    }
+    mInputs.clear();
+    nextAudioPortGeneration();
+
+    if (patchRemoved) {
+        mpClientInterface->onAudioPatchListUpdate();
+    }
+}
+
+void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
+                                            int indexMin,
+                                            int indexMax)
+{
+    ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
+    mEngine->initStreamVolume(stream, indexMin, indexMax);
+    //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
+    if (stream == AUDIO_STREAM_MUSIC) {
+        mEngine->initStreamVolume(AUDIO_STREAM_ACCESSIBILITY, indexMin, indexMax);
+    }
+}
+
+status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
+                                                  int index,
+                                                  audio_devices_t device)
+{
+    if ((index < mStreams.valueFor(stream).getVolumeIndexMin()) ||
+            (index > mStreams.valueFor(stream).getVolumeIndexMax())) {
+        return BAD_VALUE;
+    }
+    if (!audio_is_output_device(device)) {
+        return BAD_VALUE;
+    }
+
+    // Force max volume if stream cannot be muted
+    if (!mStreams.canBeMuted(stream)) index = mStreams.valueFor(stream).getVolumeIndexMax();
+
+    ALOGV("setStreamVolumeIndex() stream %d, device %04x, index %d",
+          stream, device, index);
+
+    // if device is AUDIO_DEVICE_OUT_DEFAULT set default value and
+    // clear all device specific values
+    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
+        mStreams.clearCurrentVolumeIndex(stream);
+    }
+    mStreams.addCurrentVolumeIndex(stream, device, index);
+
+    // update volume on all outputs whose current device is also selected by the same
+    // strategy as the device specified by the caller
+    audio_devices_t strategyDevice = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+
+    //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
+    audio_devices_t accessibilityDevice = AUDIO_DEVICE_NONE;
+    if (stream == AUDIO_STREAM_MUSIC) {
+        mStreams.addCurrentVolumeIndex(AUDIO_STREAM_ACCESSIBILITY, device, index);
+        accessibilityDevice = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, true /*fromCache*/);
+    }
+    if ((device != AUDIO_DEVICE_OUT_DEFAULT) &&
+            (device & (strategyDevice | accessibilityDevice)) == 0) {
+        return NO_ERROR;
+    }
+    status_t status = NO_ERROR;
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+        audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
+        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & strategyDevice) != 0)) {
+            status_t volStatus = checkAndSetVolume(stream, index, desc, curDevice);
+            if (volStatus != NO_ERROR) {
+                status = volStatus;
+            }
+        }
+        if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)) {
+            status_t volStatus = checkAndSetVolume(AUDIO_STREAM_ACCESSIBILITY,
+                                                   index, desc, curDevice);
+            if (volStatus != NO_ERROR) {
+                status = volStatus;
+            }
+        }
+    }
+    return status;
+}
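+
+// Illustrative sketch (hypothetical index and device values): per-device volume
+// indices as handled above can be set for a single device and read back:
+//
+//   setStreamVolumeIndex(AUDIO_STREAM_MUSIC, 10 /* index */, AUDIO_DEVICE_OUT_WIRED_HEADSET);
+//   int index;
+//   getStreamVolumeIndex(AUDIO_STREAM_MUSIC, &index, AUDIO_DEVICE_OUT_WIRED_HEADSET);
+//
+// Passing AUDIO_DEVICE_OUT_DEFAULT instead clears the device-specific values and
+// sets the default index, as implemented above.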
+
+status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
+                                                      int *index,
+                                                      audio_devices_t device)
+{
+    if (index == NULL) {
+        return BAD_VALUE;
+    }
+    if (!audio_is_output_device(device)) {
+        return BAD_VALUE;
+    }
+    // if device is AUDIO_DEVICE_OUT_DEFAULT, return volume for device corresponding to
+    // the strategy the stream belongs to.
+    if (device == AUDIO_DEVICE_OUT_DEFAULT) {
+        device = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
+    }
+    device = Volume::getDeviceForVolume(device);
+
+    *index = mStreams.valueFor(stream).getVolumeIndex(device);
+    ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
+    return NO_ERROR;
+}
+
+audio_io_handle_t AudioPolicyManager::selectOutputForEffects(
+                                            const SortedVector<audio_io_handle_t>& outputs)
+{
+    // select one output among several suitable for global effects.
+    // The priority is as follows:
+    // 1: An offloaded output. If the effect ends up not being offloadable,
+    //    AudioFlinger will invalidate the track and the offloaded output
+    //    will be closed causing the effect to be moved to a PCM output.
+    // 2: A deep buffer output
+    // 3: the first output in the list
+
+    if (outputs.size() == 0) {
+        return 0;
+    }
+
+    audio_io_handle_t outputOffloaded = 0;
+    audio_io_handle_t outputDeepBuffer = 0;
+
+    for (size_t i = 0; i < outputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(outputs[i]);
+        ALOGV("selectOutputForEffects outputs[%zu] flags %x", i, desc->mFlags);
+        if ((desc->mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) {
+            outputOffloaded = outputs[i];
+        }
+        if ((desc->mFlags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) != 0) {
+            outputDeepBuffer = outputs[i];
+        }
+    }
+
+    ALOGV("selectOutputForEffects outputOffloaded %d outputDeepBuffer %d",
+          outputOffloaded, outputDeepBuffer);
+    if (outputOffloaded != 0) {
+        return outputOffloaded;
+    }
+    if (outputDeepBuffer != 0) {
+        return outputDeepBuffer;
+    }
+
+    return outputs[0];
+}
+
+audio_io_handle_t AudioPolicyManager::getOutputForEffect(const effect_descriptor_t *desc)
+{
+    // apply simple rule where global effects are attached to the same output as MUSIC streams
+
+    routing_strategy strategy = getStrategy(AUDIO_STREAM_MUSIC);
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(device, mOutputs);
+
+    audio_io_handle_t output = selectOutputForEffects(dstOutputs);
+    ALOGV("getOutputForEffect() got output %d for fx %s flags %x",
+          output, (desc == NULL) ? "unspecified" : desc->name,  (desc == NULL) ? 0 : desc->flags);
+
+    return output;
+}
+
+status_t AudioPolicyManager::registerEffect(const effect_descriptor_t *desc,
+                                audio_io_handle_t io,
+                                uint32_t strategy,
+                                int session,
+                                int id)
+{
+    ssize_t index = mOutputs.indexOfKey(io);
+    if (index < 0) {
+        index = mInputs.indexOfKey(io);
+        if (index < 0) {
+            ALOGW("registerEffect() unknown io %d", io);
+            return INVALID_OPERATION;
+        }
+    }
+    return mEffects.registerEffect(desc, io, strategy, session, id);
+}
+
+bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    return mOutputs.isStreamActive(stream, inPastMs);
+}
+
+bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
+{
+    return mOutputs.isStreamActiveRemotely(stream, inPastMs);
+}
+
+bool AudioPolicyManager::isSourceActive(audio_source_t source) const
+{
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
+        if (inputDescriptor->mRefCount == 0) {
+            continue;
+        }
+        if (inputDescriptor->mInputSource == (int)source) {
+            return true;
+        }
+        // AUDIO_SOURCE_HOTWORD is equivalent to AUDIO_SOURCE_VOICE_RECOGNITION only if it
+        // corresponds to an active capture triggered by a hardware hotword recognition
+        if ((source == AUDIO_SOURCE_VOICE_RECOGNITION) &&
+                 (inputDescriptor->mInputSource == AUDIO_SOURCE_HOTWORD)) {
+            // FIXME: we should not assume that the first session is the active one and keep
+            // activity count per session. Same in startInput().
+            ssize_t index = mSoundTriggerSessions.indexOfKey(inputDescriptor->mSessions.itemAt(0));
+            if (index >= 0) {
+                return true;
+            }
+        }
+    }
+    return false;
+}
+
+// Register a list of custom mixes with their attributes and format.
+// When a mix is registered, corresponding input and output profiles are
+// added to the remote submix hw module. The profile contains only the
+// parameters (sampling rate, format...) specified by the mix.
+// The corresponding input remote submix device is also connected.
+//
+// When a remote submix device is connected, the address is checked to select the
+// appropriate profile and the corresponding input or output stream is opened.
+//
+// When capture starts, getInputForAttr() will:
+//  - 1 look for a mix matching the address passed in attributes tags if any
+//  - 2 if none found, getDeviceForInputSource() will:
+//     - 2.1 look for a mix matching the attributes source
+//     - 2.2 if none found, default to device selection by policy rules
+// At this time, the corresponding output remote submix device is also connected
+// and active playback use cases can be transferred to this mix if needed when reconnecting
+// after AudioTracks are invalidated.
+//
+// When playback starts, getOutputForAttr() will:
+//  - 1 look for a mix matching the address passed in attributes tags if any
+//  - 2 if none found, look for a mix matching the attributes usage
+//  - 3 if none found, default to device and output selection by policy rules.
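+//
+// Illustrative sketch (hypothetical mix address): a recorder targeting a registered
+// mix passes the mix registration address in the audio attributes tags:
+//
+//   audio_attributes_t attr = {};
+//   attr.source = AUDIO_SOURCE_REMOTE_SUBMIX;
+//   strncpy(attr.tags, "addr=my_mix_address", sizeof(attr.tags) - 1);
+//   // getInputForAttr() then resolves the mix via mPolicyMixes.getInputMixForAttr()
+//   // and reports API_INPUT_MIX_EXT_POLICY_REROUTE to the caller.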
+
+status_t AudioPolicyManager::registerPolicyMixes(Vector<AudioMix> mixes)
+{
+    sp<HwModule> module;
+    for (size_t i = 0; i < mHwModules.size(); i++) {
+        if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
+                mHwModules[i]->mHandle != 0) {
+            module = mHwModules[i];
+            break;
+        }
+    }
+
+    if (module == 0) {
+        return INVALID_OPERATION;
+    }
+
+    ALOGV("registerPolicyMixes() num mixes %d", mixes.size());
+
+    for (size_t i = 0; i < mixes.size(); i++) {
+        String8 address = mixes[i].mRegistrationId;
+
+        if (mPolicyMixes.registerMix(address, mixes[i]) != NO_ERROR) {
+            continue;
+        }
+        audio_config_t outputConfig = mixes[i].mFormat;
+        audio_config_t inputConfig = mixes[i].mFormat;
+        // NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
+        // stereo and let audio flinger do the channel conversion if needed.
+        outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+        inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
+        module->addOutputProfile(address, &outputConfig,
+                                 AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
+        module->addInputProfile(address, &inputConfig,
+                                 AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
+
+        if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
+            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                                     address.string(), "remote-submix");
+        } else {
+            setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                                     AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+                                     address.string(), "remote-submix");
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
+{
+    sp<HwModule> module;
+    for (size_t i = 0; i < mHwModules.size(); i++) {
+        if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
+                mHwModules[i]->mHandle != 0) {
+            module = mHwModules[i];
+            break;
+        }
+    }
+
+    if (module == 0) {
+        return INVALID_OPERATION;
+    }
+
+    ALOGV("unregisterPolicyMixes() num mixes %d", mixes.size());
+
+    for (size_t i = 0; i < mixes.size(); i++) {
+        String8 address = mixes[i].mRegistrationId;
+
+        if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
+            continue;
+        }
+
+        if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
+                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
+        {
+            setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+                                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                     address.string(), "remote-submix");
+        }
+
+        if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
+                                             AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
+        {
+            setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+                                     AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                     address.string(), "remote-submix");
+        }
+        module->removeOutputProfile(address);
+        module->removeInputProfile(address);
+    }
+    return NO_ERROR;
+}
+
+
+status_t AudioPolicyManager::dump(int fd)
+{
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+    String8 result;
+
+    snprintf(buffer, SIZE, "\nAudioPolicyManager Dump: %p\n", this);
+    result.append(buffer);
+
+    snprintf(buffer, SIZE, " Primary Output: %d\n",
+             hasPrimaryOutput() ? mPrimaryOutput->mIoHandle : AUDIO_IO_HANDLE_NONE);
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Phone state: %d\n", mEngine->getPhoneState());
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for communications %d\n",
+             mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION));
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for media %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA));
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for record %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD));
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for dock %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_DOCK));
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for system %d\n", mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM));
+    result.append(buffer);
+    snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
+            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
+    result.append(buffer);
+    write(fd, result.string(), result.size());
+
+    mAvailableOutputDevices.dump(fd, String8("output"));
+    mAvailableInputDevices.dump(fd, String8("input"));
+    mHwModules.dump(fd);
+    mOutputs.dump(fd);
+    mInputs.dump(fd);
+    mStreams.dump(fd);
+    mEffects.dump(fd);
+    mAudioPatches.dump(fd);
+
+    return NO_ERROR;
+}
+
+// This function checks for the parameters which can be offloaded.
+// This can be enhanced depending on the capability of the DSP and policy
+// of the system.
+bool AudioPolicyManager::isOffloadSupported(const audio_offload_info_t& offloadInfo)
+{
+    ALOGV("isOffloadSupported: SR=%u, CM=0x%x, Format=0x%x, StreamType=%d,"
+     " BitRate=%u, duration=%" PRId64 " us, has_video=%d",
+     offloadInfo.sample_rate, offloadInfo.channel_mask,
+     offloadInfo.format,
+     offloadInfo.stream_type, offloadInfo.bit_rate, offloadInfo.duration_us,
+     offloadInfo.has_video);
+
+    // Check if offload has been disabled
+    char propValue[PROPERTY_VALUE_MAX];
+    if (property_get("audio.offload.disable", propValue, "0")) {
+        if (atoi(propValue) != 0) {
+            ALOGV("offload disabled by audio.offload.disable=%s", propValue );
+            return false;
+        }
+    }
+
+    // Check if stream type is music, then only allow offload as of now.
+    if (offloadInfo.stream_type != AUDIO_STREAM_MUSIC)
+    {
+        ALOGV("isOffloadSupported: stream_type != MUSIC, returning false");
+        return false;
+    }
+
+    //TODO: enable audio offloading with video when ready
+    const bool allowOffloadWithVideo =
+            property_get_bool("audio.offload.video", false /* default_value */);
+    if (offloadInfo.has_video && !allowOffloadWithVideo) {
+        ALOGV("isOffloadSupported: has_video == true, returning false");
+        return false;
+    }
+
+    //If duration is less than minimum value defined in property, return false
+    if (property_get("audio.offload.min.duration.secs", propValue, NULL)) {
+        if (offloadInfo.duration_us < (atoi(propValue) * 1000000LL)) {
+            ALOGV("Offload denied by duration < audio.offload.min.duration.secs(=%s)", propValue);
+            return false;
+        }
+    } else if (offloadInfo.duration_us < OFFLOAD_DEFAULT_MIN_DURATION_SECS * 1000000) {
+        ALOGV("Offload denied by duration < default min(=%u)", OFFLOAD_DEFAULT_MIN_DURATION_SECS);
+        return false;
+    }
+
+    // Do not allow offloading if one non offloadable effect is enabled. This prevents from
+    // creating an offloaded track and tearing it down immediately after start when audioflinger
+    // detects there is an active non offloadable effect.
+    // FIXME: We should check the audio session here but we do not have it in this context.
+    // This may prevent offloading in rare situations where effects are left active by apps
+    // in the background.
+    if (mEffects.isNonOffloadableEffectEnabled()) {
+        return false;
+    }
+
+    // See if there is a profile to support this
+    // (the device is ignored here, hence AUDIO_DEVICE_NONE below).
+    sp<IOProfile> profile = getProfileForDirectOutput(AUDIO_DEVICE_NONE /*ignore device */,
+                                            offloadInfo.sample_rate,
+                                            offloadInfo.format,
+                                            offloadInfo.channel_mask,
+                                            AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
+    ALOGV("isOffloadSupported() profile %sfound", profile != 0 ? "" : "NOT ");
+    return (profile != 0);
+}
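+
+// Illustrative sketch (hypothetical field values): querying offload support for a
+// compressed music stream.
+//
+//   audio_offload_info_t info = AUDIO_INFO_INITIALIZER;
+//   info.sample_rate = 44100;
+//   info.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+//   info.format = AUDIO_FORMAT_MP3;
+//   info.stream_type = AUDIO_STREAM_MUSIC;
+//   info.duration_us = 180 * 1000000LL;  // above the minimum duration checked above
+//   info.has_video = false;
+//   bool canOffload = isOffloadSupported(info);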
+
+status_t AudioPolicyManager::listAudioPorts(audio_port_role_t role,
+                                            audio_port_type_t type,
+                                            unsigned int *num_ports,
+                                            struct audio_port *ports,
+                                            unsigned int *generation)
+{
+    if (num_ports == NULL || (*num_ports != 0 && ports == NULL) ||
+            generation == NULL) {
+        return BAD_VALUE;
+    }
+    ALOGV("listAudioPorts() role %d type %d num_ports %d ports %p", role, type, *num_ports, ports);
+    if (ports == NULL) {
+        *num_ports = 0;
+    }
+
+    size_t portsWritten = 0;
+    size_t portsMax = *num_ports;
+    *num_ports = 0;
+    if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_DEVICE) {
+        if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
+            for (size_t i = 0;
+                    i  < mAvailableOutputDevices.size() && portsWritten < portsMax; i++) {
+                mAvailableOutputDevices[i]->toAudioPort(&ports[portsWritten++]);
+            }
+            *num_ports += mAvailableOutputDevices.size();
+        }
+        if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
+            for (size_t i = 0;
+                    i  < mAvailableInputDevices.size() && portsWritten < portsMax; i++) {
+                mAvailableInputDevices[i]->toAudioPort(&ports[portsWritten++]);
+            }
+            *num_ports += mAvailableInputDevices.size();
+        }
+    }
+    if (type == AUDIO_PORT_TYPE_NONE || type == AUDIO_PORT_TYPE_MIX) {
+        if (role == AUDIO_PORT_ROLE_SINK || role == AUDIO_PORT_ROLE_NONE) {
+            for (size_t i = 0; i < mInputs.size() && portsWritten < portsMax; i++) {
+                mInputs[i]->toAudioPort(&ports[portsWritten++]);
+            }
+            *num_ports += mInputs.size();
+        }
+        if (role == AUDIO_PORT_ROLE_SOURCE || role == AUDIO_PORT_ROLE_NONE) {
+            size_t numOutputs = 0;
+            for (size_t i = 0; i < mOutputs.size(); i++) {
+                if (!mOutputs[i]->isDuplicated()) {
+                    numOutputs++;
+                    if (portsWritten < portsMax) {
+                        mOutputs[i]->toAudioPort(&ports[portsWritten++]);
+                    }
+                }
+            }
+            *num_ports += numOutputs;
+        }
+    }
+    *generation = curAudioPortGeneration();
+    ALOGV("listAudioPorts() got %zu ports needed %d", portsWritten, *num_ports);
+    return NO_ERROR;
+}
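+
+// Illustrative sketch (hypothetical variable names): the usual two-step query
+// pattern for listAudioPorts(): first get the count, then fetch the ports.
+//
+//   unsigned int numPorts = 0, generation;
+//   listAudioPorts(AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_NONE,
+//                  &numPorts, NULL, &generation);        // returns the total count
+//   struct audio_port *ports = new struct audio_port[numPorts];
+//   listAudioPorts(AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_NONE,
+//                  &numPorts, ports, &generation);       // fills up to numPorts entries
+//   delete[] ports;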
+
+status_t AudioPolicyManager::getAudioPort(struct audio_port *port __unused)
+{
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
+                                               audio_patch_handle_t *handle,
+                                               uid_t uid)
+{
+    ALOGV("createAudioPatch()");
+
+    if (handle == NULL || patch == NULL) {
+        return BAD_VALUE;
+    }
+    ALOGV("createAudioPatch() num sources %d num sinks %d", patch->num_sources, patch->num_sinks);
+
+    if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
+            patch->num_sinks == 0 || patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
+        return BAD_VALUE;
+    }
+    // only one source per audio patch supported for now
+    if (patch->num_sources > 1) {
+        return INVALID_OPERATION;
+    }
+
+    if (patch->sources[0].role != AUDIO_PORT_ROLE_SOURCE) {
+        return INVALID_OPERATION;
+    }
+    for (size_t i = 0; i < patch->num_sinks; i++) {
+        if (patch->sinks[i].role != AUDIO_PORT_ROLE_SINK) {
+            return INVALID_OPERATION;
+        }
+    }
+
+    sp<AudioPatch> patchDesc;
+    ssize_t index = mAudioPatches.indexOfKey(*handle);
+
+    ALOGV("createAudioPatch source id %d role %d type %d", patch->sources[0].id,
+                                                           patch->sources[0].role,
+                                                           patch->sources[0].type);
+#if LOG_NDEBUG == 0
+    for (size_t i = 0; i < patch->num_sinks; i++) {
+        ALOGV("createAudioPatch sink %zu: id %d role %d type %d", i, patch->sinks[i].id,
+                                                             patch->sinks[i].role,
+                                                             patch->sinks[i].type);
+    }
+#endif
+
+    if (index >= 0) {
+        patchDesc = mAudioPatches.valueAt(index);
+        ALOGV("createAudioPatch() mUidCached %d patchDesc->mUid %d uid %d",
+                                                                  mUidCached, patchDesc->mUid, uid);
+        if (patchDesc->mUid != mUidCached && uid != patchDesc->mUid) {
+            return INVALID_OPERATION;
+        }
+    } else {
+        *handle = 0;
+    }
+
+    if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id);
+        if (outputDesc == NULL) {
+            ALOGV("createAudioPatch() output not found for id %d", patch->sources[0].id);
+            return BAD_VALUE;
+        }
+        ALOG_ASSERT(!outputDesc->isDuplicated(), "duplicated output %d in source in ports",
+                                                outputDesc->mIoHandle);
+        if (patchDesc != 0) {
+            if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
+                ALOGV("createAudioPatch() source id differs for patch current id %d new id %d",
+                                          patchDesc->mPatch.sources[0].id, patch->sources[0].id);
+                return BAD_VALUE;
+            }
+        }
+        DeviceVector devices;
+        for (size_t i = 0; i < patch->num_sinks; i++) {
+            // Only support mix to devices connection
+            // TODO add support for mix to mix connection
+            if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
+                ALOGV("createAudioPatch() source mix but sink is not a device");
+                return INVALID_OPERATION;
+            }
+            sp<DeviceDescriptor> devDesc =
+                    mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
+            if (devDesc == 0) {
+                ALOGV("createAudioPatch() out device not found for id %d", patch->sinks[i].id);
+                return BAD_VALUE;
+            }
+
+            if (!outputDesc->mProfile->isCompatibleProfile(devDesc->type(),
+                                                           devDesc->mAddress,
+                                                           patch->sources[0].sample_rate,
+                                                           NULL,  // updatedSamplingRate
+                                                           patch->sources[0].format,
+                                                           NULL,  // updatedFormat
+                                                           patch->sources[0].channel_mask,
+                                                           NULL,  // updatedChannelMask
+                                                           AUDIO_OUTPUT_FLAG_NONE /*FIXME*/)) {
+                ALOGV("createAudioPatch() profile not supported for device %08x",
+                        devDesc->type());
+                return INVALID_OPERATION;
+            }
+            devices.add(devDesc);
+        }
+        if (devices.size() == 0) {
+            return INVALID_OPERATION;
+        }
+
+        // TODO: reconfigure output format and channels here
+        ALOGV("createAudioPatch() setting device %08x on output %d",
+              devices.types(), outputDesc->mIoHandle);
+        setOutputDevice(outputDesc, devices.types(), true, 0, handle);
+        index = mAudioPatches.indexOfKey(*handle);
+        if (index >= 0) {
+            if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
+                ALOGW("createAudioPatch() setOutputDevice() did not reuse the patch provided");
+            }
+            patchDesc = mAudioPatches.valueAt(index);
+            patchDesc->mUid = uid;
+            ALOGV("createAudioPatch() success");
+        } else {
+            ALOGW("createAudioPatch() setOutputDevice() failed to create a patch");
+            return INVALID_OPERATION;
+        }
+    } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
+        if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+            // input device to input mix connection
+            // only one sink supported when connecting an input device to a mix
+            if (patch->num_sinks > 1) {
+                return INVALID_OPERATION;
+            }
+            sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
+            if (inputDesc == NULL) {
+                return BAD_VALUE;
+            }
+            if (patchDesc != 0) {
+                if (patchDesc->mPatch.sinks[0].id != patch->sinks[0].id) {
+                    return BAD_VALUE;
+                }
+            }
+            sp<DeviceDescriptor> devDesc =
+                    mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
+            if (devDesc == 0) {
+                return BAD_VALUE;
+            }
+
+            if (!inputDesc->mProfile->isCompatibleProfile(devDesc->type(),
+                                                          devDesc->mAddress,
+                                                          patch->sinks[0].sample_rate,
+                                                          NULL, /*updatedSampleRate*/
+                                                          patch->sinks[0].format,
+                                                          NULL, /*updatedFormat*/
+                                                          patch->sinks[0].channel_mask,
+                                                          NULL, /*updatedChannelMask*/
+                                                          // FIXME for the parameter type,
+                                                          // and the NONE
+                                                          (audio_output_flags_t)
+                                                            AUDIO_INPUT_FLAG_NONE)) {
+                return INVALID_OPERATION;
+            }
+            // TODO: reconfigure output format and channels here
+            ALOGV("createAudioPatch() setting device %08x on output %d",
+                                                  devDesc->type(), inputDesc->mIoHandle);
+            setInputDevice(inputDesc->mIoHandle, devDesc->type(), true, handle);
+            index = mAudioPatches.indexOfKey(*handle);
+            if (index >= 0) {
+                if (patchDesc != 0 && patchDesc != mAudioPatches.valueAt(index)) {
+                    ALOGW("createAudioPatch() setInputDevice() did not reuse the patch provided");
+                }
+                patchDesc = mAudioPatches.valueAt(index);
+                patchDesc->mUid = uid;
+                ALOGV("createAudioPatch() success");
+            } else {
+                ALOGW("createAudioPatch() setInputDevice() failed to create a patch");
+                return INVALID_OPERATION;
+            }
+        } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
+            // device to device connection
+            if (patchDesc != 0) {
+                if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
+                    return BAD_VALUE;
+                }
+            }
+            sp<DeviceDescriptor> srcDeviceDesc =
+                    mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
+            if (srcDeviceDesc == 0) {
+                return BAD_VALUE;
+            }
+
+            // update source and sink with our own data as the data passed in the patch may
+            // be incomplete.
+            struct audio_patch newPatch = *patch;
+            srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
+
+            for (size_t i = 0; i < patch->num_sinks; i++) {
+                if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
+                    ALOGV("createAudioPatch() source device but one sink is not a device");
+                    return INVALID_OPERATION;
+                }
+
+                sp<DeviceDescriptor> sinkDeviceDesc =
+                        mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
+                if (sinkDeviceDesc == 0) {
+                    return BAD_VALUE;
+                }
+                sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
+
+                // create a software bridge in PatchPanel if:
+                // - source and sink devices are on different HW modules OR
+                // - audio HAL version is < 3.0
+                if ((srcDeviceDesc->getModuleHandle() != sinkDeviceDesc->getModuleHandle()) ||
+                        (srcDeviceDesc->mModule->mHalVersion < AUDIO_DEVICE_API_VERSION_3_0)) {
+                    // support only one sink device for now to simplify output selection logic
+                    if (patch->num_sinks > 1) {
+                        return INVALID_OPERATION;
+                    }
+                    SortedVector<audio_io_handle_t> outputs =
+                                            getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
+                    // if the sink device is reachable via an opened output stream, request to go via
+                    // this output stream by adding a second source to the patch description
+                    audio_io_handle_t output = selectOutput(outputs,
+                                                            AUDIO_OUTPUT_FLAG_NONE,
+                                                            AUDIO_FORMAT_INVALID);
+                    if (output != AUDIO_IO_HANDLE_NONE) {
+                        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+                        if (outputDesc->isDuplicated()) {
+                            return INVALID_OPERATION;
+                        }
+                        outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]);
+                        newPatch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
+                        newPatch.num_sources = 2;
+                    }
+                }
+            }
+            // TODO: check from routing capabilities in config file and other conflicting patches
+
+            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+            if (index >= 0) {
+                afPatchHandle = patchDesc->mAfPatchHandle;
+            }
+
+            status_t status = mpClientInterface->createAudioPatch(&newPatch,
+                                                                  &afPatchHandle,
+                                                                  0);
+            ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
+                                                                  status, afPatchHandle);
+            if (status == NO_ERROR) {
+                if (index < 0) {
+                    patchDesc = new AudioPatch(&newPatch, uid);
+                    addAudioPatch(patchDesc->mHandle, patchDesc);
+                } else {
+                    patchDesc->mPatch = newPatch;
+                }
+                patchDesc->mAfPatchHandle = afPatchHandle;
+                *handle = patchDesc->mHandle;
+                nextAudioPortGeneration();
+                mpClientInterface->onAudioPatchListUpdate();
+            } else {
+                ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
+                status);
+                return INVALID_OPERATION;
+            }
+        } else {
+            return BAD_VALUE;
+        }
+    } else {
+        return BAD_VALUE;
+    }
+    return NO_ERROR;
+}
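+
+// Illustrative sketch (hypothetical port ids, normally obtained from listAudioPorts()):
+// a device-to-device patch request of the kind handled above.
+//
+//   struct audio_patch patch = {};
+//   patch.num_sources = 1;
+//   patch.sources[0].id   = srcDevicePortId;
+//   patch.sources[0].role = AUDIO_PORT_ROLE_SOURCE;
+//   patch.sources[0].type = AUDIO_PORT_TYPE_DEVICE;
+//   patch.num_sinks = 1;
+//   patch.sinks[0].id   = sinkDevicePortId;
+//   patch.sinks[0].role = AUDIO_PORT_ROLE_SINK;
+//   patch.sinks[0].type = AUDIO_PORT_TYPE_DEVICE;
+//   audio_patch_handle_t handle = AUDIO_PATCH_HANDLE_NONE;
+//   status_t res = createAudioPatch(&patch, &handle, callerUid);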
+
+status_t AudioPolicyManager::releaseAudioPatch(audio_patch_handle_t handle,
+                                                  uid_t uid)
+{
+    ALOGV("releaseAudioPatch() patch %d", handle);
+
+    ssize_t index = mAudioPatches.indexOfKey(handle);
+
+    if (index < 0) {
+        return BAD_VALUE;
+    }
+    sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+    ALOGV("releaseAudioPatch() mUidCached %d patchDesc->mUid %d uid %d",
+          mUidCached, patchDesc->mUid, uid);
+    if (patchDesc->mUid != mUidCached && uid != patchDesc->mUid) {
+        return INVALID_OPERATION;
+    }
+
+    struct audio_patch *patch = &patchDesc->mPatch;
+    patchDesc->mUid = mUidCached;
+    if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(patch->sources[0].id);
+        if (outputDesc == NULL) {
+            ALOGV("releaseAudioPatch() output not found for id %d", patch->sources[0].id);
+            return BAD_VALUE;
+        }
+
+        setOutputDevice(outputDesc,
+                        getNewOutputDevice(outputDesc, true /*fromCache*/),
+                       true,
+                       0,
+                       NULL);
+    } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
+        if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
+            sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(patch->sinks[0].id);
+            if (inputDesc == NULL) {
+                ALOGV("releaseAudioPatch() input not found for id %d", patch->sinks[0].id);
+                return BAD_VALUE;
+            }
+            setInputDevice(inputDesc->mIoHandle,
+                           getNewInputDevice(inputDesc->mIoHandle),
+                           true,
+                           NULL);
+        } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
+            audio_patch_handle_t afPatchHandle = patchDesc->mAfPatchHandle;
+            status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+            ALOGV("releaseAudioPatch() patch panel returned %d patchHandle %d",
+                                                              status, patchDesc->mAfPatchHandle);
+            removeAudioPatch(patchDesc->mHandle);
+            nextAudioPortGeneration();
+            mpClientInterface->onAudioPatchListUpdate();
+        } else {
+            return BAD_VALUE;
+        }
+    } else {
+        return BAD_VALUE;
+    }
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::listAudioPatches(unsigned int *num_patches,
+                                              struct audio_patch *patches,
+                                              unsigned int *generation)
+{
+    if (generation == NULL) {
+        return BAD_VALUE;
+    }
+    *generation = curAudioPortGeneration();
+    return mAudioPatches.listAudioPatches(num_patches, patches);
+}
+
+status_t AudioPolicyManager::setAudioPortConfig(const struct audio_port_config *config)
+{
+    ALOGV("setAudioPortConfig()");
+
+    if (config == NULL) {
+        return BAD_VALUE;
+    }
+    ALOGV("setAudioPortConfig() on port handle %d", config->id);
+    // Only support gain configuration for now
+    if (config->config_mask != AUDIO_PORT_CONFIG_GAIN) {
+        return INVALID_OPERATION;
+    }
+
+    sp<AudioPortConfig> audioPortConfig;
+    if (config->type == AUDIO_PORT_TYPE_MIX) {
+        if (config->role == AUDIO_PORT_ROLE_SOURCE) {
+            sp<SwAudioOutputDescriptor> outputDesc = mOutputs.getOutputFromId(config->id);
+            if (outputDesc == NULL) {
+                return BAD_VALUE;
+            }
+            ALOG_ASSERT(!outputDesc->isDuplicated(),
+                        "setAudioPortConfig() called on duplicated output %d",
+                        outputDesc->mIoHandle);
+            audioPortConfig = outputDesc;
+        } else if (config->role == AUDIO_PORT_ROLE_SINK) {
+            sp<AudioInputDescriptor> inputDesc = mInputs.getInputFromId(config->id);
+            if (inputDesc == NULL) {
+                return BAD_VALUE;
+            }
+            audioPortConfig = inputDesc;
+        } else {
+            return BAD_VALUE;
+        }
+    } else if (config->type == AUDIO_PORT_TYPE_DEVICE) {
+        sp<DeviceDescriptor> deviceDesc;
+        if (config->role == AUDIO_PORT_ROLE_SOURCE) {
+            deviceDesc = mAvailableInputDevices.getDeviceFromId(config->id);
+        } else if (config->role == AUDIO_PORT_ROLE_SINK) {
+            deviceDesc = mAvailableOutputDevices.getDeviceFromId(config->id);
+        } else {
+            return BAD_VALUE;
+        }
+        if (deviceDesc == NULL) {
+            return BAD_VALUE;
+        }
+        audioPortConfig = deviceDesc;
+    } else {
+        return BAD_VALUE;
+    }
+
+    struct audio_port_config backupConfig;
+    status_t status = audioPortConfig->applyAudioPortConfig(config, &backupConfig);
+    if (status == NO_ERROR) {
+        struct audio_port_config newConfig;
+        audioPortConfig->toAudioPortConfig(&newConfig, config);
+        status = mpClientInterface->setAudioPortConfig(&newConfig, 0);
+    }
+    if (status != NO_ERROR) {
+        audioPortConfig->applyAudioPortConfig(&backupConfig);
+    }
+
+    return status;
+}
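+
+// Illustrative sketch (hypothetical port id and gain values): only gain changes are
+// accepted above, so a caller fills the config as follows.
+//
+//   struct audio_port_config config = {};
+//   config.id          = devicePortId;
+//   config.role        = AUDIO_PORT_ROLE_SINK;      // an output device
+//   config.type        = AUDIO_PORT_TYPE_DEVICE;
+//   config.config_mask = AUDIO_PORT_CONFIG_GAIN;
+//   config.gain.mode   = AUDIO_GAIN_MODE_JOINT;
+//   config.gain.values[0] = -600;                   // gain in millibels
+//   status_t res = setAudioPortConfig(&config);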
+
+void AudioPolicyManager::releaseResourcesForUid(uid_t uid)
+{
+    clearAudioPatches(uid);
+    clearSessionRoutes(uid);
+}
+
+void AudioPolicyManager::clearAudioPatches(uid_t uid)
+{
+    for (ssize_t i = (ssize_t)mAudioPatches.size() - 1; i >= 0; i--)  {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(i);
+        if (patchDesc->mUid == uid) {
+            releaseAudioPatch(mAudioPatches.keyAt(i), uid);
+        }
+    }
+}
+
+
+void AudioPolicyManager::checkStrategyRoute(routing_strategy strategy,
+                                            audio_io_handle_t outputToSkip)
+{
+    audio_devices_t device = getDeviceForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(device, mOutputs);
+    for (size_t j = 0; j < mOutputs.size(); j++) {
+        if (mOutputs.keyAt(j) == outputToSkip) {
+            continue;
+        }
+        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueAt(j);
+        if (!isStrategyActive(outputDesc, (routing_strategy)strategy)) {
+            continue;
+        }
+        // If the default device for this strategy is on another output mix,
+        // invalidate all tracks in this strategy to force re connection.
+        // Otherwise select new device on the output mix.
+        if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
+            for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+                if (stream == AUDIO_STREAM_PATCH) {
+                    continue;
+                }
+                if (getStrategy((audio_stream_type_t)stream) == strategy) {
+                    mpClientInterface->invalidateStream((audio_stream_type_t)stream);
+                }
+            }
+        } else {
+            audio_devices_t newDevice = getNewOutputDevice(outputDesc, false /*fromCache*/);
+            setOutputDevice(outputDesc, newDevice, false);
+        }
+    }
+}
+
+void AudioPolicyManager::clearSessionRoutes(uid_t uid)
+{
+    // remove output routes associated with this uid
+    SortedVector<routing_strategy> affectedStrategies;
+    for (ssize_t i = (ssize_t)mOutputRoutes.size() - 1; i >= 0; i--)  {
+        sp<SessionRoute> route = mOutputRoutes.valueAt(i);
+        if (route->mUid == uid) {
+            mOutputRoutes.removeItemsAt(i);
+            if (route->mDeviceDescriptor != 0) {
+                affectedStrategies.add(getStrategy(route->mStreamType));
+            }
+        }
+    }
+    // reroute outputs if necessary
+    for (size_t i = 0; i < affectedStrategies.size(); i++) {
+        checkStrategyRoute(affectedStrategies[i], AUDIO_IO_HANDLE_NONE);
+    }
+
+    // remove input routes associated with this uid
+    SortedVector<audio_source_t> affectedSources;
+    for (ssize_t i = (ssize_t)mInputRoutes.size() - 1; i >= 0; i--)  {
+        sp<SessionRoute> route = mInputRoutes.valueAt(i);
+        if (route->mUid == uid) {
+            mInputRoutes.removeItemsAt(i);
+            if (route->mDeviceDescriptor != 0) {
+                affectedSources.add(route->mSource);
+            }
+        }
+    }
+    // reroute inputs if necessary
+    SortedVector<audio_io_handle_t> inputsToClose;
+    for (size_t i = 0; i < mInputs.size(); i++) {
+        sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(i);
+        if (affectedSources.indexOf(inputDesc->mInputSource) >= 0) {
+            inputsToClose.add(inputDesc->mIoHandle);
+        }
+    }
+    for (size_t i = 0; i < inputsToClose.size(); i++) {
+        closeInput(inputsToClose[i]);
+    }
+}
+
+
+status_t AudioPolicyManager::acquireSoundTriggerSession(audio_session_t *session,
+                                       audio_io_handle_t *ioHandle,
+                                       audio_devices_t *device)
+{
+    *session = (audio_session_t)mpClientInterface->newAudioUniqueId();
+    *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
+    *device = getDeviceAndMixForInputSource(AUDIO_SOURCE_HOTWORD);
+
+    return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
+}
+
+status_t AudioPolicyManager::startAudioSource(const struct audio_port_config *source __unused,
+                                       const audio_attributes_t *attributes __unused,
+                                       audio_io_handle_t *handle __unused)
+{
+    return INVALID_OPERATION;
+}
+
+status_t AudioPolicyManager::stopAudioSource(audio_io_handle_t handle __unused)
+{
+    return INVALID_OPERATION;
+}
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManager
+// ----------------------------------------------------------------------------
+uint32_t AudioPolicyManager::nextAudioPortGeneration()
+{
+    return android_atomic_inc(&mAudioPortGeneration);
+}
+
+AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
+    :
+#ifdef AUDIO_POLICY_TEST
+    Thread(false),
+#endif //AUDIO_POLICY_TEST
+    mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
+    mA2dpSuspended(false),
+    mSpeakerDrcEnabled(false),
+    mAudioPortGeneration(1),
+    mBeaconMuteRefCount(0),
+    mBeaconPlayingRefCount(0),
+    mBeaconMuted(false)
+{
+    audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
+    if (!engineInstance) {
+        ALOGE("%s:  Could not get an instance of policy engine", __FUNCTION__);
+        return;
+    }
+    // Retrieve the Policy Manager Interface
+    mEngine = engineInstance->queryInterface<AudioPolicyManagerInterface>();
+    if (mEngine == NULL) {
+        ALOGE("%s: Failed to get Policy Engine Interface", __FUNCTION__);
+        return;
+    }
+    mEngine->setObserver(this);
+    status_t status = mEngine->initCheck();
+    ALOG_ASSERT(status == NO_ERROR, "Policy engine not initialized(err=%d)", status);
+
+    mUidCached = getuid();
+    mpClientInterface = clientInterface;
+
+    mDefaultOutputDevice = new DeviceDescriptor(AUDIO_DEVICE_OUT_SPEAKER);
+    if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE,
+                 mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
+                 mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
+        if (ConfigParsingUtils::loadAudioPolicyConfig(AUDIO_POLICY_CONFIG_FILE,
+                                  mHwModules, mAvailableInputDevices, mAvailableOutputDevices,
+                                  mDefaultOutputDevice, mSpeakerDrcEnabled) != NO_ERROR) {
+            ALOGE("could not load audio policy configuration file, setting defaults");
+            defaultAudioPolicyConfig();
+        }
+    }
+    // mAvailableOutputDevices and mAvailableInputDevices now contain all attached devices
+
+    // must be done after reading the policy (since it is conditioned by Speaker DRC enabling)
+    mEngine->initializeVolumeCurves(mSpeakerDrcEnabled);
+
+    // open all output streams needed to access attached devices
+    audio_devices_t outputDeviceTypes = mAvailableOutputDevices.types();
+    audio_devices_t inputDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+    for (size_t i = 0; i < mHwModules.size(); i++) {
+        mHwModules[i]->mHandle = mpClientInterface->loadHwModule(mHwModules[i]->mName);
+        if (mHwModules[i]->mHandle == 0) {
+            ALOGW("could not open HW module %s", mHwModules[i]->mName);
+            continue;
+        }
+        // open all output streams needed to access attached devices
+        // except for direct output streams that are only opened when they are actually
+        // required by an app.
+        // This also validates mAvailableOutputDevices list
+        for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
+        {
+            const sp<IOProfile> outProfile = mHwModules[i]->mOutputProfiles[j];
+
+            if (outProfile->mSupportedDevices.isEmpty()) {
+                ALOGW("Output profile contains no device on module %s", mHwModules[i]->mName);
+                continue;
+            }
+
+            if ((outProfile->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+                continue;
+            }
+            audio_devices_t profileType = outProfile->mSupportedDevices.types();
+            if ((profileType & mDefaultOutputDevice->type()) != AUDIO_DEVICE_NONE) {
+                profileType = mDefaultOutputDevice->type();
+            } else {
+                // choose the first device present in mSupportedDevices that is also
+                // part of outputDeviceTypes
+                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
+                    profileType = outProfile->mSupportedDevices[k]->type();
+                    if ((profileType & outputDeviceTypes) != 0) {
+                        break;
+                    }
+                }
+            }
+            if ((profileType & outputDeviceTypes) == 0) {
+                continue;
+            }
+            sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
+                                                                                 mpClientInterface);
+
+            outputDesc->mDevice = profileType;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = outputDesc->mSamplingRate;
+            config.channel_mask = outputDesc->mChannelMask;
+            config.format = outputDesc->mFormat;
+            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openOutput(outProfile->getModuleHandle(),
+                                                            &output,
+                                                            &config,
+                                                            &outputDesc->mDevice,
+                                                            String8(""),
+                                                            &outputDesc->mLatency,
+                                                            outputDesc->mFlags);
+
+            if (status != NO_ERROR) {
+                ALOGW("Cannot open output stream for device %08x on hw module %s",
+                      outputDesc->mDevice,
+                      mHwModules[i]->mName);
+            } else {
+                outputDesc->mSamplingRate = config.sample_rate;
+                outputDesc->mChannelMask = config.channel_mask;
+                outputDesc->mFormat = config.format;
+
+                for (size_t k = 0; k  < outProfile->mSupportedDevices.size(); k++) {
+                    audio_devices_t type = outProfile->mSupportedDevices[k]->type();
+                    ssize_t index =
+                            mAvailableOutputDevices.indexOf(outProfile->mSupportedDevices[k]);
+                    // give a valid ID to an attached device once confirmed it is reachable
+                    if (index >= 0 && !mAvailableOutputDevices[index]->isAttached()) {
+                        mAvailableOutputDevices[index]->attach(mHwModules[i]);
+                    }
+                }
+                if (mPrimaryOutput == 0 &&
+                        (outProfile->mFlags & AUDIO_OUTPUT_FLAG_PRIMARY)) {
+                    mPrimaryOutput = outputDesc;
+                }
+                addOutput(output, outputDesc);
+                setOutputDevice(outputDesc,
+                                outputDesc->mDevice,
+                                true);
+            }
+        }
+        // open input streams needed to access attached devices to validate
+        // mAvailableInputDevices list
+        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
+        {
+            const sp<IOProfile> inProfile = mHwModules[i]->mInputProfiles[j];
+
+            if (inProfile->mSupportedDevices.isEmpty()) {
+                ALOGW("Input profile contains no device on module %s", mHwModules[i]->mName);
+                continue;
+            }
+            // choose the first device present in mSupportedDevices that is also
+            // part of inputDeviceTypes
+            audio_devices_t profileType = AUDIO_DEVICE_NONE;
+            for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
+                profileType = inProfile->mSupportedDevices[k]->type();
+                if (profileType & inputDeviceTypes) {
+                    break;
+                }
+            }
+            if ((profileType & inputDeviceTypes) == 0) {
+                continue;
+            }
+            sp<AudioInputDescriptor> inputDesc = new AudioInputDescriptor(inProfile);
+
+            inputDesc->mInputSource = AUDIO_SOURCE_MIC;
+            inputDesc->mDevice = profileType;
+
+            // find the address
+            DeviceVector inputDevices = mAvailableInputDevices.getDevicesFromType(profileType);
+            // the inputDevices vector should contain exactly one device, but don't crash if it doesn't
+            String8 address = inputDevices.size() > 0 ? inputDevices.itemAt(0)->mAddress
+                    : String8("");
+            ALOGV("  for input device 0x%x using address %s", profileType, address.string());
+            ALOGE_IF(inputDevices.size() == 0, "Input device list is empty!");
+
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = inputDesc->mSamplingRate;
+            config.channel_mask = inputDesc->mChannelMask;
+            config.format = inputDesc->mFormat;
+            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openInput(inProfile->getModuleHandle(),
+                                                           &input,
+                                                           &config,
+                                                           &inputDesc->mDevice,
+                                                           address,
+                                                           AUDIO_SOURCE_MIC,
+                                                           AUDIO_INPUT_FLAG_NONE);
+
+            if (status == NO_ERROR) {
+                for (size_t k = 0; k  < inProfile->mSupportedDevices.size(); k++) {
+                    audio_devices_t type = inProfile->mSupportedDevices[k]->type();
+                    ssize_t index =
+                            mAvailableInputDevices.indexOf(inProfile->mSupportedDevices[k]);
+                    // give a valid ID to an attached device once confirmed it is reachable
+                    if (index >= 0) {
+                        sp<DeviceDescriptor> devDesc = mAvailableInputDevices[index];
+                        if (!devDesc->isAttached()) {
+                            devDesc->attach(mHwModules[i]);
+                            devDesc->importAudioPort(inProfile);
+                        }
+                    }
+                }
+                mpClientInterface->closeInput(input);
+            } else {
+                ALOGW("Cannot open input stream for device %08x on hw module %s",
+                      inputDesc->mDevice,
+                      mHwModules[i]->mName);
+            }
+        }
+    }
+    // remove devices that could not be attached to a module and register the remaining ones with the engine
+    for (size_t i = 0; i  < mAvailableOutputDevices.size();) {
+        if (!mAvailableOutputDevices[i]->isAttached()) {
+            ALOGW("Input device %08x unreachable", mAvailableOutputDevices[i]->type());
+            mAvailableOutputDevices.remove(mAvailableOutputDevices[i]);
+            continue;
+        }
+        // The device is now validated and can be appended to the available devices of the engine
+        mEngine->setDeviceConnectionState(mAvailableOutputDevices[i],
+                                          AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
+        i++;
+    }
+    for (size_t i = 0; i  < mAvailableInputDevices.size();) {
+        if (!mAvailableInputDevices[i]->isAttached()) {
+            ALOGW("Input device %08x unreachable", mAvailableInputDevices[i]->type());
+            mAvailableInputDevices.remove(mAvailableInputDevices[i]);
+            continue;
+        }
+        // The device is now validated and can be appended to the available devices of the engine
+        mEngine->setDeviceConnectionState(mAvailableInputDevices[i],
+                                          AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
+        i++;
+    }
+    // make sure default device is reachable
+    if (mAvailableOutputDevices.indexOf(mDefaultOutputDevice) < 0) {
+        ALOGE("Default device %08x is unreachable", mDefaultOutputDevice->type());
+    }
+
+    ALOGE_IF((mPrimaryOutput == 0), "Failed to open primary output");
+
+    updateDevicesAndOutputs();
+
+#ifdef AUDIO_POLICY_TEST
+    if (mPrimaryOutput != 0) {
+        AudioParameter outputCmd = AudioParameter();
+        outputCmd.addInt(String8("set_id"), 0);
+        mpClientInterface->setParameters(mPrimaryOutput->mIoHandle, outputCmd.toString());
+
+        mTestDevice = AUDIO_DEVICE_OUT_SPEAKER;
+        mTestSamplingRate = 44100;
+        mTestFormat = AUDIO_FORMAT_PCM_16_BIT;
+        mTestChannels =  AUDIO_CHANNEL_OUT_STEREO;
+        mTestLatencyMs = 0;
+        mCurOutput = 0;
+        mDirectOutput = false;
+        for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+            mTestOutputs[i] = 0;
+        }
+
+        const size_t SIZE = 256;
+        char buffer[SIZE];
+        snprintf(buffer, SIZE, "AudioPolicyManagerTest");
+        run(buffer, ANDROID_PRIORITY_AUDIO);
+    }
+#endif //AUDIO_POLICY_TEST
+}
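+// A minimal sketch of how this manager is typically instantiated (assuming the usual
+// factory entry points exposed by this module; exact names may differ):
+//
+//     extern "C" AudioPolicyInterface* createAudioPolicyManager(
+//             AudioPolicyClientInterface *clientInterface)
+//     {
+//         return new AudioPolicyManager(clientInterface);
+//     }
+//
+//     extern "C" void destroyAudioPolicyManager(AudioPolicyInterface *interface)
+//     {
+//         delete interface;
+//     }
+//
+// All device discovery happens in the constructor above, so initCheck() below only
+// needs to verify that a primary output could be opened.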
+
+AudioPolicyManager::~AudioPolicyManager()
+{
+#ifdef AUDIO_POLICY_TEST
+    exit();
+#endif //AUDIO_POLICY_TEST
+   for (size_t i = 0; i < mOutputs.size(); i++) {
+        mpClientInterface->closeOutput(mOutputs.keyAt(i));
+   }
+   for (size_t i = 0; i < mInputs.size(); i++) {
+        mpClientInterface->closeInput(mInputs.keyAt(i));
+   }
+   mAvailableOutputDevices.clear();
+   mAvailableInputDevices.clear();
+   mOutputs.clear();
+   mInputs.clear();
+   mHwModules.clear();
+}
+
+status_t AudioPolicyManager::initCheck()
+{
+    return hasPrimaryOutput() ? NO_ERROR : NO_INIT;
+}
+
+#ifdef AUDIO_POLICY_TEST
+bool AudioPolicyManager::threadLoop()
+{
+    ALOGV("entering threadLoop()");
+    while (!exitPending())
+    {
+        String8 command;
+        int valueInt;
+        String8 value;
+
+        Mutex::Autolock _l(mLock);
+        mWaitWorkCV.waitRelative(mLock, milliseconds(50));
+
+        command = mpClientInterface->getParameters(0, String8("test_cmd_policy"));
+        AudioParameter param = AudioParameter(command);
+
+        if (param.getInt(String8("test_cmd_policy"), valueInt) == NO_ERROR &&
+            valueInt != 0) {
+            ALOGV("Test command %s received", command.string());
+            String8 target;
+            if (param.get(String8("target"), target) != NO_ERROR) {
+                target = "Manager";
+            }
+            if (param.getInt(String8("test_cmd_policy_output"), valueInt) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_output"));
+                mCurOutput = valueInt;
+            }
+            if (param.get(String8("test_cmd_policy_direct"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_direct"));
+                if (value == "false") {
+                    mDirectOutput = false;
+                } else if (value == "true") {
+                    mDirectOutput = true;
+                }
+            }
+            if (param.getInt(String8("test_cmd_policy_input"), valueInt) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_input"));
+                mTestInput = valueInt;
+            }
+
+            if (param.get(String8("test_cmd_policy_format"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_format"));
+                int format = AUDIO_FORMAT_INVALID;
+                if (value == "PCM 16 bits") {
+                    format = AUDIO_FORMAT_PCM_16_BIT;
+                } else if (value == "PCM 8 bits") {
+                    format = AUDIO_FORMAT_PCM_8_BIT;
+                } else if (value == "Compressed MP3") {
+                    format = AUDIO_FORMAT_MP3;
+                }
+                if (format != AUDIO_FORMAT_INVALID) {
+                    if (target == "Manager") {
+                        mTestFormat = format;
+                    } else if (mTestOutputs[mCurOutput] != 0) {
+                        AudioParameter outputParam = AudioParameter();
+                        outputParam.addInt(String8("format"), format);
+                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+                    }
+                }
+            }
+            if (param.get(String8("test_cmd_policy_channels"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_channels"));
+                int channels = 0;
+
+                if (value == "Channels Stereo") {
+                    channels =  AUDIO_CHANNEL_OUT_STEREO;
+                } else if (value == "Channels Mono") {
+                    channels =  AUDIO_CHANNEL_OUT_MONO;
+                }
+                if (channels != 0) {
+                    if (target == "Manager") {
+                        mTestChannels = channels;
+                    } else if (mTestOutputs[mCurOutput] != 0) {
+                        AudioParameter outputParam = AudioParameter();
+                        outputParam.addInt(String8("channels"), channels);
+                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+                    }
+                }
+            }
+            if (param.getInt(String8("test_cmd_policy_sampleRate"), valueInt) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_sampleRate"));
+                if (valueInt >= 0 && valueInt <= 96000) {
+                    int samplingRate = valueInt;
+                    if (target == "Manager") {
+                        mTestSamplingRate = samplingRate;
+                    } else if (mTestOutputs[mCurOutput] != 0) {
+                        AudioParameter outputParam = AudioParameter();
+                        outputParam.addInt(String8("sampling_rate"), samplingRate);
+                        mpClientInterface->setParameters(mTestOutputs[mCurOutput], outputParam.toString());
+                    }
+                }
+            }
+
+            if (param.get(String8("test_cmd_policy_reopen"), value) == NO_ERROR) {
+                param.remove(String8("test_cmd_policy_reopen"));
+
+                mpClientInterface->closeOutput(mPrimaryOutput->mIoHandle);
+
+                audio_module_handle_t moduleHandle = mPrimaryOutput->getModuleHandle();
+
+                removeOutput(mPrimaryOutput->mIoHandle);
+                sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(NULL,
+                                                                                     mpClientInterface);
+                outputDesc->mDevice = AUDIO_DEVICE_OUT_SPEAKER;
+                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+                config.sample_rate = outputDesc->mSamplingRate;
+                config.channel_mask = outputDesc->mChannelMask;
+                config.format = outputDesc->mFormat;
+                audio_io_handle_t handle;
+                status_t status = mpClientInterface->openOutput(moduleHandle,
+                                                                &handle,
+                                                                &config,
+                                                                &outputDesc->mDevice,
+                                                                String8(""),
+                                                                &outputDesc->mLatency,
+                                                                outputDesc->mFlags);
+                if (status != NO_ERROR) {
+                    ALOGE("Failed to reopen hardware output stream, "
+                        "samplingRate: %d, format %d, channels %d",
+                        outputDesc->mSamplingRate, outputDesc->mFormat, outputDesc->mChannelMask);
+                } else {
+                    outputDesc->mSamplingRate = config.sample_rate;
+                    outputDesc->mChannelMask = config.channel_mask;
+                    outputDesc->mFormat = config.format;
+                    mPrimaryOutput = outputDesc;
+                    AudioParameter outputCmd = AudioParameter();
+                    outputCmd.addInt(String8("set_id"), 0);
+                    mpClientInterface->setParameters(handle, outputCmd.toString());
+                    addOutput(handle, outputDesc);
+                }
+            }
+
+
+            mpClientInterface->setParameters(0, String8("test_cmd_policy="));
+        }
+    }
+    return false;
+}
+
+void AudioPolicyManager::exit()
+{
+    {
+        AutoMutex _l(mLock);
+        requestExit();
+        mWaitWorkCV.signal();
+    }
+    requestExitAndWait();
+}
+
+int AudioPolicyManager::testOutputIndex(audio_io_handle_t output)
+{
+    for (int i = 0; i < NUM_TEST_OUTPUTS; i++) {
+        if (output == mTestOutputs[i]) return i;
+    }
+    return 0;
+}
+#endif //AUDIO_POLICY_TEST
+
+// ---
+
+void AudioPolicyManager::addOutput(audio_io_handle_t output, sp<SwAudioOutputDescriptor> outputDesc)
+{
+    outputDesc->setIoHandle(output);
+    mOutputs.add(output, outputDesc);
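+    // signal that the port/patch configuration changed; the generation counter is
+    // presumably reported with listAudioPorts()/listAudioPatches() so clients can
+    // discard stale snapshots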
+    nextAudioPortGeneration();
+}
+
+void AudioPolicyManager::removeOutput(audio_io_handle_t output)
+{
+    mOutputs.removeItem(output);
+}
+
+void AudioPolicyManager::addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc)
+{
+    inputDesc->setIoHandle(input);
+    mInputs.add(input, inputDesc);
+    nextAudioPortGeneration();
+}
+
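+// Only relevant for devices where device_distinguishes_on_address() is true
+// (presumably devices such as AUDIO_DEVICE_OUT_REMOTE_SUBMIX): the output matches
+// only if its profile supports the exact <device, address> pair.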
+void AudioPolicyManager::findIoHandlesByAddress(sp<SwAudioOutputDescriptor> desc /*in*/,
+        const audio_devices_t device /*in*/,
+        const String8 address /*in*/,
+        SortedVector<audio_io_handle_t>& outputs /*out*/) {
+    sp<DeviceDescriptor> devDesc =
+        desc->mProfile->mSupportedDevices.getDevice(device, address);
+    if (devDesc != 0) {
+        ALOGV("findIoHandlesByAddress(): adding opened output %d on same address %s",
+              desc->mIoHandle, address.string());
+        outputs.add(desc->mIoHandle);
+    }
+}
+
+status_t AudioPolicyManager::checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
+                                                   audio_policy_dev_state_t state,
+                                                   SortedVector<audio_io_handle_t>& outputs,
+                                                   const String8 address)
+{
+    audio_devices_t device = devDesc->type();
+    sp<SwAudioOutputDescriptor> desc;
+
+    if (audio_device_is_digital(device)) {
+        // erase all current sample rates, formats and channel masks
+        devDesc->clearCapabilities();
+    }
+
+    if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
+        // first list already open outputs that can be routed to this device
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated() && (desc->supportedDevices() & device)) {
+                if (!device_distinguishes_on_address(device)) {
+                    ALOGV("checkOutputsForDevice(): adding opened output %d", mOutputs.keyAt(i));
+                    outputs.add(mOutputs.keyAt(i));
+                } else {
+                    ALOGV("  checking address match due to device 0x%x", device);
+                    findIoHandlesByAddress(desc, device, address, outputs);
+                }
+            }
+        }
+        // then look for output profiles that can be routed to this device
+        SortedVector< sp<IOProfile> > profiles;
+        for (size_t i = 0; i < mHwModules.size(); i++)
+        {
+            if (mHwModules[i]->mHandle == 0) {
+                continue;
+            }
+            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
+            {
+                sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+                if (profile->mSupportedDevices.types() & device) {
+                    if (!device_distinguishes_on_address(device) ||
+                            address == profile->mSupportedDevices[0]->mAddress) {
+                        profiles.add(profile);
+                        ALOGV("checkOutputsForDevice(): adding profile %zu from module %zu", j, i);
+                    }
+                }
+            }
+        }
+
+        ALOGV("  found %d profiles, %d outputs", profiles.size(), outputs.size());
+
+        if (profiles.isEmpty() && outputs.isEmpty()) {
+            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
+            return BAD_VALUE;
+        }
+
+        // open outputs for matching profiles if needed. Direct outputs are also opened to
+        // query for dynamic parameters and will be closed later by setDeviceConnectionState()
+        for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
+            sp<IOProfile> profile = profiles[profile_index];
+
+            // nothing to do if one output is already opened for this profile
+            size_t j;
+            for (j = 0; j < outputs.size(); j++) {
+                desc = mOutputs.valueFor(outputs.itemAt(j));
+                if (!desc->isDuplicated() && desc->mProfile == profile) {
+                    // matching profile: save the sample rates, format and channel masks supported
+                    // by the profile in our device descriptor
+                    if (audio_device_is_digital(device)) {
+                        devDesc->importAudioPort(profile);
+                    }
+                    break;
+                }
+            }
+            if (j != outputs.size()) {
+                continue;
+            }
+
+            ALOGV("opening output for device %08x with params %s profile %p",
+                                                      device, address.string(), profile.get());
+            desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
+            desc->mDevice = device;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = desc->mSamplingRate;
+            config.channel_mask = desc->mChannelMask;
+            config.format = desc->mFormat;
+            config.offload_info.sample_rate = desc->mSamplingRate;
+            config.offload_info.channel_mask = desc->mChannelMask;
+            config.offload_info.format = desc->mFormat;
+            audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openOutput(profile->getModuleHandle(),
+                                                            &output,
+                                                            &config,
+                                                            &desc->mDevice,
+                                                            address,
+                                                            &desc->mLatency,
+                                                            desc->mFlags);
+            if (status == NO_ERROR) {
+                desc->mSamplingRate = config.sample_rate;
+                desc->mChannelMask = config.channel_mask;
+                desc->mFormat = config.format;
+
+                // Here is where the out_set_parameters() for card & device gets called
+                if (!address.isEmpty()) {
+                    char *param = audio_device_address_to_parameter(device, address);
+                    mpClientInterface->setParameters(output, String8(param));
+                    free(param);
+                }
+
+                // Here is where we step through and resolve any "dynamic" fields
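+                // (the reply is assumed to follow the usual HAL key/value convention,
+                //  e.g. "sup_sampling_rates=44100|48000"; the load*() helpers parse
+                //  the '|'-separated list after the '=')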
+                String8 reply;
+                char *value;
+                if (profile->mSamplingRates[0] == 0) {
+                    reply = mpClientInterface->getParameters(output,
+                                            String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
+                    ALOGV("checkOutputsForDevice() supported sampling rates %s",
+                              reply.string());
+                    value = strpbrk((char *)reply.string(), "=");
+                    if (value != NULL) {
+                        profile->loadSamplingRates(value + 1);
+                    }
+                }
+                if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
+                    reply = mpClientInterface->getParameters(output,
+                                                   String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
+                    ALOGV("checkOutputsForDevice() supported formats %s",
+                              reply.string());
+                    value = strpbrk((char *)reply.string(), "=");
+                    if (value != NULL) {
+                        profile->loadFormats(value + 1);
+                    }
+                }
+                if (profile->mChannelMasks[0] == 0) {
+                    reply = mpClientInterface->getParameters(output,
+                                                  String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
+                    ALOGV("checkOutputsForDevice() supported channel masks %s",
+                              reply.string());
+                    value = strpbrk((char *)reply.string(), "=");
+                    if (value != NULL) {
+                        profile->loadOutChannels(value + 1);
+                    }
+                }
+                if (((profile->mSamplingRates[0] == 0) &&
+                         (profile->mSamplingRates.size() < 2)) ||
+                     ((profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) &&
+                         (profile->mFormats.size() < 2)) ||
+                     ((profile->mChannelMasks[0] == 0) &&
+                         (profile->mChannelMasks.size() < 2))) {
+                    ALOGW("checkOutputsForDevice() missing param");
+                    mpClientInterface->closeOutput(output);
+                    output = AUDIO_IO_HANDLE_NONE;
+                } else if (profile->mSamplingRates[0] == 0 || profile->mFormats[0] == 0 ||
+                            profile->mChannelMasks[0] == 0) {
+                    mpClientInterface->closeOutput(output);
+                    config.sample_rate = profile->pickSamplingRate();
+                    config.channel_mask = profile->pickChannelMask();
+                    config.format = profile->pickFormat();
+                    config.offload_info.sample_rate = config.sample_rate;
+                    config.offload_info.channel_mask = config.channel_mask;
+                    config.offload_info.format = config.format;
+                    status = mpClientInterface->openOutput(profile->getModuleHandle(),
+                                                           &output,
+                                                           &config,
+                                                           &desc->mDevice,
+                                                           address,
+                                                           &desc->mLatency,
+                                                           desc->mFlags);
+                    if (status == NO_ERROR) {
+                        desc->mSamplingRate = config.sample_rate;
+                        desc->mChannelMask = config.channel_mask;
+                        desc->mFormat = config.format;
+                    } else {
+                        output = AUDIO_IO_HANDLE_NONE;
+                    }
+                }
+
+                if (output != AUDIO_IO_HANDLE_NONE) {
+                    addOutput(output, desc);
+                    if (device_distinguishes_on_address(device) && address != "0") {
+                        sp<AudioPolicyMix> policyMix;
+                        if (mPolicyMixes.getAudioPolicyMix(address, policyMix) != NO_ERROR) {
+                            ALOGE("checkOutputsForDevice() cannot find policy for address %s",
+                                  address.string());
+                        } else {
+                            policyMix->setOutput(desc);
+                            desc->mPolicyMix = policyMix->getMix();
+                        }
+
+                    } else if (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
+                                    hasPrimaryOutput()) {
+                        // no duplicated output for direct outputs and
+                        // outputs used by dynamic policy mixes
+                        audio_io_handle_t duplicatedOutput = AUDIO_IO_HANDLE_NONE;
+
+                        // set initial stream volume for device
+                        applyStreamVolumes(desc, device, 0, true);
+
+                        //TODO: configure audio effect output stage here
+
+                        // open a duplicating output thread for the new output and the primary output
+                        duplicatedOutput =
+                                mpClientInterface->openDuplicateOutput(output,
+                                                                       mPrimaryOutput->mIoHandle);
+                        if (duplicatedOutput != AUDIO_IO_HANDLE_NONE) {
+                            // add duplicated output descriptor
+                            sp<SwAudioOutputDescriptor> dupOutputDesc =
+                                    new SwAudioOutputDescriptor(NULL, mpClientInterface);
+                            dupOutputDesc->mOutput1 = mPrimaryOutput;
+                            dupOutputDesc->mOutput2 = desc;
+                            dupOutputDesc->mSamplingRate = desc->mSamplingRate;
+                            dupOutputDesc->mFormat = desc->mFormat;
+                            dupOutputDesc->mChannelMask = desc->mChannelMask;
+                            dupOutputDesc->mLatency = desc->mLatency;
+                            addOutput(duplicatedOutput, dupOutputDesc);
+                            applyStreamVolumes(dupOutputDesc, device, 0, true);
+                        } else {
+                            ALOGW("checkOutputsForDevice() could not open dup output for %d and %d",
+                                    mPrimaryOutput->mIoHandle, output);
+                            mpClientInterface->closeOutput(output);
+                            removeOutput(output);
+                            nextAudioPortGeneration();
+                            output = AUDIO_IO_HANDLE_NONE;
+                        }
+                    }
+                }
+            } else {
+                output = AUDIO_IO_HANDLE_NONE;
+            }
+            if (output == AUDIO_IO_HANDLE_NONE) {
+                ALOGW("checkOutputsForDevice() could not open output for device %x", device);
+                profiles.removeAt(profile_index);
+                profile_index--;
+            } else {
+                outputs.add(output);
+                // Load digital format info only for digital devices
+                if (audio_device_is_digital(device)) {
+                    devDesc->importAudioPort(profile);
+                }
+
+                if (device_distinguishes_on_address(device)) {
+                    ALOGV("checkOutputsForDevice(): setOutputDevice(dev=0x%x, addr=%s)",
+                            device, address.string());
+                    setOutputDevice(desc, device, true/*force*/, 0/*delay*/,
+                            NULL/*patch handle*/, address.string());
+                }
+                ALOGV("checkOutputsForDevice(): adding output %d", output);
+            }
+        }
+
+        if (profiles.isEmpty()) {
+            ALOGW("checkOutputsForDevice(): No output available for device %04x", device);
+            return BAD_VALUE;
+        }
+    } else { // Disconnect
+        // check if one opened output is not needed any more after disconnecting one device
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            desc = mOutputs.valueAt(i);
+            if (!desc->isDuplicated()) {
+                // exact match on device
+                if (device_distinguishes_on_address(device) &&
+                        (desc->supportedDevices() == device)) {
+                    findIoHandlesByAddress(desc, device, address, outputs);
+                } else if (!(desc->supportedDevices() & mAvailableOutputDevices.types())) {
+                    ALOGV("checkOutputsForDevice(): disconnecting adding output %d",
+                            mOutputs.keyAt(i));
+                    outputs.add(mOutputs.keyAt(i));
+                }
+            }
+        }
+        // Clear any profiles associated with the disconnected device.
+        for (size_t i = 0; i < mHwModules.size(); i++)
+        {
+            if (mHwModules[i]->mHandle == 0) {
+                continue;
+            }
+            for (size_t j = 0; j < mHwModules[i]->mOutputProfiles.size(); j++)
+            {
+                sp<IOProfile> profile = mHwModules[i]->mOutputProfiles[j];
+                if (profile->mSupportedDevices.types() & device) {
+                    ALOGV("checkOutputsForDevice(): "
+                            "clearing direct output profile %zu on module %zu", j, i);
+                    if (profile->mSamplingRates[0] == 0) {
+                        profile->mSamplingRates.clear();
+                        profile->mSamplingRates.add(0);
+                    }
+                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
+                        profile->mFormats.clear();
+                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
+                    }
+                    if (profile->mChannelMasks[0] == 0) {
+                        profile->mChannelMasks.clear();
+                        profile->mChannelMasks.add(0);
+                    }
+                }
+            }
+        }
+    }
+    return NO_ERROR;
+}
+
+status_t AudioPolicyManager::checkInputsForDevice(const sp<DeviceDescriptor> devDesc,
+                                                  audio_policy_dev_state_t state,
+                                                  SortedVector<audio_io_handle_t>& inputs,
+                                                  const String8 address)
+{
+    audio_devices_t device = devDesc->type();
+    sp<AudioInputDescriptor> desc;
+
+    if (audio_device_is_digital(device)) {
+        // erase all current sample rates, formats and channel masks
+        devDesc->clearCapabilities();
+    }
+
+    if (state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
+        // first list already open inputs that can be routed to this device
+        for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+            desc = mInputs.valueAt(input_index);
+            if (desc->mProfile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
+                ALOGV("checkInputsForDevice(): adding opened input %d", mInputs.keyAt(input_index));
+                inputs.add(mInputs.keyAt(input_index));
+            }
+        }
+
+        // then look for input profiles that can be routed to this device
+        SortedVector< sp<IOProfile> > profiles;
+        for (size_t module_idx = 0; module_idx < mHwModules.size(); module_idx++)
+        {
+            if (mHwModules[module_idx]->mHandle == 0) {
+                continue;
+            }
+            for (size_t profile_index = 0;
+                 profile_index < mHwModules[module_idx]->mInputProfiles.size();
+                 profile_index++)
+            {
+                sp<IOProfile> profile = mHwModules[module_idx]->mInputProfiles[profile_index];
+
+                if (profile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
+                    if (!device_distinguishes_on_address(device) ||
+                            address == profile->mSupportedDevices[0]->mAddress) {
+                        profiles.add(profile);
+                        ALOGV("checkInputsForDevice(): adding profile %zu from module %zu",
+                              profile_index, module_idx);
+                    }
+                }
+            }
+        }
+
+        if (profiles.isEmpty() && inputs.isEmpty()) {
+            ALOGW("checkInputsForDevice(): No input available for device 0x%X", device);
+            return BAD_VALUE;
+        }
+
+        // open inputs for matching profiles if needed. Direct inputs are also opened to
+        // query for dynamic parameters and will be closed later by setDeviceConnectionState()
+        for (ssize_t profile_index = 0; profile_index < (ssize_t)profiles.size(); profile_index++) {
+
+            sp<IOProfile> profile = profiles[profile_index];
+            // nothing to do if one input is already opened for this profile
+            size_t input_index;
+            for (input_index = 0; input_index < mInputs.size(); input_index++) {
+                desc = mInputs.valueAt(input_index);
+                if (desc->mProfile == profile) {
+                    if (audio_device_is_digital(device)) {
+                        devDesc->importAudioPort(profile);
+                    }
+                    break;
+                }
+            }
+            if (input_index != mInputs.size()) {
+                continue;
+            }
+
+            ALOGV("opening input for device 0x%X with params %s", device, address.string());
+            desc = new AudioInputDescriptor(profile);
+            desc->mDevice = device;
+            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+            config.sample_rate = desc->mSamplingRate;
+            config.channel_mask = desc->mChannelMask;
+            config.format = desc->mFormat;
+            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
+            status_t status = mpClientInterface->openInput(profile->getModuleHandle(),
+                                                           &input,
+                                                           &config,
+                                                           &desc->mDevice,
+                                                           address,
+                                                           AUDIO_SOURCE_MIC,
+                                                           AUDIO_INPUT_FLAG_NONE /*FIXME*/);
+
+            if (status == NO_ERROR) {
+                desc->mSamplingRate = config.sample_rate;
+                desc->mChannelMask = config.channel_mask;
+                desc->mFormat = config.format;
+
+                if (!address.isEmpty()) {
+                    char *param = audio_device_address_to_parameter(device, address);
+                    mpClientInterface->setParameters(input, String8(param));
+                    free(param);
+                }
+
+                // Here is where we step through and resolve any "dynamic" fields
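+                // (same key/value reply convention as assumed in
+                //  checkOutputsForDevice() above)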
+                String8 reply;
+                char *value;
+                if (profile->mSamplingRates[0] == 0) {
+                    reply = mpClientInterface->getParameters(input,
+                                            String8(AUDIO_PARAMETER_STREAM_SUP_SAMPLING_RATES));
+                    ALOGV("checkInputsForDevice() direct input sup sampling rates %s",
+                              reply.string());
+                    value = strpbrk((char *)reply.string(), "=");
+                    if (value != NULL) {
+                        profile->loadSamplingRates(value + 1);
+                    }
+                }
+                if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
+                    reply = mpClientInterface->getParameters(input,
+                                                   String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
+                    ALOGV("checkInputsForDevice() direct input sup formats %s", reply.string());
+                    value = strpbrk((char *)reply.string(), "=");
+                    if (value != NULL) {
+                        profile->loadFormats(value + 1);
+                    }
+                }
+                if (profile->mChannelMasks[0] == 0) {
+                    reply = mpClientInterface->getParameters(input,
+                                                  String8(AUDIO_PARAMETER_STREAM_SUP_CHANNELS));
+                    ALOGV("checkInputsForDevice() direct input sup channel masks %s",
+                              reply.string());
+                    value = strpbrk((char *)reply.string(), "=");
+                    if (value != NULL) {
+                        profile->loadInChannels(value + 1);
+                    }
+                }
+                if (((profile->mSamplingRates[0] == 0) && (profile->mSamplingRates.size() < 2)) ||
+                     ((profile->mFormats[0] == 0) && (profile->mFormats.size() < 2)) ||
+                     ((profile->mChannelMasks[0] == 0) && (profile->mChannelMasks.size() < 2))) {
+                    ALOGW("checkInputsForDevice() direct input missing param");
+                    mpClientInterface->closeInput(input);
+                    input = AUDIO_IO_HANDLE_NONE;
+                }
+
+                if (input != AUDIO_IO_HANDLE_NONE) {
+                    addInput(input, desc);
+                }
+            } // endif status == NO_ERROR
+
+            if (input == AUDIO_IO_HANDLE_NONE) {
+                ALOGW("checkInputsForDevice() could not open input for device 0x%X", device);
+                profiles.removeAt(profile_index);
+                profile_index--;
+            } else {
+                inputs.add(input);
+                if (audio_device_is_digital(device)) {
+                    devDesc->importAudioPort(profile);
+                }
+                ALOGV("checkInputsForDevice(): adding input %d", input);
+            }
+        } // end scan profiles
+
+        if (profiles.isEmpty()) {
+            ALOGW("checkInputsForDevice(): No input available for device 0x%X", device);
+            return BAD_VALUE;
+        }
+    } else {
+        // Disconnect
+        // check if one opened input is not needed any more after disconnecting one device
+        for (size_t input_index = 0; input_index < mInputs.size(); input_index++) {
+            desc = mInputs.valueAt(input_index);
+            if (!(desc->mProfile->mSupportedDevices.types() & mAvailableInputDevices.types() &
+                    ~AUDIO_DEVICE_BIT_IN)) {
+                ALOGV("checkInputsForDevice(): disconnecting adding input %d",
+                      mInputs.keyAt(input_index));
+                inputs.add(mInputs.keyAt(input_index));
+            }
+        }
+        // Clear any profiles associated with the disconnected device.
+        for (size_t module_index = 0; module_index < mHwModules.size(); module_index++) {
+            if (mHwModules[module_index]->mHandle == 0) {
+                continue;
+            }
+            for (size_t profile_index = 0;
+                 profile_index < mHwModules[module_index]->mInputProfiles.size();
+                 profile_index++) {
+                sp<IOProfile> profile = mHwModules[module_index]->mInputProfiles[profile_index];
+                if (profile->mSupportedDevices.types() & (device & ~AUDIO_DEVICE_BIT_IN)) {
+                    ALOGV("checkInputsForDevice(): clearing direct input profile %zu on module %zu",
+                          profile_index, module_index);
+                    if (profile->mSamplingRates[0] == 0) {
+                        profile->mSamplingRates.clear();
+                        profile->mSamplingRates.add(0);
+                    }
+                    if (profile->mFormats[0] == AUDIO_FORMAT_DEFAULT) {
+                        profile->mFormats.clear();
+                        profile->mFormats.add(AUDIO_FORMAT_DEFAULT);
+                    }
+                    if (profile->mChannelMasks[0] == 0) {
+                        profile->mChannelMasks.clear();
+                        profile->mChannelMasks.add(0);
+                    }
+                }
+            }
+        }
+    } // end disconnect
+
+    return NO_ERROR;
+}
+
+
+void AudioPolicyManager::closeOutput(audio_io_handle_t output)
+{
+    ALOGV("closeOutput(%d)", output);
+
+    sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
+    if (outputDesc == NULL) {
+        ALOGW("closeOutput() unknown output %d", output);
+        return;
+    }
+    mPolicyMixes.closeOutput(outputDesc);
+
+    // look for duplicated outputs connected to the output being removed.
+    for (size_t i = 0; i < mOutputs.size(); i++) {
+        sp<SwAudioOutputDescriptor> dupOutputDesc = mOutputs.valueAt(i);
+        if (dupOutputDesc->isDuplicated() &&
+                (dupOutputDesc->mOutput1 == outputDesc ||
+                dupOutputDesc->mOutput2 == outputDesc)) {
+            sp<AudioOutputDescriptor> outputDesc2;
+            if (dupOutputDesc->mOutput1 == outputDesc) {
+                outputDesc2 = dupOutputDesc->mOutput2;
+            } else {
+                outputDesc2 = dupOutputDesc->mOutput1;
+            }
+            // As all active tracks on duplicated output will be deleted,
+            // and as they were also referenced on the other output, the reference
+            // count for their stream type must be adjusted accordingly on
+            // the other output.
+            for (int j = 0; j < AUDIO_STREAM_CNT; j++) {
+                int refCount = dupOutputDesc->mRefCount[j];
+                outputDesc2->changeRefCount((audio_stream_type_t)j, -refCount);
+            }
+            audio_io_handle_t duplicatedOutput = mOutputs.keyAt(i);
+            ALOGV("closeOutput() closing also duplicated output %d", duplicatedOutput);
+
+            mpClientInterface->closeOutput(duplicatedOutput);
+            removeOutput(duplicatedOutput);
+        }
+    }
+
+    nextAudioPortGeneration();
+
+    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+    if (index >= 0) {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        mAudioPatches.removeItemsAt(index);
+        mpClientInterface->onAudioPatchListUpdate();
+    }
+
+    AudioParameter param;
+    param.add(String8("closing"), String8("true"));
+    mpClientInterface->setParameters(output, param.toString());
+
+    mpClientInterface->closeOutput(output);
+    removeOutput(output);
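+    // keep a snapshot of the open outputs so checkOutputForStrategy() can compare
+    // routing before and after a device or policy change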
+    mPreviousOutputs = mOutputs;
+}
+
+void AudioPolicyManager::closeInput(audio_io_handle_t input)
+{
+    ALOGV("closeInput(%d)", input);
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+    if (inputDesc == NULL) {
+        ALOGW("closeInput() unknown input %d", input);
+        return;
+    }
+
+    nextAudioPortGeneration();
+
+    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+    if (index >= 0) {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+        status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+        mAudioPatches.removeItemsAt(index);
+        mpClientInterface->onAudioPatchListUpdate();
+    }
+
+    mpClientInterface->closeInput(input);
+    mInputs.removeItem(input);
+}
+
+SortedVector<audio_io_handle_t> AudioPolicyManager::getOutputsForDevice(
+                                                                audio_devices_t device,
+                                                                SwAudioOutputCollection openOutputs)
+{
+    SortedVector<audio_io_handle_t> outputs;
+
+    ALOGVV("getOutputsForDevice() device %04x", device);
+    for (size_t i = 0; i < openOutputs.size(); i++) {
+        ALOGVV("output %d isDuplicated=%d device=%04x",
+                i, openOutputs.valueAt(i)->isDuplicated(),
+                openOutputs.valueAt(i)->supportedDevices());
+        if ((device & openOutputs.valueAt(i)->supportedDevices()) == device) {
+            ALOGVV("getOutputsForDevice() found output %d", openOutputs.keyAt(i));
+            outputs.add(openOutputs.keyAt(i));
+        }
+    }
+    return outputs;
+}
+
+bool AudioPolicyManager::vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
+                                      SortedVector<audio_io_handle_t>& outputs2)
+{
+    if (outputs1.size() != outputs2.size()) {
+        return false;
+    }
+    for (size_t i = 0; i < outputs1.size(); i++) {
+        if (outputs1[i] != outputs2[i]) {
+            return false;
+        }
+    }
+    return true;
+}
+
+void AudioPolicyManager::checkOutputForStrategy(routing_strategy strategy)
+{
+    audio_devices_t oldDevice = getDeviceForStrategy(strategy, true /*fromCache*/);
+    audio_devices_t newDevice = getDeviceForStrategy(strategy, false /*fromCache*/);
+    SortedVector<audio_io_handle_t> srcOutputs = getOutputsForDevice(oldDevice, mPreviousOutputs);
+    SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevice(newDevice, mOutputs);
+
+    // also take into account external policy-related changes: add all outputs which are
+    // associated with policies in the "before" and "after" output vectors
+    ALOGVV("checkOutputForStrategy(): policy related outputs");
+    for (size_t i = 0 ; i < mPreviousOutputs.size() ; i++) {
+        const sp<SwAudioOutputDescriptor> desc = mPreviousOutputs.valueAt(i);
+        if (desc != 0 && desc->mPolicyMix != NULL) {
+            srcOutputs.add(desc->mIoHandle);
+            ALOGVV(" previous outputs: adding %d", desc->mIoHandle);
+        }
+    }
+    for (size_t i = 0 ; i < mOutputs.size() ; i++) {
+        const sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+        if (desc != 0 && desc->mPolicyMix != NULL) {
+            dstOutputs.add(desc->mIoHandle);
+            ALOGVV(" new outputs: adding %d", desc->mIoHandle);
+        }
+    }
+
+    if (!vectorsEqual(srcOutputs,dstOutputs)) {
+        ALOGV("checkOutputForStrategy() strategy %d, moving from output %d to output %d",
+              strategy, srcOutputs[0], dstOutputs[0]);
+        // mute strategy while moving tracks from one output to another
+        for (size_t i = 0; i < srcOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(srcOutputs[i]);
+            if (isStrategyActive(desc, strategy)) {
+                setStrategyMute(strategy, true, desc);
+                setStrategyMute(strategy, false, desc, MUTE_TIME_MS, newDevice);
+            }
+        }
+
+        // Move effects associated to this strategy from previous output to new output
+        if (strategy == STRATEGY_MEDIA) {
+            audio_io_handle_t fxOutput = selectOutputForEffects(dstOutputs);
+            SortedVector<audio_io_handle_t> moved;
+            for (size_t i = 0; i < mEffects.size(); i++) {
+                sp<EffectDescriptor> effectDesc = mEffects.valueAt(i);
+                if (effectDesc->mSession == AUDIO_SESSION_OUTPUT_MIX &&
+                        effectDesc->mIo != fxOutput) {
+                    if (moved.indexOf(effectDesc->mIo) < 0) {
+                        ALOGV("checkOutputForStrategy() moving effect %d to output %d",
+                              mEffects.keyAt(i), fxOutput);
+                        mpClientInterface->moveEffects(AUDIO_SESSION_OUTPUT_MIX, effectDesc->mIo,
+                                                       fxOutput);
+                        moved.add(effectDesc->mIo);
+                    }
+                    effectDesc->mIo = fxOutput;
+                }
+            }
+        }
+        // Move tracks associated to this strategy from previous output to new output
+        for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
+            if (i == AUDIO_STREAM_PATCH) {
+                continue;
+            }
+            if (getStrategy((audio_stream_type_t)i) == strategy) {
+                mpClientInterface->invalidateStream((audio_stream_type_t)i);
+            }
+        }
+    }
+}
+
+void AudioPolicyManager::checkOutputForAllStrategies()
+{
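+    // ordering note: when system sounds are enforced, ENFORCED_AUDIBLE is evaluated
+    // before PHONE (and after it otherwise), mirroring the priority order used in
+    // getNewOutputDevice() below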
+    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+        checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
+    checkOutputForStrategy(STRATEGY_PHONE);
+    if (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) != AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)
+        checkOutputForStrategy(STRATEGY_ENFORCED_AUDIBLE);
+    checkOutputForStrategy(STRATEGY_SONIFICATION);
+    checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
+    checkOutputForStrategy(STRATEGY_ACCESSIBILITY);
+    checkOutputForStrategy(STRATEGY_MEDIA);
+    checkOutputForStrategy(STRATEGY_DTMF);
+    checkOutputForStrategy(STRATEGY_REROUTING);
+}
+
+void AudioPolicyManager::checkA2dpSuspend()
+{
+    audio_io_handle_t a2dpOutput = mOutputs.getA2dpOutput();
+    if (a2dpOutput == 0) {
+        mA2dpSuspended = false;
+        return;
+    }
+
+    bool isScoConnected =
+            ((mAvailableInputDevices.types() & AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET &
+                    ~AUDIO_DEVICE_BIT_IN) != 0) ||
+            ((mAvailableOutputDevices.types() & AUDIO_DEVICE_OUT_ALL_SCO) != 0);
+    // suspend A2DP output if:
+    //      (not already suspended) &&
+    //      ((SCO device is connected &&
+    //        (forced usage for communication or for record is SCO)) ||
+    //       (phone state is ringing or in call))
+    //
+    // restore A2DP output if:
+    //      (already suspended) &&
+    //      ((SCO device is NOT connected ||
+    //        (forced usage is NOT SCO for communication && NOT SCO for record)) &&
+    //       (phone state is NOT ringing && NOT in call))
+    //
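+    // Example (assuming a SCO headset is connected and FORCE_BT_SCO is set for
+    // communication): an active A2DP output is suspended for the duration of the
+    // call and restored once the phone leaves the IN_CALL/RINGTONE states and the
+    // force setting is cleared.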
+    if (mA2dpSuspended) {
+        if ((!isScoConnected ||
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) != AUDIO_POLICY_FORCE_BT_SCO) &&
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) != AUDIO_POLICY_FORCE_BT_SCO))) &&
+             ((mEngine->getPhoneState() != AUDIO_MODE_IN_CALL) &&
+              (mEngine->getPhoneState() != AUDIO_MODE_RINGTONE))) {
+
+            mpClientInterface->restoreOutput(a2dpOutput);
+            mA2dpSuspended = false;
+        }
+    } else {
+        if ((isScoConnected &&
+             ((mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION) == AUDIO_POLICY_FORCE_BT_SCO) ||
+              (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_RECORD) == AUDIO_POLICY_FORCE_BT_SCO))) ||
+             ((mEngine->getPhoneState() == AUDIO_MODE_IN_CALL) ||
+              (mEngine->getPhoneState() == AUDIO_MODE_RINGTONE))) {
+
+            mpClientInterface->suspendOutput(a2dpOutput);
+            mA2dpSuspended = true;
+        }
+    }
+}
+
+audio_devices_t AudioPolicyManager::getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+                                                       bool fromCache)
+{
+    audio_devices_t device = AUDIO_DEVICE_NONE;
+
+    ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+    if (index >= 0) {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+        if (patchDesc->mUid != mUidCached) {
+            ALOGV("getNewOutputDevice() device %08x forced by patch %d",
+                  outputDesc->device(), outputDesc->mPatchHandle);
+            return outputDesc->device();
+        }
+    }
+
+    // check the following by order of priority to request a routing change if necessary:
+    // 1: the strategy enforced audible is active and enforced on the output:
+    //      use device for strategy enforced audible
+    // 2: we are in call or the strategy phone is active on the output:
+    //      use device for strategy phone
+    // 3: the strategy for enforced audible is active but not enforced on the output:
+    //      use the device for strategy enforced audible
+    // 4: the strategy accessibility is active on the output:
+    //      use device for strategy accessibility
+    // 5: the strategy sonification is active on the output:
+    //      use device for strategy sonification
+    // 6: the strategy "respectful" sonification is active on the output:
+    //      use device for strategy "respectful" sonification
+    // 7: the strategy media is active on the output:
+    //      use device for strategy media
+    // 8: the strategy DTMF is active on the output:
+    //      use device for strategy DTMF
+    // 9: the strategy for beacon, a.k.a. "transmitted through speaker" is active on the output:
+    //      use device for strategy t-t-s
+    if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE) &&
+        mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED) {
+        device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
+    } else if (isInCall() ||
+                    isStrategyActive(outputDesc, STRATEGY_PHONE)) {
+        device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
+        device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
+        device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION)) {
+        device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
+        device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
+        device = getDeviceForStrategy(STRATEGY_MEDIA, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_DTMF)) {
+        device = getDeviceForStrategy(STRATEGY_DTMF, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_TRANSMITTED_THROUGH_SPEAKER)) {
+        device = getDeviceForStrategy(STRATEGY_TRANSMITTED_THROUGH_SPEAKER, fromCache);
+    } else if (isStrategyActive(outputDesc, STRATEGY_REROUTING)) {
+        device = getDeviceForStrategy(STRATEGY_REROUTING, fromCache);
+    }
+
+    ALOGV("getNewOutputDevice() selected device %x", device);
+    return device;
+}
+
+audio_devices_t AudioPolicyManager::getNewInputDevice(audio_io_handle_t input)
+{
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+
+    ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+    if (index >= 0) {
+        sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+        if (patchDesc->mUid != mUidCached) {
+            ALOGV("getNewInputDevice() device %08x forced by patch %d",
+                  inputDesc->mDevice, inputDesc->mPatchHandle);
+            return inputDesc->mDevice;
+        }
+    }
+
+    audio_devices_t device = getDeviceAndMixForInputSource(inputDesc->mInputSource);
+
+    return device;
+}
+
+uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
+    return (uint32_t)getStrategy(stream);
+}
+
+audio_devices_t AudioPolicyManager::getDevicesForStream(audio_stream_type_t stream) {
+    // By checking the range of stream before calling getStrategy, we avoid
+    // getStrategy's behavior for invalid streams.  getStrategy would log an ALOGE
+    // and then return STRATEGY_MEDIA, but we want to return the empty set.
+    if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
+        return AUDIO_DEVICE_NONE;
+    }
+    audio_devices_t devices;
+    routing_strategy strategy = getStrategy(stream);
+    devices = getDeviceForStrategy(strategy, true /*fromCache*/);
+    SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(devices, mOutputs);
+    for (size_t i = 0; i < outputs.size(); i++) {
+        sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+        if (isStrategyActive(outputDesc, strategy)) {
+            devices = outputDesc->device();
+            break;
+        }
+    }
+
+    /* Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
+       and doesn't really need to. */
+    if (devices & AUDIO_DEVICE_OUT_SPEAKER_SAFE) {
+        devices |= AUDIO_DEVICE_OUT_SPEAKER;
+        devices &= ~AUDIO_DEVICE_OUT_SPEAKER_SAFE;
+    }
+
+    return devices;
+}
+
+routing_strategy AudioPolicyManager::getStrategy(audio_stream_type_t stream) const
+{
+    ALOG_ASSERT(stream != AUDIO_STREAM_PATCH, "getStrategy() called for AUDIO_STREAM_PATCH");
+    return mEngine->getStrategyForStream(stream);
+}
+
+uint32_t AudioPolicyManager::getStrategyForAttr(const audio_attributes_t *attr) {
+    // flags to strategy mapping
+    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        return (uint32_t) STRATEGY_TRANSMITTED_THROUGH_SPEAKER;
+    }
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return (uint32_t) STRATEGY_ENFORCED_AUDIBLE;
+    }
+    // usage to strategy mapping
+    return static_cast<uint32_t>(mEngine->getStrategyForUsage(attr->usage));
+}
+
+void AudioPolicyManager::handleNotificationRoutingForStream(audio_stream_type_t stream) {
+    switch(stream) {
+    case AUDIO_STREAM_MUSIC:
+        checkOutputForStrategy(STRATEGY_SONIFICATION_RESPECTFUL);
+        updateDevicesAndOutputs();
+        break;
+    default:
+        break;
+    }
+}
+
+uint32_t AudioPolicyManager::handleEventForBeacon(int event) {
+    switch(event) {
+    case STARTING_OUTPUT:
+        mBeaconMuteRefCount++;
+        break;
+    case STOPPING_OUTPUT:
+        if (mBeaconMuteRefCount > 0) {
+            mBeaconMuteRefCount--;
+        }
+        break;
+    case STARTING_BEACON:
+        mBeaconPlayingRefCount++;
+        break;
+    case STOPPING_BEACON:
+        if (mBeaconPlayingRefCount > 0) {
+            mBeaconPlayingRefCount--;
+        }
+        break;
+    }
+
+    if (mBeaconMuteRefCount > 0) {
+        // any playback causes beacon to be muted
+        return setBeaconMute(true);
+    } else {
+        // no other playback: unmute when beacon starts playing, mute when it stops
+        return setBeaconMute(mBeaconPlayingRefCount == 0);
+    }
+}
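+
+// Illustrative call sequence (example only; the order of events is hypothetical): with the
+// ref counts above, a beacon that starts while other playback is active stays muted until
+// that playback stops:
+//
+//     handleEventForBeacon(STARTING_OUTPUT);   // mBeaconMuteRefCount = 1 -> beacon muted
+//     handleEventForBeacon(STARTING_BEACON);   // still muted, mBeaconPlayingRefCount = 1
+//     handleEventForBeacon(STOPPING_OUTPUT);   // mBeaconMuteRefCount = 0 -> beacon unmuted
+//     handleEventForBeacon(STOPPING_BEACON);   // beacon stopped -> muted again by setBeaconMute(true)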
+
+uint32_t AudioPolicyManager::setBeaconMute(bool mute) {
+    ALOGV("setBeaconMute(%d) mBeaconMuteRefCount=%d mBeaconPlayingRefCount=%d",
+            mute, mBeaconMuteRefCount, mBeaconPlayingRefCount);
+    // keep track of muted state to avoid repeating mute/unmute operations
+    if (mBeaconMuted != mute) {
+        // mute/unmute AUDIO_STREAM_TTS on all outputs
+        ALOGV("\t muting %d", mute);
+        uint32_t maxLatency = 0;
+        for (size_t i = 0; i < mOutputs.size(); i++) {
+            sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+            setStreamMute(AUDIO_STREAM_TTS, mute/*on*/,
+                    desc,
+                    0 /*delay*/, AUDIO_DEVICE_NONE);
+            const uint32_t latency = desc->latency() * 2;
+            if (latency > maxLatency) {
+                maxLatency = latency;
+            }
+        }
+        mBeaconMuted = mute;
+        return maxLatency;
+    }
+    return 0;
+}
+
+audio_devices_t AudioPolicyManager::getDeviceForStrategy(routing_strategy strategy,
+                                                         bool fromCache)
+{
+    // Routing: check for an explicit route first.
+    // Scan the whole RouteMap; for each entry, convert the stream type to a strategy
+    // (getStrategy(stream)). If that strategy matches the requested strategy (or remaps to it,
+    // see the accessibility case below) and the route is active, return the device from the
+    // route's descriptor.
+    for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
+        sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
+        routing_strategy strat = getStrategy(route->mStreamType);
+        // Special case for accessibility strategy which must follow any strategy it is
+        // currently remapped to
+        bool strategyMatch = (strat == strategy) ||
+                             ((strategy == STRATEGY_ACCESSIBILITY) &&
+                              ((mEngine->getStrategyForUsage(
+                                      AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY) == strat) ||
+                               (strat == STRATEGY_MEDIA)));
+        if (strategyMatch && route->isActive()) {
+            return route->mDeviceDescriptor->type();
+        }
+    }
+
+    if (fromCache) {
+        ALOGVV("getDeviceForStrategy() from cache strategy %d, device %x",
+              strategy, mDeviceForStrategy[strategy]);
+        return mDeviceForStrategy[strategy];
+    }
+    return mEngine->getDeviceForStrategy(strategy);
+}
+
+void AudioPolicyManager::updateDevicesAndOutputs()
+{
+    for (int i = 0; i < NUM_STRATEGIES; i++) {
+        mDeviceForStrategy[i] = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+    }
+    mPreviousOutputs = mOutputs;
+}
+
+uint32_t AudioPolicyManager::checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc,
+                                                       audio_devices_t prevDevice,
+                                                       uint32_t delayMs)
+{
+    // mute/unmute strategies using an incompatible device combination
+    // if muting, wait for the audio in pcm buffer to be drained before proceeding
+    // if unmuting, unmute only after the specified delay
+    if (outputDesc->isDuplicated()) {
+        return 0;
+    }
+
+    uint32_t muteWaitMs = 0;
+    audio_devices_t device = outputDesc->device();
+    bool shouldMute = outputDesc->isActive() && (popcount(device) >= 2);
+
+    for (size_t i = 0; i < NUM_STRATEGIES; i++) {
+        audio_devices_t curDevice = getDeviceForStrategy((routing_strategy)i, false /*fromCache*/);
+        curDevice = curDevice & outputDesc->supportedDevices();
+        bool mute = shouldMute && (curDevice & device) && (curDevice != device);
+        bool doMute = false;
+
+        if (mute && !outputDesc->mStrategyMutedByDevice[i]) {
+            doMute = true;
+            outputDesc->mStrategyMutedByDevice[i] = true;
+        } else if (!mute && outputDesc->mStrategyMutedByDevice[i]){
+            doMute = true;
+            outputDesc->mStrategyMutedByDevice[i] = false;
+        }
+        if (doMute) {
+            for (size_t j = 0; j < mOutputs.size(); j++) {
+                sp<AudioOutputDescriptor> desc = mOutputs.valueAt(j);
+                // skip output if it does not share any device with current output
+                if ((desc->supportedDevices() & outputDesc->supportedDevices())
+                        == AUDIO_DEVICE_NONE) {
+                    continue;
+                }
+                ALOGVV("checkDeviceMuteStrategies() %s strategy %d (curDevice %04x)",
+                      mute ? "muting" : "unmuting", i, curDevice);
+                setStrategyMute((routing_strategy)i, mute, desc, mute ? 0 : delayMs);
+                if (isStrategyActive(desc, (routing_strategy)i)) {
+                    if (mute) {
+                        // FIXME: should not need to double latency if volume could be applied
+                        // immediately by the audioflinger mixer. We must account for the delay
+                        // between now and the next time the audioflinger thread for this output
+                        // will process a buffer (which corresponds to one buffer size,
+                        // usually 1/2 or 1/4 of the latency).
+                        if (muteWaitMs < desc->latency() * 2) {
+                            muteWaitMs = desc->latency() * 2;
+                        }
+                    }
+                }
+            }
+        }
+    }
+
+    // temporary mute output if device selection changes to avoid volume bursts due to
+    // different per device volumes
+    if (outputDesc->isActive() && (device != prevDevice)) {
+        if (muteWaitMs < outputDesc->latency() * 2) {
+            muteWaitMs = outputDesc->latency() * 2;
+        }
+        for (size_t i = 0; i < NUM_STRATEGIES; i++) {
+            if (isStrategyActive(outputDesc, (routing_strategy)i)) {
+                setStrategyMute((routing_strategy)i, true, outputDesc);
+                // schedule the unmute of this temporary mute after twice the mute wait time
+                setStrategyMute((routing_strategy)i, false, outputDesc,
+                                muteWaitMs * 2, device);
+            }
+        }
+    }
+
+    // wait for the PCM output buffers to empty before proceeding with the rest of the command
+    if (muteWaitMs > delayMs) {
+        muteWaitMs -= delayMs;
+        usleep(muteWaitMs * 1000);
+        return muteWaitMs;
+    }
+    return 0;
+}
+
+uint32_t AudioPolicyManager::setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+                                             audio_devices_t device,
+                                             bool force,
+                                             int delayMs,
+                                             audio_patch_handle_t *patchHandle,
+                                             const char* address)
+{
+    ALOGV("setOutputDevice() device %04x delayMs %d", device, delayMs);
+    AudioParameter param;
+    uint32_t muteWaitMs;
+
+    if (outputDesc->isDuplicated()) {
+        muteWaitMs = setOutputDevice(outputDesc->subOutput1(), device, force, delayMs);
+        muteWaitMs += setOutputDevice(outputDesc->subOutput2(), device, force, delayMs);
+        return muteWaitMs;
+    }
+    // no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
+    // output profile
+    if ((device != AUDIO_DEVICE_NONE) &&
+            ((device & outputDesc->supportedDevices()) == 0)) {
+        return 0;
+    }
+
+    // filter devices according to output selected
+    device = (audio_devices_t)(device & outputDesc->supportedDevices());
+
+    audio_devices_t prevDevice = outputDesc->mDevice;
+
+    ALOGV("setOutputDevice() prevDevice 0x%04x", prevDevice);
+
+    if (device != AUDIO_DEVICE_NONE) {
+        outputDesc->mDevice = device;
+    }
+    muteWaitMs = checkDeviceMuteStrategies(outputDesc, prevDevice, delayMs);
+
+    // Do not change the routing if:
+    //      the requested device is AUDIO_DEVICE_NONE
+    //      OR the requested device is the same as current device
+    //  AND force is not specified
+    //  AND the output is connected by a valid audio patch.
+    // Doing this check here allows the caller to call setOutputDevice() unconditionally.
+    if ((device == AUDIO_DEVICE_NONE || device == prevDevice) &&
+        !force &&
+        outputDesc->mPatchHandle != 0) {
+        ALOGV("setOutputDevice() setting same device 0x%04x or null device", device);
+        return muteWaitMs;
+    }
+
+    ALOGV("setOutputDevice() changing device");
+
+    // do the routing
+    if (device == AUDIO_DEVICE_NONE) {
+        resetOutputDevice(outputDesc, delayMs, NULL);
+    } else {
+        DeviceVector deviceList = (address == NULL) ?
+                mAvailableOutputDevices.getDevicesFromType(device)
+                : mAvailableOutputDevices.getDevicesFromTypeAddr(device, String8(address));
+        if (!deviceList.isEmpty()) {
+            struct audio_patch patch;
+            outputDesc->toAudioPortConfig(&patch.sources[0]);
+            patch.num_sources = 1;
+            patch.num_sinks = 0;
+            for (size_t i = 0; i < deviceList.size() && i < AUDIO_PATCH_PORTS_MAX; i++) {
+                deviceList.itemAt(i)->toAudioPortConfig(&patch.sinks[i]);
+                patch.num_sinks++;
+            }
+            ssize_t index;
+            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+                index = mAudioPatches.indexOfKey(*patchHandle);
+            } else {
+                index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+            }
+            sp<AudioPatch> patchDesc;
+            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+            if (index >= 0) {
+                patchDesc = mAudioPatches.valueAt(index);
+                afPatchHandle = patchDesc->mAfPatchHandle;
+            }
+
+            status_t status = mpClientInterface->createAudioPatch(&patch,
+                                                                   &afPatchHandle,
+                                                                   delayMs);
+            ALOGV("setOutputDevice() createAudioPatch returned %d patchHandle %d"
+                    "num_sources %d num_sinks %d",
+                                       status, afPatchHandle, patch.num_sources, patch.num_sinks);
+            if (status == NO_ERROR) {
+                if (index < 0) {
+                    patchDesc = new AudioPatch(&patch, mUidCached);
+                    addAudioPatch(patchDesc->mHandle, patchDesc);
+                } else {
+                    patchDesc->mPatch = patch;
+                }
+                patchDesc->mAfPatchHandle = afPatchHandle;
+                patchDesc->mUid = mUidCached;
+                if (patchHandle) {
+                    *patchHandle = patchDesc->mHandle;
+                }
+                outputDesc->mPatchHandle = patchDesc->mHandle;
+                nextAudioPortGeneration();
+                mpClientInterface->onAudioPatchListUpdate();
+            }
+        }
+
+        // inform all input as well
+        for (size_t i = 0; i < mInputs.size(); i++) {
+            const sp<AudioInputDescriptor>  inputDescriptor = mInputs.valueAt(i);
+            if (!is_virtual_input_device(inputDescriptor->mDevice)) {
+                AudioParameter inputCmd = AudioParameter();
+                ALOGV("%s: inform input %d of device:%d", __func__,
+                      inputDescriptor->mIoHandle, device);
+                inputCmd.addInt(String8(AudioParameter::keyRouting),device);
+                mpClientInterface->setParameters(inputDescriptor->mIoHandle,
+                                                 inputCmd.toString(),
+                                                 delayMs);
+            }
+        }
+    }
+
+    // update stream volumes according to new device
+    applyStreamVolumes(outputDesc, device, delayMs);
+
+    return muteWaitMs;
+}
+
+status_t AudioPolicyManager::resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+                                               int delayMs,
+                                               audio_patch_handle_t *patchHandle)
+{
+    ssize_t index;
+    if (patchHandle) {
+        index = mAudioPatches.indexOfKey(*patchHandle);
+    } else {
+        index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+    }
+    if (index < 0) {
+        return INVALID_OPERATION;
+    }
+    sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+    status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, delayMs);
+    ALOGV("resetOutputDevice() releaseAudioPatch returned %d", status);
+    outputDesc->mPatchHandle = 0;
+    removeAudioPatch(patchDesc->mHandle);
+    nextAudioPortGeneration();
+    mpClientInterface->onAudioPatchListUpdate();
+    return status;
+}
+
+status_t AudioPolicyManager::setInputDevice(audio_io_handle_t input,
+                                            audio_devices_t device,
+                                            bool force,
+                                            audio_patch_handle_t *patchHandle)
+{
+    status_t status = NO_ERROR;
+
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+    if ((device != AUDIO_DEVICE_NONE) && ((device != inputDesc->mDevice) || force)) {
+        inputDesc->mDevice = device;
+
+        DeviceVector deviceList = mAvailableInputDevices.getDevicesFromType(device);
+        if (!deviceList.isEmpty()) {
+            struct audio_patch patch;
+            inputDesc->toAudioPortConfig(&patch.sinks[0]);
+            // AUDIO_SOURCE_HOTWORD is for internal use only:
+            // handled as AUDIO_SOURCE_VOICE_RECOGNITION by the audio HAL
+            if (patch.sinks[0].ext.mix.usecase.source == AUDIO_SOURCE_HOTWORD &&
+                    !inputDesc->mIsSoundTrigger) {
+                patch.sinks[0].ext.mix.usecase.source = AUDIO_SOURCE_VOICE_RECOGNITION;
+            }
+            patch.num_sinks = 1;
+            // only one input device for now
+            deviceList.itemAt(0)->toAudioPortConfig(&patch.sources[0]);
+            patch.num_sources = 1;
+            ssize_t index;
+            if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+                index = mAudioPatches.indexOfKey(*patchHandle);
+            } else {
+                index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+            }
+            sp<AudioPatch> patchDesc;
+            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+            if (index >= 0) {
+                patchDesc = mAudioPatches.valueAt(index);
+                afPatchHandle = patchDesc->mAfPatchHandle;
+            }
+
+            status = mpClientInterface->createAudioPatch(&patch, &afPatchHandle, 0);
+            ALOGV("setInputDevice() createAudioPatch returned %d patchHandle %d",
+                                                                          status, afPatchHandle);
+            if (status == NO_ERROR) {
+                if (index < 0) {
+                    patchDesc = new AudioPatch(&patch, mUidCached);
+                    addAudioPatch(patchDesc->mHandle, patchDesc);
+                } else {
+                    patchDesc->mPatch = patch;
+                }
+                patchDesc->mAfPatchHandle = afPatchHandle;
+                patchDesc->mUid = mUidCached;
+                if (patchHandle) {
+                    *patchHandle = patchDesc->mHandle;
+                }
+                inputDesc->mPatchHandle = patchDesc->mHandle;
+                nextAudioPortGeneration();
+                mpClientInterface->onAudioPatchListUpdate();
+            }
+        }
+    }
+    return status;
+}
+
+status_t AudioPolicyManager::resetInputDevice(audio_io_handle_t input,
+                                              audio_patch_handle_t *patchHandle)
+{
+    sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
+    ssize_t index;
+    if (patchHandle) {
+        index = mAudioPatches.indexOfKey(*patchHandle);
+    } else {
+        index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+    }
+    if (index < 0) {
+        return INVALID_OPERATION;
+    }
+    sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
+    status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
+    ALOGV("resetInputDevice() releaseAudioPatch returned %d", status);
+    inputDesc->mPatchHandle = 0;
+    removeAudioPatch(patchDesc->mHandle);
+    nextAudioPortGeneration();
+    mpClientInterface->onAudioPatchListUpdate();
+    return status;
+}
+
+sp<IOProfile> AudioPolicyManager::getInputProfile(audio_devices_t device,
+                                                  String8 address,
+                                                  uint32_t& samplingRate,
+                                                  audio_format_t& format,
+                                                  audio_channel_mask_t& channelMask,
+                                                  audio_input_flags_t flags)
+{
+    // Choose an input profile based on the requested capture parameters: select the first available
+    // profile supporting all requested parameters.
+    //
+    // TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
+    // the best matching profile, not the first one.
+
+    for (size_t i = 0; i < mHwModules.size(); i++)
+    {
+        if (mHwModules[i]->mHandle == 0) {
+            continue;
+        }
+        for (size_t j = 0; j < mHwModules[i]->mInputProfiles.size(); j++)
+        {
+            sp<IOProfile> profile = mHwModules[i]->mInputProfiles[j];
+            // profile->log();
+            if (profile->isCompatibleProfile(device, address, samplingRate,
+                                             &samplingRate /*updatedSamplingRate*/,
+                                             format,
+                                             &format /*updatedFormat*/,
+                                             channelMask,
+                                             &channelMask /*updatedChannelMask*/,
+                                             (audio_output_flags_t) flags)) {
+
+                return profile;
+            }
+        }
+    }
+    return NULL;
+}
+
+
+audio_devices_t AudioPolicyManager::getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                                  AudioMix **policyMix)
+{
+    audio_devices_t availableDeviceTypes = mAvailableInputDevices.types() & ~AUDIO_DEVICE_BIT_IN;
+    audio_devices_t selectedDeviceFromMix =
+           mPolicyMixes.getDeviceAndMixForInputSource(inputSource, availableDeviceTypes, policyMix);
+
+    if (selectedDeviceFromMix != AUDIO_DEVICE_NONE) {
+        return selectedDeviceFromMix;
+    }
+    return getDeviceForInputSource(inputSource);
+}
+
+audio_devices_t AudioPolicyManager::getDeviceForInputSource(audio_source_t inputSource)
+{
+    for (size_t routeIndex = 0; routeIndex < mInputRoutes.size(); routeIndex++) {
+        sp<SessionRoute> route = mInputRoutes.valueAt(routeIndex);
+        if (inputSource == route->mSource && route->isActive()) {
+            return route->mDeviceDescriptor->type();
+        }
+    }
+
+    return mEngine->getDeviceForInputSource(inputSource);
+}
+
+float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
+                                            int index,
+                                            audio_devices_t device)
+{
+    float volumeDb = mEngine->volIndexToDb(Volume::getDeviceCategory(device), stream, index);
+
+    // if a headset is connected, apply the following rules to ring tones and notifications
+    // to avoid sound level bursts in the user's ears:
+    // - always attenuate ring tone and notification volume by 6dB
+    // - if music is playing, also limit the volume to the current music volume,
+    //   with a floor at -36dB so that the notification remains audible.
+    const routing_strategy stream_strategy = getStrategy(stream);
+    if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
+            AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+            AUDIO_DEVICE_OUT_WIRED_HEADSET |
+            AUDIO_DEVICE_OUT_WIRED_HEADPHONE)) &&
+        ((stream_strategy == STRATEGY_SONIFICATION)
+                || (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
+                || (stream == AUDIO_STREAM_SYSTEM)
+                || ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
+                    (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
+            mStreams.canBeMuted(stream)) {
+        volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
+        // when the phone is ringing, music may have just been paused by the music application,
+        // so behave as if music were still active if the last music track stopped only recently
+        if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
+                mLimitRingtoneVolume) {
+            audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
+            float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
+                    mStreams.valueFor(AUDIO_STREAM_MUSIC).getVolumeIndex(musicDevice),
+                    musicDevice);
+            float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
+                    musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
+            if (volumeDb > minVolDB) {
+                volumeDb = minVolDB;
+                ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDB, musicVolDB);
+            }
+        }
+    }
+
+    return volumeDb;
+}
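+
+// Worked example for the rules above (illustrative; the index-to-dB values are hypothetical):
+// with a wired headset and a notification whose index maps to -10 dB while music plays
+// at -20 dB:
+//     volumeDb = -10 dB + SONIFICATION_HEADSET_VOLUME_FACTOR_DB = -16 dB
+//     minVolDB = max(musicVolDB, SONIFICATION_HEADSET_VOLUME_MIN_DB) = max(-20, -36) = -20 dB
+//     final    = min(volumeDb, minVolDB) = -20 dB
+// i.e. the notification is capped at the music volume, but never pushed below -36 dB.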
+
+status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
+                                                   int index,
+                                                   const sp<AudioOutputDescriptor>& outputDesc,
+                                                   audio_devices_t device,
+                                                   int delayMs,
+                                                   bool force)
+{
+    // do not change actual stream volume if the stream is muted
+    if (outputDesc->mMuteCount[stream] != 0) {
+        ALOGVV("checkAndSetVolume() stream %d muted count %d",
+              stream, outputDesc->mMuteCount[stream]);
+        return NO_ERROR;
+    }
+    audio_policy_forced_cfg_t forceUseForComm =
+            mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION);
+    // do not change the in-call volume if Bluetooth SCO is in use, and vice versa
+    if ((stream == AUDIO_STREAM_VOICE_CALL && forceUseForComm == AUDIO_POLICY_FORCE_BT_SCO) ||
+        (stream == AUDIO_STREAM_BLUETOOTH_SCO && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO)) {
+        ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
+             stream, forceUseForComm);
+        return INVALID_OPERATION;
+    }
+
+    if (device == AUDIO_DEVICE_NONE) {
+        device = outputDesc->device();
+    }
+
+    float volumeDb = computeVolume(stream, index, device);
+    if (outputDesc->isFixedVolume(device)) {
+        volumeDb = 0.0f;
+    }
+
+    outputDesc->setVolume(volumeDb, stream, device, delayMs, force);
+
+    if (stream == AUDIO_STREAM_VOICE_CALL ||
+        stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+        float voiceVolume;
+        // Force voice volume to max for bluetooth SCO as volume is managed by the headset
+        if (stream == AUDIO_STREAM_VOICE_CALL) {
+            voiceVolume = (float)index/(float)mStreams.valueFor(stream).getVolumeIndexMax();
+        } else {
+            voiceVolume = 1.0f;
+        }
+
+        if (voiceVolume != mLastVoiceVolume && outputDesc == mPrimaryOutput) {
+            mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
+            mLastVoiceVolume = voiceVolume;
+        }
+    }
+
+    return NO_ERROR;
+}
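+
+// Example of the voice volume computed above (illustrative; the index range is hypothetical):
+// for AUDIO_STREAM_VOICE_CALL with index 3 and getVolumeIndexMax() == 5,
+//     voiceVolume = 3 / 5.0 = 0.6
+// which is forwarded through setVoiceVolume() only on the primary output and only when it
+// changed; for AUDIO_STREAM_BLUETOOTH_SCO it is pinned to 1.0 because the headset owns the
+// volume.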
+
+void AudioPolicyManager::applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc,
+                                                audio_devices_t device,
+                                                int delayMs,
+                                                bool force)
+{
+    ALOGVV("applyStreamVolumes() for device %08x", device);
+
+    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+        if (stream == AUDIO_STREAM_PATCH) {
+            continue;
+        }
+        checkAndSetVolume((audio_stream_type_t)stream,
+                          mStreams.valueFor((audio_stream_type_t)stream).getVolumeIndex(device),
+                          outputDesc,
+                          device,
+                          delayMs,
+                          force);
+    }
+}
+
+void AudioPolicyManager::setStrategyMute(routing_strategy strategy,
+                                             bool on,
+                                             const sp<AudioOutputDescriptor>& outputDesc,
+                                             int delayMs,
+                                             audio_devices_t device)
+{
+    ALOGVV("setStrategyMute() strategy %d, mute %d, output ID %d",
+           strategy, on, outputDesc->getId());
+    for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
+        if (stream == AUDIO_STREAM_PATCH) {
+            continue;
+        }
+        if (getStrategy((audio_stream_type_t)stream) == strategy) {
+            setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device);
+        }
+    }
+}
+
+void AudioPolicyManager::setStreamMute(audio_stream_type_t stream,
+                                           bool on,
+                                           const sp<AudioOutputDescriptor>& outputDesc,
+                                           int delayMs,
+                                           audio_devices_t device)
+{
+    const StreamDescriptor& streamDesc = mStreams.valueFor(stream);
+    if (device == AUDIO_DEVICE_NONE) {
+        device = outputDesc->device();
+    }
+
+    ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x",
+          stream, on, outputDesc->mMuteCount[stream], device);
+
+    if (on) {
+        if (outputDesc->mMuteCount[stream] == 0) {
+            if (streamDesc.canBeMuted() &&
+                    ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
+                     (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
+                checkAndSetVolume(stream, 0, outputDesc, device, delayMs);
+            }
+        }
+        // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
+        outputDesc->mMuteCount[stream]++;
+    } else {
+        if (outputDesc->mMuteCount[stream] == 0) {
+            ALOGV("setStreamMute() unmuting non muted stream!");
+            return;
+        }
+        if (--outputDesc->mMuteCount[stream] == 0) {
+            checkAndSetVolume(stream,
+                              streamDesc.getVolumeIndex(device),
+                              outputDesc,
+                              device,
+                              delayMs);
+        }
+    }
+}
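+
+// Illustrative nesting example (the outputDesc used here is hypothetical): mute requests are
+// ref-counted per stream, so two mutes need two unmutes before the volume is restored:
+//
+//     setStreamMute(AUDIO_STREAM_RING, true,  outputDesc);  // mMuteCount = 1, volume forced to 0
+//     setStreamMute(AUDIO_STREAM_RING, true,  outputDesc);  // mMuteCount = 2
+//     setStreamMute(AUDIO_STREAM_RING, false, outputDesc);  // mMuteCount = 1, still muted
+//     setStreamMute(AUDIO_STREAM_RING, false, outputDesc);  // mMuteCount = 0, volume restored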
+
+void AudioPolicyManager::handleIncallSonification(audio_stream_type_t stream,
+                                                      bool starting, bool stateChange)
+{
+    if (!hasPrimaryOutput()) {
+        return;
+    }
+
+    // if the stream pertains to the sonification strategy and we are in call, we must
+    // mute the stream if it is low visibility. If it is high visibility, we play an in-call
+    // notification tone instead, and additionally mute the stream when the selected device
+    // would interfere with the device used for the phone strategy.
+    // if stateChange is true, we are called from setPhoneState() and we must mute or unmute as
+    // many times as there are active tracks on the output
+    const routing_strategy stream_strategy = getStrategy(stream);
+    if ((stream_strategy == STRATEGY_SONIFICATION) ||
+            ((stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL))) {
+        sp<SwAudioOutputDescriptor> outputDesc = mPrimaryOutput;
+        ALOGV("handleIncallSonification() stream %d starting %d device %x stateChange %d",
+                stream, starting, outputDesc->mDevice, stateChange);
+        if (outputDesc->mRefCount[stream]) {
+            int muteCount = 1;
+            if (stateChange) {
+                muteCount = outputDesc->mRefCount[stream];
+            }
+            if (audio_is_low_visibility(stream)) {
+                ALOGV("handleIncallSonification() low visibility, muteCount %d", muteCount);
+                for (int i = 0; i < muteCount; i++) {
+                    setStreamMute(stream, starting, mPrimaryOutput);
+                }
+            } else {
+                ALOGV("handleIncallSonification() high visibility");
+                if (outputDesc->device() &
+                        getDeviceForStrategy(STRATEGY_PHONE, true /*fromCache*/)) {
+                    ALOGV("handleIncallSonification() high visibility muted, muteCount %d", muteCount);
+                    for (int i = 0; i < muteCount; i++) {
+                        setStreamMute(stream, starting, mPrimaryOutput);
+                    }
+                }
+                if (starting) {
+                    mpClientInterface->startTone(AUDIO_POLICY_TONE_IN_CALL_NOTIFICATION,
+                                                 AUDIO_STREAM_VOICE_CALL);
+                } else {
+                    mpClientInterface->stopTone();
+                }
+            }
+        }
+    }
+}
+
+void AudioPolicyManager::defaultAudioPolicyConfig(void)
+{
+    sp<HwModule> module;
+    sp<IOProfile> profile;
+    sp<DeviceDescriptor> defaultInputDevice =
+                    new DeviceDescriptor(AUDIO_DEVICE_IN_BUILTIN_MIC);
+    mAvailableOutputDevices.add(mDefaultOutputDevice);
+    mAvailableInputDevices.add(defaultInputDevice);
+
+    module = new HwModule("primary");
+
+    profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SOURCE);
+    profile->attach(module);
+    profile->mSamplingRates.add(44100);
+    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
+    profile->mChannelMasks.add(AUDIO_CHANNEL_OUT_STEREO);
+    profile->mSupportedDevices.add(mDefaultOutputDevice);
+    profile->mFlags = AUDIO_OUTPUT_FLAG_PRIMARY;
+    module->mOutputProfiles.add(profile);
+
+    profile = new IOProfile(String8("primary"), AUDIO_PORT_ROLE_SINK);
+    profile->attach(module);
+    profile->mSamplingRates.add(8000);
+    profile->mFormats.add(AUDIO_FORMAT_PCM_16_BIT);
+    profile->mChannelMasks.add(AUDIO_CHANNEL_IN_MONO);
+    profile->mSupportedDevices.add(defaultInputDevice);
+    module->mInputProfiles.add(profile);
+
+    mHwModules.add(module);
+}
+
+audio_stream_type_t AudioPolicyManager::streamTypefromAttributesInt(const audio_attributes_t *attr)
+{
+    // flags to stream type mapping
+    if ((attr->flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
+        return AUDIO_STREAM_ENFORCED_AUDIBLE;
+    }
+    if ((attr->flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
+        return AUDIO_STREAM_BLUETOOTH_SCO;
+    }
+    if ((attr->flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
+        return AUDIO_STREAM_TTS;
+    }
+
+    // usage to stream type mapping
+    switch (attr->usage) {
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+        return AUDIO_STREAM_MUSIC;
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+        if (isStreamActive(AUDIO_STREAM_ALARM)) {
+            return AUDIO_STREAM_ALARM;
+        }
+        if (isStreamActive(AUDIO_STREAM_RING)) {
+            return AUDIO_STREAM_RING;
+        }
+        if (isInCall()) {
+            return AUDIO_STREAM_VOICE_CALL;
+        }
+        return AUDIO_STREAM_ACCESSIBILITY;
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+        return AUDIO_STREAM_SYSTEM;
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+        return AUDIO_STREAM_VOICE_CALL;
+
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+        return AUDIO_STREAM_DTMF;
+
+    case AUDIO_USAGE_ALARM:
+        return AUDIO_STREAM_ALARM;
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+        return AUDIO_STREAM_RING;
+
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+        return AUDIO_STREAM_NOTIFICATION;
+
+    case AUDIO_USAGE_UNKNOWN:
+    default:
+        return AUDIO_STREAM_MUSIC;
+    }
+}
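+
+// Mapping example (illustrative; the attribute values are hypothetical):
+//
+//     audio_attributes_t attr = {};
+//     attr.usage = AUDIO_USAGE_ALARM;
+//     streamTypefromAttributesInt(&attr);   // -> AUDIO_STREAM_ALARM
+//     attr.flags = AUDIO_FLAG_BEACON;
+//     streamTypefromAttributesInt(&attr);   // -> AUDIO_STREAM_TTS (flags are checked first)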
+
+bool AudioPolicyManager::isValidAttributes(const audio_attributes_t *paa)
+{
+    // has flags that map to a strategy?
+    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
+        return true;
+    }
+
+    // has known usage?
+    switch (paa->usage) {
+    case AUDIO_USAGE_UNKNOWN:
+    case AUDIO_USAGE_MEDIA:
+    case AUDIO_USAGE_VOICE_COMMUNICATION:
+    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+    case AUDIO_USAGE_ALARM:
+    case AUDIO_USAGE_NOTIFICATION:
+    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+    case AUDIO_USAGE_NOTIFICATION_EVENT:
+    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+    case AUDIO_USAGE_GAME:
+    case AUDIO_USAGE_VIRTUAL_SOURCE:
+        break;
+    default:
+        return false;
+    }
+    return true;
+}
+
+bool AudioPolicyManager::isStrategyActive(const sp<AudioOutputDescriptor> outputDesc,
+                                          routing_strategy strategy, uint32_t inPastMs,
+                                          nsecs_t sysTime) const
+{
+    if ((sysTime == 0) && (inPastMs != 0)) {
+        sysTime = systemTime();
+    }
+    for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
+        if (i == AUDIO_STREAM_PATCH) {
+            continue;
+        }
+        if (((getStrategy((audio_stream_type_t)i) == strategy) ||
+                (NUM_STRATEGIES == strategy)) &&
+                outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+audio_policy_forced_cfg_t AudioPolicyManager::getForceUse(audio_policy_force_use_t usage)
+{
+    return mEngine->getForceUse(usage);
+}
+
+bool AudioPolicyManager::isInCall()
+{
+    return isStateInCall(mEngine->getPhoneState());
+}
+
+bool AudioPolicyManager::isStateInCall(int state)
+{
+    return is_state_in_call(state);
+}
+
+}; // namespace android
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
new file mode 100644
index 0000000..bf3ae4a
--- /dev/null
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -0,0 +1,618 @@
+/*
+ * Copyright (C) 2009 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <stdint.h>
+#include <sys/types.h>
+#include <cutils/config_utils.h>
+#include <cutils/misc.h>
+#include <utils/Timers.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/SortedVector.h>
+#include <media/AudioPolicy.h>
+#include "AudioPolicyInterface.h"
+
+#include <AudioPolicyManagerInterface.h>
+#include <AudioPolicyManagerObserver.h>
+#include <AudioGain.h>
+#include <AudioPort.h>
+#include <AudioPatch.h>
+#include <ConfigParsingUtils.h>
+#include <DeviceDescriptor.h>
+#include <IOProfile.h>
+#include <HwModule.h>
+#include <AudioInputDescriptor.h>
+#include <AudioOutputDescriptor.h>
+#include <AudioPolicyMix.h>
+#include <EffectDescriptor.h>
+#include <SoundTriggerSession.h>
+#include <StreamDescriptor.h>
+#include <SessionRoute.h>
+
+namespace android {
+
+// ----------------------------------------------------------------------------
+
+// Attenuation applied to STRATEGY_SONIFICATION streams when a headset is connected: 6dB
+#define SONIFICATION_HEADSET_VOLUME_FACTOR 0.5
+#define SONIFICATION_HEADSET_VOLUME_FACTOR_DB (-6)
+// Min volume for STRATEGY_SONIFICATION streams when limited by music volume: -36dB
+#define SONIFICATION_HEADSET_VOLUME_MIN  0.016
+#define SONIFICATION_HEADSET_VOLUME_MIN_DB  (-36)
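+
+// Note (illustrative): the linear factors above match the dB constants via 20*log10(x):
+//     20*log10(0.5)   ~= -6.0 dB   (SONIFICATION_HEADSET_VOLUME_FACTOR)
+//     20*log10(0.016) ~= -35.9 dB  (SONIFICATION_HEADSET_VOLUME_MIN)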
+
+// Time in milliseconds during which we consider that music is still active after a music
+// track was stopped - see computeVolume()
+#define SONIFICATION_HEADSET_MUSIC_DELAY  5000
+
+// Time in milliseconds during which some streams are muted while the audio path
+// is switched
+#define MUTE_TIME_MS 2000
+
+#define NUM_TEST_OUTPUTS 5
+
+#define NUM_VOL_CURVE_KNEES 2
+
+// Default minimum length allowed for offloading a compressed track
+// Can be overridden by the audio.offload.min.duration.secs property
+#define OFFLOAD_DEFAULT_MIN_DURATION_SECS 60
+
+// ----------------------------------------------------------------------------
+// AudioPolicyManager implements audio policy manager behavior common to all platforms.
+// ----------------------------------------------------------------------------
+
+class AudioPolicyManager : public AudioPolicyInterface, public AudioPolicyManagerObserver
+
+#ifdef AUDIO_POLICY_TEST
+    , public Thread
+#endif //AUDIO_POLICY_TEST
+{
+
+public:
+                AudioPolicyManager(AudioPolicyClientInterface *clientInterface);
+        virtual ~AudioPolicyManager();
+
+        // AudioPolicyInterface
+        virtual status_t setDeviceConnectionState(audio_devices_t device,
+                                                          audio_policy_dev_state_t state,
+                                                          const char *device_address,
+                                                          const char *device_name);
+        virtual audio_policy_dev_state_t getDeviceConnectionState(audio_devices_t device,
+                                                                              const char *device_address);
+        virtual void setPhoneState(audio_mode_t state);
+        virtual void setForceUse(audio_policy_force_use_t usage,
+                                 audio_policy_forced_cfg_t config);
+        virtual audio_policy_forced_cfg_t getForceUse(audio_policy_force_use_t usage);
+
+        virtual void setSystemProperty(const char* property, const char* value);
+        virtual status_t initCheck();
+        virtual audio_io_handle_t getOutput(audio_stream_type_t stream,
+                                            uint32_t samplingRate,
+                                            audio_format_t format,
+                                            audio_channel_mask_t channelMask,
+                                            audio_output_flags_t flags,
+                                            const audio_offload_info_t *offloadInfo);
+        virtual status_t getOutputForAttr(const audio_attributes_t *attr,
+                                          audio_io_handle_t *output,
+                                          audio_session_t session,
+                                          audio_stream_type_t *stream,
+                                          uid_t uid,
+                                          uint32_t samplingRate,
+                                          audio_format_t format,
+                                          audio_channel_mask_t channelMask,
+                                          audio_output_flags_t flags,
+                                          audio_port_handle_t selectedDeviceId,
+                                          const audio_offload_info_t *offloadInfo);
+        virtual status_t startOutput(audio_io_handle_t output,
+                                     audio_stream_type_t stream,
+                                     audio_session_t session);
+        virtual status_t stopOutput(audio_io_handle_t output,
+                                    audio_stream_type_t stream,
+                                    audio_session_t session);
+        virtual void releaseOutput(audio_io_handle_t output,
+                                   audio_stream_type_t stream,
+                                   audio_session_t session);
+        virtual status_t getInputForAttr(const audio_attributes_t *attr,
+                                         audio_io_handle_t *input,
+                                         audio_session_t session,
+                                         uid_t uid,
+                                         uint32_t samplingRate,
+                                         audio_format_t format,
+                                         audio_channel_mask_t channelMask,
+                                         audio_input_flags_t flags,
+                                         audio_port_handle_t selectedDeviceId,
+                                         input_type_t *inputType);
+
+        // indicates to the audio policy manager that the input starts being used.
+        virtual status_t startInput(audio_io_handle_t input,
+                                    audio_session_t session);
+
+        // indicates to the audio policy manager that the input stops being used.
+        virtual status_t stopInput(audio_io_handle_t input,
+                                   audio_session_t session);
+        virtual void releaseInput(audio_io_handle_t input,
+                                  audio_session_t session);
+        virtual void closeAllInputs();
+        virtual void initStreamVolume(audio_stream_type_t stream,
+                                                    int indexMin,
+                                                    int indexMax);
+        virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
+                                              int index,
+                                              audio_devices_t device);
+        virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
+                                              int *index,
+                                              audio_devices_t device);
+
+        // return the strategy corresponding to a given stream type
+        virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
+        // return the strategy corresponding to the given audio attributes
+        virtual uint32_t getStrategyForAttr(const audio_attributes_t *attr);
+
+        // return the enabled output devices for the given stream type
+        virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
+
+        virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc = NULL);
+        virtual status_t registerEffect(const effect_descriptor_t *desc,
+                                        audio_io_handle_t io,
+                                        uint32_t strategy,
+                                        int session,
+                                        int id);
+        virtual status_t unregisterEffect(int id)
+        {
+            return mEffects.unregisterEffect(id);
+        }
+        virtual status_t setEffectEnabled(int id, bool enabled)
+        {
+            return mEffects.setEffectEnabled(id, enabled);
+        }
+
+        virtual bool isStreamActive(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
+        // return whether a stream is playing remotely, override to change the definition of
+        //   local/remote playback, used for instance by notification manager to not make
+        //   media players lose audio focus when not playing locally
+        //   For the base implementation, "remotely" means playing during screen mirroring which
+        //   uses an output for playback with a non-empty, non "0" address.
+        virtual bool isStreamActiveRemotely(audio_stream_type_t stream,
+                                            uint32_t inPastMs = 0) const;
+
+        virtual bool isSourceActive(audio_source_t source) const;
+
+        virtual status_t dump(int fd);
+
+        virtual bool isOffloadSupported(const audio_offload_info_t& offloadInfo);
+
+        virtual status_t listAudioPorts(audio_port_role_t role,
+                                        audio_port_type_t type,
+                                        unsigned int *num_ports,
+                                        struct audio_port *ports,
+                                        unsigned int *generation);
+        virtual status_t getAudioPort(struct audio_port *port);
+        virtual status_t createAudioPatch(const struct audio_patch *patch,
+                                           audio_patch_handle_t *handle,
+                                           uid_t uid);
+        virtual status_t releaseAudioPatch(audio_patch_handle_t handle,
+                                              uid_t uid);
+        virtual status_t listAudioPatches(unsigned int *num_patches,
+                                          struct audio_patch *patches,
+                                          unsigned int *generation);
+        virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+
+        virtual status_t acquireSoundTriggerSession(audio_session_t *session,
+                                               audio_io_handle_t *ioHandle,
+                                               audio_devices_t *device);
+
+        virtual status_t releaseSoundTriggerSession(audio_session_t session)
+        {
+            return mSoundTriggerSessions.releaseSession(session);
+        }
+
+        virtual status_t registerPolicyMixes(Vector<AudioMix> mixes);
+        virtual status_t unregisterPolicyMixes(Vector<AudioMix> mixes);
+
+        virtual status_t startAudioSource(const struct audio_port_config *source,
+                                          const audio_attributes_t *attributes,
+                                          audio_io_handle_t *handle);
+        virtual status_t stopAudioSource(audio_io_handle_t handle);
+
+        virtual void     releaseResourcesForUid(uid_t uid);
+
+        // Audio policy configuration file parsing (audio_policy.conf)
+        // TODO candidates to be moved to ConfigParsingUtils
+                void defaultAudioPolicyConfig(void);
+
+        // return the strategy corresponding to a given stream type
+        routing_strategy getStrategy(audio_stream_type_t stream) const;
+
+protected:
+        // From AudioPolicyManagerObserver
+        virtual const AudioPatchCollection &getAudioPatches() const
+        {
+            return mAudioPatches;
+        }
+        virtual const SoundTriggerSessionCollection &getSoundTriggerSessionCollection() const
+        {
+            return mSoundTriggerSessions;
+        }
+        virtual const AudioPolicyMixCollection &getAudioPolicyMixCollection() const
+        {
+            return mPolicyMixes;
+        }
+        virtual const SwAudioOutputCollection &getOutputs() const
+        {
+            return mOutputs;
+        }
+        virtual const AudioInputCollection &getInputs() const
+        {
+            return mInputs;
+        }
+        virtual const DeviceVector &getAvailableOutputDevices() const
+        {
+            return mAvailableOutputDevices;
+        }
+        virtual const DeviceVector &getAvailableInputDevices() const
+        {
+            return mAvailableInputDevices;
+        }
+        virtual StreamDescriptorCollection &getStreamDescriptors()
+        {
+            return mStreams;
+        }
+        virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
+        {
+            return mDefaultOutputDevice;
+        }
+protected:
+        void addOutput(audio_io_handle_t output, sp<SwAudioOutputDescriptor> outputDesc);
+        void removeOutput(audio_io_handle_t output);
+        void addInput(audio_io_handle_t input, sp<AudioInputDescriptor> inputDesc);
+
+        // return the appropriate device for streams handled by the specified strategy, according
+        // to the current phone state, connected devices, etc.
+        // if fromCache is true, the device is returned from mDeviceForStrategy[],
+        // otherwise it is determined from the current state
+        // (connected devices, phone state, force use, a2dp output, ...)
+        // This makes it possible to:
+        //  1 speed up the process when the state is stable (when starting or stopping an output)
+        //  2 access either the current device selection (fromCache == true) or the
+        //    "future" device selection (fromCache == false) when called from a context
+        //    where conditions are changing (setDeviceConnectionState(), setPhoneState()...) AND
+        //    before updateDevicesAndOutputs() is called.
+        virtual audio_devices_t getDeviceForStrategy(routing_strategy strategy,
+                                                     bool fromCache);
+
+        bool isStrategyActive(const sp<AudioOutputDescriptor> outputDesc, routing_strategy strategy,
+                              uint32_t inPastMs = 0, nsecs_t sysTime = 0) const;
+
+        // change the route of the specified output. Returns the number of ms we have slept to
+        // allow new routing to take effect in certain cases.
+        virtual uint32_t setOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+                             audio_devices_t device,
+                             bool force = false,
+                             int delayMs = 0,
+                             audio_patch_handle_t *patchHandle = NULL,
+                             const char* address = NULL);
+        status_t resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+                                   int delayMs = 0,
+                                   audio_patch_handle_t *patchHandle = NULL);
+        status_t setInputDevice(audio_io_handle_t input,
+                                audio_devices_t device,
+                                bool force = false,
+                                audio_patch_handle_t *patchHandle = NULL);
+        status_t resetInputDevice(audio_io_handle_t input,
+                                  audio_patch_handle_t *patchHandle = NULL);
+
+        // select input device corresponding to requested audio source
+        virtual audio_devices_t getDeviceForInputSource(audio_source_t inputSource);
+
+        // compute the actual volume for a given stream according to the requested index and a particular
+        // device
+        virtual float computeVolume(audio_stream_type_t stream,
+                                    int index,
+                                    audio_devices_t device);
+
+        // check that volume change is permitted, compute and send new volume to audio hardware
+        virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
+                                           const sp<AudioOutputDescriptor>& outputDesc,
+                                           audio_devices_t device,
+                                           int delayMs = 0, bool force = false);
+
+        // apply all stream volumes to the specified output and device
+        void applyStreamVolumes(const sp<AudioOutputDescriptor>& outputDesc,
+                                audio_devices_t device, int delayMs = 0, bool force = false);
+
+        // Mute or unmute all streams handled by the specified strategy on the specified output
+        void setStrategyMute(routing_strategy strategy,
+                             bool on,
+                             const sp<AudioOutputDescriptor>& outputDesc,
+                             int delayMs = 0,
+                             audio_devices_t device = (audio_devices_t)0);
+
+        // Mute or unmute the stream on the specified output
+        void setStreamMute(audio_stream_type_t stream,
+                           bool on,
+                           const sp<AudioOutputDescriptor>& outputDesc,
+                           int delayMs = 0,
+                           audio_devices_t device = (audio_devices_t)0);
+
+        // handle special cases for the sonification strategy while in call: mute streams or replace
+        // them with a special tone played over the device used for communication
+        void handleIncallSonification(audio_stream_type_t stream, bool starting, bool stateChange);
+
+        audio_mode_t getPhoneState();
+
+        // true if device is in a telephony or VoIP call
+        virtual bool isInCall();
+        // true if given state represents a device in a telephony or VoIP call
+        virtual bool isStateInCall(int state);
+
+        // when a device is connected, checks if an already open output can be routed
+        // to this device. If none is open, tries to open one of the available outputs.
+        // Returns an output suitable for this device or 0.
+        // when a device is disconnected, checks if an output is no longer used and
+        // returns its handle if any.
+        // transfers the audio tracks and effects from one output thread to another accordingly.
+        status_t checkOutputsForDevice(const sp<DeviceDescriptor> devDesc,
+                                       audio_policy_dev_state_t state,
+                                       SortedVector<audio_io_handle_t>& outputs,
+                                       const String8 address);
+
+        status_t checkInputsForDevice(const sp<DeviceDescriptor> devDesc,
+                                      audio_policy_dev_state_t state,
+                                      SortedVector<audio_io_handle_t>& inputs,
+                                      const String8 address);
+
+        // close an output and its companion duplicating output.
+        void closeOutput(audio_io_handle_t output);
+
+        // close an input.
+        void closeInput(audio_io_handle_t input);
+
+        // checks and if necessary changes outputs used for all strategies.
+        // must be called every time a condition that affects the output choice for a given strategy
+        // changes: connected device, phone state, force use...
+        // Must be called before updateDevicesAndOutputs()
+        void checkOutputForStrategy(routing_strategy strategy);
+
+        // Same as checkOutputForStrategy() but for all strategies in order of priority
+        void checkOutputForAllStrategies();
+
+        // manages A2DP output suspend/restore according to phone state and BT SCO usage
+        void checkA2dpSuspend();
+
+        // selects the most appropriate device on output for the current state
+        // must be called every time a condition that affects the device choice for a given output
+        // changes: connected device, phone state, force use, output start, output stop...
+        // see getDeviceForStrategy() for the use of fromCache parameter
+        audio_devices_t getNewOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
+                                           bool fromCache);
+
+        // updates the cache of devices used by all strategies (mDeviceForStrategy[])
+        // must be called every time a condition that affects the device choice for a given strategy
+        // changes: connected device, phone state, force use...
+        // cached values are used by getDeviceForStrategy() if parameter fromCache is true.
+        // Must be called after checkOutputForAllStrategies()
+        void updateDevicesAndOutputs();
+
+        // selects the most appropriate device on input for current state
+        audio_devices_t getNewInputDevice(audio_io_handle_t input);
+
+        virtual uint32_t getMaxEffectsCpuLoad()
+        {
+            return mEffects.getMaxEffectsCpuLoad();
+        }
+
+        virtual uint32_t getMaxEffectsMemory()
+        {
+            return mEffects.getMaxEffectsMemory();
+        }
+#ifdef AUDIO_POLICY_TEST
+        virtual     bool        threadLoop();
+                    void        exit();
+        int testOutputIndex(audio_io_handle_t output);
+#endif //AUDIO_POLICY_TEST
+
+        SortedVector<audio_io_handle_t> getOutputsForDevice(audio_devices_t device,
+                                                            SwAudioOutputCollection openOutputs);
+        bool vectorsEqual(SortedVector<audio_io_handle_t>& outputs1,
+                                           SortedVector<audio_io_handle_t>& outputs2);
+
+        // mute/unmute strategies using an incompatible device combination
+        // if muting, wait for the audio in the PCM buffer to be drained before proceeding
+        // if unmuting, unmute only after the specified delay
+        // Returns the number of ms waited
+        virtual uint32_t  checkDeviceMuteStrategies(sp<AudioOutputDescriptor> outputDesc,
+                                            audio_devices_t prevDevice,
+                                            uint32_t delayMs);
+
+        audio_io_handle_t selectOutput(const SortedVector<audio_io_handle_t>& outputs,
+                                       audio_output_flags_t flags,
+                                       audio_format_t format);
+        // samplingRate, format, channelMask are in/out and so may be modified
+        sp<IOProfile> getInputProfile(audio_devices_t device,
+                                      String8 address,
+                                      uint32_t& samplingRate,
+                                      audio_format_t& format,
+                                      audio_channel_mask_t& channelMask,
+                                      audio_input_flags_t flags);
+        sp<IOProfile> getProfileForDirectOutput(audio_devices_t device,
+                                                       uint32_t samplingRate,
+                                                       audio_format_t format,
+                                                       audio_channel_mask_t channelMask,
+                                                       audio_output_flags_t flags);
+
+        audio_io_handle_t selectOutputForEffects(const SortedVector<audio_io_handle_t>& outputs);
+
+        virtual status_t addAudioPatch(audio_patch_handle_t handle, const sp<AudioPatch>& patch)
+        {
+            return mAudioPatches.addAudioPatch(handle, patch);
+        }
+        virtual status_t removeAudioPatch(audio_patch_handle_t handle)
+        {
+            return mAudioPatches.removeAudioPatch(handle);
+        }
+
+        audio_devices_t availablePrimaryOutputDevices() const
+        {
+            if (!hasPrimaryOutput()) {
+                return AUDIO_DEVICE_NONE;
+            }
+            return mPrimaryOutput->supportedDevices() & mAvailableOutputDevices.types();
+        }
+        audio_devices_t availablePrimaryInputDevices() const
+        {
+            if (!hasPrimaryOutput()) {
+                return AUDIO_DEVICE_NONE;
+            }
+            return mAvailableInputDevices.getDevicesFromHwModule(mPrimaryOutput->getModuleHandle());
+        }
+
+        void updateCallRouting(audio_devices_t rxDevice, int delayMs = 0);
+
+        // if argument "device" is different from AUDIO_DEVICE_NONE,  startSource() will force
+        // the re-evaluation of the output device.
+        status_t startSource(sp<AudioOutputDescriptor> outputDesc,
+                             audio_stream_type_t stream,
+                             audio_devices_t device,
+                             uint32_t *delayMs);
+        status_t stopSource(sp<AudioOutputDescriptor> outputDesc,
+                            audio_stream_type_t stream,
+                            bool forceDeviceUpdate);
+
+        void clearAudioPatches(uid_t uid);
+        void clearSessionRoutes(uid_t uid);
+        void checkStrategyRoute(routing_strategy strategy, audio_io_handle_t ouptutToSkip);
+
+        status_t hasPrimaryOutput() const { return mPrimaryOutput != 0; }
+
+        uid_t mUidCached;
+        AudioPolicyClientInterface *mpClientInterface;  // audio policy client interface
+        sp<SwAudioOutputDescriptor> mPrimaryOutput;     // primary output descriptor
+
+        // list of descriptors for outputs currently opened
+        SwAudioOutputCollection mOutputs;
+        // copy of mOutputs before setDeviceConnectionState() opens new outputs
+        // reset to mOutputs when updateDevicesAndOutputs() is called.
+        SwAudioOutputCollection mPreviousOutputs;
+        AudioInputCollection mInputs;     // list of input descriptors
+
+        DeviceVector  mAvailableOutputDevices; // all available output devices
+        DeviceVector  mAvailableInputDevices;  // all available input devices
+
+        SessionRouteMap mOutputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_OUTPUT);
+        SessionRouteMap mInputRoutes = SessionRouteMap(SessionRouteMap::MAPTYPE_INPUT);
+
+        StreamDescriptorCollection mStreams; // stream descriptors for volume control
+        bool    mLimitRingtoneVolume;        // limit ringtone volume to music volume if headset connected
+        audio_devices_t mDeviceForStrategy[NUM_STRATEGIES];
+        float   mLastVoiceVolume;            // last voice volume value sent to audio HAL
+
+        EffectDescriptorCollection mEffects;  // list of registered audio effects
+        bool    mA2dpSuspended;  // true if A2DP output is suspended
+        sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
+        bool mSpeakerDrcEnabled;// true on devices that use DRC on the DEVICE_CATEGORY_SPEAKER path
+                                // to boost soft sounds, used to adjust volume curves accordingly
+
+        HwModuleCollection mHwModules;
+
+        volatile int32_t mAudioPortGeneration;
+
+        AudioPatchCollection mAudioPatches;
+
+        SoundTriggerSessionCollection mSoundTriggerSessions;
+
+        sp<AudioPatch> mCallTxPatch;
+        sp<AudioPatch> mCallRxPatch;
+
+        // for supporting "beacon" streams, i.e. streams that only play on speaker, and never
+        // when something other than STREAM_TTS (a.k.a. "Transmitted Through Speaker") is playing
+        enum {
+            STARTING_OUTPUT,
+            STARTING_BEACON,
+            STOPPING_OUTPUT,
+            STOPPING_BEACON
+        };
+        uint32_t mBeaconMuteRefCount;   // ref count for stream that would mute beacon
+        uint32_t mBeaconPlayingRefCount;// ref count for the playing beacon streams
+        bool mBeaconMuted;              // has STREAM_TTS been muted
+
+        AudioPolicyMixCollection mPolicyMixes; // list of registered mixes
+
+#ifdef AUDIO_POLICY_TEST
+        Mutex   mLock;
+        Condition mWaitWorkCV;
+
+        int             mCurOutput;
+        bool            mDirectOutput;
+        audio_io_handle_t mTestOutputs[NUM_TEST_OUTPUTS];
+        int             mTestInput;
+        uint32_t        mTestDevice;
+        uint32_t        mTestSamplingRate;
+        uint32_t        mTestFormat;
+        uint32_t        mTestChannels;
+        uint32_t        mTestLatencyMs;
+#endif //AUDIO_POLICY_TEST
+
+        uint32_t nextAudioPortGeneration();
+
+        // Audio Policy Engine Interface.
+        AudioPolicyManagerInterface *mEngine;
+private:
+        // updates device caching and output for streams that can influence the
+        //    routing of notifications
+        void handleNotificationRoutingForStream(audio_stream_type_t stream);
+        // find the outputs on a given output descriptor that have the given address.
+        // to be called on an AudioOutputDescriptor whose supported devices (as defined
+        //   in mProfile->mSupportedDevices) matches the device whose address is to be matched.
+        // see deviceDistinguishesOnAddress(audio_devices_t) for whether the device type is one
+        //   where addresses are used to distinguish between one connected device and another.
+        void findIoHandlesByAddress(sp<SwAudioOutputDescriptor> desc /*in*/,
+                const audio_devices_t device /*in*/,
+                const String8 address /*in*/,
+                SortedVector<audio_io_handle_t>& outputs /*out*/);
+        uint32_t curAudioPortGeneration() const { return mAudioPortGeneration; }
+        // internal method to return the output handle for the given device and format
+        audio_io_handle_t getOutputForDevice(
+                audio_devices_t device,
+                audio_session_t session,
+                audio_stream_type_t stream,
+                uint32_t samplingRate,
+                audio_format_t format,
+                audio_channel_mask_t channelMask,
+                audio_output_flags_t flags,
+                const audio_offload_info_t *offloadInfo);
+        // internal function to derive a stream type value from audio attributes
+        audio_stream_type_t streamTypefromAttributesInt(const audio_attributes_t *attr);
+        // event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
+        // returns 0 if no mute/unmute event happened, otherwise the largest latency of the device
+        //   where the mute/unmute happened
+        uint32_t handleEventForBeacon(int event);
+        uint32_t setBeaconMute(bool mute);
+        bool     isValidAttributes(const audio_attributes_t *paa);
+
+        // select input device corresponding to requested audio source and return associated policy
+        // mix if any. Calls getDeviceForInputSource().
+        audio_devices_t getDeviceAndMixForInputSource(audio_source_t inputSource,
+                                                        AudioMix **policyMix = NULL);
+
+        // Called by setDeviceConnectionState().
+        status_t setDeviceConnectionStateInt(audio_devices_t device,
+                                                          audio_policy_dev_state_t state,
+                                                          const char *device_address,
+                                                          const char *device_name);
+};
+
+};
diff --git a/services/audiopolicy/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
similarity index 97%
rename from services/audiopolicy/AudioPolicyClientImpl.cpp
rename to services/audiopolicy/service/AudioPolicyClientImpl.cpp
index 3e090e9..489a9be 100644
--- a/services/audiopolicy/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -213,6 +213,12 @@
     mAudioPolicyService->onAudioPatchListUpdate();
 }
 
+void AudioPolicyService::AudioPolicyClient::onDynamicPolicyMixStateUpdate(
+        String8 regId, int32_t state)
+{
+    mAudioPolicyService->onDynamicPolicyMixStateUpdate(regId, state);
+}
+
 audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId()
 {
     return AudioSystem::newAudioUniqueId();
diff --git a/services/audiopolicy/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
similarity index 100%
rename from services/audiopolicy/AudioPolicyClientImplLegacy.cpp
rename to services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
diff --git a/services/audiopolicy/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
similarity index 98%
rename from services/audiopolicy/AudioPolicyEffects.cpp
rename to services/audiopolicy/service/AudioPolicyEffects.cpp
index e6ace20..282ddeb 100644
--- a/services/audiopolicy/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -109,8 +109,8 @@
         Vector <EffectDesc *> effects = mInputSources.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
-            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, -1, 0, 0,
-                                                 audioSession, input);
+            sp<AudioEffect> fx = new AudioEffect(NULL, String16("android"), &effect->mUuid, -1, 0,
+                                                 0, audioSession, input);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
                 ALOGW("addInputEffects(): failed to create Fx %s on source %d",
@@ -254,7 +254,7 @@
         Vector <EffectDesc *> effects = mOutputStreams.valueAt(index)->mEffects;
         for (size_t i = 0; i < effects.size(); i++) {
             EffectDesc *effect = effects[i];
-            sp<AudioEffect> fx = new AudioEffect(NULL, &effect->mUuid, 0, 0, 0,
+            sp<AudioEffect> fx = new AudioEffect(NULL, String16("android"), &effect->mUuid, 0, 0, 0,
                                                  audioSession, output);
             status_t status = fx->initCheck();
             if (status != NO_ERROR && status != ALREADY_EXISTS) {
diff --git a/services/audiopolicy/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
similarity index 100%
rename from services/audiopolicy/AudioPolicyEffects.h
rename to services/audiopolicy/service/AudioPolicyEffects.h
diff --git a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
similarity index 89%
rename from services/audiopolicy/AudioPolicyInterfaceImpl.cpp
rename to services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index a45dbb3..793c26a 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -28,7 +28,8 @@
 
 status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
                                                   audio_policy_dev_state_t state,
-                                                  const char *device_address)
+                                                  const char *device_address,
+                                                  const char *device_name)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -46,8 +47,8 @@
 
     ALOGV("setDeviceConnectionState()");
     Mutex::Autolock _l(mLock);
-    return mAudioPolicyManager->setDeviceConnectionState(device,
-                                                      state, device_address);
+    return mAudioPolicyManager->setDeviceConnectionState(device, state,
+                                                         device_address, device_name);
 }
 
 audio_policy_dev_state_t AudioPolicyService::getDeviceConnectionState(
@@ -145,10 +146,12 @@
                                               audio_io_handle_t *output,
                                               audio_session_t session,
                                               audio_stream_type_t *stream,
+                                              uid_t uid,
                                               uint32_t samplingRate,
                                               audio_format_t format,
                                               audio_channel_mask_t channelMask,
                                               audio_output_flags_t flags,
+                                              audio_port_handle_t selectedDeviceId,
                                               const audio_offload_info_t *offloadInfo)
 {
     if (mAudioPolicyManager == NULL) {
@@ -156,8 +159,17 @@
     }
     ALOGV("getOutput()");
     Mutex::Autolock _l(mLock);
-    return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, samplingRate,
-                                    format, channelMask, flags, offloadInfo);
+
+    // if the caller is us, trust the specified uid
+    if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
+        uid_t newclientUid = IPCThreadState::self()->getCallingUid();
+        if (uid != (uid_t)-1 && uid != newclientUid) {
+            ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
+        }
+        uid = newclientUid;
+    }
+    return mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, samplingRate,
+                                    format, channelMask, flags, selectedDeviceId, offloadInfo);
 }
 
 status_t AudioPolicyService::startOutput(audio_io_handle_t output,
@@ -246,10 +258,12 @@
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             uid_t uid,
                                              uint32_t samplingRate,
                                              audio_format_t format,
                                              audio_channel_mask_t channelMask,
-                                             audio_input_flags_t flags)
+                                             audio_input_flags_t flags,
+                                             audio_port_handle_t selectedDeviceId)
 {
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
@@ -260,19 +274,28 @@
         return BAD_VALUE;
     }
 
-    if (((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) ||
-        ((attr->source == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) {
+    if ((attr->source == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
         return BAD_VALUE;
     }
     sp<AudioPolicyEffects>audioPolicyEffects;
     status_t status;
     AudioPolicyInterface::input_type_t inputType;
+    // if the caller is us, trust the specified uid
+    if (IPCThreadState::self()->getCallingPid() != getpid_cached || uid == (uid_t)-1) {
+        uid_t newclientUid = IPCThreadState::self()->getCallingUid();
+        if (uid != (uid_t)-1 && uid != newclientUid) {
+            ALOGW("%s uid %d tried to pass itself off as %d", __FUNCTION__, newclientUid, uid);
+        }
+        uid = newclientUid;
+    }
+
     {
         Mutex::Autolock _l(mLock);
         // the audio_in_acoustics_t parameter is ignored by get_input()
-        status = mAudioPolicyManager->getInputForAttr(attr, input, session,
+        status = mAudioPolicyManager->getInputForAttr(attr, input, session, uid,
                                                      samplingRate, format, channelMask,
-                                                     flags, &inputType);
+                                                     flags, selectedDeviceId,
+                                                     &inputType);
         audioPolicyEffects = mAudioPolicyEffects;
 
         if (status == NO_ERROR) {
@@ -280,6 +303,8 @@
             switch (inputType) {
             case AudioPolicyInterface::API_INPUT_LEGACY:
                 break;
+            case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
+                // FIXME: use the same permission as for remote submix for now.
             case AudioPolicyInterface::API_INPUT_MIX_CAPTURE:
                 if (!captureAudioOutputAllowed()) {
                     ALOGE("getInputForAttr() permission denied: capture not allowed");
@@ -545,9 +570,6 @@
                                             unsigned int *generation)
 {
     Mutex::Autolock _l(mLock);
-    if(!modifyAudioRoutingAllowed()) {
-        return PERMISSION_DENIED;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -558,9 +580,6 @@
 status_t AudioPolicyService::getAudioPort(struct audio_port *port)
 {
     Mutex::Autolock _l(mLock);
-    if(!modifyAudioRoutingAllowed()) {
-        return PERMISSION_DENIED;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -601,9 +620,6 @@
         unsigned int *generation)
 {
     Mutex::Autolock _l(mLock);
-    if(!modifyAudioRoutingAllowed()) {
-        return PERMISSION_DENIED;
-    }
     if (mAudioPolicyManager == NULL) {
         return NO_INIT;
     }
@@ -660,4 +676,26 @@
     }
 }
 
+status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
+                                  const audio_attributes_t *attributes,
+                                  audio_io_handle_t *handle)
+{
+    Mutex::Autolock _l(mLock);
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+
+    return mAudioPolicyManager->startAudioSource(source, attributes, handle);
+}
+
+status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
+{
+    Mutex::Autolock _l(mLock);
+    if (mAudioPolicyManager == NULL) {
+        return NO_INIT;
+    }
+
+    return mAudioPolicyManager->stopAudioSource(handle);
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
similarity index 95%
rename from services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
rename to services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index b8846c6..13af3ef 100644
--- a/services/audiopolicy/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -33,7 +33,8 @@
 
 status_t AudioPolicyService::setDeviceConnectionState(audio_devices_t device,
                                                   audio_policy_dev_state_t state,
-                                                  const char *device_address)
+                                                  const char *device_address,
+                                                  const char *device_name __unused)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
@@ -233,10 +234,12 @@
 status_t AudioPolicyService::getInputForAttr(const audio_attributes_t *attr,
                                              audio_io_handle_t *input,
                                              audio_session_t session,
+                                             uid_t uid __unused,
                                              uint32_t samplingRate,
                                              audio_format_t format,
                                              audio_channel_mask_t channelMask,
-                                             audio_input_flags_t flags __unused)
+                                             audio_input_flags_t flags __unused,
+                                             audio_port_handle_t selectedDeviceId __unused)
 {
     if (mpAudioPolicy == NULL) {
         return NO_INIT;
@@ -254,8 +257,7 @@
         inputSource = AUDIO_SOURCE_MIC;
     }
 
-    if (((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) ||
-        ((inputSource == AUDIO_SOURCE_FM_TUNER) && !captureFmTunerAllowed())) {
+    if ((inputSource == AUDIO_SOURCE_HOTWORD) && !captureHotwordAllowed()) {
         return BAD_VALUE;
     }
 
@@ -564,10 +566,12 @@
                                               audio_io_handle_t *output,
                                               audio_session_t session __unused,
                                               audio_stream_type_t *stream,
+                                              uid_t uid __unused,
                                               uint32_t samplingRate,
                                               audio_format_t format,
                                               audio_channel_mask_t channelMask,
                                               audio_output_flags_t flags,
+                                              audio_port_handle_t selectedDeviceId __unused,
                                               const audio_offload_info_t *offloadInfo)
 {
     if (attr != NULL) {
@@ -603,4 +607,16 @@
     return INVALID_OPERATION;
 }
 
+status_t AudioPolicyService::startAudioSource(const struct audio_port_config *source,
+                                  const audio_attributes_t *attributes,
+                                  audio_io_handle_t *handle)
+{
+    return INVALID_OPERATION;
+}
+
+status_t AudioPolicyService::stopAudioSource(audio_io_handle_t handle)
+{
+    return INVALID_OPERATION;
+}
+
 }; // namespace android
diff --git a/services/audiopolicy/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
similarity index 92%
rename from services/audiopolicy/AudioPolicyService.cpp
rename to services/audiopolicy/service/AudioPolicyService.cpp
index eb9116d..eefff3d 100644
--- a/services/audiopolicy/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -166,6 +166,17 @@
     }
 }
 
+void AudioPolicyService::setAudioPortCallbacksEnabled(bool enabled)
+{
+    Mutex::Autolock _l(mNotificationClientsLock);
+
+    uid_t uid = IPCThreadState::self()->getCallingUid();
+    if (mNotificationClients.indexOfKey(uid) < 0) {
+        return;
+    }
+    mNotificationClients.valueFor(uid)->setAudioPortCallbacksEnabled(enabled);
+}
+
 // removeNotificationClient() is called when the client process dies.
 void AudioPolicyService::removeNotificationClient(uid_t uid)
 {
@@ -177,7 +188,7 @@
     {
         Mutex::Autolock _l(mLock);
         if (mAudioPolicyManager) {
-            mAudioPolicyManager->clearAudioPatches(uid);
+            mAudioPolicyManager->releaseResourcesForUid(uid);
         }
     }
 #endif
@@ -222,6 +233,21 @@
     }
 }
 
+void AudioPolicyService::onDynamicPolicyMixStateUpdate(String8 regId, int32_t state)
+{
+    ALOGV("AudioPolicyService::onDynamicPolicyMixStateUpdate(%s, %d)",
+            regId.string(), state);
+    mOutputCommandThread->dynamicPolicyMixStateUpdateCommand(regId, state);
+}
+
+void AudioPolicyService::doOnDynamicPolicyMixStateUpdate(String8 regId, int32_t state)
+{
+    Mutex::Autolock _l(mNotificationClientsLock);
+    for (size_t i = 0; i < mNotificationClients.size(); i++) {
+        mNotificationClients.valueAt(i)->onDynamicPolicyMixStateUpdate(regId, state);
+    }
+}
+
 status_t AudioPolicyService::clientSetAudioPortConfig(const struct audio_port_config *config,
                                                       int delayMs)
 {
@@ -231,7 +257,8 @@
 AudioPolicyService::NotificationClient::NotificationClient(const sp<AudioPolicyService>& service,
                                                      const sp<IAudioPolicyServiceClient>& client,
                                                      uid_t uid)
-    : mService(service), mUid(uid), mAudioPolicyServiceClient(client)
+    : mService(service), mUid(uid), mAudioPolicyServiceClient(client),
+      mAudioPortCallbacksEnabled(false)
 {
 }
 
@@ -250,18 +277,32 @@
 
 void AudioPolicyService::NotificationClient::onAudioPortListUpdate()
 {
-    if (mAudioPolicyServiceClient != 0) {
+    if (mAudioPolicyServiceClient != 0 && mAudioPortCallbacksEnabled) {
         mAudioPolicyServiceClient->onAudioPortListUpdate();
     }
 }
 
 void AudioPolicyService::NotificationClient::onAudioPatchListUpdate()
 {
-    if (mAudioPolicyServiceClient != 0) {
+    if (mAudioPolicyServiceClient != 0 && mAudioPortCallbacksEnabled) {
         mAudioPolicyServiceClient->onAudioPatchListUpdate();
     }
 }
 
+void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
+        String8 regId, int32_t state)
+{
+    if (mAudioPolicyServiceClient != 0) {
+        mAudioPolicyServiceClient->onDynamicPolicyMixStateUpdate(regId, state);
+    }
+}
+
+void AudioPolicyService::NotificationClient::setAudioPortCallbacksEnabled(bool enabled)
+{
+    mAudioPortCallbacksEnabled = enabled;
+}
+
 void AudioPolicyService::binderDied(const wp<IBinder>& who) {
     ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(),
             IPCThreadState::self()->getCallingPid());
@@ -511,6 +552,20 @@
                         command->mStatus = af->setAudioPortConfig(&data->mConfig);
                     }
                     } break;
+                case DYN_POLICY_MIX_STATE_UPDATE: {
+                    DynPolicyMixStateUpdateData *data =
+                            (DynPolicyMixStateUpdateData *)command->mParam.get();
+                    ALOGV("AudioCommandThread() processing dyn policy mix state update %s %d",
+                            data->mRegId.string(), data->mState);
+                    svc = mService.promote();
+                    if (svc == 0) {
+                        break;
+                    }
+                    mLock.unlock();
+                    svc->doOnDynamicPolicyMixStateUpdate(data->mRegId, data->mState);
+                    mLock.lock();
+                    } break;
                 default:
                     ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
                 }
@@ -532,7 +587,7 @@
         mLock.unlock();
         svc.clear();
         mLock.lock();
-        if (!exitPending() && mAudioCommands.isEmpty()) {
+        if (!exitPending() && (mAudioCommands.isEmpty() || waitTime != INT64_MAX)) {
             // release delayed commands wake lock
             release_wake_lock(mName.string());
             ALOGV("AudioCommandThread() going to sleep");
@@ -747,6 +802,20 @@
     return sendCommand(command, delayMs);
 }
 
+void AudioPolicyService::AudioCommandThread::dynamicPolicyMixStateUpdateCommand(
+        String8 regId, int32_t state)
+{
+    sp<AudioCommand> command = new AudioCommand();
+    command->mCommand = DYN_POLICY_MIX_STATE_UPDATE;
+    DynPolicyMixStateUpdateData *data = new DynPolicyMixStateUpdateData();
+    data->mRegId = regId;
+    data->mState = state;
+    command->mParam = data;
+    ALOGV("AudioCommandThread() sending dynamic policy mix (id=%s) state update to %d",
+            regId.string(), state);
+    sendCommand(command);
+}
+
 status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
 {
     {
@@ -861,6 +930,7 @@
                 patch2 = ((CreateAudioPatchData *)command2->mParam.get())->mPatch;
             } else {
                 handle2 = ((ReleaseAudioPatchData *)command2->mParam.get())->mHandle;
+                memset(&patch2, 0, sizeof(patch2));
             }
             if (handle != handle2) break;
             /* Filter CREATE_AUDIO_PATCH commands only when they are issued for
@@ -888,6 +958,10 @@
             delayMs = 1;
         } break;
 
+        case DYN_POLICY_MIX_STATE_UPDATE: {
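+            // no coalescing with pending commands: every dynamic policy mix state
+            // update is assumed to be delivered to clients as-is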
+
+        } break;
+
         case START_TONE:
         case STOP_TONE:
         default:
diff --git a/services/audiopolicy/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
similarity index 92%
rename from services/audiopolicy/AudioPolicyService.h
rename to services/audiopolicy/service/AudioPolicyService.h
index 80284a4..a0d5aa2 100644
--- a/services/audiopolicy/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -35,7 +35,7 @@
 #include <hardware_legacy/AudioPolicyInterface.h>
 #endif
 #include "AudioPolicyEffects.h"
-#include "AudioPolicyManager.h"
+#include "managerdefault/AudioPolicyManager.h"
 
 
 namespace android {
@@ -61,7 +61,8 @@
 
     virtual status_t setDeviceConnectionState(audio_devices_t device,
                                               audio_policy_dev_state_t state,
-                                              const char *device_address);
+                                              const char *device_address,
+                                              const char *device_name);
     virtual audio_policy_dev_state_t getDeviceConnectionState(
                                                                 audio_devices_t device,
                                                                 const char *device_address);
@@ -79,10 +80,12 @@
                                       audio_io_handle_t *output,
                                       audio_session_t session,
                                       audio_stream_type_t *stream,
+                                      uid_t uid,
                                       uint32_t samplingRate = 0,
                                       audio_format_t format = AUDIO_FORMAT_DEFAULT,
                                       audio_channel_mask_t channelMask = 0,
                                       audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+                                      audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
                                       const audio_offload_info_t *offloadInfo = NULL);
     virtual status_t startOutput(audio_io_handle_t output,
                                  audio_stream_type_t stream,
@@ -96,10 +99,12 @@
     virtual status_t getInputForAttr(const audio_attributes_t *attr,
                                      audio_io_handle_t *input,
                                      audio_session_t session,
+                                     uid_t uid,
                                      uint32_t samplingRate,
                                      audio_format_t format,
                                      audio_channel_mask_t channelMask,
-                                     audio_input_flags_t flags);
+                                     audio_input_flags_t flags,
+                                     audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
     virtual status_t startInput(audio_io_handle_t input,
                                 audio_session_t session);
     virtual status_t stopInput(audio_io_handle_t input,
@@ -180,6 +185,8 @@
 
     virtual void registerClient(const sp<IAudioPolicyServiceClient>& client);
 
+    virtual void setAudioPortCallbacksEnabled(bool enabled);
+
     virtual status_t acquireSoundTriggerSession(audio_session_t *session,
                                            audio_io_handle_t *ioHandle,
                                            audio_devices_t *device);
@@ -190,6 +197,11 @@
 
     virtual status_t registerPolicyMixes(Vector<AudioMix> mixes, bool registration);
 
+    virtual status_t startAudioSource(const struct audio_port_config *source,
+                                      const audio_attributes_t *attributes,
+                                      audio_io_handle_t *handle);
+    virtual status_t stopAudioSource(audio_io_handle_t handle);
+
             status_t doStopOutput(audio_io_handle_t output,
                                   audio_stream_type_t stream,
                                   audio_session_t session);
@@ -211,6 +223,9 @@
             void onAudioPatchListUpdate();
             void doOnAudioPatchListUpdate();
 
+            void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+            void doOnDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+
 private:
                         AudioPolicyService() ANDROID_API;
     virtual             ~AudioPolicyService();
@@ -241,6 +256,7 @@
             UPDATE_AUDIOPORT_LIST,
             UPDATE_AUDIOPATCH_LIST,
             SET_AUDIOPORT_CONFIG,
+            DYN_POLICY_MIX_STATE_UPDATE
         };
 
         AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -278,6 +294,7 @@
                     void        updateAudioPatchListCommand();
                     status_t    setAudioPortConfigCommand(const struct audio_port_config *config,
                                                           int delayMs);
+                    void        dynamicPolicyMixStateUpdateCommand(String8 regId, int32_t state);
                     void        insertCommand_l(AudioCommand *command, int delayMs = 0);
 
     private:
@@ -362,6 +379,12 @@
             struct audio_port_config mConfig;
         };
 
+        class DynPolicyMixStateUpdateData : public AudioCommandData {
+        public:
+            String8 mRegId;
+            int32_t mState;
+        };
+
         Mutex   mLock;
         Condition mWaitWorkCV;
         Vector < sp<AudioCommand> > mAudioCommands; // list of pending commands
@@ -467,6 +490,7 @@
 
         virtual void onAudioPortListUpdate();
         virtual void onAudioPatchListUpdate();
+        virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
 
         virtual audio_unique_id_t newAudioUniqueId();
 
@@ -482,8 +506,10 @@
                                                 uid_t uid);
         virtual             ~NotificationClient();
 
-                            void        onAudioPortListUpdate();
-                            void        onAudioPatchListUpdate();
+                            void      onAudioPortListUpdate();
+                            void      onAudioPatchListUpdate();
+                            void      onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
+                            void      setAudioPortCallbacksEnabled(bool enabled);
 
                 // IBinder::DeathRecipient
                 virtual     void        binderDied(const wp<IBinder>& who);
@@ -495,6 +521,7 @@
         const wp<AudioPolicyService>        mService;
         const uid_t                         mUid;
         const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
+              bool                          mAudioPortCallbacksEnabled;
     };
 
     // Internal dump utilities.
diff --git a/services/audiopolicy/utilities/convert/convert.h b/services/audiopolicy/utilities/convert/convert.h
new file mode 100644
index 0000000..980b5d5
--- /dev/null
+++ b/services/audiopolicy/utilities/convert/convert.h
@@ -0,0 +1,383 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <limits>
+#include <sstream>
+#include <string>
+#include <vector>
+#include <stdint.h>
+#include <cmath>
+
+namespace android
+{
+
+namespace utilities
+{
+
+/**
+ * Convert a given source type to a given destination type.
+ *
+ * String conversion to T reads a value of type T from the given string.
+ * The function does not allow white space around the value to parse
+ * and tries to parse the whole string, which means that if some bytes were not
+ * read from the string, the function fails.
+ * Hexadecimal representation (i.e. numbers starting with 0x) is supported only
+ * for integral type conversions.
+ *
+ * Numeric conversion to string formats the source value in decimal notation.
+ *
+ * Vector to vector conversion calls convertTo on each element.
+ *
+ * @tparam srcType source type, default value is string type
+ * @tparam dstType destination type
+ * @param[in] input The source to convert from.
+ * @param[out] result Converted value if success, undefined on failure.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <typename srcType, typename dstType>
+static inline bool convertTo(const srcType &input, dstType &result);
+
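+/* Usage sketch (illustrative only; the variable names below are hypothetical
+ * and the calls rely solely on the convertTo() overloads provided by this
+ * header):
+ *
+ *   std::string raw("0x1F");
+ *   uint32_t value = 0;
+ *   if (android::utilities::convertTo(raw, value)) {
+ *       // value == 31: hexadecimal input is accepted for integral destinations.
+ *   }
+ *
+ *   std::string formatted;
+ *   android::utilities::convertTo<uint32_t, std::string>(31, formatted); // formatted == "31"
+ */
+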
+/* details namespace is here to hide implementation details to header end user. It
+ * is NOT intended to be used outside. */
+namespace details
+{
+
+/** Helper class to limit instantiation of templates */
+template <typename T>
+struct ConversionFromStringAllowed;
+template <typename T>
+struct ConversionToStringAllowed;
+
+/* List of allowed types for conversion */
+template <>
+struct ConversionFromStringAllowed<bool> {};
+template <>
+struct ConversionFromStringAllowed<uint64_t> {};
+template <>
+struct ConversionFromStringAllowed<int64_t> {};
+template <>
+struct ConversionFromStringAllowed<uint32_t> {};
+template <>
+struct ConversionFromStringAllowed<int32_t> {};
+template <>
+struct ConversionFromStringAllowed<uint16_t> {};
+template <>
+struct ConversionFromStringAllowed<int16_t> {};
+template <>
+struct ConversionFromStringAllowed<float> {};
+template <>
+struct ConversionFromStringAllowed<double> {};
+
+template <>
+struct ConversionToStringAllowed<int64_t> {};
+template <>
+struct ConversionToStringAllowed<uint64_t> {};
+template <>
+struct ConversionToStringAllowed<uint32_t> {};
+template <>
+struct ConversionToStringAllowed<int32_t> {};
+template <>
+struct ConversionToStringAllowed<double> {};
+template <>
+struct ConversionToStringAllowed<float> {};
+
+/**
+ * Set the decimal precision to 10 digits.
+ * Note that this setting is aligned with Android Audio Parameter
+ * policy concerning float storage into string.
+ */
+static const uint32_t gFloatPrecision = 10;
+
+template <typename T>
+static inline bool fromString(const std::string &str, T &result)
+{
+    /* Check that conversion to that type is allowed.
+     * If this fails, it means that this template was not intended to be used
+     * with this type, and thus that the result is undefined. */
+    ConversionFromStringAllowed<T>();
+
+    if (str.find_first_of(std::string("\r\n\t\v ")) != std::string::npos) {
+        return false;
+    }
+
+    /* Check for a '-' in string. If type is unsigned and a - is found, the
+     * parsing fails. This is made necessary because "-1" is read as 65535 for
+     * uint16_t, for example */
+    if (str.find("-") != std::string::npos
+        && !std::numeric_limits<T>::is_signed) {
+        return false;
+    }
+
+    std::stringstream ss(str);
+
+    /* Sadly, the stream conversion does not handle hexadecimal format, thus
+     * the check is done manually */
+    if (str.substr(0, 2) == "0x") {
+        if (std::numeric_limits<T>::is_integer) {
+            ss >> std::hex >> result;
+        } else {
+            /* Conversion undefined for non integers */
+            return false;
+        }
+    } else {
+        ss >> result;
+    }
+
+    return ss.eof() && !ss.fail() && !ss.bad();
+}
+
+template <typename T>
+static inline bool toString(const T &value, std::string &str)
+{
+    /* Check that conversion from that type is allowed.
+     * If this fails, it means that this template was not intended to be used
+     * with this type, and thus that the result is undefined. */
+    ConversionToStringAllowed<T>();
+
+    std::stringstream oss;
+    oss.precision(gFloatPrecision);
+    oss << value;
+    str = oss.str();
+    return !oss.fail() && !oss.bad();
+}
+
+template <typename srcType, typename dstType>
+class Converter;
+
+template <typename dstType>
+class Converter<std::string, dstType>
+{
+public:
+    static inline bool run(const std::string &str, dstType &result)
+    {
+        return fromString<dstType>(str, result);
+    }
+};
+
+template <typename srcType>
+class Converter<srcType, std::string>
+{
+public:
+    static inline bool run(const srcType &str, std::string &result)
+    {
+        return toString<srcType>(str, result);
+    }
+};
+
+/** Convert a vector by applying convertTo on each element.
+ *
+ * @tparam SrcElem Type of the src elements.
+ * @tparam DstElem Type of the destination elements.
+ */
+template <typename SrcElem, typename DstElem>
+class Converter<std::vector<SrcElem>, std::vector<DstElem> >
+{
+public:
+    typedef const std::vector<SrcElem> Src;
+    typedef std::vector<DstElem> Dst;
+
+    static inline bool run(Src &src, Dst &dst)
+    {
+        typedef typename Src::const_iterator SrcIt;
+        dst.clear();
+        dst.reserve(src.size());
+        for (SrcIt it = src.begin(); it != src.end(); ++it) {
+            DstElem dstElem;
+            if (not convertTo(*it, dstElem)) {
+                return false;
+            }
+            dst.push_back(dstElem);
+        }
+        return true;
+    }
+};
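+
+/* Vector-to-vector sketch (illustrative; the names are hypothetical). A single
+ * element that fails to parse makes the whole conversion fail, and "parsed"
+ * then only holds the elements converted before the failure:
+ *
+ *   std::vector<std::string> raw;
+ *   raw.push_back("1");
+ *   raw.push_back("2");
+ *   std::vector<int32_t> parsed;
+ *   bool ok = android::utilities::convertTo(raw, parsed); // ok == true, parsed == {1, 2}
+ */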
+
+} // namespace details
+
+template <typename srcType, typename dstType>
+static inline bool convertTo(const srcType &input, dstType &result)
+{
+    return details::Converter<srcType, dstType>::run(input, result);
+}
+
+/**
+ * Specialization for int16_t of convertTo template function.
+ *
+ * This function follows the same paradigm as its generic version.
+ *
+ * The specific implementation is made necessary because the stlport version of
+ * string streams is buggy and does not fail when given overflowed values.
+ * This specialisation can be safely removed when the stlport behaviour is fixed.
+ *
+ * @param[in]  str    the string to parse.
+ * @param[out] result reference to object where to store the result.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <>
+inline bool convertTo<std::string, int16_t>(const std::string &str, int16_t &result)
+{
+    int64_t res;
+
+    if (!convertTo<std::string, int64_t>(str, res)) {
+        return false;
+    }
+
+    if (res > std::numeric_limits<int16_t>::max() || res < std::numeric_limits<int16_t>::min()) {
+        return false;
+    }
+
+    result = static_cast<int16_t>(res);
+    return true;
+}
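+
+/* Range-check sketch for this specialization (illustrative values):
+ *
+ *   int16_t v = 0;
+ *   convertTo<std::string, int16_t>(std::string("32767"), v); // true, v == 32767
+ *   convertTo<std::string, int16_t>(std::string("70000"), v); // false: out of int16_t range
+ */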
+
+/**
+ * Specialization for float of convertTo template function.
+ *
+ * This function follows the same paradigm as its generic version and is
+ * based on it but makes further checks on the returned value.
+ *
+ * The specific implementation is made necessary because the stlport conversion
+ * from string to float behaves differently from the GNU STL: overflows produce
+ * +/-Infinity rather than an error.
+ *
+ * @param[in]  str    the string to parse.
+ * @param[out] result reference to object where to store the result.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <>
+inline bool convertTo<std::string, float>(const std::string &str, float &result)
+{
+    if (!details::Converter<std::string, float>::run(str, result)) {
+        return false;
+    }
+
+    if (std::abs(result) == std::numeric_limits<float>::infinity() ||
+        result != result /* NaN check: NaN never compares equal to itself */) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * Specialization for double of convertTo template function.
+ *
+ * This function follows the same paradigm as its generic version and is
+ * based on it but makes further checks on the returned value.
+ *
+ * The specific implementation is made necessary because the stlport conversion
+ * from string to double behaves differently from the GNU STL: overflows produce
+ * +/-Infinity rather than an error.
+ *
+ * @param[in]  str    the string to parse.
+ * @param[out] result reference to object where to store the result.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <>
+inline bool convertTo<std::string, double>(const std::string &str, double &result)
+{
+    if (!details::Converter<std::string, double>::run(str, result)) {
+        return false;
+    }
+
+    if (std::abs(result) == std::numeric_limits<double>::infinity() ||
+        result != result /* NaN check: NaN never compares equal to itself */) {
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * Specialization for boolean of convertTo template function.
+ *
+ * This function follows the same paradigm as its generic version.
+ * It accepts boolean values expressed as "0/1", "false/true" or
+ * "FALSE/TRUE".
+ * The specific implementation is made necessary because the behaviour of
+ * string streams when parsing boolean values is not sufficient for our
+ * requirements. Indeed, parsing "true" will correctly parse the value, but the
+ * end of stream is not reached, which makes ss.eof() fail in the generic
+ * implementation.
+ *
+ * @param[in]  str    the string to parse.
+ * @param[out] result reference to object where to store the result.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <>
+inline bool convertTo<std::string, bool>(const std::string &str, bool &result)
+{
+    if (str == "0" || str == "FALSE" || str == "false") {
+        result = false;
+        return true;
+    }
+
+    if (str == "1" || str == "TRUE" || str == "true") {
+        result = true;
+        return true;
+    }
+
+    return false;
+}
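+
+/* Accepted boolean spellings (illustrative):
+ *
+ *   bool flag = false;
+ *   convertTo<std::string, bool>(std::string("TRUE"), flag); // true, flag == true
+ *   convertTo<std::string, bool>(std::string("True"), flag); // false: mixed case is rejected
+ */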
+
+/**
+ * Specialization for boolean to string of convertTo template function.
+ *
+ * This function follows the same paradigm as its generic version.
+ * It arbitrarily chooses to produce "false"/"true".
+ * It is compatible with the specialization from string to boolean.
+ *
+ * @param[in]  isSet  boolean to convert to a string.
+ * @param[out] result reference to object where to store the result.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <>
+inline bool convertTo<bool, std::string>(const bool &isSet, std::string &result)
+{
+    result = isSet ? "true" : "false";
+    return true;
+}
+
+/**
+ * Specialization for string to string of convertTo template function.
+ *
+ * This function is a trivial conversion from string to string.
+ * For clients that are themselves templated, this implementation avoids the need for an
+ * extra specialization to bypass the string-to-string conversion.
+ *
+ * @param[in]  str    the string to parse.
+ * @param[out] result reference to object where to store the result.
+ *
+ * @return true if conversion was successful, false otherwise.
+ */
+template <>
+inline bool convertTo<std::string, std::string>(const std::string &str, std::string &result)
+{
+    result = str;
+    return true;
+}
+
+} // namespace utilities
+
+} // namespace android
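
The specializations above define a small, strict parsing contract for booleans. The following standalone C++ sketch (not part of the patch; it re-implements the two boolean specializations under hypothetical names so it can be compiled on its own) shows which inputs are accepted and how the bool-to-string conversion round-trips with the parser:

    #include <iostream>
    #include <string>

    // Hypothetical stand-in for convertTo<std::string, bool>: only the three
    // documented spellings are accepted, everything else is rejected.
    static bool stringToBool(const std::string &str, bool &result)
    {
        if (str == "0" || str == "FALSE" || str == "false") {
            result = false;
            return true;
        }
        if (str == "1" || str == "TRUE" || str == "true") {
            result = true;
            return true;
        }
        return false; // anything else is rejected rather than guessed at
    }

    // Hypothetical stand-in for convertTo<bool, std::string>.
    static bool boolToString(bool isSet, std::string &result)
    {
        result = isSet ? "true" : "false";
        return true;
    }

    int main()
    {
        bool value = false;
        std::string text;
        for (const char *input : {"1", "false", "TRUE", "yes"}) {
            bool ok = stringToBool(input, value);
            std::cout << input << " -> "
                      << (ok ? (value ? "true" : "false") : "rejected") << "\n";
        }
        boolToString(true, text); // round-trips with stringToBool above
        std::cout << "boolToString(true) -> " << text << "\n";
        return 0;
    }
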
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index e184d97..e8ef24e 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -23,8 +23,10 @@
 LOCAL_SRC_FILES:=               \
     CameraService.cpp \
     CameraDeviceFactory.cpp \
+    CameraFlashlight.cpp \
     common/Camera2ClientBase.cpp \
     common/CameraDeviceBase.cpp \
+    common/CameraModule.cpp \
     common/FrameProcessorBase.cpp \
     api1/CameraClient.cpp \
     api1/Camera2Client.cpp \
@@ -40,7 +42,6 @@
     api1/client2/CaptureSequencer.cpp \
     api1/client2/ZslProcessor3.cpp \
     api2/CameraDeviceClient.cpp \
-    api_pro/ProCamera2Client.cpp \
     device2/Camera2Device.cpp \
     device3/Camera3Device.cpp \
     device3/Camera3Stream.cpp \
@@ -52,6 +53,7 @@
     device3/StatusTracker.cpp \
     gui/RingBufferConsumer.cpp \
     utils/CameraTraces.cpp \
+    utils/AutoConditionLock.cpp \
 
 LOCAL_SHARED_LIBRARIES:= \
     libui \
@@ -60,6 +62,7 @@
     libbinder \
     libcutils \
     libmedia \
+    libmediautils \
     libcamera_client \
     libgui \
     libhardware \
@@ -70,6 +73,7 @@
 LOCAL_C_INCLUDES += \
     system/media/camera/include \
     system/media/private/camera/include \
+    frameworks/native/include/media/openmax \
     external/jpeg
 
 
diff --git a/services/camera/libcameraservice/CameraDeviceFactory.cpp b/services/camera/libcameraservice/CameraDeviceFactory.cpp
index bfef50e..6589e27 100644
--- a/services/camera/libcameraservice/CameraDeviceFactory.cpp
+++ b/services/camera/libcameraservice/CameraDeviceFactory.cpp
@@ -48,6 +48,7 @@
         case CAMERA_DEVICE_API_VERSION_3_0:
         case CAMERA_DEVICE_API_VERSION_3_1:
         case CAMERA_DEVICE_API_VERSION_3_2:
+        case CAMERA_DEVICE_API_VERSION_3_3:
             device = new Camera3Device(cameraId);
             break;
         default:
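
For context, the new CAMERA_DEVICE_API_VERSION_3_3 case simply joins the existing 3.x versions on the Camera3Device path. A standalone sketch of that dispatch, using illustrative constants and string names rather than the real device classes:

    #include <iostream>

    // Illustrative constants only; the real values come from camera_common.h.
    enum DeviceApiVersion {
        DEVICE_API_2_0 = 0x200,
        DEVICE_API_3_0 = 0x300,
        DEVICE_API_3_1 = 0x301,
        DEVICE_API_3_2 = 0x302,
        DEVICE_API_3_3 = 0x303  // version newly accepted by the factory above
    };

    // Mirrors the shape of CameraDeviceFactory::createDevice(): every HAL
    // device version from 3.0 through 3.3 shares the Camera3-based backend.
    const char *pickDeviceImplementation(int deviceVersion)
    {
        switch (deviceVersion) {
            case DEVICE_API_2_0:
                return "Camera2Device";
            case DEVICE_API_3_0:
            case DEVICE_API_3_1:
            case DEVICE_API_3_2:
            case DEVICE_API_3_3:
                return "Camera3Device";
            default:
                return nullptr; // unsupported HAL device version
        }
    }

    int main()
    {
        std::cout << pickDeviceImplementation(DEVICE_API_3_3) << "\n"; // Camera3Device
        return 0;
    }
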
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
new file mode 100644
index 0000000..280bb9d
--- /dev/null
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraFlashlight"
+#define ATRACE_TAG ATRACE_TAG_CAMERA
+// #define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <utils/Trace.h>
+#include <cutils/properties.h>
+
+#include "camera/CameraMetadata.h"
+#include "CameraFlashlight.h"
+#include "gui/IGraphicBufferConsumer.h"
+#include "gui/BufferQueue.h"
+#include "camera/camera2/CaptureRequest.h"
+#include "CameraDeviceFactory.h"
+
+
+namespace android {
+
+/////////////////////////////////////////////////////////////////////
+// CameraFlashlight implementation begins
+// used by camera service to control flashlight.
+/////////////////////////////////////////////////////////////////////
+CameraFlashlight::CameraFlashlight(CameraModule& cameraModule,
+        const camera_module_callbacks_t& callbacks) :
+        mCameraModule(&cameraModule),
+        mCallbacks(&callbacks),
+        mFlashlightMapInitialized(false) {
+}
+
+CameraFlashlight::~CameraFlashlight() {
+}
+
+status_t CameraFlashlight::createFlashlightControl(const String8& cameraId) {
+    ALOGV("%s: creating a flash light control for camera %s", __FUNCTION__,
+            cameraId.string());
+    if (mFlashControl != NULL) {
+        return INVALID_OPERATION;
+    }
+
+    status_t res = OK;
+
+    if (mCameraModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4) {
+        mFlashControl = new ModuleFlashControl(*mCameraModule, *mCallbacks);
+        if (mFlashControl == NULL) {
+            ALOGV("%s: cannot create flash control for module api v2.4+",
+                     __FUNCTION__);
+            return NO_MEMORY;
+        }
+    } else {
+        uint32_t deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
+
+        if (mCameraModule->getModuleApiVersion() >=
+                    CAMERA_MODULE_API_VERSION_2_0) {
+            camera_info info;
+            res = mCameraModule->getCameraInfo(
+                    atoi(String8(cameraId).string()), &info);
+            if (res) {
+                ALOGE("%s: failed to get camera info for camera %s",
+                        __FUNCTION__, cameraId.string());
+                return res;
+            }
+            deviceVersion = info.device_version;
+        }
+
+        if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
+            CameraDeviceClientFlashControl *flashControl =
+                    new CameraDeviceClientFlashControl(*mCameraModule,
+                                                       *mCallbacks);
+            if (!flashControl) {
+                return NO_MEMORY;
+            }
+
+            mFlashControl = flashControl;
+        } else {
+            mFlashControl =
+                    new CameraHardwareInterfaceFlashControl(*mCameraModule,
+                                                            *mCallbacks);
+        }
+    }
+
+    return OK;
+}
+
+status_t CameraFlashlight::setTorchMode(const String8& cameraId, bool enabled) {
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.");
+        return NO_INIT;
+    }
+
+    ALOGV("%s: set torch mode of camera %s to %d", __FUNCTION__,
+            cameraId.string(), enabled);
+
+    status_t res = OK;
+    Mutex::Autolock l(mLock);
+
+    if (mOpenedCameraIds.indexOf(cameraId) != NAME_NOT_FOUND) {
+        // This case is needed to avoid state corruption during the following call sequence:
+        // CameraService::setTorchMode for camera ID 0 begins, does torch status checks
+        // CameraService::connect for camera ID 0 begins, calls prepareDeviceOpen, ends
+        // CameraService::setTorchMode for camera ID 0 continues, calls
+        //        CameraFlashlight::setTorchMode
+
+        // TODO: Move torch status checks and state updates behind this CameraFlashlight lock
+        // to avoid other similar race conditions.
+        ALOGE("%s: Camera device %s is in use, cannot set torch mode.",
+                __FUNCTION__, cameraId.string());
+        return -EBUSY;
+    }
+
+    if (mFlashControl == NULL) {
+        if (enabled == false) {
+            return OK;
+        }
+
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+        res =  mFlashControl->setTorchMode(cameraId, enabled);
+        return res;
+    }
+
+    // if flash control already exists, turning on torch mode may fail if it's
+    // tied to another camera device for module v2.3 and below.
+    res = mFlashControl->setTorchMode(cameraId, enabled);
+    if (res == BAD_INDEX) {
+        // flash control is tied to another camera device, need to close it and
+        // try again.
+        mFlashControl.clear();
+        res = createFlashlightControl(cameraId);
+        if (res) {
+            return res;
+        }
+        res = mFlashControl->setTorchMode(cameraId, enabled);
+    }
+
+    return res;
+}
+
+status_t CameraFlashlight::findFlashUnits() {
+    Mutex::Autolock l(mLock);
+    status_t res;
+    int32_t numCameras = mCameraModule->getNumberOfCameras();
+
+    mHasFlashlightMap.clear();
+    mFlashlightMapInitialized = false;
+
+    for (int32_t i = 0; i < numCameras; i++) {
+        bool hasFlash = false;
+        String8 id = String8::format("%d", i);
+
+        res = createFlashlightControl(id);
+        if (res) {
+            ALOGE("%s: failed to create flash control for %s", __FUNCTION__,
+                    id.string());
+        } else {
+            res = mFlashControl->hasFlashUnit(id, &hasFlash);
+            if (res == -EUSERS || res == -EBUSY) {
+                ALOGE("%s: failed to check if camera %s has a flash unit. Some "
+                        "camera devices may be opened", __FUNCTION__,
+                        id.string());
+                return res;
+            } else if (res) {
+                ALOGE("%s: failed to check if camera %s has a flash unit. %s"
+                        " (%d)", __FUNCTION__, id.string(), strerror(-res),
+                        res);
+            }
+
+            mFlashControl.clear();
+        }
+        mHasFlashlightMap.add(id, hasFlash);
+    }
+
+    mFlashlightMapInitialized = true;
+    return OK;
+}
+
+bool CameraFlashlight::hasFlashUnit(const String8& cameraId) {
+    status_t res;
+
+    Mutex::Autolock l(mLock);
+    return hasFlashUnitLocked(cameraId);
+}
+
+bool CameraFlashlight::hasFlashUnitLocked(const String8& cameraId) {
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.");
+        return false;
+    }
+
+    ssize_t index = mHasFlashlightMap.indexOfKey(cameraId);
+    if (index == NAME_NOT_FOUND) {
+        ALOGE("%s: camera %s not present when findFlashUnits() was called",
+                __FUNCTION__, cameraId.string());
+        return false;
+    }
+
+    return mHasFlashlightMap.valueAt(index);
+}
+
+status_t CameraFlashlight::prepareDeviceOpen(const String8& cameraId) {
+    ALOGV("%s: prepare for device open", __FUNCTION__);
+
+    Mutex::Autolock l(mLock);
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.");
+        return NO_INIT;
+    }
+
+    if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
+        // The framework is going to open a camera device; all flashlight
+        // control must be closed for backward compatibility.
+        mFlashControl.clear();
+
+        if (mOpenedCameraIds.size() == 0) {
+            // notify torch unavailable for all cameras with a flash
+            int numCameras = mCameraModule->getNumberOfCameras();
+            for (int i = 0; i < numCameras; i++) {
+                if (hasFlashUnitLocked(String8::format("%d", i))) {
+                    mCallbacks->torch_mode_status_change(mCallbacks,
+                            String8::format("%d", i).string(),
+                            TORCH_MODE_STATUS_NOT_AVAILABLE);
+                }
+            }
+        }
+
+        // close flash control that may be opened by calling hasFlashUnitLocked.
+        mFlashControl.clear();
+    }
+
+    if (mOpenedCameraIds.indexOf(cameraId) == NAME_NOT_FOUND) {
+        mOpenedCameraIds.add(cameraId);
+    }
+
+    return OK;
+}
+
+status_t CameraFlashlight::deviceClosed(const String8& cameraId) {
+    ALOGV("%s: device %s is closed", __FUNCTION__, cameraId.string());
+
+    Mutex::Autolock l(mLock);
+    if (!mFlashlightMapInitialized) {
+        ALOGE("%s: findFlashUnits() must be called before this method.");
+        return NO_INIT;
+    }
+
+    ssize_t index = mOpenedCameraIds.indexOf(cameraId);
+    if (index == NAME_NOT_FOUND) {
+        ALOGE("%s: couldn't find camera %s in the opened list", __FUNCTION__,
+                cameraId.string());
+    } else {
+        mOpenedCameraIds.removeAt(index);
+    }
+
+    // Cannot do anything until all cameras are closed.
+    if (mOpenedCameraIds.size() != 0)
+        return OK;
+
+    if (mCameraModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_4) {
+        // notify torch available for all cameras with a flash
+        int numCameras = mCameraModule->getNumberOfCameras();
+        for (int i = 0; i < numCameras; i++) {
+            if (hasFlashUnitLocked(String8::format("%d", i))) {
+                mCallbacks->torch_mode_status_change(mCallbacks,
+                        String8::format("%d", i).string(),
+                        TORCH_MODE_STATUS_AVAILABLE_OFF);
+            }
+        }
+    }
+
+    return OK;
+}
+// CameraFlashlight implementation ends
+
+
+FlashControlBase::~FlashControlBase() {
+}
+
+/////////////////////////////////////////////////////////////////////
+// ModuleFlashControl implementation begins
+// Flash control for camera module v2.4 and above.
+/////////////////////////////////////////////////////////////////////
+ModuleFlashControl::ModuleFlashControl(CameraModule& cameraModule,
+        const camera_module_callbacks_t& callbacks) :
+    mCameraModule(&cameraModule) {
+}
+
+ModuleFlashControl::~ModuleFlashControl() {
+}
+
+status_t ModuleFlashControl::hasFlashUnit(const String8& cameraId, bool *hasFlash) {
+    if (!hasFlash) {
+        return BAD_VALUE;
+    }
+
+    *hasFlash = false;
+    Mutex::Autolock l(mLock);
+
+    camera_info info;
+    status_t res = mCameraModule->getCameraInfo(atoi(cameraId.string()),
+            &info);
+    if (res != 0) {
+        return res;
+    }
+
+    CameraMetadata metadata;
+    metadata = info.static_camera_characteristics;
+    camera_metadata_entry flashAvailable =
+            metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
+    if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
+        *hasFlash = true;
+    }
+
+    return OK;
+}
+
+status_t ModuleFlashControl::setTorchMode(const String8& cameraId, bool enabled) {
+    ALOGV("%s: set camera %s torch mode to %d", __FUNCTION__,
+            cameraId.string(), enabled);
+
+    Mutex::Autolock l(mLock);
+    return mCameraModule->setTorchMode(cameraId.string(), enabled);
+}
+// ModuleFlashControl implementation ends
+
+/////////////////////////////////////////////////////////////////////
+// CameraDeviceClientFlashControl implementation begins
+// Flash control for camera module <= v2.3 and camera HAL v2-v3
+/////////////////////////////////////////////////////////////////////
+CameraDeviceClientFlashControl::CameraDeviceClientFlashControl(
+        CameraModule& cameraModule,
+        const camera_module_callbacks_t& callbacks) :
+        mCameraModule(&cameraModule),
+        mCallbacks(&callbacks),
+        mTorchEnabled(false),
+        mMetadata(NULL),
+        mStreaming(false) {
+}
+
+CameraDeviceClientFlashControl::~CameraDeviceClientFlashControl() {
+    disconnectCameraDevice();
+    if (mMetadata) {
+        delete mMetadata;
+    }
+
+    mSurface.clear();
+    mSurfaceTexture.clear();
+    mProducer.clear();
+    mConsumer.clear();
+
+    if (mTorchEnabled) {
+        if (mCallbacks) {
+            ALOGV("%s: notify the framework that torch was turned off",
+                    __FUNCTION__);
+            mCallbacks->torch_mode_status_change(mCallbacks,
+                    mCameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
+        }
+    }
+}
+
+status_t CameraDeviceClientFlashControl::initializeSurface(
+        sp<CameraDeviceBase> &device, int32_t width, int32_t height) {
+    status_t res;
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+    mSurfaceTexture = new GLConsumer(mConsumer, 0, GLConsumer::TEXTURE_EXTERNAL,
+            true, true);
+    if (mSurfaceTexture == NULL) {
+        return NO_MEMORY;
+    }
+
+    int32_t format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    res = mSurfaceTexture->setDefaultBufferSize(width, height);
+    if (res) {
+        return res;
+    }
+    res = mSurfaceTexture->setDefaultBufferFormat(format);
+    if (res) {
+        return res;
+    }
+
+    mSurface = new Surface(mProducer, /*useAsync*/ true);
+    if (mSurface == NULL) {
+        return NO_MEMORY;
+    }
+    res = device->createStream(mSurface, width, height, format,
+            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mStreamId);
+    if (res) {
+        return res;
+    }
+
+    res = device->configureStreams();
+    if (res) {
+        return res;
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClientFlashControl::getSmallestSurfaceSize(
+        const camera_info& info, int32_t *width, int32_t *height) {
+    if (!width || !height) {
+        return BAD_VALUE;
+    }
+
+    int32_t w = INT32_MAX;
+    int32_t h = 1;
+
+    CameraMetadata metadata;
+    metadata = info.static_camera_characteristics;
+    camera_metadata_entry streamConfigs =
+            metadata.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+    for (size_t i = 0; i < streamConfigs.count; i += 4) {
+        int32_t fmt = streamConfigs.data.i32[i];
+        if (fmt == ANDROID_SCALER_AVAILABLE_FORMATS_IMPLEMENTATION_DEFINED) {
+            int32_t ww = streamConfigs.data.i32[i + 1];
+            int32_t hh = streamConfigs.data.i32[i + 2];
+
+            if (w * h > ww * hh) {
+                w = ww;
+                h = hh;
+            }
+        }
+    }
+
+    // if stream configuration is not found, try available processed sizes.
+    if (streamConfigs.count == 0) {
+        camera_metadata_entry availableProcessedSizes =
+            metadata.find(ANDROID_SCALER_AVAILABLE_PROCESSED_SIZES);
+        for (size_t i = 0; i < availableProcessedSizes.count; i += 2) {
+            int32_t ww = availableProcessedSizes.data.i32[i];
+            int32_t hh = availableProcessedSizes.data.i32[i + 1];
+            if (w * h > ww * hh) {
+                w = ww;
+                h = hh;
+            }
+        }
+    }
+
+    if (w == INT32_MAX) {
+        return NAME_NOT_FOUND;
+    }
+
+    *width = w;
+    *height = h;
+
+    return OK;
+}
+
+status_t CameraDeviceClientFlashControl::connectCameraDevice(
+        const String8& cameraId) {
+    camera_info info;
+    status_t res = mCameraModule->getCameraInfo(atoi(cameraId.string()), &info);
+    if (res != 0) {
+        ALOGE("%s: failed to get camera info for camera %s", __FUNCTION__,
+                cameraId.string());
+        return res;
+    }
+
+    sp<CameraDeviceBase> device =
+            CameraDeviceFactory::createDevice(atoi(cameraId.string()));
+    if (device == NULL) {
+        return NO_MEMORY;
+    }
+
+    res = device->initialize(mCameraModule);
+    if (res) {
+        return res;
+    }
+
+    int32_t width, height;
+    res = getSmallestSurfaceSize(info, &width, &height);
+    if (res) {
+        return res;
+    }
+    res = initializeSurface(device, width, height);
+    if (res) {
+        return res;
+    }
+
+    mCameraId = cameraId;
+    mStreaming = (info.device_version <= CAMERA_DEVICE_API_VERSION_3_1);
+    mDevice = device;
+
+    return OK;
+}
+
+status_t CameraDeviceClientFlashControl::disconnectCameraDevice() {
+    if (mDevice != NULL) {
+        mDevice->disconnect();
+        mDevice.clear();
+    }
+
+    return OK;
+}
+
+
+
+status_t CameraDeviceClientFlashControl::hasFlashUnit(const String8& cameraId,
+        bool *hasFlash) {
+    ALOGV("%s: checking if camera %s has a flash unit", __FUNCTION__,
+            cameraId.string());
+
+    Mutex::Autolock l(mLock);
+    return hasFlashUnitLocked(cameraId, hasFlash);
+
+}
+
+status_t CameraDeviceClientFlashControl::hasFlashUnitLocked(
+        const String8& cameraId, bool *hasFlash) {
+    if (!hasFlash) {
+        return BAD_VALUE;
+    }
+
+    camera_info info;
+    status_t res = mCameraModule->getCameraInfo(
+            atoi(cameraId.string()), &info);
+    if (res != 0) {
+        ALOGE("%s: failed to get camera info for camera %s", __FUNCTION__,
+                cameraId.string());
+        return res;
+    }
+
+    CameraMetadata metadata;
+    metadata = info.static_camera_characteristics;
+    camera_metadata_entry flashAvailable =
+            metadata.find(ANDROID_FLASH_INFO_AVAILABLE);
+    if (flashAvailable.count == 1 && flashAvailable.data.u8[0] == 1) {
+        *hasFlash = true;
+    }
+
+    return OK;
+}
+
+status_t CameraDeviceClientFlashControl::submitTorchEnabledRequest() {
+    status_t res;
+
+    if (mMetadata == NULL) {
+        mMetadata = new CameraMetadata();
+        if (mMetadata == NULL) {
+            return NO_MEMORY;
+        }
+        res = mDevice->createDefaultRequest(
+                CAMERA3_TEMPLATE_PREVIEW, mMetadata);
+        if (res) {
+            return res;
+        }
+    }
+
+    uint8_t torchOn = ANDROID_FLASH_MODE_TORCH;
+    mMetadata->update(ANDROID_FLASH_MODE, &torchOn, 1);
+    mMetadata->update(ANDROID_REQUEST_OUTPUT_STREAMS, &mStreamId, 1);
+
+    uint8_t aeMode = ANDROID_CONTROL_AE_MODE_ON;
+    mMetadata->update(ANDROID_CONTROL_AE_MODE, &aeMode, 1);
+
+    int32_t requestId = 0;
+    mMetadata->update(ANDROID_REQUEST_ID, &requestId, 1);
+
+    if (mStreaming) {
+        res = mDevice->setStreamingRequest(*mMetadata);
+    } else {
+        res = mDevice->capture(*mMetadata);
+    }
+    return res;
+}
+
+
+
+
+status_t CameraDeviceClientFlashControl::setTorchMode(
+        const String8& cameraId, bool enabled) {
+    bool hasFlash = false;
+
+    Mutex::Autolock l(mLock);
+    status_t res = hasFlashUnitLocked(cameraId, &hasFlash);
+
+    // pre-check
+    if (enabled) {
+        // invalid camera?
+        if (res) {
+            return -EINVAL;
+        }
+        // no flash unit?
+        if (!hasFlash) {
+            return -ENOSYS;
+        }
+        // already opened for a different device?
+        if (mDevice != NULL && cameraId != mCameraId) {
+            return BAD_INDEX;
+        }
+    } else if (mDevice == NULL || cameraId != mCameraId) {
+        // disabling the torch mode of an un-opened or different device.
+        return OK;
+    } else {
+        // disabling the torch mode of currently opened device
+        disconnectCameraDevice();
+        mTorchEnabled = false;
+        mCallbacks->torch_mode_status_change(mCallbacks,
+            cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
+        return OK;
+    }
+
+    if (mDevice == NULL) {
+        res = connectCameraDevice(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    res = submitTorchEnabledRequest();
+    if (res) {
+        return res;
+    }
+
+    mTorchEnabled = true;
+    mCallbacks->torch_mode_status_change(mCallbacks,
+            cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_ON);
+    return OK;
+}
+// CameraDeviceClientFlashControl implementation ends
+
+
+/////////////////////////////////////////////////////////////////////
+// CameraHardwareInterfaceFlashControl implementation begins
+// Flash control for camera module <= v2.3 and camera HAL v1
+/////////////////////////////////////////////////////////////////////
+CameraHardwareInterfaceFlashControl::CameraHardwareInterfaceFlashControl(
+        CameraModule& cameraModule,
+        const camera_module_callbacks_t& callbacks) :
+        mCameraModule(&cameraModule),
+        mCallbacks(&callbacks),
+        mTorchEnabled(false) {
+
+}
+
+CameraHardwareInterfaceFlashControl::~CameraHardwareInterfaceFlashControl() {
+    disconnectCameraDevice();
+
+    mSurface.clear();
+    mSurfaceTexture.clear();
+    mProducer.clear();
+    mConsumer.clear();
+
+    if (mTorchEnabled) {
+        if (mCallbacks) {
+            ALOGV("%s: notify the framework that torch was turned off",
+                    __FUNCTION__);
+            mCallbacks->torch_mode_status_change(mCallbacks,
+                    mCameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
+        }
+    }
+}
+
+status_t CameraHardwareInterfaceFlashControl::setTorchMode(
+        const String8& cameraId, bool enabled) {
+    Mutex::Autolock l(mLock);
+
+    // pre-check
+    status_t res;
+    if (enabled) {
+        bool hasFlash = false;
+        res = hasFlashUnitLocked(cameraId, &hasFlash);
+        // invalid camera?
+        if (res) {
+            // hasFlashUnitLocked() returns BAD_INDEX if mDevice is connected to
+            // another camera device.
+            return res == BAD_INDEX ? BAD_INDEX : -EINVAL;
+        }
+        // no flash unit?
+        if (!hasFlash) {
+            return -ENOSYS;
+        }
+    } else if (mDevice == NULL || cameraId != mCameraId) {
+        // disabling the torch mode of an un-opened or different device.
+        return OK;
+    } else {
+        // disabling the torch mode of currently opened device
+        disconnectCameraDevice();
+        mTorchEnabled = false;
+        mCallbacks->torch_mode_status_change(mCallbacks,
+            cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_OFF);
+        return OK;
+    }
+
+    res = startPreviewAndTorch();
+    if (res) {
+        return res;
+    }
+
+    mTorchEnabled = true;
+    mCallbacks->torch_mode_status_change(mCallbacks,
+            cameraId.string(), TORCH_MODE_STATUS_AVAILABLE_ON);
+    return OK;
+}
+
+status_t CameraHardwareInterfaceFlashControl::hasFlashUnit(
+        const String8& cameraId, bool *hasFlash) {
+    Mutex::Autolock l(mLock);
+    return hasFlashUnitLocked(cameraId, hasFlash);
+}
+
+status_t CameraHardwareInterfaceFlashControl::hasFlashUnitLocked(
+        const String8& cameraId, bool *hasFlash) {
+    if (!hasFlash) {
+        return BAD_VALUE;
+    }
+
+    status_t res;
+    if (mDevice == NULL) {
+        res = connectCameraDevice(cameraId);
+        if (res) {
+            return res;
+        }
+    }
+
+    if (cameraId != mCameraId) {
+        return BAD_INDEX;
+    }
+
+    const char *flashMode =
+            mParameters.get(CameraParameters::KEY_SUPPORTED_FLASH_MODES);
+    if (flashMode && strstr(flashMode, CameraParameters::FLASH_MODE_TORCH)) {
+        *hasFlash = true;
+    } else {
+        *hasFlash = false;
+    }
+
+    return OK;
+}
+
+status_t CameraHardwareInterfaceFlashControl::startPreviewAndTorch() {
+    status_t res = OK;
+    res = mDevice->startPreview();
+    if (res) {
+        ALOGE("%s: start preview failed. %s (%d)", __FUNCTION__,
+                strerror(-res), res);
+        return res;
+    }
+
+    mParameters.set(CameraParameters::KEY_FLASH_MODE,
+            CameraParameters::FLASH_MODE_TORCH);
+
+    return mDevice->setParameters(mParameters);
+}
+
+status_t CameraHardwareInterfaceFlashControl::getSmallestSurfaceSize(
+        int32_t *width, int32_t *height) {
+    if (!width || !height) {
+        return BAD_VALUE;
+    }
+
+    int32_t w = INT32_MAX;
+    int32_t h = 1;
+    Vector<Size> sizes;
+
+    mParameters.getSupportedPreviewSizes(sizes);
+    for (size_t i = 0; i < sizes.size(); i++) {
+        Size s = sizes[i];
+        if (w * h > s.width * s.height) {
+            w = s.width;
+            h = s.height;
+        }
+    }
+
+    if (w == INT32_MAX) {
+        return NAME_NOT_FOUND;
+    }
+
+    *width = w;
+    *height = h;
+
+    return OK;
+}
+
+status_t CameraHardwareInterfaceFlashControl::initializePreviewWindow(
+        sp<CameraHardwareInterface> device, int32_t width, int32_t height) {
+    status_t res;
+    BufferQueue::createBufferQueue(&mProducer, &mConsumer);
+
+    mSurfaceTexture = new GLConsumer(mConsumer, 0, GLConsumer::TEXTURE_EXTERNAL,
+            true, true);
+    if (mSurfaceTexture == NULL) {
+        return NO_MEMORY;
+    }
+
+    int32_t format = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    res = mSurfaceTexture->setDefaultBufferSize(width, height);
+    if (res) {
+        return res;
+    }
+    res = mSurfaceTexture->setDefaultBufferFormat(format);
+    if (res) {
+        return res;
+    }
+
+    mSurface = new Surface(mProducer, /*useAsync*/ true);
+    if (mSurface == NULL) {
+        return NO_MEMORY;
+    }
+
+    res = native_window_api_connect(mSurface.get(), NATIVE_WINDOW_API_CAMERA);
+    if (res) {
+        ALOGE("%s: Unable to connect to native window", __FUNCTION__);
+        return res;
+    }
+
+    return device->setPreviewWindow(mSurface);
+}
+
+status_t CameraHardwareInterfaceFlashControl::connectCameraDevice(
+        const String8& cameraId) {
+    sp<CameraHardwareInterface> device =
+            new CameraHardwareInterface(cameraId.string());
+
+    status_t res = device->initialize(mCameraModule);
+    if (res) {
+        ALOGE("%s: initializing camera %s failed", __FUNCTION__,
+                cameraId.string());
+        return res;
+    }
+
+    // need to set __get_memory in set_callbacks().
+    device->setCallbacks(NULL, NULL, NULL, NULL);
+
+    mParameters = device->getParameters();
+
+    int32_t width, height;
+    res = getSmallestSurfaceSize(&width, &height);
+    if (res) {
+        ALOGE("%s: failed to get smallest surface size for camera %s",
+                __FUNCTION__, cameraId.string());
+        return res;
+    }
+
+    res = initializePreviewWindow(device, width, height);
+    if (res) {
+        ALOGE("%s: failed to initialize preview window for camera %s",
+                __FUNCTION__, cameraId.string());
+        return res;
+    }
+
+    mCameraId = cameraId;
+    mDevice = device;
+    return OK;
+}
+
+status_t CameraHardwareInterfaceFlashControl::disconnectCameraDevice() {
+    if (mDevice == NULL) {
+        return OK;
+    }
+
+    mParameters.set(CameraParameters::KEY_FLASH_MODE,
+            CameraParameters::FLASH_MODE_OFF);
+    mDevice->setParameters(mParameters);
+    mDevice->stopPreview();
+    status_t res = native_window_api_disconnect(mSurface.get(),
+            NATIVE_WINDOW_API_CAMERA);
+    if (res) {
+        ALOGW("%s: native_window_api_disconnect failed: %s (%d)",
+                __FUNCTION__, strerror(-res), res);
+    }
+    mDevice->setPreviewWindow(NULL);
+    mDevice->release();
+
+    return OK;
+}
+// CameraHardwareInterfaceFlashControl implementation ends
+
+} // namespace android
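
createFlashlightControl() above chooses between three flash control implementations based on the camera module and device API versions. A minimal standalone sketch of that decision, with hypothetical constants and string names standing in for the real classes:

    #include <iostream>
    #include <string>

    // Hypothetical stand-ins for the HAL version constants used in this file.
    enum { MODULE_API_2_4 = 0x204 };
    enum { DEVICE_API_1_0 = 0x100, DEVICE_API_2_0 = 0x200 };

    // Mirrors the selection in CameraFlashlight::createFlashlightControl():
    //   module v2.4+            -> ModuleFlashControl (HAL manages the torch)
    //   camera device HAL v2/v3 -> CameraDeviceClientFlashControl
    //   camera device HAL v1    -> CameraHardwareInterfaceFlashControl
    std::string pickFlashControl(int moduleApiVersion, int deviceApiVersion)
    {
        if (moduleApiVersion >= MODULE_API_2_4) {
            return "ModuleFlashControl";
        }
        if (deviceApiVersion >= DEVICE_API_2_0) {
            return "CameraDeviceClientFlashControl";
        }
        return "CameraHardwareInterfaceFlashControl";
    }

    int main()
    {
        std::cout << pickFlashControl(0x204, DEVICE_API_2_0) << "\n";
        std::cout << pickFlashControl(0x200, DEVICE_API_1_0) << "\n";
        return 0;
    }
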
diff --git a/services/camera/libcameraservice/CameraFlashlight.h b/services/camera/libcameraservice/CameraFlashlight.h
new file mode 100644
index 0000000..4d5fe8d
--- /dev/null
+++ b/services/camera/libcameraservice/CameraFlashlight.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERAFLASHLIGHT_H
+#define ANDROID_SERVERS_CAMERA_CAMERAFLASHLIGHT_H
+
+#include "hardware/camera_common.h"
+#include "utils/KeyedVector.h"
+#include "utils/SortedVector.h"
+#include "gui/GLConsumer.h"
+#include "gui/Surface.h"
+#include "common/CameraDeviceBase.h"
+#include "device1/CameraHardwareInterface.h"
+
+namespace android {
+
+/**
+ * FlashControlBase is a base class for flash control. It defines the functions
+ * that a flash control for each camera module/device version should implement.
+ */
+class FlashControlBase : public virtual VirtualLightRefBase {
+    public:
+        virtual ~FlashControlBase();
+
+        // Whether a camera device has a flash unit. Calling this function may
+        // cause the torch mode to be turned off in HAL v1 devices. If
+        // previously-on torch mode is turned off,
+        // callbacks.torch_mode_status_change() should be invoked.
+        virtual status_t hasFlashUnit(const String8& cameraId,
+                    bool *hasFlash) = 0;
+
+        // set the torch mode to on or off.
+        virtual status_t setTorchMode(const String8& cameraId,
+                    bool enabled) = 0;
+};
+
+/**
+ * CameraFlashlight can be used by camera service to control flashlight.
+ */
+class CameraFlashlight : public virtual VirtualLightRefBase {
+    public:
+        CameraFlashlight(CameraModule& cameraModule,
+                const camera_module_callbacks_t& callbacks);
+        virtual ~CameraFlashlight();
+
+        // Find all flash units. This must be called before other methods. All
+        // camera devices must be closed when it's called because HAL v1 devices
+        // need to be opened to query available flash modes.
+        status_t findFlashUnits();
+
+        // Whether a camera device has a flash unit. Before findFlashUnits() is
+        // called, this function always returns false.
+        bool hasFlashUnit(const String8& cameraId);
+
+        // set the torch mode to on or off.
+        status_t setTorchMode(const String8& cameraId, bool enabled);
+
+        // Notify CameraFlashlight that camera service is going to open a camera
+        // device. CameraFlashlight will free the resources that may cause the
+        // camera open to fail. Camera service must call this function before
+        // opening a camera device.
+        status_t prepareDeviceOpen(const String8& cameraId);
+
+        // Notify CameraFlashlight that camera service has closed a camera
+        // device. CameraFlashlight may invoke callbacks for torch mode
+        // available depending on the implementation.
+        status_t deviceClosed(const String8& cameraId);
+
+    private:
+        // create flashlight control based on camera module API and camera
+        // device API versions.
+        status_t createFlashlightControl(const String8& cameraId);
+
+        // mLock should be locked.
+        bool hasFlashUnitLocked(const String8& cameraId);
+
+        sp<FlashControlBase> mFlashControl;
+        CameraModule *mCameraModule;
+        const camera_module_callbacks_t *mCallbacks;
+        SortedVector<String8> mOpenedCameraIds;
+
+        // camera id -> if it has a flash unit
+        KeyedVector<String8, bool> mHasFlashlightMap;
+        bool mFlashlightMapInitialized;
+
+        Mutex mLock; // protect CameraFlashlight API
+};
+
+/**
+ * Flash control for camera module v2.4 and above.
+ */
+class ModuleFlashControl : public FlashControlBase {
+    public:
+        ModuleFlashControl(CameraModule& cameraModule,
+                const camera_module_callbacks_t& callbacks);
+        virtual ~ModuleFlashControl();
+
+        // FlashControlBase
+        status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
+        status_t setTorchMode(const String8& cameraId, bool enabled);
+
+    private:
+        CameraModule *mCameraModule;
+
+        Mutex mLock;
+};
+
+/**
+ * Flash control for camera module <= v2.3 and camera HAL v2-v3
+ */
+class CameraDeviceClientFlashControl : public FlashControlBase {
+    public:
+        CameraDeviceClientFlashControl(CameraModule& cameraModule,
+                const camera_module_callbacks_t& callbacks);
+        virtual ~CameraDeviceClientFlashControl();
+
+        // FlashControlBase
+        status_t setTorchMode(const String8& cameraId, bool enabled);
+        status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
+
+    private:
+        // connect to a camera device
+        status_t connectCameraDevice(const String8& cameraId);
+        // disconnect and free mDevice
+        status_t disconnectCameraDevice();
+
+        // initialize a surface
+        status_t initializeSurface(sp<CameraDeviceBase>& device, int32_t width,
+                int32_t height);
+
+        // submit a request to enable the torch mode
+        status_t submitTorchEnabledRequest();
+
+        // get the smallest surface size of IMPLEMENTATION_DEFINED
+        status_t getSmallestSurfaceSize(const camera_info& info, int32_t *width,
+                    int32_t *height);
+
+        // protected by mLock
+        status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash);
+
+        CameraModule *mCameraModule;
+        const camera_module_callbacks_t *mCallbacks;
+        String8 mCameraId;
+        bool mTorchEnabled;
+        CameraMetadata *mMetadata;
+        // WORKAROUND: will be set to true for HAL v2 devices where
+        // setStreamingRequest() needs to be called for torch mode settings to
+        // take effect.
+        bool mStreaming;
+
+        sp<CameraDeviceBase> mDevice;
+
+        sp<IGraphicBufferProducer> mProducer;
+        sp<IGraphicBufferConsumer>  mConsumer;
+        sp<GLConsumer> mSurfaceTexture;
+        sp<Surface> mSurface;
+        int32_t mStreamId;
+
+        Mutex mLock;
+};
+
+/**
+ * Flash control for camera module <= v2.3 and camera HAL v1
+ */
+class CameraHardwareInterfaceFlashControl : public FlashControlBase {
+    public:
+        CameraHardwareInterfaceFlashControl(CameraModule& cameraModule,
+                const camera_module_callbacks_t& callbacks);
+        virtual ~CameraHardwareInterfaceFlashControl();
+
+        // FlashControlBase
+        status_t setTorchMode(const String8& cameraId, bool enabled);
+        status_t hasFlashUnit(const String8& cameraId, bool *hasFlash);
+
+    private:
+        // connect to a camera device
+        status_t connectCameraDevice(const String8& cameraId);
+
+        // disconnect and free mDevice
+        status_t disconnectCameraDevice();
+
+        // initialize the preview window
+        status_t initializePreviewWindow(sp<CameraHardwareInterface> device,
+                int32_t width, int32_t height);
+
+        // start preview and enable torch
+        status_t startPreviewAndTorch();
+
+        // get the smallest surface
+        status_t getSmallestSurfaceSize(int32_t *width, int32_t *height);
+
+        // protected by mLock
+        status_t hasFlashUnitLocked(const String8& cameraId, bool *hasFlash);
+
+        CameraModule *mCameraModule;
+        const camera_module_callbacks_t *mCallbacks;
+        sp<CameraHardwareInterface> mDevice;
+        String8 mCameraId;
+        CameraParameters mParameters;
+        bool mTorchEnabled;
+
+        sp<IGraphicBufferProducer> mProducer;
+        sp<IGraphicBufferConsumer>  mConsumer;
+        sp<GLConsumer> mSurfaceTexture;
+        sp<Surface> mSurface;
+
+        Mutex mLock;
+};
+
+} // namespace android
+
+#endif
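
The header documents an ordering contract: findFlashUnits() must be called before hasFlashUnit() or setTorchMode(), prepareDeviceOpen() before a camera device is opened, and deviceClosed() afterwards. A minimal standalone model of that gate (hypothetical names and simplified error codes, not the real implementation):

    #include <iostream>
    #include <set>
    #include <string>

    // Minimal model of the initialization gate enforced throughout
    // CameraFlashlight: every public call checks that the flash-unit map was
    // built by findFlashUnits() and fails with NO_INIT otherwise.
    class FlashlightModel {
    public:
        static const int OK = 0;
        static const int NO_INIT = -19; // stand-in for the real error code

        int findFlashUnits() {
            mInitialized = true;        // the real code probes every camera here
            return OK;
        }

        int setTorchMode(const std::string& cameraId, bool enabled) {
            if (!mInitialized) return NO_INIT;
            if (mOpened.count(cameraId)) return -16; // -EBUSY: device is in use
            std::cout << "torch " << (enabled ? "on" : "off")
                      << " for camera " << cameraId << "\n";
            return OK;
        }

        int prepareDeviceOpen(const std::string& cameraId) {
            if (!mInitialized) return NO_INIT;
            mOpened.insert(cameraId);   // torch becomes unavailable while open
            return OK;
        }

        int deviceClosed(const std::string& cameraId) {
            if (!mInitialized) return NO_INIT;
            mOpened.erase(cameraId);    // torch may become available again
            return OK;
        }

    private:
        bool mInitialized = false;
        std::set<std::string> mOpened;
    };

    int main() {
        FlashlightModel f;
        std::cout << f.setTorchMode("0", true) << "\n"; // NO_INIT: too early
        f.findFlashUnits();
        std::cout << f.setTorchMode("0", true) << "\n"; // OK now
        f.prepareDeviceOpen("0");
        std::cout << f.setTorchMode("0", true) << "\n"; // busy while device open
        return 0;
    }
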
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 1232c32..43a8ec4 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -17,9 +17,14 @@
 #define LOG_TAG "CameraService"
 //#define LOG_NDEBUG 0
 
+#include <algorithm>
+#include <climits>
 #include <stdio.h>
-#include <string.h>
+#include <cstring>
+#include <ctime>
+#include <string>
 #include <sys/types.h>
+#include <inttypes.h>
 #include <pthread.h>
 
 #include <binder/AppOpsManager.h>
@@ -27,6 +32,8 @@
 #include <binder/IServiceManager.h>
 #include <binder/MemoryBase.h>
 #include <binder/MemoryHeapBase.h>
+#include <binder/ProcessInfoService.h>
+#include <camera/ICameraServiceProxy.h>
 #include <cutils/atomic.h>
 #include <cutils/properties.h>
 #include <gui/Surface.h>
@@ -34,6 +41,7 @@
 #include <media/AudioSystem.h>
 #include <media/IMediaHTTPService.h>
 #include <media/mediaplayer.h>
+#include <mediautils/BatteryNotifier.h>
 #include <utils/Errors.h>
 #include <utils/Log.h>
 #include <utils/String16.h>
@@ -45,7 +53,6 @@
 #include "CameraService.h"
 #include "api1/CameraClient.h"
 #include "api1/Camera2Client.h"
-#include "api_pro/ProCamera2Client.h"
 #include "api2/CameraDeviceClient.h"
 #include "utils/CameraTraces.h"
 #include "CameraDeviceFactory.h"
@@ -66,25 +73,48 @@
 
 // ----------------------------------------------------------------------------
 
-static int getCallingPid() {
-    return IPCThreadState::self()->getCallingPid();
-}
-
-static int getCallingUid() {
-    return IPCThreadState::self()->getCallingUid();
-}
-
 extern "C" {
 static void camera_device_status_change(
         const struct camera_module_callbacks* callbacks,
         int camera_id,
         int new_status) {
     sp<CameraService> cs = const_cast<CameraService*>(
+            static_cast<const CameraService*>(callbacks));
+
+    cs->onDeviceStatusChanged(static_cast<camera_device_status_t>(camera_id),
+            static_cast<camera_device_status_t>(new_status));
+}
+
+static void torch_mode_status_change(
+        const struct camera_module_callbacks* callbacks,
+        const char* camera_id,
+        int new_status) {
+    if (!callbacks || !camera_id) {
+        ALOGE("%s invalid parameters. callbacks %p, camera_id %p", __FUNCTION__, callbacks, camera_id);
+        return; // do not dereference null parameters below
+    }
+    sp<CameraService> cs = const_cast<CameraService*>(
                                 static_cast<const CameraService*>(callbacks));
 
-    cs->onDeviceStatusChanged(
-        camera_id,
-        new_status);
+    ICameraServiceListener::TorchStatus status;
+    switch (new_status) {
+        case TORCH_MODE_STATUS_NOT_AVAILABLE:
+            status = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+            break;
+        case TORCH_MODE_STATUS_AVAILABLE_OFF:
+            status = ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF;
+            break;
+        case TORCH_MODE_STATUS_AVAILABLE_ON:
+            status = ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON;
+            break;
+        default:
+            ALOGE("Unknown torch status %d", new_status);
+            return;
+    }
+
+    cs->onTorchStatusChanged(
+        String8(camera_id),
+        status);
 }
 } // extern "C"
 
@@ -94,134 +124,294 @@
 // should be ok for now.
 static CameraService *gCameraService;
 
-CameraService::CameraService()
-    :mSoundRef(0), mModule(0)
-{
+CameraService::CameraService() : mEventLog(DEFAULT_EVENT_LOG_LENGTH), mAllowedUsers(),
+        mSoundRef(0), mModule(0), mFlashlight(0) {
     ALOGI("CameraService started (pid=%d)", getpid());
     gCameraService = this;
 
-    for (size_t i = 0; i < MAX_CAMERAS; ++i) {
-        mStatusList[i] = ICameraServiceListener::STATUS_PRESENT;
-    }
-
     this->camera_device_status_change = android::camera_device_status_change;
+    this->torch_mode_status_change = android::torch_mode_status_change;
+
+    mServiceLockWrapper = std::make_shared<WaitableMutexWrapper>(&mServiceLock);
 }
 
 void CameraService::onFirstRef()
 {
-    LOG1("CameraService::onFirstRef");
+    ALOGI("CameraService process starting");
 
     BnCameraService::onFirstRef();
 
-    if (hw_get_module(CAMERA_HARDWARE_MODULE_ID,
-                (const hw_module_t **)&mModule) < 0) {
-        ALOGE("Could not load camera HAL module");
+    // Update battery life tracking if service is restarting
+    BatteryNotifier& notifier(BatteryNotifier::getInstance());
+    notifier.noteResetCamera();
+    notifier.noteResetFlashlight();
+
+    camera_module_t *rawModule;
+    int err = hw_get_module(CAMERA_HARDWARE_MODULE_ID,
+            (const hw_module_t **)&rawModule);
+    if (err < 0) {
+        ALOGE("Could not load camera HAL module: %d (%s)", err, strerror(-err));
+        logServiceError("Could not load camera HAL module", err);
         mNumberOfCameras = 0;
+        return;
     }
-    else {
-        ALOGI("Loaded \"%s\" camera module", mModule->common.name);
-        mNumberOfCameras = mModule->get_number_of_cameras();
-        if (mNumberOfCameras > MAX_CAMERAS) {
-            ALOGE("Number of cameras(%d) > MAX_CAMERAS(%d).",
-                    mNumberOfCameras, MAX_CAMERAS);
-            mNumberOfCameras = MAX_CAMERAS;
-        }
-        for (int i = 0; i < mNumberOfCameras; i++) {
-            setCameraFree(i);
-        }
 
-        if (mModule->common.module_api_version >=
-                CAMERA_MODULE_API_VERSION_2_1) {
-            mModule->set_callbacks(this);
-        }
+    mModule = new CameraModule(rawModule);
+    ALOGI("Loaded \"%s\" camera module", mModule->getModuleName());
+    err = mModule->init();
+    if (err != OK) {
+        ALOGE("Could not initialize camera HAL module: %d (%s)", err,
+            strerror(-err));
+        logServiceError("Could not initialize camera HAL module", err);
 
-        VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-
-        if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_2) {
-            setUpVendorTags();
-        }
-
-        CameraDeviceFactory::registerService(this);
+        mNumberOfCameras = 0;
+        delete mModule;
+        mModule = nullptr;
+        return;
     }
-}
 
-CameraService::~CameraService() {
+    mNumberOfCameras = mModule->getNumberOfCameras();
+    mNumberOfNormalCameras = mNumberOfCameras;
+
+    mFlashlight = new CameraFlashlight(*mModule, *this);
+    status_t res = mFlashlight->findFlashUnits();
+    if (res) {
+        // impossible because we haven't open any camera devices.
+        ALOGE("Failed to find flash units.");
+    }
+
+    int latestStrangeCameraId = INT_MAX;
     for (int i = 0; i < mNumberOfCameras; i++) {
-        if (mBusy[i]) {
-            ALOGE("camera %d is still in use in destructor!", i);
+        String8 cameraId = String8::format("%d", i);
+
+        // Get camera info
+
+        struct camera_info info;
+        bool haveInfo = true;
+        status_t rc = mModule->getCameraInfo(i, &info);
+        if (rc != NO_ERROR) {
+            ALOGE("%s: Received error loading camera info for device %d, cost and"
+                    " conflicting devices fields set to defaults for this device.",
+                    __FUNCTION__, i);
+            haveInfo = false;
         }
+
+        // Check for backwards-compatibility support
+        if (haveInfo) {
+            if (checkCameraCapabilities(i, info, &latestStrangeCameraId) != OK) {
+                delete mModule;
+                mModule = nullptr;
+                return;
+            }
+        }
+
+        // Defaults to use for cost and conflicting devices
+        int cost = 100;
+        char** conflicting_devices = nullptr;
+        size_t conflicting_devices_length = 0;
+
+        // If using post-2.4 module version, query the cost + conflicting devices from the HAL
+        if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4 && haveInfo) {
+            cost = info.resource_cost;
+            conflicting_devices = info.conflicting_devices;
+            conflicting_devices_length = info.conflicting_devices_length;
+        }
+
+        std::set<String8> conflicting;
+        for (size_t i = 0; i < conflicting_devices_length; i++) {
+            conflicting.emplace(String8(conflicting_devices[i]));
+        }
+
+        // Initialize state for each camera device
+        {
+            Mutex::Autolock lock(mCameraStatesLock);
+            mCameraStates.emplace(cameraId, std::make_shared<CameraState>(cameraId, cost,
+                    conflicting));
+        }
+
+        if (mFlashlight->hasFlashUnit(cameraId)) {
+            mTorchStatusMap.add(cameraId,
+                    ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF);
+        }
+    }
+
+    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_1) {
+        mModule->setCallbacks(this);
     }
 
     VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-    gCameraService = NULL;
+
+    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_2) {
+        setUpVendorTags();
+    }
+
+    CameraDeviceFactory::registerService(this);
+
+    CameraService::pingCameraServiceProxy();
 }
 
-void CameraService::onDeviceStatusChanged(int cameraId,
-                                          int newStatus)
-{
+void CameraService::pingCameraServiceProxy() {
+    sp<IServiceManager> sm = defaultServiceManager();
+    sp<IBinder> binder = sm->getService(String16("media.camera.proxy"));
+    if (binder == nullptr) {
+        return;
+    }
+    sp<ICameraServiceProxy> proxyBinder = interface_cast<ICameraServiceProxy>(binder);
+    proxyBinder->pingForUserUpdate();
+}
+
+CameraService::~CameraService() {
+    if (mModule) {
+        delete mModule;
+        mModule = nullptr;
+    }
+    VendorTagDescriptor::clearGlobalVendorTagDescriptor();
+    gCameraService = nullptr;
+}
+
+void CameraService::onDeviceStatusChanged(camera_device_status_t  cameraId,
+        camera_device_status_t newStatus) {
     ALOGI("%s: Status changed for cameraId=%d, newStatus=%d", __FUNCTION__,
           cameraId, newStatus);
 
-    if (cameraId < 0 || cameraId >= MAX_CAMERAS) {
+    String8 id = String8::format("%d", cameraId);
+    std::shared_ptr<CameraState> state = getCameraState(id);
+
+    if (state == nullptr) {
         ALOGE("%s: Bad camera ID %d", __FUNCTION__, cameraId);
         return;
     }
 
-    if ((int)getStatus(cameraId) == newStatus) {
-        ALOGE("%s: State transition to the same status 0x%x not allowed",
-              __FUNCTION__, (uint32_t)newStatus);
+    ICameraServiceListener::Status oldStatus = state->getStatus();
+
+    if (oldStatus == static_cast<ICameraServiceListener::Status>(newStatus)) {
+        ALOGE("%s: State transition to the same status %#x not allowed", __FUNCTION__, newStatus);
         return;
     }
 
-    /* don't do this in updateStatus
-       since it is also called from connect and we could get into a deadlock */
     if (newStatus == CAMERA_DEVICE_STATUS_NOT_PRESENT) {
-        Vector<sp<BasicClient> > clientsToDisconnect;
+        logDeviceRemoved(id, String8::format("Device status changed from %d to %d", oldStatus,
+                newStatus));
+        sp<BasicClient> clientToDisconnect;
         {
-           Mutex::Autolock al(mServiceLock);
+            // Don't do this in updateStatus to avoid deadlock over mServiceLock
+            Mutex::Autolock lock(mServiceLock);
 
-           /* Remove cached parameters from shim cache */
-           mShimParams.removeItem(cameraId);
+            // Set the device status to NOT_PRESENT, clients will no longer be able to connect
+            // to this device until the status changes
+            updateStatus(ICameraServiceListener::STATUS_NOT_PRESENT, id);
 
-           /* Find all clients that we need to disconnect */
-           sp<BasicClient> client = mClient[cameraId].promote();
-           if (client.get() != NULL) {
-               clientsToDisconnect.push_back(client);
-           }
+            // Remove cached shim parameters
+            state->setShimParams(CameraParameters());
 
-           int i = cameraId;
-           for (size_t j = 0; j < mProClientList[i].size(); ++j) {
-               sp<ProClient> cl = mProClientList[i][j].promote();
-               if (cl != NULL) {
-                   clientsToDisconnect.push_back(cl);
-               }
-           }
+            // Remove the client from the list of active clients
+            clientToDisconnect = removeClientLocked(id);
+
+            // Notify the client of disconnection
+            clientToDisconnect->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                    CaptureResultExtras{});
         }
 
-        /* now disconnect them. don't hold the lock
-           or we can get into a deadlock */
+        ALOGI("%s: Client for camera ID %s evicted due to device status change from HAL",
+                __FUNCTION__, id.string());
 
-        for (size_t i = 0; i < clientsToDisconnect.size(); ++i) {
-            sp<BasicClient> client = clientsToDisconnect[i];
-
-            client->disconnect();
-            /**
-             * The remote app will no longer be able to call methods on the
-             * client since the client PID will be reset to 0
-             */
+        // Disconnect client
+        if (clientToDisconnect.get() != nullptr) {
+            // Ensure not in binder RPC so client disconnect PID checks work correctly
+            LOG_ALWAYS_FATAL_IF(getCallingPid() != getpid(),
+                    "onDeviceStatusChanged must be called from the camera service process!");
+            clientToDisconnect->disconnect();
         }
 
-        ALOGV("%s: After unplug, disconnected %zu clients",
-              __FUNCTION__, clientsToDisconnect.size());
+    } else {
+        if (oldStatus == ICameraServiceListener::Status::STATUS_NOT_PRESENT) {
+            logDeviceAdded(id, String8::format("Device status changed from %d to %d", oldStatus,
+                    newStatus));
+        }
+        updateStatus(static_cast<ICameraServiceListener::Status>(newStatus), id);
     }
 
-    updateStatus(
-            static_cast<ICameraServiceListener::Status>(newStatus), cameraId);
-
 }
 
+void CameraService::onTorchStatusChanged(const String8& cameraId,
+        ICameraServiceListener::TorchStatus newStatus) {
+    Mutex::Autolock al(mTorchStatusMutex);
+    onTorchStatusChangedLocked(cameraId, newStatus);
+}
+
+void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
+        ICameraServiceListener::TorchStatus newStatus) {
+    ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
+            __FUNCTION__, cameraId.string(), newStatus);
+
+    ICameraServiceListener::TorchStatus status;
+    status_t res = getTorchStatusLocked(cameraId, &status);
+    if (res) {
+        ALOGE("%s: cannot get torch status of camera %s: %s (%d)",
+                __FUNCTION__, cameraId.string(), strerror(-res), res);
+        return;
+    }
+    if (status == newStatus) {
+        return;
+    }
+
+    res = setTorchStatusLocked(cameraId, newStatus);
+    if (res) {
+        ALOGE("%s: Failed to set the torch status", __FUNCTION__, (uint32_t)newStatus);
+        return;
+    }
+
+    {
+        // Update battery life logging for flashlight
+        Mutex::Autolock al(mTorchClientMapMutex);
+        auto iter = mTorchUidMap.find(cameraId);
+        if (iter != mTorchUidMap.end()) {
+            int oldUid = iter->second.second;
+            int newUid = iter->second.first;
+            BatteryNotifier& notifier(BatteryNotifier::getInstance());
+            if (oldUid != newUid) {
+                // If the UID has changed, log the status and update current UID in mTorchUidMap
+                if (status == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+                    notifier.noteFlashlightOff(cameraId, oldUid);
+                }
+                if (newStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+                    notifier.noteFlashlightOn(cameraId, newUid);
+                }
+                iter->second.second = newUid;
+            } else {
+                // If the UID has not changed, log the status
+                if (newStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON) {
+                    notifier.noteFlashlightOn(cameraId, oldUid);
+                } else {
+                    notifier.noteFlashlightOff(cameraId, oldUid);
+                }
+            }
+        }
+    }
+
+    {
+        Mutex::Autolock lock(mStatusListenerLock);
+        for (auto& i : mListenerList) {
+            i->onTorchStatusChanged(newStatus, String16{cameraId});
+        }
+    }
+}
+
 int32_t CameraService::getNumberOfCameras() {
-    return mNumberOfCameras;
+    return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
+}
+
+int32_t CameraService::getNumberOfCameras(int type) {
+    switch (type) {
+        case CAMERA_TYPE_BACKWARD_COMPATIBLE:
+            return mNumberOfNormalCameras;
+        case CAMERA_TYPE_ALL:
+            return mNumberOfCameras;
+        default:
+            ALOGW("%s: Unknown camera type %d, returning 0",
+                    __FUNCTION__, type);
+            return 0;
+    }
 }
 
 status_t CameraService::getCameraInfo(int cameraId,
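
onTorchStatusChangedLocked() above attributes flashlight battery usage to the UID that currently owns the torch, closing out the previous UID when ownership has changed between status updates. A standalone sketch of just that bookkeeping, with a hypothetical notifier standing in for BatteryNotifier:

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    // first = UID that requested the current torch state,
    // second = UID the last battery note was attributed to.
    using TorchUidMap = std::map<std::string, std::pair<int, int>>;

    struct Notifier {
        void flashlightOn(const std::string& id, int uid) {
            std::cout << "on  " << id << " uid=" << uid << "\n";
        }
        void flashlightOff(const std::string& id, int uid) {
            std::cout << "off " << id << " uid=" << uid << "\n";
        }
    };

    // Mirrors the branch in onTorchStatusChangedLocked(): if the owning UID
    // changed, close out the old UID before crediting the new one.
    void noteTorchStatus(TorchUidMap& uids, Notifier& n, const std::string& id,
                         bool wasOn, bool isOn) {
        auto it = uids.find(id);
        if (it == uids.end()) return;
        int newUid = it->second.first;
        int oldUid = it->second.second;
        if (oldUid != newUid) {
            if (wasOn) n.flashlightOff(id, oldUid);
            if (isOn)  n.flashlightOn(id, newUid);
            it->second.second = newUid;   // remember who is now being billed
        } else {
            if (isOn)  n.flashlightOn(id, oldUid);
            else       n.flashlightOff(id, oldUid);
        }
    }

    int main() {
        TorchUidMap uids{{"0", {10012, 10007}}}; // ownership moved 10007 -> 10012
        Notifier n;
        noteTorchStatus(uids, n, "0", /*wasOn=*/true, /*isOn=*/true);
        return 0;
    }
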
@@ -236,12 +426,21 @@
 
     struct camera_info info;
     status_t rc = filterGetInfoErrorCode(
-        mModule->get_camera_info(cameraId, &info));
+        mModule->getCameraInfo(cameraId, &info));
     cameraInfo->facing = info.facing;
     cameraInfo->orientation = info.orientation;
     return rc;
 }
 
+int CameraService::cameraIdToInt(const String8& cameraId) {
+    errno = 0;
+    size_t pos = 0;
+    int ret = stoi(std::string{cameraId.string()}, &pos);
+    if (errno != 0 || pos != cameraId.size()) {
+        return -1;
+    }
+    return ret;
+}
 
 status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
     status_t ret = OK;
@@ -347,7 +546,7 @@
 
     int facing;
     status_t ret = OK;
-    if (mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_0 ||
+    if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
             getDeviceVersion(cameraId, &facing) <= CAMERA_DEVICE_API_VERSION_2_1 ) {
         /**
          * Backwards compatibility mode for old HALs:
@@ -368,13 +567,39 @@
          * Normal HAL 2.1+ codepath.
          */
         struct camera_info info;
-        ret = filterGetInfoErrorCode(mModule->get_camera_info(cameraId, &info));
+        ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
         *cameraInfo = info.static_camera_characteristics;
     }
 
     return ret;
 }
 
+int CameraService::getCallingPid() {
+    return IPCThreadState::self()->getCallingPid();
+}
+
+int CameraService::getCallingUid() {
+    return IPCThreadState::self()->getCallingUid();
+}
+
+String8 CameraService::getFormattedCurrentTime() {
+    time_t now = time(nullptr);
+    char formattedTime[64];
+    strftime(formattedTime, sizeof(formattedTime), "%m-%d %H:%M:%S", localtime(&now));
+    return String8(formattedTime);
+}
+
+int CameraService::getCameraPriorityFromProcState(int procState) {
+    // Find the priority for the camera usage based on the process state.  Higher priority clients
+    // win for evictions.
+    if (procState < 0) {
+        ALOGE("%s: Received invalid process state %d from ActivityManagerService!", __FUNCTION__,
+                procState);
+        return -1;
+    }
+    return INT_MAX - procState;
+}
+
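
Since ActivityManager process states are ordered with smaller values meaning "more foreground", getCameraPriorityFromProcState simply inverts the state so that larger priorities win evictions. A tiny sketch of that mapping; the sample state values below are placeholders, not the real ActivityManager constants:

#include <climits>
#include <cstdio>

// Sketch: a smaller procState means "more foreground", so invert it to get a
// priority where larger values win evictions. Invalid states map to -1.
static int cameraPriorityFromProcState(int procState) {
    if (procState < 0) return -1;
    return INT_MAX - procState;
}

int main() {
    // Illustrative states only: 2 ~ a foreground process, 13 ~ a cached one.
    std::printf("fg=%d cached=%d invalid=%d\n",
            cameraPriorityFromProcState(2),
            cameraPriorityFromProcState(13),
            cameraPriorityFromProcState(-1));
    return 0;
}
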
 status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
     if (!mModule) {
         ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
@@ -387,12 +612,12 @@
 
 int CameraService::getDeviceVersion(int cameraId, int* facing) {
     struct camera_info info;
-    if (mModule->get_camera_info(cameraId, &info) != OK) {
+    if (mModule->getCameraInfo(cameraId, &info) != OK) {
         return -1;
     }
 
     int deviceVersion;
-    if (mModule->common.module_api_version >= CAMERA_MODULE_API_VERSION_2_0) {
+    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0) {
         deviceVersion = info.device_version;
     } else {
         deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
@@ -405,19 +630,6 @@
     return deviceVersion;
 }
 
-status_t CameraService::filterOpenErrorCode(status_t err) {
-    switch(err) {
-        case NO_ERROR:
-        case -EBUSY:
-        case -EINVAL:
-        case -EUSERS:
-            return err;
-        default:
-            break;
-    }
-    return -ENODEV;
-}
-
 status_t CameraService::filterGetInfoErrorCode(status_t err) {
     switch(err) {
         case NO_ERROR:
@@ -433,13 +645,13 @@
     vendor_tag_ops_t vOps = vendor_tag_ops_t();
 
     // Check if vendor operations have been implemented
-    if (mModule->get_vendor_tag_ops == NULL) {
+    if (!mModule->isVendorTagDefined()) {
         ALOGI("%s: No vendor tags defined for this device.", __FUNCTION__);
         return false;
     }
 
     ATRACE_BEGIN("camera3->get_metadata_vendor_tag_ops");
-    mModule->get_vendor_tag_ops(&vOps);
+    mModule->getVendorTagOps(&vOps);
     ATRACE_END();
 
     // Ensure all vendor operations are present
@@ -467,54 +679,104 @@
     return true;
 }
 
-status_t CameraService::initializeShimMetadata(int cameraId) {
-    int pid = getCallingPid();
-    int uid = getCallingUid();
-    status_t ret = validateConnect(cameraId, uid);
-    if (ret != OK) {
-        // Error already logged by callee
-        return ret;
+status_t CameraService::makeClient(const sp<CameraService>& cameraService,
+        const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+        int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
+        int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+        /*out*/sp<BasicClient>* client) {
+
+    // TODO: Update CameraClients + HAL interface to use strings for Camera IDs
+    int id = cameraIdToInt(cameraId);
+    if (id == -1) {
+        ALOGE("%s: Invalid camera ID %s, cannot convert to integer.", __FUNCTION__,
+                cameraId.string());
+        return BAD_VALUE;
     }
 
-    bool needsNewClient = false;
-    sp<Client> client;
+    if (halVersion < 0 || halVersion == deviceVersion) {
+        // Default path: HAL version is unspecified by caller, create CameraClient
+        // based on device version reported by the HAL.
+        switch(deviceVersion) {
+          case CAMERA_DEVICE_API_VERSION_1_0:
+            if (effectiveApiLevel == API_1) {  // Camera1 API route
+                sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+                *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+                        clientPid, clientUid, getpid(), legacyMode);
+            } else { // Camera2 API route
+                ALOGW("Camera using old HAL version: %d", deviceVersion);
+                return -EOPNOTSUPP;
+            }
+            break;
+          case CAMERA_DEVICE_API_VERSION_2_0:
+          case CAMERA_DEVICE_API_VERSION_2_1:
+          case CAMERA_DEVICE_API_VERSION_3_0:
+          case CAMERA_DEVICE_API_VERSION_3_1:
+          case CAMERA_DEVICE_API_VERSION_3_2:
+          case CAMERA_DEVICE_API_VERSION_3_3:
+            if (effectiveApiLevel == API_1) { // Camera1 API route
+                sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+                *client = new Camera2Client(cameraService, tmp, packageName, id, facing,
+                        clientPid, clientUid, servicePid, legacyMode);
+            } else { // Camera2 API route
+                sp<ICameraDeviceCallbacks> tmp =
+                        static_cast<ICameraDeviceCallbacks*>(cameraCb.get());
+                *client = new CameraDeviceClient(cameraService, tmp, packageName, id,
+                        facing, clientPid, clientUid, servicePid);
+            }
+            break;
+          default:
+            // Should not be reachable
+            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+            return INVALID_OPERATION;
+        }
+    } else {
+        // A particular HAL version is requested by caller. Create CameraClient
+        // based on the requested HAL version.
+        if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 &&
+            halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
+            // Only support higher HAL version device opened as HAL1.0 device.
+            sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+            *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+                    clientPid, clientUid, servicePid, legacyMode);
+        } else {
+            // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
+            ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
+                    " opened as HAL %x device", halVersion, deviceVersion,
+                    CAMERA_DEVICE_API_VERSION_1_0);
+            return INVALID_OPERATION;
+        }
+    }
+    return NO_ERROR;
+}
+
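
makeClient is essentially a routing table over (requested HAL version, device version, effective API level). A compact hedged sketch of that routing, with placeholder version codes instead of the real CAMERA_DEVICE_API_VERSION_* constants:

#include <cstdio>
#include <string>

// Hedged sketch of the routing in makeClient. The API levels and the HAL1
// version code below are placeholders, not the real framework constants.
enum ApiLevel { API_1, API_2 };
static const int kHal1 = 0x100; // stand-in for CAMERA_DEVICE_API_VERSION_1_0

static std::string routeClient(int halVersion, int deviceVersion, ApiLevel api) {
    if (halVersion < 0 || halVersion == deviceVersion) {
        // Default path: pick the client from the device version reported by the HAL.
        if (deviceVersion == kHal1) {
            return api == API_1 ? "CameraClient" : "unsupported (HAL1 device via API2)";
        }
        return api == API_1 ? "Camera2Client" : "CameraDeviceClient";
    }
    // Explicit HAL version requested: only "open a newer device as HAL1" is allowed.
    if (deviceVersion > kHal1 && halVersion == kHal1) {
        return "CameraClient (legacy shim over a newer HAL)";
    }
    return "invalid combination";
}

int main() {
    std::printf("%s\n", routeClient(-1, 0x302, API_2).c_str());
    std::printf("%s\n", routeClient(kHal1, 0x302, API_1).c_str());
    return 0;
}
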
+String8 CameraService::toString(std::set<userid_t> intSet) {
+    String8 s("");
+    bool first = true;
+    for (userid_t i : intSet) {
+        if (first) {
+            s.appendFormat("%d", i);
+            first = false;
+        } else {
+            s.appendFormat(", %d", i);
+        }
+    }
+    return s;
+}
+
+status_t CameraService::initializeShimMetadata(int cameraId) {
+    int uid = getCallingUid();
 
     String16 internalPackageName("media");
-    {   // Scope for service lock
-        Mutex::Autolock lock(mServiceLock);
-        if (mClient[cameraId] != NULL) {
-            client = static_cast<Client*>(mClient[cameraId].promote().get());
-        }
-        if (client == NULL) {
-            needsNewClient = true;
-            ret = connectHelperLocked(/*out*/client,
-                                      /*cameraClient*/NULL, // Empty binder callbacks
-                                      cameraId,
-                                      internalPackageName,
-                                      uid,
-                                      pid);
-
-            if (ret != OK) {
-                // Error already logged by callee
-                return ret;
-            }
-        }
-
-        if (client == NULL) {
-            ALOGE("%s: Could not connect to client camera device.", __FUNCTION__);
-            return BAD_VALUE;
-        }
-
-        String8 rawParams = client->getParameters();
-        CameraParameters params(rawParams);
-        mShimParams.add(cameraId, params);
+    String8 id = String8::format("%d", cameraId);
+    status_t ret = NO_ERROR;
+    sp<Client> tmp = nullptr;
+    if ((ret = connectHelper<ICameraClient,Client>(sp<ICameraClient>{nullptr}, id,
+            static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED), internalPackageName, uid, API_1,
+            false, true, tmp)) != NO_ERROR) {
+        ALOGE("%s: Error %d (%s) initializing shim metadata.", __FUNCTION__, ret, strerror(ret));
+        return ret;
     }
-
-    // Close client if one was opened solely for this call
-    if (needsNewClient) {
-        client->disconnect();
-    }
-    return OK;
+    return NO_ERROR;
 }
 
 status_t CameraService::getLegacyParametersLazy(int cameraId,
@@ -530,42 +792,55 @@
         return BAD_VALUE;
     }
 
-    ssize_t index = -1;
-    {   // Scope for service lock
+    String8 id = String8::format("%d", cameraId);
+
+    // Check if we already have parameters
+    {
+        // Scope for service lock
         Mutex::Autolock lock(mServiceLock);
-        index = mShimParams.indexOfKey(cameraId);
-        // Release service lock so initializeShimMetadata can be called correctly.
-
-        if (index >= 0) {
-            *parameters = mShimParams[index];
+        auto cameraState = getCameraState(id);
+        if (cameraState == nullptr) {
+            ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
+            return BAD_VALUE;
+        }
+        CameraParameters p = cameraState->getShimParams();
+        if (!p.isEmpty()) {
+            *parameters = p;
+            return NO_ERROR;
         }
     }
 
-    if (index < 0) {
-        int64_t token = IPCThreadState::self()->clearCallingIdentity();
-        ret = initializeShimMetadata(cameraId);
-        IPCThreadState::self()->restoreCallingIdentity(token);
-        if (ret != OK) {
-            // Error already logged by callee
-            return ret;
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+    ret = initializeShimMetadata(cameraId);
+    IPCThreadState::self()->restoreCallingIdentity(token);
+    if (ret != NO_ERROR) {
+        // Error already logged by callee
+        return ret;
+    }
+
+    // Check for parameters again
+    {
+        // Scope for service lock
+        Mutex::Autolock lock(mServiceLock);
+        auto cameraState = getCameraState(id);
+        if (cameraState == nullptr) {
+            ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
+            return BAD_VALUE;
         }
-
-        {   // Scope for service lock
-            Mutex::Autolock lock(mServiceLock);
-            index = mShimParams.indexOfKey(cameraId);
-
-            LOG_ALWAYS_FATAL_IF(index < 0, "index should have been initialized");
-
-            *parameters = mShimParams[index];
+        CameraParameters p = cameraState->getShimParams();
+        if (!p.isEmpty()) {
+            *parameters = p;
+            return NO_ERROR;
         }
     }
 
-    return OK;
+    ALOGE("%s: Parameters were not initialized, or were empty.  Device may not be present.",
+            __FUNCTION__);
+    return INVALID_OPERATION;
 }
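
getLegacyParametersLazy follows a check / initialize / re-check pattern: consult the cached shim parameters under mServiceLock, drop the lock to run the expensive shim initialization, then re-read the cache. A minimal sketch of that pattern with standard primitives (gLock and gCachedParams are illustrative stand-ins):

#include <cstdio>
#include <mutex>
#include <string>

// Sketch of the check / initialize / re-check pattern in getLegacyParametersLazy.
// gLock stands in for mServiceLock; an empty string means "not cached yet".
static std::mutex gLock;
static std::string gCachedParams;

static bool getParamsLazy(std::string* out) {
    {
        std::lock_guard<std::mutex> l(gLock);
        if (!gCachedParams.empty()) { *out = gCachedParams; return true; }
    }
    // Expensive initialization runs without the lock held (the service opens
    // the camera through the shim at this point).
    std::string fresh = "preview-size=640x480";
    {
        std::lock_guard<std::mutex> l(gLock);
        if (gCachedParams.empty()) gCachedParams = fresh;
        *out = gCachedParams;
    }
    return !out->empty();
}

int main() {
    std::string p;
    std::printf("%d %s\n", getParamsLazy(&p) ? 1 : 0, p.c_str());
    return 0;
}
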
 
-status_t CameraService::validateConnect(int cameraId,
-                                    /*inout*/
-                                    int& clientUid) const {
+status_t CameraService::validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid)
+        const {
 
     int callingPid = getCallingPid();
 
@@ -574,160 +849,278 @@
     } else {
         // We only trust our own process to forward client UIDs
         if (callingPid != getpid()) {
-            ALOGE("CameraService::connect X (pid %d) rejected (don't trust clientUid)",
-                    callingPid);
+            ALOGE("CameraService::connect X (PID %d) rejected (don't trust clientUid %d)",
+                    callingPid, clientUid);
             return PERMISSION_DENIED;
         }
     }
 
     if (!mModule) {
-        ALOGE("Camera HAL module not loaded");
+        ALOGE("CameraService::connect X (PID %d) rejected (camera HAL module not loaded)",
+                callingPid);
         return -ENODEV;
     }
 
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) {
-        ALOGE("CameraService::connect X (pid %d) rejected (invalid cameraId %d).",
-            callingPid, cameraId);
+    if (getCameraState(cameraId) == nullptr) {
+        ALOGE("CameraService::connect X (PID %d) rejected (invalid camera ID %s)", callingPid,
+                cameraId.string());
         return -ENODEV;
     }
 
+    // Check device policy for this camera
     char value[PROPERTY_VALUE_MAX];
-    property_get("sys.secpolicy.camera.disabled", value, "0");
+    char key[PROPERTY_KEY_MAX];
+    userid_t clientUserId = multiuser_get_user_id(clientUid);
+    snprintf(key, PROPERTY_KEY_MAX, "sys.secpolicy.camera.off_%d", clientUserId);
+    property_get(key, value, "0");
     if (strcmp(value, "1") == 0) {
         // Camera is disabled by DevicePolicyManager.
-        ALOGI("Camera is disabled. connect X (pid %d) rejected", callingPid);
+        ALOGE("CameraService::connect X (PID %d) rejected (camera %s is disabled by device "
+                "policy)", callingPid, cameraId.string());
         return -EACCES;
     }
 
-    ICameraServiceListener::Status currentStatus = getStatus(cameraId);
+    // Only allow clients who are being used by the current foreground device user, unless calling
+    // from our own process.
+    if (callingPid != getpid() && (mAllowedUsers.find(clientUserId) == mAllowedUsers.end())) {
+        ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from "
+                "device user %d, currently allowed device users: %s)", callingPid, clientUserId,
+                toString(mAllowedUsers).string());
+        return PERMISSION_DENIED;
+    }
+
+    return checkIfDeviceIsUsable(cameraId);
+}
+
+status_t CameraService::checkIfDeviceIsUsable(const String8& cameraId) const {
+    auto cameraState = getCameraState(cameraId);
+    int callingPid = getCallingPid();
+    if (cameraState == nullptr) {
+        ALOGE("CameraService::connect X (PID %d) rejected (invalid camera ID %s)", callingPid,
+                cameraId.string());
+        return -ENODEV;
+    }
+
+    ICameraServiceListener::Status currentStatus = cameraState->getStatus();
     if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
-        ALOGI("Camera is not plugged in,"
-               " connect X (pid %d) rejected", callingPid);
+        ALOGE("CameraService::connect X (PID %d) rejected (camera %s is not connected)",
+                callingPid, cameraId.string());
         return -ENODEV;
     } else if (currentStatus == ICameraServiceListener::STATUS_ENUMERATING) {
-        ALOGI("Camera is enumerating,"
-               " connect X (pid %d) rejected", callingPid);
+        ALOGE("CameraService::connect X (PID %d) rejected, (camera %s is initializing)",
+                callingPid, cameraId.string());
         return -EBUSY;
     }
-    // Else don't check for STATUS_NOT_AVAILABLE.
-    //  -- It's done implicitly in canConnectUnsafe /w the mBusy array
 
-    return OK;
+    return NO_ERROR;
 }
 
-bool CameraService::canConnectUnsafe(int cameraId,
-                                     const String16& clientPackageName,
-                                     const sp<IBinder>& remoteCallback,
-                                     sp<BasicClient> &client) {
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+void CameraService::finishConnectLocked(const sp<BasicClient>& client,
+        const CameraService::DescriptorPtr& desc) {
 
-    if (mClient[cameraId] != 0) {
-        client = mClient[cameraId].promote();
-        if (client != 0) {
-            if (remoteCallback == client->getRemote()) {
-                LOG1("CameraService::connect X (pid %d) (the same client)",
-                     callingPid);
-                return true;
-            } else {
-                // TODOSC: need to support 1 regular client,
-                // multiple shared clients here
-                ALOGW("CameraService::connect X (pid %d) rejected"
-                      " (existing client).", callingPid);
-                return false;
+    // Make a descriptor for the incoming client
+    auto clientDescriptor = CameraService::CameraClientManager::makeClientDescriptor(client, desc);
+    auto evicted = mActiveClientManager.addAndEvict(clientDescriptor);
+
+    logConnected(desc->getKey(), static_cast<int>(desc->getOwnerId()),
+            String8(client->getPackageName()));
+
+    if (evicted.size() > 0) {
+        // This should never happen - clients should already have been removed in disconnect
+        for (auto& i : evicted) {
+            ALOGE("%s: Invalid state: Client for camera %s was not removed in disconnect",
+                    __FUNCTION__, i->getKey().string());
+        }
+
+        LOG_ALWAYS_FATAL("%s: Invalid state for CameraService, clients not evicted properly",
+                __FUNCTION__);
+    }
+}
+
+status_t CameraService::handleEvictionsLocked(const String8& cameraId, int clientPid,
+        apiLevel effectiveApiLevel, const sp<IBinder>& remoteCallback, const String8& packageName,
+        /*out*/
+        sp<BasicClient>* client,
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial) {
+
+    status_t ret = NO_ERROR;
+    std::vector<DescriptorPtr> evictedClients;
+    DescriptorPtr clientDescriptor;
+    {
+        if (effectiveApiLevel == API_1) {
+            // If we are using API1, any existing client for this camera ID with the same remote
+            // should be returned rather than evicted to allow MediaRecorder to work properly.
+
+            auto current = mActiveClientManager.get(cameraId);
+            if (current != nullptr) {
+                auto clientSp = current->getValue();
+                if (clientSp.get() != nullptr) { // should never be needed
+                    if (!clientSp->canCastToApiClient(effectiveApiLevel)) {
+                        ALOGW("CameraService connect called from same client, but with a different"
+                                " API level, evicting prior client...");
+                    } else if (clientSp->getRemote() == remoteCallback) {
+                        ALOGI("CameraService::connect X (PID %d) (second call from same"
+                                " app binder, returning the same client)", clientPid);
+                        *client = clientSp;
+                        return NO_ERROR;
+                    }
+                }
             }
         }
-        mClient[cameraId].clear();
-    }
 
-    /*
-    mBusy is set to false as the last step of the Client destructor,
-    after which it is guaranteed that the Client destructor has finished (
-    including any inherited destructors)
+        // Return error if the device was unplugged or removed by the HAL for some reason
+        if ((ret = checkIfDeviceIsUsable(cameraId)) != NO_ERROR) {
+            return ret;
+        }
 
-    We only need this for a Client subclasses since we don't allow
-    multiple Clents to be opened concurrently, but multiple BasicClient
-    would be fine
-    */
-    if (mBusy[cameraId]) {
-        ALOGW("CameraService::connect X (pid %d, \"%s\") rejected"
-                " (camera %d is still busy).", callingPid,
-                clientName8.string(), cameraId);
-        return false;
-    }
+        // Get current active client PIDs
+        std::vector<int> ownerPids(mActiveClientManager.getAllOwners());
+        ownerPids.push_back(clientPid);
 
-    return true;
-}
+        // Use the value +PROCESS_STATE_NONEXISTENT, to avoid taking
+        // address of PROCESS_STATE_NONEXISTENT as a reference argument
+        // for the vector constructor. PROCESS_STATE_NONEXISTENT does
+        // not have an out-of-class definition.
+        std::vector<int> priorities(ownerPids.size(), +PROCESS_STATE_NONEXISTENT);
 
-status_t CameraService::connectHelperLocked(
-        /*out*/
-        sp<Client>& client,
-        /*in*/
-        const sp<ICameraClient>& cameraClient,
-        int cameraId,
-        const String16& clientPackageName,
-        int clientUid,
-        int callingPid,
-        int halVersion,
-        bool legacyMode) {
+        // Get priorities of all active PIDs
+        ProcessInfoService::getProcessStatesFromPids(ownerPids.size(), &ownerPids[0],
+                /*out*/&priorities[0]);
 
-    int facing = -1;
-    int deviceVersion = getDeviceVersion(cameraId, &facing);
+        // Update all active clients' priorities
+        std::map<int,int> pidToPriorityMap;
+        for (size_t i = 0; i < ownerPids.size() - 1; i++) {
+            pidToPriorityMap.emplace(ownerPids[i], getCameraPriorityFromProcState(priorities[i]));
+        }
+        mActiveClientManager.updatePriorities(pidToPriorityMap);
 
-    if (halVersion < 0 || halVersion == deviceVersion) {
-        // Default path: HAL version is unspecified by caller, create CameraClient
-        // based on device version reported by the HAL.
-        switch(deviceVersion) {
-          case CAMERA_DEVICE_API_VERSION_1_0:
-            client = new CameraClient(this, cameraClient,
-                    clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid(), legacyMode);
-            break;
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
-          case CAMERA_DEVICE_API_VERSION_3_0:
-          case CAMERA_DEVICE_API_VERSION_3_1:
-          case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new Camera2Client(this, cameraClient,
-                    clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid(), legacyMode);
-            break;
-          case -1:
-            ALOGE("Invalid camera id %d", cameraId);
+        // Get state for the given cameraId
+        auto state = getCameraState(cameraId);
+        if (state == nullptr) {
+            ALOGE("CameraService::connect X (PID %d) rejected (no camera device with ID %s)",
+                clientPid, cameraId.string());
             return BAD_VALUE;
-          default:
-            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
         }
-    } else {
-        // A particular HAL version is requested by caller. Create CameraClient
-        // based on the requested HAL version.
-        if (deviceVersion > CAMERA_DEVICE_API_VERSION_1_0 &&
-            halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
-            // Only support higher HAL version device opened as HAL1.0 device.
-            client = new CameraClient(this, cameraClient,
-                    clientPackageName, cameraId,
-                    facing, callingPid, clientUid, getpid(), legacyMode);
-        } else {
-            // Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
-            ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
-                    " opened as HAL %x device", halVersion, deviceVersion,
-                    CAMERA_DEVICE_API_VERSION_1_0);
-            return INVALID_OPERATION;
+
+        // Make descriptor for incoming client
+        clientDescriptor = CameraClientManager::makeClientDescriptor(cameraId,
+                sp<BasicClient>{nullptr}, static_cast<int32_t>(state->getCost()),
+                state->getConflicting(),
+                getCameraPriorityFromProcState(priorities[priorities.size() - 1]), clientPid);
+
+        // Find clients that would be evicted
+        auto evicted = mActiveClientManager.wouldEvict(clientDescriptor);
+
+        // If the incoming client was 'evicted,' higher priority clients have the camera in the
+        // background, so we cannot do evictions
+        if (std::find(evicted.begin(), evicted.end(), clientDescriptor) != evicted.end()) {
+            ALOGE("CameraService::connect X (PID %d) rejected (existing client(s) with higher"
+                    " priority).", clientPid);
+
+            sp<BasicClient> clientSp = clientDescriptor->getValue();
+            String8 curTime = getFormattedCurrentTime();
+            auto incompatibleClients =
+                    mActiveClientManager.getIncompatibleClients(clientDescriptor);
+
+            String8 msg = String8::format("%s : DENIED connect device %s client for package %s "
+                    "(PID %d, priority %d) due to eviction policy", curTime.string(),
+                    cameraId.string(), packageName.string(), clientPid,
+                    getCameraPriorityFromProcState(priorities[priorities.size() - 1]));
+
+            for (auto& i : incompatibleClients) {
+                msg.appendFormat("\n   - Blocked by existing device %s client for package %s"
+                        "(PID %" PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+                        String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
+                        i->getPriority());
+                ALOGE("   Conflicts with: Device %s, client package %s (PID %"
+                        PRId32 ", priority %" PRId32 ")", i->getKey().string(),
+                        String8{i->getValue()->getPackageName()}.string(), i->getOwnerId(),
+                        i->getPriority());
+            }
+
+            // Log the client's attempt
+            Mutex::Autolock l(mLogLock);
+            mEventLog.add(msg);
+
+            return -EBUSY;
+        }
+
+        for (auto& i : evicted) {
+            sp<BasicClient> clientSp = i->getValue();
+            if (clientSp.get() == nullptr) {
+                ALOGE("%s: Invalid state: Null client in active client list.", __FUNCTION__);
+
+                // TODO: Remove this
+                LOG_ALWAYS_FATAL("%s: Invalid state for CameraService, null client in active list",
+                        __FUNCTION__);
+                mActiveClientManager.remove(i);
+                continue;
+            }
+
+            ALOGE("CameraService::connect evicting conflicting client for camera ID %s",
+                    i->getKey().string());
+            evictedClients.push_back(i);
+
+            // Log the clients evicted
+            logEvent(String8::format("EVICT device %s client held by package %s (PID"
+                    " %" PRId32 ", priority %" PRId32 ")\n   - Evicted by device %s client for"
+                    " package %s (PID %d, priority %" PRId32 ")",
+                    i->getKey().string(), String8{clientSp->getPackageName()}.string(),
+                    i->getOwnerId(), i->getPriority(), cameraId.string(),
+                    packageName.string(), clientPid,
+                    getCameraPriorityFromProcState(priorities[priorities.size() - 1])));
+
+            // Notify the client of disconnection
+            clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                    CaptureResultExtras());
         }
     }
 
-    status_t status = connectFinishUnsafe(client, client->getRemote());
-    if (status != OK) {
-        // this is probably not recoverable.. maybe the client can try again
-        return status;
+    // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+    // other clients from connecting in mServiceLockWrapper if held
+    mServiceLock.unlock();
+
+    // Clear caller identity temporarily so client disconnect PID checks work correctly
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+
+    // Destroy evicted clients
+    for (auto& i : evictedClients) {
+        // Disconnect is blocking, and should only have returned when HAL has cleaned up
+        i->getValue()->disconnect(); // Clients will remove themselves from the active client list
     }
 
-    mClient[cameraId] = client;
-    LOG1("CameraService::connect X (id %d, this pid is %d)", cameraId,
-         getpid());
+    IPCThreadState::self()->restoreCallingIdentity(token);
 
-    return OK;
+    for (const auto& i : evictedClients) {
+        ALOGV("%s: Waiting for disconnect to complete for client for device %s (PID %" PRId32 ")",
+                __FUNCTION__, i->getKey().string(), i->getOwnerId());
+        ret = mActiveClientManager.waitUntilRemoved(i, DEFAULT_DISCONNECT_TIMEOUT_NS);
+        if (ret == TIMED_OUT) {
+            ALOGE("%s: Timed out waiting for client for device %s to disconnect, "
+                    "current clients:\n%s", __FUNCTION__, i->getKey().string(),
+                    mActiveClientManager.toString().string());
+            return -EBUSY;
+        }
+        if (ret != NO_ERROR) {
+            ALOGE("%s: Received error waiting for client for device %s to disconnect: %s (%d), "
+                    "current clients:\n%s", __FUNCTION__, i->getKey().string(), strerror(-ret),
+                    ret, mActiveClientManager.toString().string());
+            return ret;
+        }
+    }
+
+    evictedClients.clear();
+
+    // Once clients have been disconnected, relock
+    mServiceLock.lock();
+
+    // Check again whether the device was unplugged or otherwise removed while we weren't holding
+    // mServiceLock
+    if ((ret = checkIfDeviceIsUsable(cameraId)) != NO_ERROR) {
+        return ret;
+    }
+
+    *partial = clientDescriptor;
+    return NO_ERROR;
 }
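
The core of handleEvictionsLocked is a priority comparison: the incoming client's descriptor is handed to the client manager, and the connection is refused if the incoming descriptor itself lands in the would-be-evicted set because an existing client for the same camera outranks it. A simplified hedged sketch of that decision, with a plain struct in place of ClientDescriptor and the cost/conflicting-device handling omitted:

#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch only: an incoming client is rejected when an existing client for the
// same camera holds a strictly higher priority. Cost and conflicting-device
// handling from the real ClientManager are omitted.
struct Desc { int cameraId; int priority; const char* pkg; };

static bool wouldRejectIncoming(const std::vector<Desc>& active, const Desc& incoming) {
    return std::any_of(active.begin(), active.end(), [&](const Desc& d) {
        return d.cameraId == incoming.cameraId && d.priority > incoming.priority;
    });
}

int main() {
    std::vector<Desc> active = {{0, 1000, "com.example.fgcamera"}};
    Desc background{0, 10, "com.example.bgservice"};
    std::printf("reject background client: %s\n",
            wouldRejectIncoming(active, background) ? "yes" : "no");
    return 0;
}
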
 
 status_t CameraService::connect(
@@ -738,47 +1131,20 @@
         /*out*/
         sp<ICamera>& device) {
 
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+    status_t ret = NO_ERROR;
+    String8 id = String8::format("%d", cameraId);
+    sp<Client> client = nullptr;
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id, CAMERA_HAL_API_VERSION_UNSPECIFIED,
+            clientPackageName, clientUid, API_1, false, false, /*out*/client);
 
-    LOG1("CameraService::connect E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
+    if (ret != NO_ERROR) {
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8::format("%s (%d)", strerror(-ret), ret));
+        return ret;
     }
 
-
-    sp<Client> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        sp<BasicClient> clientTmp;
-        if (!canConnectUnsafe(cameraId, clientPackageName,
-                              IInterface::asBinder(cameraClient),
-                              /*out*/clientTmp)) {
-            return -EBUSY;
-        } else if (client.get() != NULL) {
-            device = static_cast<Client*>(clientTmp.get());
-            return OK;
-        }
-
-        status = connectHelperLocked(/*out*/client,
-                                     cameraClient,
-                                     cameraId,
-                                     clientPackageName,
-                                     clientUid,
-                                     callingPid);
-        if (status != OK) {
-            return status;
-        }
-
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-
     device = client;
-    return OK;
+    return NO_ERROR;
 }
 
 status_t CameraService::connectLegacy(
@@ -789,8 +1155,10 @@
         /*out*/
         sp<ICamera>& device) {
 
+    String8 id = String8::format("%d", cameraId);
+    int apiVersion = mModule->getModuleApiVersion();
     if (halVersion != CAMERA_HAL_API_VERSION_UNSPECIFIED &&
-            mModule->common.module_api_version < CAMERA_MODULE_API_VERSION_2_3) {
+            apiVersion < CAMERA_MODULE_API_VERSION_2_3) {
         /*
          * Either the HAL version is unspecified in which case this just creates
          * a camera client selected by the latest device version, or
@@ -798,142 +1166,25 @@
          * the open_legacy call
          */
         ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
-                __FUNCTION__, mModule->common.module_api_version);
+                __FUNCTION__, apiVersion);
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8("HAL module version doesn't support legacy HAL connections"));
         return INVALID_OPERATION;
     }
 
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+    status_t ret = NO_ERROR;
+    sp<Client> client = nullptr;
+    ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, clientPackageName,
+            clientUid, API_1, true, false, /*out*/client);
 
-    LOG1("CameraService::connect legacy E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
+    if (ret != NO_ERROR) {
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8::format("%s (%d)", strerror(-ret), ret));
+        return ret;
     }
 
-    sp<Client> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        sp<BasicClient> clientTmp;
-        if (!canConnectUnsafe(cameraId, clientPackageName,
-                              IInterface::asBinder(cameraClient),
-                              /*out*/clientTmp)) {
-            return -EBUSY;
-        } else if (client.get() != NULL) {
-            device = static_cast<Client*>(clientTmp.get());
-            return OK;
-        }
-
-        status = connectHelperLocked(/*out*/client,
-                                     cameraClient,
-                                     cameraId,
-                                     clientPackageName,
-                                     clientUid,
-                                     callingPid,
-                                     halVersion,
-                                     /*legacyMode*/true);
-        if (status != OK) {
-            return status;
-        }
-
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-
     device = client;
-    return OK;
-}
-
-status_t CameraService::connectFinishUnsafe(const sp<BasicClient>& client,
-                                            const sp<IBinder>& remoteCallback) {
-    status_t status = client->initialize(mModule);
-    if (status != OK) {
-        ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
-        return status;
-    }
-    if (remoteCallback != NULL) {
-        remoteCallback->linkToDeath(this);
-    }
-
-    return OK;
-}
-
-status_t CameraService::connectPro(
-                                        const sp<IProCameraCallbacks>& cameraCb,
-                                        int cameraId,
-                                        const String16& clientPackageName,
-                                        int clientUid,
-                                        /*out*/
-                                        sp<IProCameraUser>& device)
-{
-    if (cameraCb == 0) {
-        ALOGE("%s: Callback must not be null", __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
-
-    LOG1("CameraService::connectPro E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
-    }
-
-    sp<ProClient> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        {
-            sp<BasicClient> client;
-            if (!canConnectUnsafe(cameraId, clientPackageName,
-                                  IInterface::asBinder(cameraCb),
-                                  /*out*/client)) {
-                return -EBUSY;
-            }
-        }
-
-        int facing = -1;
-        int deviceVersion = getDeviceVersion(cameraId, &facing);
-
-        switch(deviceVersion) {
-          case CAMERA_DEVICE_API_VERSION_1_0:
-            ALOGE("Camera id %d uses HALv1, doesn't support ProCamera",
-                  cameraId);
-            return -EOPNOTSUPP;
-            break;
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
-          case CAMERA_DEVICE_API_VERSION_3_0:
-          case CAMERA_DEVICE_API_VERSION_3_1:
-          case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new ProCamera2Client(this, cameraCb, clientPackageName,
-                    cameraId, facing, callingPid, clientUid, getpid());
-            break;
-          case -1:
-            ALOGE("Invalid camera id %d", cameraId);
-            return BAD_VALUE;
-          default:
-            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
-        }
-
-        status_t status = connectFinishUnsafe(client, client->getRemote());
-        if (status != OK) {
-            return status;
-        }
-
-        mProClientList[cameraId].push(client);
-
-        LOG1("CameraService::connectPro X (id %d, this pid is %d)", cameraId,
-                getpid());
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-    device = client;
-    return OK;
+    return NO_ERROR;
 }
 
 status_t CameraService::connectDevice(
@@ -942,110 +1193,178 @@
         const String16& clientPackageName,
         int clientUid,
         /*out*/
-        sp<ICameraDeviceUser>& device)
-{
+        sp<ICameraDeviceUser>& device) {
 
-    String8 clientName8(clientPackageName);
-    int callingPid = getCallingPid();
+    status_t ret = NO_ERROR;
+    String8 id = String8::format("%d", cameraId);
+    sp<CameraDeviceClient> client = nullptr;
+    ret = connectHelper<ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
+            CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, API_2, false, false,
+            /*out*/client);
 
-    LOG1("CameraService::connectDevice E (pid %d \"%s\", id %d)", callingPid,
-            clientName8.string(), cameraId);
-
-    status_t status = validateConnect(cameraId, /*inout*/clientUid);
-    if (status != OK) {
-        return status;
+    if (ret != NO_ERROR) {
+        logRejected(id, getCallingPid(), String8(clientPackageName),
+                String8::format("%s (%d)", strerror(-ret), ret));
+        return ret;
     }
 
-    sp<CameraDeviceClient> client;
-    {
-        Mutex::Autolock lock(mServiceLock);
-        {
-            sp<BasicClient> client;
-            if (!canConnectUnsafe(cameraId, clientPackageName,
-                                  IInterface::asBinder(cameraCb),
-                                  /*out*/client)) {
-                return -EBUSY;
-            }
-        }
-
-        int facing = -1;
-        int deviceVersion = getDeviceVersion(cameraId, &facing);
-
-        switch(deviceVersion) {
-          case CAMERA_DEVICE_API_VERSION_1_0:
-            ALOGW("Camera using old HAL version: %d", deviceVersion);
-            return -EOPNOTSUPP;
-           // TODO: don't allow 2.0  Only allow 2.1 and higher
-          case CAMERA_DEVICE_API_VERSION_2_0:
-          case CAMERA_DEVICE_API_VERSION_2_1:
-          case CAMERA_DEVICE_API_VERSION_3_0:
-          case CAMERA_DEVICE_API_VERSION_3_1:
-          case CAMERA_DEVICE_API_VERSION_3_2:
-            client = new CameraDeviceClient(this, cameraCb, clientPackageName,
-                    cameraId, facing, callingPid, clientUid, getpid());
-            break;
-          case -1:
-            ALOGE("Invalid camera id %d", cameraId);
-            return BAD_VALUE;
-          default:
-            ALOGE("Unknown camera device HAL version: %d", deviceVersion);
-            return INVALID_OPERATION;
-        }
-
-        status_t status = connectFinishUnsafe(client, client->getRemote());
-        if (status != OK) {
-            // this is probably not recoverable.. maybe the client can try again
-            return status;
-        }
-
-        LOG1("CameraService::connectDevice X (id %d, this pid is %d)", cameraId,
-                getpid());
-
-        mClient[cameraId] = client;
-    }
-    // important: release the mutex here so the client can call back
-    //    into the service from its destructor (can be at the end of the call)
-
     device = client;
+    return NO_ERROR;
+}
+
+status_t CameraService::setTorchMode(const String16& cameraId, bool enabled,
+        const sp<IBinder>& clientBinder) {
+    if (enabled && clientBinder == nullptr) {
+        ALOGE("%s: torch client binder is NULL", __FUNCTION__);
+        return -EINVAL;
+    }
+
+    String8 id = String8(cameraId.string());
+    int uid = getCallingUid();
+
+    // verify id is valid.
+    auto state = getCameraState(id);
+    if (state == nullptr) {
+        ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+        return -EINVAL;
+    }
+
+    ICameraServiceListener::Status cameraStatus = state->getStatus();
+    if (cameraStatus != ICameraServiceListener::STATUS_PRESENT &&
+            cameraStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+        ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
+        return -EINVAL;
+    }
+
+    {
+        Mutex::Autolock al(mTorchStatusMutex);
+        ICameraServiceListener::TorchStatus status;
+        status_t res = getTorchStatusLocked(id, &status);
+        if (res) {
+            ALOGE("%s: getting current torch status failed for camera %s",
+                    __FUNCTION__, id.string());
+            return -EINVAL;
+        }
+
+        if (status == ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE) {
+            if (cameraStatus == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
+                ALOGE("%s: torch mode of camera %s is not available because "
+                        "camera is in use", __FUNCTION__, id.string());
+                return -EBUSY;
+            } else {
+                ALOGE("%s: torch mode of camera %s is not available due to "
+                        "insufficient resources", __FUNCTION__, id.string());
+                return -EUSERS;
+            }
+        }
+    }
+
+    {
+        // Update UID map - this is used in the torch status changed callbacks, so must be done
+        // before setTorchMode
+        Mutex::Autolock al(mTorchClientMapMutex);
+        if (mTorchUidMap.find(id) == mTorchUidMap.end()) {
+            mTorchUidMap[id].first = uid;
+            mTorchUidMap[id].second = uid;
+        } else {
+            // Set the pending UID
+            mTorchUidMap[id].first = uid;
+        }
+    }
+
+    status_t res = mFlashlight->setTorchMode(id, enabled);
+
+    if (res) {
+        ALOGE("%s: setting torch mode of camera %s to %d failed. %s (%d)",
+                __FUNCTION__, id.string(), enabled, strerror(-res), res);
+        return res;
+    }
+
+    {
+        // update the link to client's death
+        Mutex::Autolock al(mTorchClientMapMutex);
+        ssize_t index = mTorchClientMap.indexOfKey(id);
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+        if (enabled) {
+            if (index == NAME_NOT_FOUND) {
+                mTorchClientMap.add(id, clientBinder);
+            } else {
+                mTorchClientMap.valueAt(index)->unlinkToDeath(this);
+                mTorchClientMap.replaceValueAt(index, clientBinder);
+            }
+            clientBinder->linkToDeath(this);
+        } else if (index != NAME_NOT_FOUND) {
+            mTorchClientMap.valueAt(index)->unlinkToDeath(this);
+        }
+    }
+
     return OK;
 }
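
setTorchMode distinguishes two unavailability cases before touching the flash unit: if the torch is reported unavailable while the camera itself is open, the call fails with -EBUSY; otherwise it fails with -EUSERS (flash resources exhausted). A small sketch of just that gating (the enums are placeholders for the ICameraServiceListener status constants):

#include <cerrno>
#include <cstdio>

// Sketch of the torch availability gating in setTorchMode. The enums are
// placeholders for the ICameraServiceListener status constants.
enum CamStatus   { CAM_PRESENT, CAM_NOT_AVAILABLE };
enum TorchStatus { TORCH_AVAILABLE, TORCH_NOT_AVAILABLE };

static int checkTorchRequest(CamStatus cam, TorchStatus torch) {
    if (torch == TORCH_NOT_AVAILABLE) {
        // The camera being open is the one case worth distinguishing for callers.
        return (cam == CAM_NOT_AVAILABLE) ? -EBUSY : -EUSERS;
    }
    return 0;
}

int main() {
    std::printf("in use: %d, no resources: %d\n",
            checkTorchRequest(CAM_NOT_AVAILABLE, TORCH_NOT_AVAILABLE),
            checkTorchRequest(CAM_PRESENT, TORCH_NOT_AVAILABLE));
    return 0;
}
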
 
+void CameraService::notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) {
+    switch(eventId) {
+        case ICameraService::USER_SWITCHED: {
+            doUserSwitch(/*newUserIds*/args, /*length*/length);
+            break;
+        }
+        case ICameraService::NO_EVENT:
+        default: {
+            ALOGW("%s: Received invalid system event from system_server: %d", __FUNCTION__,
+                    eventId);
+            break;
+        }
+    }
+}
 
-status_t CameraService::addListener(
-                                const sp<ICameraServiceListener>& listener) {
+status_t CameraService::addListener(const sp<ICameraServiceListener>& listener) {
     ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
 
-    if (listener == 0) {
+    if (listener == nullptr) {
         ALOGE("%s: Listener must not be null", __FUNCTION__);
         return BAD_VALUE;
     }
 
     Mutex::Autolock lock(mServiceLock);
 
-    Vector<sp<ICameraServiceListener> >::iterator it, end;
-    for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-        if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
-            ALOGW("%s: Tried to add listener %p which was already subscribed",
-                  __FUNCTION__, listener.get());
-            return ALREADY_EXISTS;
+    {
+        Mutex::Autolock lock(mStatusListenerLock);
+        for (auto& it : mListenerList) {
+            if (IInterface::asBinder(it) == IInterface::asBinder(listener)) {
+                ALOGW("%s: Tried to add listener %p which was already subscribed",
+                      __FUNCTION__, listener.get());
+                return ALREADY_EXISTS;
+            }
         }
+
+        mListenerList.push_back(listener);
     }
 
-    mListenerList.push_back(listener);
 
     /* Immediately signal current status to this listener only */
     {
-        Mutex::Autolock m(mStatusMutex) ;
-        int numCams = getNumberOfCameras();
-        for (int i = 0; i < numCams; ++i) {
-            listener->onStatusChanged(mStatusList[i], i);
+        Mutex::Autolock lock(mCameraStatesLock);
+        for (auto& i : mCameraStates) {
+            // TODO: Update binder to use String16 for camera IDs and remove this conversion
+            int id = cameraIdToInt(i.first);
+            if (id == -1) continue;
+
+            listener->onStatusChanged(i.second->getStatus(), id);
+        }
+    }
+
+    /* Immediately signal current torch status to this listener only */
+    {
+        Mutex::Autolock al(mTorchStatusMutex);
+        for (size_t i = 0; i < mTorchStatusMap.size(); i++ ) {
+            String16 id = String16(mTorchStatusMap.keyAt(i).string());
+            listener->onTorchStatusChanged(mTorchStatusMap.valueAt(i), id);
         }
     }
 
     return OK;
 }
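
Note that addListener replays the current status of every camera (and every torch) to the newly added listener before returning, so a subscriber that attaches late still observes devices whose state changed earlier. A minimal sketch of that replay-on-subscribe behavior with standard containers (names are illustrative):

#include <cstdio>
#include <functional>
#include <map>
#include <vector>

// Sketch of the replay-on-subscribe behavior of addListener: deliver every
// camera's current status to the new listener before registering it.
using Listener = std::function<void(int /*cameraId*/, int /*status*/)>;

static std::map<int, int> gStatuses = {{0, /*PRESENT*/ 1}, {1, /*ENUMERATING*/ 2}};
static std::vector<Listener> gListeners;

static void addListener(Listener l) {
    for (const auto& kv : gStatuses) {
        l(kv.first, kv.second); // replay the current snapshot
    }
    gListeners.push_back(std::move(l));
}

int main() {
    addListener([](int id, int status) {
        std::printf("camera %d -> status %d\n", id, status);
    });
    return 0;
}
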
-status_t CameraService::removeListener(
-                                const sp<ICameraServiceListener>& listener) {
+
+status_t CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
     ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get());
 
     if (listener == 0) {
@@ -1055,11 +1374,13 @@
 
     Mutex::Autolock lock(mServiceLock);
 
-    Vector<sp<ICameraServiceListener> >::iterator it;
-    for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-        if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
-            mListenerList.erase(it);
-            return OK;
+    {
+        Mutex::Autolock lock(mStatusListenerLock);
+        for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
+            if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
+                mListenerList.erase(it);
+                return OK;
+            }
         }
     }
 
@@ -1069,10 +1390,7 @@
     return BAD_VALUE;
 }
 
-status_t CameraService::getLegacyParameters(
-            int cameraId,
-            /*out*/
-            String16* parameters) {
+status_t CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
     ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
 
     if (parameters == NULL) {
@@ -1127,6 +1445,7 @@
             return OK;
         }
       case CAMERA_DEVICE_API_VERSION_3_2:
+      case CAMERA_DEVICE_API_VERSION_3_3:
         ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
                 __FUNCTION__, cameraId);
         return OK;
@@ -1141,140 +1460,281 @@
     return OK;
 }
 
-void CameraService::removeClientByRemote(const wp<IBinder>& remoteBinder) {
-    int callingPid = getCallingPid();
-    LOG1("CameraService::removeClientByRemote E (pid %d)", callingPid);
-
-    // Declare this before the lock to make absolutely sure the
-    // destructor won't be called with the lock held.
+void CameraService::removeByClient(const BasicClient* client) {
     Mutex::Autolock lock(mServiceLock);
-
-    int outIndex;
-    sp<BasicClient> client = findClientUnsafe(remoteBinder, outIndex);
-
-    if (client != 0) {
-        // Found our camera, clear and leave.
-        LOG1("removeClient: clear camera %d", outIndex);
-
-        sp<IBinder> remote = client->getRemote();
-        if (remote != NULL) {
-            remote->unlinkToDeath(this);
-        }
-
-        mClient[outIndex].clear();
-    } else {
-
-        sp<ProClient> clientPro = findProClientUnsafe(remoteBinder);
-
-        if (clientPro != NULL) {
-            // Found our camera, clear and leave.
-            LOG1("removeClient: clear pro %p", clientPro.get());
-
-            IInterface::asBinder(clientPro->getRemoteCallback())->unlinkToDeath(this);
+    for (auto& i : mActiveClientManager.getAll()) {
+        auto clientSp = i->getValue();
+        if (clientSp.get() == client) {
+            mActiveClientManager.remove(i);
         }
     }
-
-    LOG1("CameraService::removeClientByRemote X (pid %d)", callingPid);
 }
 
-sp<CameraService::ProClient> CameraService::findProClientUnsafe(
-                        const wp<IBinder>& cameraCallbacksRemote)
-{
-    sp<ProClient> clientPro;
+bool CameraService::evictClientIdByRemote(const wp<IBinder>& remote) {
+    const int callingPid = getCallingPid();
+    const int servicePid = getpid();
+    bool ret = false;
+    {
+        // Acquire mServiceLock and prevent other clients from connecting
+        std::unique_ptr<AutoConditionLock> lock =
+                AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
 
-    for (int i = 0; i < mNumberOfCameras; ++i) {
-        Vector<size_t> removeIdx;
 
-        for (size_t j = 0; j < mProClientList[i].size(); ++j) {
-            wp<ProClient> cl = mProClientList[i][j];
+        std::vector<sp<BasicClient>> evicted;
+        for (auto& i : mActiveClientManager.getAll()) {
+            auto clientSp = i->getValue();
+            if (clientSp.get() == nullptr) {
+                ALOGE("%s: Dead client still in mActiveClientManager.", __FUNCTION__);
+                mActiveClientManager.remove(i);
+                continue;
+            }
+            if (remote == clientSp->getRemote() && (callingPid == servicePid ||
+                    callingPid == clientSp->getClientPid())) {
+                mActiveClientManager.remove(i);
+                evicted.push_back(clientSp);
 
-            sp<ProClient> clStrong = cl.promote();
-            if (clStrong != NULL && clStrong->getRemote() == cameraCallbacksRemote) {
-                clientPro = clStrong;
-                break;
-            } else if (clStrong == NULL) {
-                // mark to clean up dead ptr
-                removeIdx.push(j);
+                // Notify the client of disconnection
+                clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+                        CaptureResultExtras());
             }
         }
 
-        // remove stale ptrs (in reverse so the indices dont change)
-        for (ssize_t j = (ssize_t)removeIdx.size() - 1; j >= 0; --j) {
-            mProClientList[i].removeAt(removeIdx[j]);
+        // Do not hold mServiceLock while disconnecting clients, but retain the condition blocking
+        // other clients from connecting in mServiceLockWrapper if held
+        mServiceLock.unlock();
+
+        // Do not clear caller identity; the remote caller should be the client process
+
+        for (auto& i : evicted) {
+            if (i.get() != nullptr) {
+                i->disconnect();
+                ret = true;
+            }
         }
 
-    }
+        // Reacquire mServiceLock
+        mServiceLock.lock();
 
-    return clientPro;
+    } // lock is destroyed, allow further connect calls
+
+    return ret;
 }
 
-sp<CameraService::BasicClient> CameraService::findClientUnsafe(
-                        const wp<IBinder>& cameraClient, int& outIndex) {
-    sp<BasicClient> client;
 
-    for (int i = 0; i < mNumberOfCameras; i++) {
+/**
+ * Check camera capabilities, such as support for basic color operation
+ */
+int CameraService::checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId) {
 
-        // This happens when we have already disconnected (or this is
-        // just another unused camera).
-        if (mClient[i] == 0) continue;
+    // Assume all devices pre-v3.3 are backward-compatible
+    bool isBackwardCompatible = true;
+    if (mModule->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_0
+            && info.device_version >= CAMERA_DEVICE_API_VERSION_3_3) {
+        isBackwardCompatible = false;
+        status_t res;
+        camera_metadata_ro_entry_t caps;
+        res = find_camera_metadata_ro_entry(
+            info.static_camera_characteristics,
+            ANDROID_REQUEST_AVAILABLE_CAPABILITIES,
+            &caps);
+        if (res != 0) {
+            ALOGW("%s: Unable to find camera capabilities for camera device %d",
+                    __FUNCTION__, id);
+            caps.count = 0;
+        }
+        for (size_t i = 0; i < caps.count; i++) {
+            if (caps.data.u8[i] ==
+                    ANDROID_REQUEST_AVAILABLE_CAPABILITIES_BACKWARD_COMPATIBLE) {
+                isBackwardCompatible = true;
+                break;
+            }
+        }
+    }
 
-        // Promote mClient. It can fail if we are called from this path:
-        // Client::~Client() -> disconnect() -> removeClientByRemote().
-        client = mClient[i].promote();
+    if (!isBackwardCompatible) {
+        mNumberOfNormalCameras--;
+        *latestStrangeCameraId = id;
+    } else {
+        if (id > *latestStrangeCameraId) {
+            ALOGE("%s: Normal camera ID %d higher than strange camera ID %d. "
+                    "This is not allowed due backward-compatibility requirements",
+                    __FUNCTION__, id, *latestStrangeCameraId);
+            logServiceError("Invalid order of camera devices", ENODEV);
+            mNumberOfCameras = 0;
+            mNumberOfNormalCameras = 0;
+            return INVALID_OPERATION;
+        }
+    }
+    return OK;
+}
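
The ordering rule enforced above is that every backward-compatible ("normal") camera must have a lower ID than any non-backward-compatible one, so legacy API1 clients can safely enumerate IDs 0..N-1. A small sketch of that check in isolation, assuming IDs are visited in increasing order:

#include <cstdio>
#include <vector>

// Sketch of the ID-ordering rule: visiting camera IDs in increasing order,
// a backward-compatible device may never appear after a non-compatible one.
static bool validOrdering(const std::vector<bool>& isBackwardCompatible) {
    bool sawStrange = false;
    for (bool compat : isBackwardCompatible) {
        if (!compat) {
            sawStrange = true;
        } else if (sawStrange) {
            return false; // a "normal" camera with a higher ID than a "strange" one
        }
    }
    return true;
}

int main() {
    std::printf("ok=%d invalid=%d\n",
            validOrdering({true, true, false}) ? 1 : 0,
            validOrdering({true, false, true}) ? 1 : 0);
    return 0;
}
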
 
-        // Clean up stale client entry
-        if (client == NULL) {
-            mClient[i].clear();
+std::shared_ptr<CameraService::CameraState> CameraService::getCameraState(
+        const String8& cameraId) const {
+    std::shared_ptr<CameraState> state;
+    {
+        Mutex::Autolock lock(mCameraStatesLock);
+        auto iter = mCameraStates.find(cameraId);
+        if (iter != mCameraStates.end()) {
+            state = iter->second;
+        }
+    }
+    return state;
+}
+
+sp<CameraService::BasicClient> CameraService::removeClientLocked(const String8& cameraId) {
+    // Remove from active clients list
+    auto clientDescriptorPtr = mActiveClientManager.remove(cameraId);
+    if (clientDescriptorPtr == nullptr) {
+        ALOGW("%s: Could not evict client, no client for camera ID %s", __FUNCTION__,
+                cameraId.string());
+        return sp<BasicClient>{nullptr};
+    }
+
+    return clientDescriptorPtr->getValue();
+}
+
+void CameraService::doUserSwitch(const int32_t* newUserId, size_t length) {
+    // Acquire mServiceLock and prevent other clients from connecting
+    std::unique_ptr<AutoConditionLock> lock =
+            AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
+
+    std::set<userid_t> newAllowedUsers;
+    for (size_t i = 0; i < length; i++) {
+        if (newUserId[i] < 0) {
+            ALOGE("%s: Bad user ID %d given during user switch, ignoring.",
+                    __FUNCTION__, newUserId[i]);
+            return;
+        }
+        newAllowedUsers.insert(static_cast<userid_t>(newUserId[i]));
+    }
+
+
+    if (newAllowedUsers == mAllowedUsers) {
+        ALOGW("%s: Received notification of user switch with no updated user IDs.", __FUNCTION__);
+        return;
+    }
+
+    logUserSwitch(mAllowedUsers, newAllowedUsers);
+
+    mAllowedUsers = std::move(newAllowedUsers);
+
+    // Current user has switched, evict all current clients.
+    std::vector<sp<BasicClient>> evicted;
+    for (auto& i : mActiveClientManager.getAll()) {
+        auto clientSp = i->getValue();
+
+        if (clientSp.get() == nullptr) {
+            ALOGE("%s: Dead client still in mActiveClientManager.", __FUNCTION__);
             continue;
         }
 
-        if (cameraClient == client->getRemote()) {
-            // Found our camera
-            outIndex = i;
-            return client;
+        // Don't evict clients that are still allowed.
+        uid_t clientUid = clientSp->getClientUid();
+        userid_t clientUserId = multiuser_get_user_id(clientUid);
+        if (mAllowedUsers.find(clientUserId) != mAllowedUsers.end()) {
+            continue;
         }
+
+        evicted.push_back(clientSp);
+
+        String8 curTime = getFormattedCurrentTime();
+
+        ALOGE("Evicting conflicting client for camera ID %s due to user change",
+                i->getKey().string());
+
+        // Log the clients evicted
+        logEvent(String8::format("EVICT device %s client held by package %s (PID %"
+                PRId32 ", priority %" PRId32 ")\n   - Evicted due to user switch.",
+                i->getKey().string(), String8{clientSp->getPackageName()}.string(),
+                i->getOwnerId(), i->getPriority()));
+
     }
 
-    outIndex = -1;
-    return NULL;
+    // Do not hold mServiceLock while disconnecting clients, but retain the condition
+    // blocking other clients from connecting in mServiceLockWrapper if held.
+    mServiceLock.unlock();
+
+    // Clear caller identity temporarily so client disconnect PID checks work correctly
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+
+    for (auto& i : evicted) {
+        i->disconnect();
+    }
+
+    IPCThreadState::self()->restoreCallingIdentity(token);
+
+    // Reacquire mServiceLock
+    mServiceLock.lock();
 }
 
-CameraService::BasicClient* CameraService::getClientByIdUnsafe(int cameraId) {
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) return NULL;
-    return mClient[cameraId].unsafe_get();
+void CameraService::logEvent(const char* event) {
+    String8 curTime = getFormattedCurrentTime();
+    Mutex::Autolock l(mLogLock);
+    mEventLog.add(String8::format("%s : %s", curTime.string(), event));
 }
 
-Mutex* CameraService::getClientLockById(int cameraId) {
-    if (cameraId < 0 || cameraId >= mNumberOfCameras) return NULL;
-    return &mClientLock[cameraId];
+void CameraService::logDisconnected(const char* cameraId, int clientPid,
+        const char* clientPackage) {
+    // Log the client disconnected
+    logEvent(String8::format("DISCONNECT device %s client for package %s (PID %d)", cameraId,
+            clientPackage, clientPid));
 }
 
-sp<CameraService::BasicClient> CameraService::getClientByRemote(
-                                const wp<IBinder>& cameraClient) {
-
-    // Declare this before the lock to make absolutely sure the
-    // destructor won't be called with the lock held.
-    sp<BasicClient> client;
-
-    Mutex::Autolock lock(mServiceLock);
-
-    int outIndex;
-    client = findClientUnsafe(cameraClient, outIndex);
-
-    return client;
+void CameraService::logConnected(const char* cameraId, int clientPid,
+        const char* clientPackage) {
+    // Log the client connected
+    logEvent(String8::format("CONNECT device %s client for package %s (PID %d)", cameraId,
+            clientPackage, clientPid));
 }
 
-status_t CameraService::onTransact(
-    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+void CameraService::logRejected(const char* cameraId, int clientPid,
+        const char* clientPackage, const char* reason) {
+    // Log the client rejected
+    logEvent(String8::format("REJECT device %s client for package %s (PID %d), reason: (%s)",
+            cameraId, clientPackage, clientPid, reason));
+}
+
+void CameraService::logUserSwitch(const std::set<userid_t>& oldUserIds,
+        const std::set<userid_t>& newUserIds) {
+    String8 newUsers = toString(newUserIds);
+    String8 oldUsers = toString(oldUserIds);
+    // Log the new and old users
+    logEvent(String8::format("USER_SWITCH previous allowed users: %s , current allowed users: %s",
+            oldUsers.string(), newUsers.string()));
+}
+
+void CameraService::logDeviceRemoved(const char* cameraId, const char* reason) {
+    // Log the device removal
+    logEvent(String8::format("REMOVE device %s, reason: (%s)", cameraId, reason));
+}
+
+void CameraService::logDeviceAdded(const char* cameraId, const char* reason) {
+    // Log the device addition
+    logEvent(String8::format("ADD device %s, reason: (%s)", cameraId, reason));
+}
+
+void CameraService::logClientDied(int clientPid, const char* reason) {
+    // Log the client death
+    logEvent(String8::format("DIED client(s) with PID %d, reason: (%s)", clientPid, reason));
+}
+
+void CameraService::logServiceError(const char* msg, int errorCode) {
+    String8 curTime = getFormattedCurrentTime();
+    logEvent(String8::format("SERVICE ERROR: %s : %d (%s)", msg, errorCode, strerror(errorCode)));
+}
+
+status_t CameraService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
+        uint32_t flags) {
+
+    const int pid = getCallingPid();
+    const int selfPid = getpid();
+
     // Permission checks
     switch (code) {
         case BnCameraService::CONNECT:
-        case BnCameraService::CONNECT_PRO:
         case BnCameraService::CONNECT_DEVICE:
-        case BnCameraService::CONNECT_LEGACY:
-            const int pid = getCallingPid();
-            const int self_pid = getpid();
-            if (pid != self_pid) {
+        case BnCameraService::CONNECT_LEGACY: {
+            if (pid != selfPid) {
                 // we're called from a different process, do the real check
                 if (!checkCallingPermission(
                         String16("android.permission.CAMERA"))) {
@@ -1285,29 +1745,26 @@
                 }
             }
             break;
+        }
+        case BnCameraService::NOTIFY_SYSTEM_EVENT: {
+            if (pid != selfPid) {
+                // Ensure we're being called by system_server, or similar process with
+                // permissions to notify the camera service about system events
+                if (!checkCallingPermission(
+                        String16("android.permission.CAMERA_SEND_SYSTEM_EVENTS"))) {
+                    const int uid = getCallingUid();
+                    ALOGE("Permission Denial: cannot send updates to camera service about system"
+                            " events from pid=%d, uid=%d", pid, uid);
+                    return PERMISSION_DENIED;
+                }
+            }
+            break;
+        }
     }
 
     return BnCameraService::onTransact(code, data, reply, flags);
 }
 
-// The reason we need this busy bit is a new CameraService::connect() request
-// may come in while the previous Client's destructor has not been run or is
-// still running. If the last strong reference of the previous Client is gone
-// but the destructor has not been finished, we should not allow the new Client
-// to be created because we need to wait for the previous Client to tear down
-// the hardware first.
-void CameraService::setCameraBusy(int cameraId) {
-    android_atomic_write(1, &mBusy[cameraId]);
-
-    ALOGV("setCameraBusy cameraId=%d", cameraId);
-}
-
-void CameraService::setCameraFree(int cameraId) {
-    android_atomic_write(0, &mBusy[cameraId]);
-
-    ALOGV("setCameraFree cameraId=%d", cameraId);
-}
-
 // We share the media players for shutter and recording sound for all clients.
 // A reference count is kept to determine when we will actually release the
 // media players.
@@ -1376,7 +1833,6 @@
 
     mRemoteCallback = cameraClient;
 
-    cameraService->setCameraBusy(cameraId);
     cameraService->loadSound();
 
     LOG1("Client::Client X (pid %d, id %d)", callingPid, cameraId);
@@ -1398,7 +1854,7 @@
         int cameraId, int cameraFacing,
         int clientPid, uid_t clientUid,
         int servicePid):
-        mClientPackageName(clientPackageName)
+        mClientPackageName(clientPackageName), mDisconnected(false)
 {
     mCameraService = cameraService;
     mRemoteBinder = remoteCallback;
@@ -1417,14 +1873,47 @@
 }
 
 void CameraService::BasicClient::disconnect() {
-    ALOGV("BasicClient::disconnect");
-    mCameraService->removeClientByRemote(mRemoteBinder);
+    if (mDisconnected) {
+        ALOGE("%s: Disconnect called on already disconnected client for device %d", __FUNCTION__,
+                mCameraId);
+        return;
+    }
+    mDisconnected = true;
+
+    mCameraService->removeByClient(this);
+    mCameraService->logDisconnected(String8::format("%d", mCameraId), mClientPid,
+            String8(mClientPackageName));
+
+    sp<IBinder> remote = getRemote();
+    if (remote != nullptr) {
+        remote->unlinkToDeath(mCameraService);
+    }
 
     finishCameraOps();
+    ALOGI("%s: Disconnected client for camera %d for PID %d", __FUNCTION__, mCameraId, mClientPid);
+
     // client shouldn't be able to call into us anymore
     mClientPid = 0;
 }
 
+String16 CameraService::BasicClient::getPackageName() const {
+    return mClientPackageName;
+}
+
+
+int CameraService::BasicClient::getClientPid() const {
+    return mClientPid;
+}
+
+uid_t CameraService::BasicClient::getClientUid() const {
+    return mClientUid;
+}
+
+bool CameraService::BasicClient::canCastToApiClient(apiLevel level) const {
+    // Defaults to API2.
+    return level == API_2;
+}
+
 status_t CameraService::BasicClient::startCameraOps() {
     int32_t res;
     // Notify app ops that the camera is not available
@@ -1440,17 +1929,24 @@
     res = mAppOpsManager.startOp(AppOpsManager::OP_CAMERA,
             mClientUid, mClientPackageName);
 
-    if (res != AppOpsManager::MODE_ALLOWED) {
+    if (res == AppOpsManager::MODE_ERRORED) {
         ALOGI("Camera %d: Access for \"%s\" has been revoked",
                 mCameraId, String8(mClientPackageName).string());
         return PERMISSION_DENIED;
     }
 
+    if (res == AppOpsManager::MODE_IGNORED) {
+        ALOGI("Camera %d: Access for \"%s\" has been restricted",
+                mCameraId, String8(mClientPackageName).string());
+        // Return the same error as for device policy manager rejection
+        return -EACCES;
+    }
+
     mOpsActive = true;
 
     // Transition device availability listeners from PRESENT -> NOT_AVAILABLE
     mCameraService->updateStatus(ICameraServiceListener::STATUS_NOT_AVAILABLE,
-            mCameraId);
+            String8::format("%d", mCameraId));
 
     return OK;
 }
@@ -1463,19 +1959,16 @@
                 mClientPackageName);
         mOpsActive = false;
 
-        // Notify device availability listeners that this camera is available
-        // again
+        auto rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
+                ICameraServiceListener::STATUS_ENUMERATING};
 
-        StatusVector rejectSourceStates;
-        rejectSourceStates.push_back(ICameraServiceListener::STATUS_NOT_PRESENT);
-        rejectSourceStates.push_back(ICameraServiceListener::STATUS_ENUMERATING);
-
-        // Transition to PRESENT if the camera is not in either of above 2
-        // states
+        // Transition to PRESENT if the camera is not in either of the rejected states
         mCameraService->updateStatus(ICameraServiceListener::STATUS_PRESENT,
-                mCameraId,
-                &rejectSourceStates);
+                String8::format("%d", mCameraId), rejected);
 
+        // Notify flashlight that a camera device is closed.
+        mCameraService->mFlashlight->deviceClosed(
+                String8::format("%d", mCameraId));
     }
     // Always stop watching, even if no camera op is active
     if (mOpsCallback != NULL) {
@@ -1518,26 +2011,15 @@
 
 // ----------------------------------------------------------------------------
 
-Mutex* CameraService::Client::getClientLockFromCookie(void* user) {
-    return gCameraService->getClientLockById((int)(intptr_t) user);
-}
-
-// Provide client pointer for callbacks. Client lock returned from getClientLockFromCookie should
-// be acquired for this to be safe
-CameraService::Client* CameraService::Client::getClientFromCookie(void* user) {
-    BasicClient *basicClient = gCameraService->getClientByIdUnsafe((int)(intptr_t) user);
-    // OK: only CameraClient calls this, and they already cast anyway.
-    Client* client = static_cast<Client*>(basicClient);
-
-    // This could happen if the Client is in the process of shutting down (the
-    // last strong reference is gone, but the destructor hasn't finished
-    // stopping the hardware).
-    if (client == NULL) return NULL;
-
-    // destruction already started, so should not be accessed
-    if (client->mDestructionStarted) return NULL;
-
-    return client;
+// Provide client strong pointer for callbacks.
+sp<CameraService::Client> CameraService::Client::getClientFromCookie(void* user) {
+    String8 cameraId = String8::format("%d", (int)(intptr_t) user);
+    auto clientDescriptor = gCameraService->mActiveClientManager.get(cameraId);
+    if (clientDescriptor != nullptr) {
+        return sp<Client>{
+                static_cast<Client*>(clientDescriptor->getValue().get())};
+    }
+    return sp<Client>{nullptr};
 }
 
 void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
@@ -1549,7 +2031,10 @@
 void CameraService::Client::disconnect() {
     ALOGV("Client::disconnect");
     BasicClient::disconnect();
-    mCameraService->setCameraFree(mCameraId);
+}
+
+bool CameraService::Client::canCastToApiClient(apiLevel level) const {
+    return level == API_1;
 }
 
 CameraService::Client::OpsCallback::OpsCallback(wp<BasicClient> client):
@@ -1565,30 +2050,138 @@
 }
 
 // ----------------------------------------------------------------------------
-//                  IProCamera
+//                  CameraState
 // ----------------------------------------------------------------------------
 
-CameraService::ProClient::ProClient(const sp<CameraService>& cameraService,
-        const sp<IProCameraCallbacks>& remoteCallback,
-        const String16& clientPackageName,
-        int cameraId,
-        int cameraFacing,
-        int clientPid,
-        uid_t clientUid,
-        int servicePid)
-        : CameraService::BasicClient(cameraService, IInterface::asBinder(remoteCallback),
-                clientPackageName, cameraId, cameraFacing,
-                clientPid,  clientUid, servicePid)
-{
-    mRemoteCallback = remoteCallback;
+CameraService::CameraState::CameraState(const String8& id, int cost,
+        const std::set<String8>& conflicting) : mId(id),
+        mStatus(ICameraServiceListener::STATUS_PRESENT), mCost(cost), mConflicting(conflicting) {}
+
+CameraService::CameraState::~CameraState() {}
+
+ICameraServiceListener::Status CameraService::CameraState::getStatus() const {
+    Mutex::Autolock lock(mStatusLock);
+    return mStatus;
 }
 
-CameraService::ProClient::~ProClient() {
+CameraParameters CameraService::CameraState::getShimParams() const {
+    return mShimParams;
 }
 
-void CameraService::ProClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
-        const CaptureResultExtras& resultExtras) {
-    mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, CAMERA_ERROR_RELEASED, 0);
+void CameraService::CameraState::setShimParams(const CameraParameters& params) {
+    mShimParams = params;
+}
+
+int CameraService::CameraState::getCost() const {
+    return mCost;
+}
+
+std::set<String8> CameraService::CameraState::getConflicting() const {
+    return mConflicting;
+}
+
+String8 CameraService::CameraState::getId() const {
+    return mId;
+}
+
+// ----------------------------------------------------------------------------
+//                  ClientEventListener
+// ----------------------------------------------------------------------------
+
+void CameraService::ClientEventListener::onClientAdded(
+        const resource_policy::ClientDescriptor<String8,
+        sp<CameraService::BasicClient>>& descriptor) {
+    auto basicClient = descriptor.getValue();
+    if (basicClient.get() != nullptr) {
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+        notifier.noteStartCamera(descriptor.getKey(),
+                static_cast<int>(basicClient->getClientUid()));
+    }
+}
+
+void CameraService::ClientEventListener::onClientRemoved(
+        const resource_policy::ClientDescriptor<String8,
+        sp<CameraService::BasicClient>>& descriptor) {
+    auto basicClient = descriptor.getValue();
+    if (basicClient.get() != nullptr) {
+        BatteryNotifier& notifier(BatteryNotifier::getInstance());
+        notifier.noteStopCamera(descriptor.getKey(),
+                static_cast<int>(basicClient->getClientUid()));
+    }
+}
+
+
+// ----------------------------------------------------------------------------
+//                  CameraClientManager
+// ----------------------------------------------------------------------------
+
+CameraService::CameraClientManager::CameraClientManager() {
+    setListener(std::make_shared<ClientEventListener>());
+}
+
+CameraService::CameraClientManager::~CameraClientManager() {}
+
+sp<CameraService::BasicClient> CameraService::CameraClientManager::getCameraClient(
+        const String8& id) const {
+    auto descriptor = get(id);
+    if (descriptor == nullptr) {
+        return sp<BasicClient>{nullptr};
+    }
+    return descriptor->getValue();
+}
+
+String8 CameraService::CameraClientManager::toString() const {
+    auto all = getAll();
+    String8 ret("[");
+    bool hasAny = false;
+    for (auto& i : all) {
+        hasAny = true;
+        String8 key = i->getKey();
+        int32_t cost = i->getCost();
+        int32_t pid = i->getOwnerId();
+        int32_t priority = i->getPriority();
+        auto conflicting = i->getConflicting();
+        auto clientSp = i->getValue();
+        String8 packageName;
+        userid_t clientUserId = 0;
+        if (clientSp.get() != nullptr) {
+            packageName = String8{clientSp->getPackageName()};
+            uid_t clientUid = clientSp->getClientUid();
+            clientUserId = multiuser_get_user_id(clientUid);
+        }
+        ret.appendFormat("\n(Camera ID: %s, Cost: %" PRId32 ", PID: %" PRId32 ", Priority: %"
+                PRId32 ", ", key.string(), cost, pid, priority);
+
+        if (clientSp.get() != nullptr) {
+            ret.appendFormat("User Id: %d, ", clientUserId);
+        }
+        if (packageName.size() != 0) {
+            ret.appendFormat("Client Package Name: %s", packageName.string());
+        }
+
+        ret.append(", Conflicting Client Devices: {");
+        for (auto& j : conflicting) {
+            ret.appendFormat("%s, ", j.string());
+        }
+        ret.append("})");
+    }
+    if (hasAny) ret.append("\n");
+    ret.append("]\n");
+    return ret;
+}
+
+CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
+        const String8& key, const sp<BasicClient>& value, int32_t cost,
+        const std::set<String8>& conflictingKeys, int32_t priority, int32_t ownerId) {
+
+    return std::make_shared<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>(
+            key, value, cost, conflictingKeys, priority, ownerId);
+}
+
+CameraService::DescriptorPtr CameraService::CameraClientManager::makeClientDescriptor(
+        const sp<BasicClient>& value, const CameraService::DescriptorPtr& partial) {
+    return makeClientDescriptor(partial->getKey(), value, partial->getCost(),
+            partial->getConflicting(), partial->getPriority(), partial->getOwnerId());
 }
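As a rough usage sketch (not part of the patch): build a descriptor for a client and look it back up once it has been registered with mActiveClientManager; 'client', 'clientPid', and the cost/priority values are placeholders for this example.

    // Illustrative sketch only; the numeric values and 'client' are placeholders.
    auto desc = CameraService::CameraClientManager::makeClientDescriptor(
            String8("0"), client, /*cost*/ 100, /*conflictingKeys*/ std::set<String8>(),
            /*priority*/ 0, /*ownerId*/ clientPid);
    // ... after desc has been added to mActiveClientManager elsewhere ...
    sp<CameraService::BasicClient> current =
            mActiveClientManager.getCameraClient(String8("0"));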
 
 // ----------------------------------------------------------------------------
@@ -1610,7 +2203,7 @@
 }
 
 status_t CameraService::dump(int fd, const Vector<String16>& args) {
-    String8 result;
+    String8 result("Dump of the Camera Service:\n");
     if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
         result.appendFormat("Permission Denial: "
                 "can't dump CameraService from pid=%d, uid=%d\n",
@@ -1629,19 +2222,22 @@
         if (!mModule) {
             result = String8::format("No camera module available!\n");
             write(fd, result.string(), result.size());
+
+            // Dump event log for error information
+            dumpEventLog(fd);
+
             if (locked) mServiceLock.unlock();
             return NO_ERROR;
         }
 
-        result = String8::format("Camera module HAL API version: 0x%x\n",
-                mModule->common.hal_api_version);
-        result.appendFormat("Camera module API version: 0x%x\n",
-                mModule->common.module_api_version);
-        result.appendFormat("Camera module name: %s\n",
-                mModule->common.name);
-        result.appendFormat("Camera module author: %s\n",
-                mModule->common.author);
-        result.appendFormat("Number of camera devices: %d\n\n", mNumberOfCameras);
+        result = String8::format("Camera module HAL API version: 0x%x\n", mModule->getHalApiVersion());
+        result.appendFormat("Camera module API version: 0x%x\n", mModule->getModuleApiVersion());
+        result.appendFormat("Camera module name: %s\n", mModule->getModuleName());
+        result.appendFormat("Camera module author: %s\n", mModule->getModuleAuthor());
+        result.appendFormat("Number of camera devices: %d\n", mNumberOfCameras);
+        String8 activeClientString = mActiveClientManager.toString();
+        result.appendFormat("Active Camera Clients:\n%s", activeClientString.string());
+        result.appendFormat("Allowed users:\n%s\n", toString(mAllowedUsers).string());
 
         sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
         if (desc == NULL) {
@@ -1656,11 +2252,21 @@
             desc->dump(fd, /*verbosity*/2, /*indentation*/4);
         }
 
-        for (int i = 0; i < mNumberOfCameras; i++) {
-            result = String8::format("Camera %d static information:\n", i);
+        dumpEventLog(fd);
+
+        bool stateLocked = tryLock(mCameraStatesLock);
+        if (!stateLocked) {
+            result = String8::format("CameraStates in use, may be deadlocked\n");
+            write(fd, result.string(), result.size());
+        }
+
+        for (auto& state : mCameraStates) {
+            String8 cameraId = state.first;
+            result = String8::format("Camera %s information:\n", cameraId.string());
             camera_info info;
 
-            status_t rc = mModule->get_camera_info(i, &info);
+            // TODO: Change getCameraInfo + HAL to use String cameraIds
+            status_t rc = mModule->getCameraInfo(cameraIdToInt(cameraId), &info);
             if (rc != OK) {
                 result.appendFormat("  Error reading static information!\n");
                 write(fd, result.string(), result.size());
@@ -1669,13 +2275,24 @@
                         info.facing == CAMERA_FACING_BACK ? "BACK" : "FRONT");
                 result.appendFormat("  Orientation: %d\n", info.orientation);
                 int deviceVersion;
-                if (mModule->common.module_api_version <
-                        CAMERA_MODULE_API_VERSION_2_0) {
+                if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0) {
                     deviceVersion = CAMERA_DEVICE_API_VERSION_1_0;
                 } else {
                     deviceVersion = info.device_version;
                 }
-                result.appendFormat("  Device version: 0x%x\n", deviceVersion);
+
+                auto conflicting = state.second->getConflicting();
+                result.appendFormat("  Resource Cost: %d\n", state.second->getCost());
+                result.appendFormat("  Conflicting Devices:");
+                for (auto& id : conflicting) {
+                    result.appendFormat(" %s", id.string());
+                }
+                if (conflicting.size() == 0) {
+                    result.appendFormat(" NONE");
+                }
+                result.appendFormat("\n");
+
+                result.appendFormat("  Device version: %#x\n", deviceVersion);
                 if (deviceVersion >= CAMERA_DEVICE_API_VERSION_2_0) {
                     result.appendFormat("  Device static metadata:\n");
                     write(fd, result.string(), result.size());
@@ -1684,19 +2301,38 @@
                 } else {
                     write(fd, result.string(), result.size());
                 }
+
+                CameraParameters p = state.second->getShimParams();
+                if (!p.isEmpty()) {
+                    result = String8::format("  Camera1 API shim is using parameters:\n        ");
+                    write(fd, result.string(), result.size());
+                    p.dump(fd, args);
+                }
             }
 
-            sp<BasicClient> client = mClient[i].promote();
-            if (client == 0) {
-                result = String8::format("  Device is closed, no client instance\n");
+            auto clientDescriptor = mActiveClientManager.get(cameraId);
+            if (clientDescriptor == nullptr) {
+                result = String8::format("  Device %s is closed, no client instance\n",
+                        cameraId.string());
                 write(fd, result.string(), result.size());
                 continue;
             }
             hasClient = true;
-            result = String8::format("  Device is open. Client instance dump:\n");
+            result = String8::format("  Device %s is open. Client instance dump:\n\n",
+                    cameraId.string());
+            result.appendFormat("Client priority level: %d\n", clientDescriptor->getPriority());
+            result.appendFormat("Client PID: %d\n", clientDescriptor->getOwnerId());
+
+            auto client = clientDescriptor->getValue();
+            result.appendFormat("Client package: %s\n",
+                    String8(client->getPackageName()).string());
             write(fd, result.string(), result.size());
+
             client->dump(fd, args);
         }
+
+        if (stateLocked) mCameraStatesLock.unlock();
+
         if (!hasClient) {
             result = String8::format("\nNo active camera clients yet.\n");
             write(fd, result.string(), result.size());
@@ -1720,112 +2356,143 @@
                 write(fd, result.string(), result.size());
             }
         }
-
     }
     return NO_ERROR;
 }
 
-/*virtual*/void CameraService::binderDied(
-    const wp<IBinder> &who) {
+void CameraService::dumpEventLog(int fd) {
+    String8 result = String8("\nPrior client events (most recent at top):\n");
+
+    Mutex::Autolock l(mLogLock);
+    for (const auto& msg : mEventLog) {
+        result.appendFormat("  %s\n", msg.string());
+    }
+
+    if (mEventLog.size() == DEFAULT_EVENT_LOG_LENGTH) {
+        result.append("  ...\n");
+    } else if (mEventLog.size() == 0) {
+        result.append("  [no events yet]\n");
+    }
+    result.append("\n");
+
+    write(fd, result.string(), result.size());
+}
+
+void CameraService::handleTorchClientBinderDied(const wp<IBinder> &who) {
+    Mutex::Autolock al(mTorchClientMapMutex);
+    for (size_t i = 0; i < mTorchClientMap.size(); i++) {
+        if (mTorchClientMap[i] == who) {
+            // turn off the torch mode that was turned on by dead client
+            String8 cameraId = mTorchClientMap.keyAt(i);
+            status_t res = mFlashlight->setTorchMode(cameraId, false);
+            if (res) {
+                ALOGE("%s: torch client died but couldn't turn off torch: "
+                    "%s (%d)", __FUNCTION__, strerror(-res), res);
+                return;
+            }
+            mTorchClientMap.removeItemsAt(i);
+            break;
+        }
+    }
+}
+
+/*virtual*/void CameraService::binderDied(const wp<IBinder> &who) {
 
     /**
-      * While tempting to promote the wp<IBinder> into a sp,
-      * it's actually not supported by the binder driver
+      * While tempting to promote the wp<IBinder> into a sp, it's actually not supported by the
+      * binder driver
       */
 
-    ALOGV("java clients' binder died");
+    logClientDied(getCallingPid(), String8("Binder died unexpectedly"));
 
-    sp<BasicClient> cameraClient = getClientByRemote(who);
+    // check torch client
+    handleTorchClientBinderDied(who);
 
-    if (cameraClient == 0) {
-        ALOGV("java clients' binder death already cleaned up (normal case)");
+    // check camera device client
+    if (!evictClientIdByRemote(who)) {
+        ALOGV("%s: Java client's binder death already cleaned up (normal case)", __FUNCTION__);
         return;
     }
 
-    ALOGW("Disconnecting camera client %p since the binder for it "
-          "died (this pid %d)", cameraClient.get(), getCallingPid());
-
-    cameraClient->disconnect();
-
+    ALOGE("%s: Java client's binder died, removing it from the list of active clients",
+            __FUNCTION__);
 }
 
-void CameraService::updateStatus(ICameraServiceListener::Status status,
-                                 int32_t cameraId,
-                                 const StatusVector *rejectSourceStates) {
-    // do not lock mServiceLock here or can get into a deadlock from
-    //  connect() -> ProClient::disconnect -> updateStatus
-    Mutex::Autolock lock(mStatusMutex);
-
-    ICameraServiceListener::Status oldStatus = mStatusList[cameraId];
-
-    mStatusList[cameraId] = status;
-
-    if (oldStatus != status) {
-        ALOGV("%s: Status has changed for camera ID %d from 0x%x to 0x%x",
-              __FUNCTION__, cameraId, (uint32_t)oldStatus, (uint32_t)status);
-
-        if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
-            (status != ICameraServiceListener::STATUS_PRESENT &&
-             status != ICameraServiceListener::STATUS_ENUMERATING)) {
-
-            ALOGW("%s: From NOT_PRESENT can only transition into PRESENT"
-                  " or ENUMERATING", __FUNCTION__);
-            mStatusList[cameraId] = oldStatus;
-            return;
-        }
-
-        if (rejectSourceStates != NULL) {
-            const StatusVector &rejectList = *rejectSourceStates;
-            StatusVector::const_iterator it = rejectList.begin();
-
-            /**
-             * Sometimes we want to conditionally do a transition.
-             * For example if a client disconnects, we want to go to PRESENT
-             * only if we weren't already in NOT_PRESENT or ENUMERATING.
-             */
-            for (; it != rejectList.end(); ++it) {
-                if (oldStatus == *it) {
-                    ALOGV("%s: Rejecting status transition for Camera ID %d, "
-                          " since the source state was was in one of the bad "
-                          " states.", __FUNCTION__, cameraId);
-                    mStatusList[cameraId] = oldStatus;
-                    return;
-                }
-            }
-        }
-
-        /**
-          * ProClients lose their exclusive lock.
-          * - Done before the CameraClient can initialize the HAL device,
-          *   since we want to be able to close it before they get to initialize
-          */
-        if (status == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
-            Vector<wp<ProClient> > proClients(mProClientList[cameraId]);
-            Vector<wp<ProClient> >::const_iterator it;
-
-            for (it = proClients.begin(); it != proClients.end(); ++it) {
-                sp<ProClient> proCl = it->promote();
-                if (proCl.get() != NULL) {
-                    proCl->onExclusiveLockStolen();
-                }
-            }
-        }
-
-        Vector<sp<ICameraServiceListener> >::const_iterator it;
-        for (it = mListenerList.begin(); it != mListenerList.end(); ++it) {
-            (*it)->onStatusChanged(status, cameraId);
-        }
-    }
+void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId) {
+    updateStatus(status, cameraId, {});
 }
 
-ICameraServiceListener::Status CameraService::getStatus(int cameraId) const {
-    if (cameraId < 0 || cameraId >= MAX_CAMERAS) {
-        ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
-        return ICameraServiceListener::STATUS_UNKNOWN;
+void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
+        std::initializer_list<ICameraServiceListener::Status> rejectSourceStates) {
+    // Do not lock mServiceLock here or can get into a deadlock from
+    // connect() -> disconnect -> updateStatus
+
+    auto state = getCameraState(cameraId);
+
+    if (state == nullptr) {
+        ALOGW("%s: Could not update the status for %s, no such device exists", __FUNCTION__,
+                cameraId.string());
+        return;
     }
 
-    Mutex::Autolock al(mStatusMutex);
-    return mStatusList[cameraId];
+    // Update the status for this camera state, then send the onStatusChangedCallbacks to each
+    // of the listeners with both the mStatusLock and mStatusListenerLock held
+    state->updateStatus(status, cameraId, rejectSourceStates, [this]
+            (const String8& cameraId, ICameraServiceListener::Status status) {
+
+            if (status != ICameraServiceListener::STATUS_ENUMERATING) {
+                // Update torch status if it has a flash unit.
+                Mutex::Autolock al(mTorchStatusMutex);
+                ICameraServiceListener::TorchStatus torchStatus;
+                if (getTorchStatusLocked(cameraId, &torchStatus) !=
+                        NAME_NOT_FOUND) {
+                    ICameraServiceListener::TorchStatus newTorchStatus =
+                            status == ICameraServiceListener::STATUS_PRESENT ?
+                            ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF :
+                            ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
+                    if (torchStatus != newTorchStatus) {
+                        onTorchStatusChangedLocked(cameraId, newTorchStatus);
+                    }
+                }
+            }
+
+            Mutex::Autolock lock(mStatusListenerLock);
+
+            for (auto& listener : mListenerList) {
+                // TODO: Refactor status listeners to use strings for Camera IDs and remove this.
+                int id = cameraIdToInt(cameraId);
+                if (id != -1) listener->onStatusChanged(status, id);
+            }
+        });
+}
+
+status_t CameraService::getTorchStatusLocked(
+        const String8& cameraId,
+        ICameraServiceListener::TorchStatus *status) const {
+    if (!status) {
+        return BAD_VALUE;
+    }
+    ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
+    if (index == NAME_NOT_FOUND) {
+        // invalid camera ID or the camera doesn't have a flash unit
+        return NAME_NOT_FOUND;
+    }
+
+    *status = mTorchStatusMap.valueAt(index);
+    return OK;
+}
+
+status_t CameraService::setTorchStatusLocked(const String8& cameraId,
+        ICameraServiceListener::TorchStatus status) {
+    ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
+    if (index == NAME_NOT_FOUND) {
+        return BAD_VALUE;
+    }
+    ICameraServiceListener::TorchStatus& item =
+            mTorchStatusMap.editValueAt(index);
+    item = status;
+
+    return OK;
 }
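A brief hedged sketch of the calling convention for these *Locked torch helpers, mirroring the status-update lambda earlier in this file; the particular state transition is only an example.

    // Illustrative sketch only: mTorchStatusMutex must be held around the *Locked calls.
    {
        Mutex::Autolock al(mTorchStatusMutex);
        ICameraServiceListener::TorchStatus torchStatus;
        if (getTorchStatusLocked(cameraId, &torchStatus) == OK &&
                torchStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
            setTorchStatusLocked(cameraId, ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE);
        }
    }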
 
 }; // namespace android
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 126d8d9..7f4d43f 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -17,6 +17,7 @@
 #ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 #define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
 
+#include <cutils/multiuser.h>
 #include <utils/Vector.h>
 #include <utils/KeyedVector.h>
 #include <binder/AppOpsManager.h>
@@ -27,8 +28,6 @@
 
 #include <camera/ICamera.h>
 #include <camera/ICameraClient.h>
-#include <camera/IProCameraUser.h>
-#include <camera/IProCameraCallbacks.h>
 #include <camera/camera2/ICameraDeviceUser.h>
 #include <camera/camera2/ICameraDeviceCallbacks.h>
 #include <camera/VendorTagDescriptor.h>
@@ -36,9 +35,18 @@
 #include <camera/CameraParameters.h>
 
 #include <camera/ICameraServiceListener.h>
+#include "CameraFlashlight.h"
 
-/* This needs to be increased if we can have more cameras */
-#define MAX_CAMERAS 2
+#include "common/CameraModule.h"
+#include "media/RingBuffer.h"
+#include "utils/AutoConditionLock.h"
+#include "utils/ClientManager.h"
+
+#include <set>
+#include <string>
+#include <map>
+#include <memory>
+#include <utility>
 
 namespace android {
 
@@ -58,6 +66,24 @@
     class Client;
     class BasicClient;
 
+    // The effective API level.  The Camera2 API running in LEGACY mode counts as API_1.
+    enum apiLevel {
+        API_1 = 1,
+        API_2 = 2
+    };
+
+    // Process state (mirrors frameworks/base/core/java/android/app/ActivityManager.java)
+    static const int PROCESS_STATE_NONEXISTENT = -1;
+
+    // 3 second busy timeout when other clients are connecting
+    static const nsecs_t DEFAULT_CONNECT_TIMEOUT_NS = 3000000000;
+
+    // 1 second busy timeout when other clients are disconnecting
+    static const nsecs_t DEFAULT_DISCONNECT_TIMEOUT_NS = 1000000000;
+
+    // Default number of messages to store in eviction log
+    static const size_t DEFAULT_EVENT_LOG_LENGTH = 100;
+
     // Implementation of BinderService<T>
     static char const* getServiceName() { return "media.camera"; }
 
@@ -66,12 +92,17 @@
 
     /////////////////////////////////////////////////////////////////////
     // HAL Callbacks
-    virtual void        onDeviceStatusChanged(int cameraId,
-                                              int newStatus);
+    virtual void        onDeviceStatusChanged(camera_device_status_t cameraId,
+                                              camera_device_status_t newStatus);
+    virtual void        onTorchStatusChanged(const String8& cameraId,
+                                             ICameraServiceListener::TorchStatus
+                                                   newStatus);
 
     /////////////////////////////////////////////////////////////////////
     // ICameraService
+    virtual int32_t     getNumberOfCameras(int type);
     virtual int32_t     getNumberOfCameras();
+
     virtual status_t    getCameraInfo(int cameraId,
                                       struct CameraInfo* cameraInfo);
     virtual status_t    getCameraCharacteristics(int cameraId,
@@ -88,11 +119,6 @@
             /*out*/
             sp<ICamera>& device);
 
-    virtual status_t connectPro(const sp<IProCameraCallbacks>& cameraCb,
-            int cameraId, const String16& clientPackageName, int clientUid,
-            /*out*/
-            sp<IProCameraUser>& device);
-
     virtual status_t connectDevice(
             const sp<ICameraDeviceCallbacks>& cameraCb,
             int cameraId,
@@ -110,6 +136,11 @@
             /*out*/
             String16* parameters);
 
+    virtual status_t    setTorchMode(const String16& cameraId, bool enabled,
+            const sp<IBinder>& clientBinder);
+
+    virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length);
+
     // OK = supports api of that version, -EOPNOTSUPP = does not support
     virtual status_t    supportsCameraApi(
             int cameraId, int apiVersion);
@@ -122,7 +153,6 @@
 
     /////////////////////////////////////////////////////////////////////
     // Client functionality
-    virtual void        removeClientByRemote(const wp<IBinder>& remoteBinder);
 
     enum sound_kind {
         SOUND_SHUTTER = 0,
@@ -140,33 +170,43 @@
 
     /////////////////////////////////////////////////////////////////////
     // Shared utilities
-    static status_t     filterOpenErrorCode(status_t err);
     static status_t     filterGetInfoErrorCode(status_t err);
 
     /////////////////////////////////////////////////////////////////////
     // CameraClient functionality
 
-    // returns plain pointer of client. Note that mClientLock should be acquired to
-    // prevent the client from destruction. The result can be NULL.
-    virtual BasicClient* getClientByIdUnsafe(int cameraId);
-    virtual Mutex*      getClientLockById(int cameraId);
-
     class BasicClient : public virtual RefBase {
     public:
-        virtual status_t    initialize(camera_module_t *module) = 0;
+        virtual status_t    initialize(CameraModule *module) = 0;
         virtual void        disconnect();
 
         // because we can't virtually inherit IInterface, which breaks
         // virtual inheritance
         virtual sp<IBinder> asBinderWrapper() = 0;
 
-        // Return the remote callback binder object (e.g. IProCameraCallbacks)
+        // Return the remote callback binder object (e.g. ICameraDeviceCallbacks)
         sp<IBinder>         getRemote() {
             return mRemoteBinder;
         }
 
         virtual status_t    dump(int fd, const Vector<String16>& args) = 0;
 
+        // Return the package name for this client
+        virtual String16 getPackageName() const;
+
+        // Notify client about a fatal error
+        virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                const CaptureResultExtras& resultExtras) = 0;
+
+        // Get the UID of the application client using this
+        virtual uid_t getClientUid() const;
+
+        // Get the PID of the application client using this
+        virtual int getClientPid() const;
+
+        // Check what API level is used for this client. This is used to determine which
+        // superclass this can be cast to.
+        virtual bool canCastToApiClient(apiLevel level) const;
     protected:
         BasicClient(const sp<CameraService>& cameraService,
                 const sp<IBinder>& remoteCallback,
@@ -193,6 +233,7 @@
         pid_t                           mClientPid;
         uid_t                           mClientUid;      // immutable after constructor
         pid_t                           mServicePid;     // immutable after constructor
+        bool                            mDisconnected;
 
         // - The app-side Binder interface to receive callbacks from us
         sp<IBinder>                     mRemoteBinder;   // immutable after constructor
@@ -201,10 +242,6 @@
         status_t                        startCameraOps();
         status_t                        finishCameraOps();
 
-        // Notify client about a fatal error
-        virtual void                    notifyError(
-                ICameraDeviceCallbacks::CameraErrorCode errorCode,
-                const CaptureResultExtras& resultExtras) = 0;
     private:
         AppOpsManager                   mAppOpsManager;
 
@@ -276,14 +313,16 @@
             return asBinder(this);
         }
 
-    protected:
-        static Mutex*        getClientLockFromCookie(void* user);
-        // convert client from cookie. Client lock should be acquired before getting Client.
-        static Client*       getClientFromCookie(void* user);
-
         virtual void         notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
                                          const CaptureResultExtras& resultExtras);
 
+        // Check what API level is used for this client. This is used to determine which
+        // superclass this can be cast to.
+        virtual bool canCastToApiClient(apiLevel level) const;
+    protected:
+        // Convert client from cookie.
+        static sp<CameraService::Client> getClientFromCookie(void* user);
+
         // Initialized in constructor
 
         // - The app-side Binder interface to receive callbacks from us
@@ -291,92 +330,294 @@
 
     }; // class Client
 
-    class ProClient : public BnProCameraUser, public BasicClient {
+    /**
+     * A listener class that implements the LISTENER interface for use with a ClientManager, and
+     * implements the following methods:
+     *    void onClientRemoved(const ClientDescriptor<KEY, VALUE>& descriptor);
+     *    void onClientAdded(const ClientDescriptor<KEY, VALUE>& descriptor);
+     */
+    class ClientEventListener {
     public:
-        typedef IProCameraCallbacks TCamCallbacks;
+        void onClientAdded(const resource_policy::ClientDescriptor<String8,
+                sp<CameraService::BasicClient>>& descriptor);
+        void onClientRemoved(const resource_policy::ClientDescriptor<String8,
+                sp<CameraService::BasicClient>>& descriptor);
+    }; // class ClientEventListener
 
-        ProClient(const sp<CameraService>& cameraService,
-                const sp<IProCameraCallbacks>& remoteCallback,
-                const String16& clientPackageName,
-                int cameraId,
-                int cameraFacing,
-                int clientPid,
-                uid_t clientUid,
-                int servicePid);
+    typedef std::shared_ptr<resource_policy::ClientDescriptor<String8,
+            sp<CameraService::BasicClient>>> DescriptorPtr;
 
-        virtual ~ProClient();
+    /**
+     * A container class for managing active camera clients that are using HAL devices.  Active
+     * clients are represented by ClientDescriptor objects that contain strong pointers to the
+     * actual BasicClient subclass binder interface implementation.
+     *
+     * This class manages the eviction behavior for the camera clients.  See the parent class
+     * implementation in utils/ClientManager for the specifics of this behavior.
+     */
+    class CameraClientManager : public resource_policy::ClientManager<String8,
+            sp<CameraService::BasicClient>, ClientEventListener> {
+    public:
+        CameraClientManager();
+        virtual ~CameraClientManager();
 
-        const sp<IProCameraCallbacks>& getRemoteCallback() {
-            return mRemoteCallback;
-        }
+        /**
+         * Return a strong pointer to the active BasicClient for this camera ID, or an empty
+         * pointer if none exists.
+         */
+        sp<CameraService::BasicClient> getCameraClient(const String8& id) const;
 
-        /***
-            IProCamera implementation
-         ***/
-        virtual status_t      connect(const sp<IProCameraCallbacks>& callbacks)
-                                                                            = 0;
-        virtual status_t      exclusiveTryLock() = 0;
-        virtual status_t      exclusiveLock() = 0;
-        virtual status_t      exclusiveUnlock() = 0;
+        /**
+         * Return a string describing the current state.
+         */
+        String8 toString() const;
 
-        virtual bool          hasExclusiveLock() = 0;
+        /**
+         * Make a ClientDescriptor object wrapping the given BasicClient strong pointer.
+         */
+        static DescriptorPtr makeClientDescriptor(const String8& key, const sp<BasicClient>& value,
+                int32_t cost, const std::set<String8>& conflictingKeys, int32_t priority,
+                int32_t ownerId);
 
-        // Note that the callee gets a copy of the metadata.
-        virtual int           submitRequest(camera_metadata_t* metadata,
-                                            bool streaming = false) = 0;
-        virtual status_t      cancelRequest(int requestId) = 0;
+        /**
+         * Make a ClientDescriptor object wrapping the given BasicClient strong pointer with
+         * values initialized from a prior ClientDescriptor.
+         */
+        static DescriptorPtr makeClientDescriptor(const sp<BasicClient>& value,
+                const CameraService::DescriptorPtr& partial);
 
-        // Callbacks from camera service
-        virtual void          onExclusiveLockStolen() = 0;
-
-    protected:
-        virtual void          notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
-                                          const CaptureResultExtras& resultExtras);
-
-        sp<IProCameraCallbacks> mRemoteCallback;
-    }; // class ProClient
+    }; // class CameraClientManager
 
 private:
 
+    /**
+     * Container class for the state of each logical camera device, including: ID, status, and
+     * dependencies on other devices.  The mapping of camera ID -> state saved in mCameraStates
+     * represents the camera devices advertised by the HAL (and any USB devices, when we add
+     * those).
+     *
+     * This container does NOT represent an active camera client.  These are represented using
+     * the ClientDescriptors stored in mActiveClientManager.
+     */
+    class CameraState {
+    public:
+        /**
+         * Make a new CameraState and set the ID, cost, and conflicting devices using the values
+         * returned in the HAL's camera_info struct for each device.
+         */
+        CameraState(const String8& id, int cost, const std::set<String8>& conflicting);
+        virtual ~CameraState();
+
+        /**
+         * Return the status for this device.
+         *
+         * This method acquires mStatusLock.
+         */
+        ICameraServiceListener::Status getStatus() const;
+
+        /**
+         * This function updates the status for this camera device, unless the given status
+         * is in the given list of rejected status states, and executes the function passed in
+         * with a signature onStatusUpdatedLocked(const String8&, ICameraServiceListener::Status)
+         * if the status has changed.
+         *
+         * This method is idempotent, and will not result in the function passed to
+         * onStatusUpdatedLocked being called more than once for the same arguments.
+         * This method acquires mStatusLock.
+         */
+        template<class Func>
+        void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
+                std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+                Func onStatusUpdatedLocked);
+
+        /**
+         * Return the last set CameraParameters object generated from the information returned by
+         * the HAL for this device (or an empty CameraParameters object if none has been set).
+         */
+        CameraParameters getShimParams() const;
+
+        /**
+         * Set the CameraParameters for this device.
+         */
+        void setShimParams(const CameraParameters& params);
+
+        /**
+         * Return the resource_cost advertised by the HAL for this device.
+         */
+        int getCost() const;
+
+        /**
+         * Return a set of the IDs of conflicting devices advertised by the HAL for this device.
+         */
+        std::set<String8> getConflicting() const;
+
+        /**
+         * Return the ID of this camera device.
+         */
+        String8 getId() const;
+
+    private:
+        const String8 mId;
+        ICameraServiceListener::Status mStatus; // protected by mStatusLock
+        const int mCost;
+        std::set<String8> mConflicting;
+        mutable Mutex mStatusLock;
+        CameraParameters mShimParams;
+    }; // class CameraState
+
     // Delay-load the Camera HAL module
     virtual void onFirstRef();
 
-    // Step 1. Check if we can connect, before we acquire the service lock.
-    status_t            validateConnect(int cameraId,
-                                        /*inout*/
-                                        int& clientUid) const;
+    // Check if we can connect; must be called with mServiceLock held.
+    status_t validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid) const;
 
-    // Step 2. Check if we can connect, after we acquire the service lock.
-    bool                canConnectUnsafe(int cameraId,
-                                         const String16& clientPackageName,
-                                         const sp<IBinder>& remoteCallback,
-                                         /*out*/
-                                         sp<BasicClient> &client);
+    // Handle active client evictions, and update service state.
+    // Only call with mServiceLock held.
+    status_t handleEvictionsLocked(const String8& cameraId, int clientPid,
+        apiLevel effectiveApiLevel, const sp<IBinder>& remoteCallback, const String8& packageName,
+        /*out*/
+        sp<BasicClient>* client,
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>>* partial);
 
-    // When connection is successful, initialize client and track its death
-    status_t            connectFinishUnsafe(const sp<BasicClient>& client,
-                                            const sp<IBinder>& remoteCallback);
+    // Single implementation shared between the various connect calls
+    template<class CALLBACK, class CLIENT>
+    status_t connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId, int halVersion,
+            const String16& clientPackageName, int clientUid, apiLevel effectiveApiLevel,
+            bool legacyMode, bool shimUpdateOnly, /*out*/sp<CLIENT>& device);
 
-    virtual sp<BasicClient>  getClientByRemote(const wp<IBinder>& cameraClient);
-
+    // Lock guarding camera service state
     Mutex               mServiceLock;
-    // either a Client or CameraDeviceClient
-    wp<BasicClient>     mClient[MAX_CAMERAS];  // protected by mServiceLock
-    Mutex               mClientLock[MAX_CAMERAS]; // prevent Client destruction inside callbacks
+
+    // Condition to use with mServiceLock, used to handle simultaneous connect calls from clients
+    std::shared_ptr<WaitableMutexWrapper> mServiceLockWrapper;
+
+    // Return NO_ERROR if the device with a given ID can be connected to
+    status_t checkIfDeviceIsUsable(const String8& cameraId) const;
+
+    // Container for managing currently active application-layer clients
+    CameraClientManager mActiveClientManager;
+
+    // Mapping from camera ID -> state for each device, map is protected by mCameraStatesLock
+    std::map<String8, std::shared_ptr<CameraState>> mCameraStates;
+
+    // Mutex guarding mCameraStates map
+    mutable Mutex mCameraStatesLock;
+
+    // Circular buffer for storing event logging for dumps
+    RingBuffer<String8> mEventLog;
+    Mutex mLogLock;
+
+    // Currently allowed user IDs
+    std::set<userid_t> mAllowedUsers;
+
+    /**
+     * Check camera capabilities, such as support for basic color operation
+     */
+    int checkCameraCapabilities(int id, camera_info info, int *latestStrangeCameraId);
+
+    /**
+     * Get the camera state for a given camera id.
+     *
+     * This acquires mCameraStatesLock.
+     */
+    std::shared_ptr<CameraService::CameraState> getCameraState(const String8& cameraId) const;
+
+    /**
+     * Evict client whose remote binder has died.  Returns true if this client was in the active
+     * list and was disconnected.
+     *
+     * This method acquires mServiceLock.
+     */
+    bool evictClientIdByRemote(const wp<IBinder>& cameraClient);
+
+    /**
+     * Remove the given client from the active clients list; does not disconnect the client.
+     *
+     * This method acquires mServiceLock.
+     */
+    void removeByClient(const BasicClient* client);
+
+    /**
+     * Add new client to active clients list after conflicting clients have disconnected using the
+     * values set in the partial descriptor passed in to construct the actual client descriptor.
+     * This is typically called at the end of a connect call.
+     *
+     * This method must be called with mServiceLock held.
+     */
+    void finishConnectLocked(const sp<BasicClient>& client, const DescriptorPtr& desc);
+
+    /**
+     * Returns the integer corresponding to the given camera ID string, or -1 on failure.
+     */
+    static int cameraIdToInt(const String8& cameraId);
+
+    /**
+     * Remove a single client corresponding to the given camera id from the list of active clients.
+     * If none exists, return an empty strong pointer.
+     *
+     * This method must be called with mServiceLock held.
+     */
+    sp<CameraService::BasicClient> removeClientLocked(const String8& cameraId);
+
+    /**
+     * Handle a notification that the current device user has changed.
+     */
+    void doUserSwitch(const int32_t* newUserId, size_t length);
+
+    /**
+     * Add an event log message.
+     */
+    void logEvent(const char* event);
+
+    /**
+     * Add an event log message that a client has been disconnected.
+     */
+    void logDisconnected(const char* cameraId, int clientPid, const char* clientPackage);
+
+    /**
+     * Add an event log message that a client has been connected.
+     */
+    void logConnected(const char* cameraId, int clientPid, const char* clientPackage);
+
+    /**
+     * Add an event log message that a client's connect attempt has been rejected.
+     */
+    void logRejected(const char* cameraId, int clientPid, const char* clientPackage,
+            const char* reason);
+
+    /**
+     * Add an event log message that the current device user has been switched.
+     */
+    void logUserSwitch(const std::set<userid_t>& oldUserIds,
+        const std::set<userid_t>& newUserIds);
+
+    /**
+     * Add an event log message that a device has been removed by the HAL
+     */
+    void logDeviceRemoved(const char* cameraId, const char* reason);
+
+    /**
+     * Add an event log message that a device has been added by the HAL
+     */
+    void logDeviceAdded(const char* cameraId, const char* reason);
+
+    /**
+     * Add an event log message that a client has unexpectedly died.
+     */
+    void logClientDied(int clientPid, const char* reason);
+
+    /**
+     * Add an event log message that a serious service-level error has occurred
+     */
+    void logServiceError(const char* msg, int errorCode);
+
+    /**
+     * Dump the event log to an FD
+     */
+    void dumpEventLog(int fd);
+
     int                 mNumberOfCameras;
-
-    typedef wp<ProClient> weak_pro_client_ptr;
-    Vector<weak_pro_client_ptr> mProClientList[MAX_CAMERAS];
-
-    // needs to be called with mServiceLock held
-    sp<BasicClient>     findClientUnsafe(const wp<IBinder>& cameraClient, int& outIndex);
-    sp<ProClient>       findProClientUnsafe(
-                                     const wp<IBinder>& cameraCallbacksRemote);
-
-    // atomics to record whether the hardware is allocated to some client.
-    volatile int32_t    mBusy[MAX_CAMERAS];
-    void                setCameraBusy(int cameraId);
-    void                setCameraFree(int cameraId);
+    int                 mNumberOfNormalCameras;
 
     // sounds
     MediaPlayer*        newMediaPlayer(const char *file);
@@ -385,45 +626,62 @@
     sp<MediaPlayer>     mSoundPlayer[NUM_SOUNDS];
     int                 mSoundRef;  // reference count (release all MediaPlayer when 0)
 
-    camera_module_t *mModule;
+    CameraModule*     mModule;
 
-    Vector<sp<ICameraServiceListener> >
-                        mListenerList;
+    // Guarded by mStatusListenerLock
+    std::vector<sp<ICameraServiceListener>> mListenerList;
+    Mutex       mStatusListenerLock;
 
-    // guard only mStatusList and the broadcasting of ICameraServiceListener
-    mutable Mutex       mStatusMutex;
-    ICameraServiceListener::Status
-                        mStatusList[MAX_CAMERAS];
+    /**
+     * Update the status for the given camera id (if that device exists), and broadcast the
+     * status update to all current ICameraServiceListeners if the status has changed.  Any
+     * statuses in rejectedSourceStates will be ignored.
+     *
+     * This method must be idempotent.
+     * This method acquires mStatusLock and mStatusListenerLock.
+     */
+    void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
+            std::initializer_list<ICameraServiceListener::Status> rejectedSourceStates);
+    void updateStatus(ICameraServiceListener::Status status, const String8& cameraId);
 
-    // Read the current status (locks mStatusMutex)
-    ICameraServiceListener::Status
-                        getStatus(int cameraId) const;
+    // flashlight control
+    sp<CameraFlashlight> mFlashlight;
+    // guard mTorchStatusMap
+    Mutex                mTorchStatusMutex;
+    // guard mTorchClientMap, mTorchUidMap
+    Mutex                mTorchClientMapMutex;
+    // camera id -> torch status
+    KeyedVector<String8, ICameraServiceListener::TorchStatus> mTorchStatusMap;
+    // camera id -> torch client binder
+    // only store the last client that turns on each camera's torch mode
+    KeyedVector<String8, sp<IBinder>> mTorchClientMap;
+    // camera id -> [incoming uid, current uid] pair
+    std::map<String8, std::pair<int, int>> mTorchUidMap;
 
-    typedef Vector<ICameraServiceListener::Status> StatusVector;
-    // Broadcast the new status if it changed (locks the service mutex)
-    void                updateStatus(
-                            ICameraServiceListener::Status status,
-                            int32_t cameraId,
-                            const StatusVector *rejectSourceStates = NULL);
+    // check and handle if torch client's process has died
+    void handleTorchClientBinderDied(const wp<IBinder> &who);
+
+    // handle torch mode status change and invoke callbacks. mTorchStatusMutex
+    // should be locked.
+    void onTorchStatusChangedLocked(const String8& cameraId,
+            ICameraServiceListener::TorchStatus newStatus);
+
+    // get a camera's torch status. mTorchStatusMutex should be locked.
+    status_t getTorchStatusLocked(const String8 &cameraId,
+            ICameraServiceListener::TorchStatus *status) const;
+
+    // set a camera's torch status. mTorchStatusMutex should be locked.
+    status_t setTorchStatusLocked(const String8 &cameraId,
+            ICameraServiceListener::TorchStatus status);
 
     // IBinder::DeathRecipient implementation
     virtual void        binderDied(const wp<IBinder> &who);
 
     // Helpers
 
-    bool                isValidCameraId(int cameraId);
-
     bool                setUpVendorTags();
 
     /**
-     * A mapping of camera ids to CameraParameters returned by that camera device.
-     *
-     * This cache is used to generate CameraCharacteristic metadata when using
-     * the HAL1 shim.
-     */
-    KeyedVector<int, CameraParameters>    mShimParams;
-
-    /**
      * Initialize and cache the metadata used by the HAL1 shim for a given cameraId.
      *
      * Returns OK on success, or a negative error code.
@@ -446,25 +704,199 @@
      */
     status_t            generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
 
+    static int getCallingPid();
+
+    static int getCallingUid();
+
     /**
-     * Connect a new camera client.  This should only be used while holding the
-     * mutex for mServiceLock.
-     *
-     * Returns OK on success, or a negative error code.
+     * Get the current system time as a formatted string.
      */
-    status_t            connectHelperLocked(
-            /*out*/
-            sp<Client>& client,
-            /*in*/
-            const sp<ICameraClient>& cameraClient,
-            int cameraId,
-            const String16& clientPackageName,
-            int clientUid,
-            int callingPid,
-            int halVersion = CAMERA_HAL_API_VERSION_UNSPECIFIED,
-            bool legacyMode = false);
+    static String8 getFormattedCurrentTime();
+
+    /**
+     * Get the camera eviction priority from the current process state given by ActivityManager.
+     */
+    static int getCameraPriorityFromProcState(int procState);
+
+    static status_t makeClient(const sp<CameraService>& cameraService,
+            const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+            int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
+            int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
+            /*out*/sp<BasicClient>* client);
+
+    status_t checkCameraAccess(const String16& opPackageName);
+
+    static String8 toString(std::set<userid_t> intSet);
+
+    static void pingCameraServiceProxy();
+
 };
 
+template<class Func>
+void CameraService::CameraState::updateStatus(ICameraServiceListener::Status status,
+        const String8& cameraId,
+        std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+        Func onStatusUpdatedLocked) {
+    Mutex::Autolock lock(mStatusLock);
+    ICameraServiceListener::Status oldStatus = mStatus;
+    mStatus = status;
+
+    if (oldStatus == status) {
+        return;
+    }
+
+    ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
+            cameraId.string(), oldStatus, status);
+
+    if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
+        (status != ICameraServiceListener::STATUS_PRESENT &&
+         status != ICameraServiceListener::STATUS_ENUMERATING)) {
+
+        ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
+                __FUNCTION__);
+        mStatus = oldStatus;
+        return;
+    }
+
+    /**
+     * Sometimes we want to conditionally do a transition.
+     * For example if a client disconnects, we want to go to PRESENT
+     * only if we weren't already in NOT_PRESENT or ENUMERATING.
+     */
+    for (auto& rejectStatus : rejectSourceStates) {
+        if (oldStatus == rejectStatus) {
+            ALOGV("%s: Rejecting status transition for Camera ID %s,  since the source "
+                    "state was was in one of the bad states.", __FUNCTION__, cameraId.string());
+            mStatus = oldStatus;
+            return;
+        }
+    }
+
+    onStatusUpdatedLocked(cameraId, status);
+}
+
+
+template<class CALLBACK, class CLIENT>
+status_t CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+        int halVersion, const String16& clientPackageName, int clientUid,
+        apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
+        /*out*/sp<CLIENT>& device) {
+    status_t ret = NO_ERROR;
+    String8 clientName8(clientPackageName);
+    int clientPid = getCallingPid();
+
+    ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
+            "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
+            (halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
+            static_cast<int>(effectiveApiLevel));
+
+    sp<CLIENT> client = nullptr;
+    {
+        // Acquire mServiceLock and prevent other clients from connecting
+        std::unique_ptr<AutoConditionLock> lock =
+                AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
+
+        if (lock == nullptr) {
+            ALOGE("CameraService::connect X (PID %d) rejected (too many other clients connecting)."
+                    , clientPid);
+            return -EBUSY;
+        }
+
+        // Enforce client permissions and do basic sanity checks
+        if((ret = validateConnectLocked(cameraId, /*inout*/clientUid)) != NO_ERROR) {
+            return ret;
+        }
+
+        // Check the shim parameters after acquiring lock, if they have already been updated and
+        // we were doing a shim update, return immediately
+        if (shimUpdateOnly) {
+            auto cameraState = getCameraState(cameraId);
+            if (cameraState != nullptr) {
+                if (!cameraState->getShimParams().isEmpty()) return NO_ERROR;
+            }
+        }
+
+        sp<BasicClient> clientTmp = nullptr;
+        std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
+        if ((ret = handleEvictionsLocked(cameraId, clientPid, effectiveApiLevel,
+                IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
+                /*out*/&partial)) != NO_ERROR) {
+            return ret;
+        }
+
+        if (clientTmp.get() != nullptr) {
+            // Handle special case for API1 MediaRecorder where the existing client is returned
+            device = static_cast<CLIENT*>(clientTmp.get());
+            return NO_ERROR;
+        }
+
+        // give flashlight a chance to close devices if necessary.
+        mFlashlight->prepareDeviceOpen(cameraId);
+
+        // TODO: Update getDeviceVersion + HAL interface to use strings for Camera IDs
+        int id = cameraIdToInt(cameraId);
+        if (id == -1) {
+            ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
+                    cameraId.string());
+            return BAD_VALUE;
+        }
+
+        int facing = -1;
+        int deviceVersion = getDeviceVersion(id, /*out*/&facing);
+        sp<BasicClient> tmp = nullptr;
+        if((ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+                clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
+                /*out*/&tmp)) != NO_ERROR) {
+            return ret;
+        }
+        client = static_cast<CLIENT*>(tmp.get());
+
+        LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
+                __FUNCTION__);
+
+        if ((ret = client->initialize(mModule)) != OK) {
+            ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
+            return ret;
+        }
+
+        sp<IBinder> remoteCallback = client->getRemote();
+        if (remoteCallback != nullptr) {
+            remoteCallback->linkToDeath(this);
+        }
+
+        // Update shim parameters for legacy clients
+        if (effectiveApiLevel == API_1) {
+            // Assume we have always received a Client subclass for API1
+            sp<Client> shimClient = reinterpret_cast<Client*>(client.get());
+            String8 rawParams = shimClient->getParameters();
+            CameraParameters params(rawParams);
+
+            auto cameraState = getCameraState(cameraId);
+            if (cameraState != nullptr) {
+                cameraState->setShimParams(params);
+            } else {
+                ALOGE("%s: Cannot update shim parameters for camera %s, no such device exists.",
+                        __FUNCTION__, cameraId.string());
+            }
+        }
+
+        if (shimUpdateOnly) {
+            // If only updating legacy shim parameters, immediately disconnect client
+            mServiceLock.unlock();
+            client->disconnect();
+            mServiceLock.lock();
+        } else {
+            // Otherwise, add client to active clients list
+            finishConnectLocked(client, partial);
+        }
+    } // lock is destroyed, allow further connect calls
+
+    // Important: release the mutex here so the client can call back into the service from its
+    // destructor (can be at the end of the call)
+    device = client;
+    return NO_ERROR;
+}
+
 } // namespace android
 
 #endif
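
The CameraState::updateStatus template above commits a transition only when the
old status is not one of the caller-supplied reject states, so, for example, a
disconnect can fall back to PRESENT without clobbering NOT_PRESENT or
ENUMERATING. A minimal standalone sketch of that guard pattern follows; the
Status enum and the onChanged callback are illustrative stand-ins, not the
service's actual types.

    #include <functional>
    #include <initializer_list>
    #include <mutex>

    enum class Status { NOT_PRESENT, PRESENT, ENUMERATING, NOT_AVAILABLE };

    class GuardedStatus {
      public:
        // Apply 'next' unless the current status is one of the rejected source
        // states; fire the callback only when a change was actually committed.
        void update(Status next, std::initializer_list<Status> rejectedSources,
                    const std::function<void(Status)>& onChanged) {
            std::lock_guard<std::mutex> lock(mLock);
            if (mStatus == next) return;              // idempotent: same status is a no-op
            for (Status rejected : rejectedSources) {
                if (mStatus == rejected) return;      // conditional transition rejected
            }
            mStatus = next;
            onChanged(next);                          // the real service broadcasts to
                                                      // its status listeners here
        }
      private:
        std::mutex mLock;
        Status mStatus = Status::NOT_PRESENT;
    };

A call such as update(Status::PRESENT, {Status::NOT_PRESENT, Status::ENUMERATING}, cb)
mirrors the disconnect case described in the comment block above.
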
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 0ed5586..36e99dd 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -67,7 +67,7 @@
     mLegacyMode = legacyMode;
 }
 
-status_t Camera2Client::initialize(camera_module_t *module)
+status_t Camera2Client::initialize(CameraModule *module)
 {
     ATRACE_CALL();
     ALOGV("%s: Initializing client for camera %d", __FUNCTION__, mCameraId);
@@ -121,7 +121,8 @@
         }
         case CAMERA_DEVICE_API_VERSION_3_0:
         case CAMERA_DEVICE_API_VERSION_3_1:
-        case CAMERA_DEVICE_API_VERSION_3_2: {
+        case CAMERA_DEVICE_API_VERSION_3_2:
+        case CAMERA_DEVICE_API_VERSION_3_3: {
             sp<ZslProcessor3> zslProc =
                     new ZslProcessor3(this, mCaptureSequencer);
             mZslProcessor = zslProc;
@@ -163,11 +164,9 @@
 
 status_t Camera2Client::dump(int fd, const Vector<String16>& args) {
     String8 result;
-    result.appendFormat("Client2[%d] (%p) Client: %s PID: %d, dump:\n",
-            mCameraId,
+    result.appendFormat("Client2[%d] (%p) PID: %d, dump:\n", mCameraId,
             (getRemoteCallback() != NULL ?
                     (IInterface::asBinder(getRemoteCallback()).get()) : NULL),
-            String8(mClientPackageName).string(),
             mClientPid);
     result.append("  State: ");
 #define CASE_APPEND_ENUM(x) case x: result.append(#x "\n"); break;
@@ -530,7 +529,7 @@
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
 
     sp<IBinder> binder;
-    sp<ANativeWindow> window;
+    sp<Surface> window;
     if (bufferProducer != 0) {
         binder = IInterface::asBinder(bufferProducer);
         // Using controlledByApp flag to ensure that the buffer queue remains in
@@ -542,7 +541,7 @@
 }
 
 status_t Camera2Client::setPreviewWindowL(const sp<IBinder>& binder,
-        sp<ANativeWindow> window) {
+        sp<Surface> window) {
     ATRACE_CALL();
     status_t res;
 
@@ -667,7 +666,7 @@
     status_t res;
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
 
-    sp<ANativeWindow> window;
+    sp<Surface> window;
     if (callbackProducer != 0) {
         window = new Surface(callbackProducer);
     }
@@ -765,16 +764,22 @@
     // first capture latency on HAL3 devices, and potentially on some HAL2
     // devices. So create it unconditionally at preview start. As a drawback,
     // this increases gralloc memory consumption for applications that don't
-    // ever take a picture.
+    // ever take a picture. Do not enter this mode when the jpeg stream would
+    // slow down preview.
     // TODO: Find a better compromise, though this likely would involve HAL
     // changes.
     int lastJpegStreamId = mJpegProcessor->getStreamId();
-    res = updateProcessorStream(mJpegProcessor, params);
-    if (res != OK) {
-        ALOGE("%s: Camera %d: Can't pre-configure still image "
-                "stream: %s (%d)",
-                __FUNCTION__, mCameraId, strerror(-res), res);
-        return res;
+    // If the jpeg stream would slow down preview, make sure we remove it before starting preview
+    if (params.slowJpegMode) {
+        mJpegProcessor->deleteStream();
+    } else {
+        res = updateProcessorStream(mJpegProcessor, params);
+        if (res != OK) {
+            ALOGE("%s: Camera %d: Can't pre-configure still image "
+                    "stream: %s (%d)",
+                    __FUNCTION__, mCameraId, strerror(-res), res);
+            return res;
+        }
     }
     bool jpegStreamChanged = mJpegProcessor->getStreamId() != lastJpegStreamId;
 
@@ -1454,9 +1459,12 @@
         }
 
         ALOGV("%s: Camera %d: Starting picture capture", __FUNCTION__, mCameraId);
-
         int lastJpegStreamId = mJpegProcessor->getStreamId();
-        res = updateProcessorStream(mJpegProcessor, l.mParameters);
+        // In slowJpegMode, the jpeg stream is created in CaptureSequencer before capturing
+        if (!l.mParameters.slowJpegMode) {
+            res = updateProcessorStream(mJpegProcessor, l.mParameters);
+        }
+
         // If video snapshot fail to configureStream, try override video snapshot size to
         // video size
         if (res == BAD_VALUE && l.mParameters.state == Parameters::VIDEO_SNAPSHOT) {
@@ -1560,6 +1568,9 @@
             return commandPingL();
         case CAMERA_CMD_SET_VIDEO_BUFFER_COUNT:
             return commandSetVideoBufferCountL(arg1);
+        case CAMERA_CMD_SET_VIDEO_FORMAT:
+            return commandSetVideoFormatL(arg1,
+                    static_cast<android_dataspace>(arg2));
         default:
             ALOGE("%s: Unknown command %d (arguments %d, %d)",
                     __FUNCTION__, cmd, arg1, arg2);
@@ -1711,6 +1722,51 @@
     return mStreamingProcessor->setRecordingBufferCount(count);
 }
 
+status_t Camera2Client::commandSetVideoFormatL(int format,
+        android_dataspace dataspace) {
+    if (recordingEnabledL()) {
+        ALOGE("%s: Camera %d: Error setting video format after "
+                "recording was started", __FUNCTION__, mCameraId);
+        return INVALID_OPERATION;
+    }
+
+    return mStreamingProcessor->setRecordingFormat(format, dataspace);
+}
+
+void Camera2Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+        const CaptureResultExtras& resultExtras) {
+    int32_t err = CAMERA_ERROR_UNKNOWN;
+    switch(errorCode) {
+        case ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED:
+            err = CAMERA_ERROR_RELEASED;
+            break;
+        case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+            err = CAMERA_ERROR_UNKNOWN;
+            break;
+        case ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE:
+            err = CAMERA_ERROR_SERVER_DIED;
+            break;
+        case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+        case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+        case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+            ALOGW("%s: Received recoverable error %d from HAL - ignoring, requestId %" PRId32,
+                    __FUNCTION__, errorCode, resultExtras.requestId);
+            return;
+        default:
+            err = CAMERA_ERROR_UNKNOWN;
+            break;
+    }
+
+    ALOGE("%s: Error condition %d reported by HAL, requestId %" PRId32, __FUNCTION__, errorCode,
+              resultExtras.requestId);
+
+    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
+    if (l.mRemoteCallback != nullptr) {
+        l.mRemoteCallback->notifyCallback(CAMERA_MSG_ERROR, err, 0);
+    }
+}
+
+
 /** Device-related methods */
 void Camera2Client::notifyAutoFocus(uint8_t newState, int triggerId) {
     ALOGV("%s: Autofocus state now %d, last trigger %d",
@@ -1848,6 +1904,16 @@
     mCaptureSequencer->notifyAutoExposure(newState, triggerId);
 }
 
+void Camera2Client::notifyShutter(const CaptureResultExtras& resultExtras,
+                                  nsecs_t timestamp) {
+    (void)resultExtras;
+    (void)timestamp;
+
+    ALOGV("%s: Shutter notification for request id %" PRId32 " at time %" PRId64,
+            __FUNCTION__, resultExtras.requestId, timestamp);
+    mCaptureSequencer->notifyShutter(resultExtras, timestamp);
+}
+
 camera2::SharedParameters& Camera2Client::getParameters() {
     return mParameters;
 }
@@ -1886,6 +1952,39 @@
     return mStreamingProcessor->stopStream();
 }
 
+status_t Camera2Client::createJpegStreamL(Parameters &params) {
+    status_t res = OK;
+    int lastJpegStreamId = mJpegProcessor->getStreamId();
+    if (lastJpegStreamId != NO_STREAM) {
+        return INVALID_OPERATION;
+    }
+
+    res = mStreamingProcessor->togglePauseStream(/*pause*/true);
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Can't pause streaming: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    res = mDevice->flush();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Unable flush device: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+        return res;
+    }
+
+    // Ideally we don't need this, but current camera device
+    // status tracking mechanism demands it.
+    res = mDevice->waitUntilDrained();
+    if (res != OK) {
+        ALOGE("%s: Camera %d: Waiting device drain failed: %s (%d)",
+                __FUNCTION__, mCameraId, strerror(-res), res);
+    }
+
+    res = updateProcessorStream(mJpegProcessor, params);
+    return res;
+}
+
 const int32_t Camera2Client::kPreviewRequestIdStart;
 const int32_t Camera2Client::kPreviewRequestIdEnd;
 const int32_t Camera2Client::kRecordingRequestIdStart;
@@ -1959,7 +2058,7 @@
             return width * height * 2;
         case HAL_PIXEL_FORMAT_RGBA_8888:
             return width * height * 4;
-        case HAL_PIXEL_FORMAT_RAW_SENSOR:
+        case HAL_PIXEL_FORMAT_RAW16:
             return width * height * 2;
         default:
             ALOGE("%s: Unknown preview format: %x",
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index d68bb29..d50bf63 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -77,6 +77,8 @@
     virtual status_t        setParameters(const String8& params);
     virtual String8         getParameters() const;
     virtual status_t        sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
+    virtual void            notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+                                        const CaptureResultExtras& resultExtras);
 
     /**
      * Interface used by CameraService
@@ -94,7 +96,7 @@
 
     virtual ~Camera2Client();
 
-    status_t initialize(camera_module_t *module);
+    status_t initialize(CameraModule *module);
 
     virtual status_t dump(int fd, const Vector<String16>& args);
 
@@ -104,6 +106,8 @@
 
     virtual void notifyAutoFocus(uint8_t newState, int triggerId);
     virtual void notifyAutoExposure(uint8_t newState, int triggerId);
+    virtual void notifyShutter(const CaptureResultExtras& resultExtras,
+                               nsecs_t timestamp);
 
     /**
      * Interface used by independent components of Camera2Client.
@@ -125,6 +129,9 @@
 
     status_t stopStream();
 
+    // Used by slowJpegMode to create the jpeg stream once the precapture sequence is done
+    status_t createJpegStreamL(camera2::Parameters &params);
+
     static size_t calculateBufferSize(int width, int height,
             int format, int stride);
 
@@ -141,12 +148,15 @@
     static const char* kAutofocusLabel;
     static const char* kTakepictureLabel;
 
+    // Used with stream IDs
+    static const int NO_STREAM = -1;
+
 private:
     /** ICamera interface-related private members */
     typedef camera2::Parameters Parameters;
 
     status_t setPreviewWindowL(const sp<IBinder>& binder,
-            sp<ANativeWindow> window);
+            sp<Surface> window);
     status_t startPreviewL(Parameters &params, bool restart);
     void     stopPreviewL();
     status_t startRecordingL(Parameters &params, bool restart);
@@ -163,6 +173,7 @@
     status_t commandEnableFocusMoveMsgL(bool enable);
     status_t commandPingL();
     status_t commandSetVideoBufferCountL(size_t count);
+    status_t commandSetVideoFormatL(int format, android_dataspace dataSpace);
 
     // Current camera device configuration
     camera2::SharedParameters mParameters;
@@ -172,9 +183,6 @@
     void     setPreviewCallbackFlagL(Parameters &params, int flag);
     status_t updateRequests(Parameters &params);
 
-    // Used with stream IDs
-    static const int NO_STREAM = -1;
-
     template <typename ProcessorT>
     status_t updateProcessorStream(sp<ProcessorT> processor, Parameters params);
     template <typename ProcessorT,
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index bbb2fe0..e552633 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -59,7 +59,7 @@
     LOG1("CameraClient::CameraClient X (pid %d, id %d)", callingPid, cameraId);
 }
 
-status_t CameraClient::initialize(camera_module_t *module) {
+status_t CameraClient::initialize(CameraModule *module) {
     int callingPid = getCallingPid();
     status_t res;
 
@@ -75,7 +75,7 @@
     snprintf(camera_device_name, sizeof(camera_device_name), "%d", mCameraId);
 
     mHardware = new CameraHardwareInterface(camera_device_name);
-    res = mHardware->initialize(&module->common);
+    res = mHardware->initialize(module);
     if (res != OK) {
         ALOGE("%s: Camera %d: unable to initialize device: %s (%d)",
                 __FUNCTION__, mCameraId, strerror(-res), res);
@@ -99,12 +99,7 @@
 
 // tear down the client
 CameraClient::~CameraClient() {
-    // this lock should never be NULL
-    Mutex* lock = mCameraService->getClientLockById(mCameraId);
-    lock->lock();
     mDestructionStarted = true;
-    // client will not be accessed from callback. should unlock to prevent dead-lock in disconnect
-    lock->unlock();
     int callingPid = getCallingPid();
     LOG1("CameraClient::~CameraClient E (pid %d, this %p)", callingPid, this);
 
@@ -116,11 +111,11 @@
     const size_t SIZE = 256;
     char buffer[SIZE];
 
-    size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) PID: %d\n",
+    size_t len = snprintf(buffer, SIZE, "Client[%d] (%p) with UID %d\n",
             mCameraId,
             (getRemoteCallback() != NULL ?
                     IInterface::asBinder(getRemoteCallback()).get() : NULL),
-            mClientPid);
+            mClientUid);
     len = (len > SIZE - 1) ? SIZE - 1 : len;
     write(fd, buffer, len);
 
@@ -677,6 +672,13 @@
                 LOG1("lockIfMessageWanted(%d): waited for %d ms",
                     msgType, sleepCount * CHECK_MESSAGE_INTERVAL);
             }
+
+            // If messages are no longer enabled after acquiring the lock, release it and drop the message
+            if ((mMsgEnabled & msgType) == 0) {
+                mLock.unlock();
+                break;
+            }
+
             return true;
         }
         if (sleepCount++ == 0) {
@@ -702,26 +704,13 @@
 //      (others)                        c->dataCallback
 // dataCallbackTimestamp
 //      (others)                        c->dataCallbackTimestamp
-//
-// NOTE: the *Callback functions grab mLock of the client before passing
-// control to handle* functions. So the handle* functions must release the
-// lock before calling the ICameraClient's callbacks, so those callbacks can
-// invoke methods in the Client class again (For example, the preview frame
-// callback may want to releaseRecordingFrame). The handle* functions must
-// release the lock after all accesses to member variables, so it must be
-// handled very carefully.
 
 void CameraClient::notifyCallback(int32_t msgType, int32_t ext1,
         int32_t ext2, void* user) {
     LOG2("notifyCallback(%d)", msgType);
 
-    Mutex* lock = getClientLockFromCookie(user);
-    if (lock == NULL) return;
-    Mutex::Autolock alock(*lock);
-
-    CameraClient* client =
-            static_cast<CameraClient*>(getClientFromCookie(user));
-    if (client == NULL) return;
+    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
 
@@ -740,13 +729,8 @@
         const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata, void* user) {
     LOG2("dataCallback(%d)", msgType);
 
-    Mutex* lock = getClientLockFromCookie(user);
-    if (lock == NULL) return;
-    Mutex::Autolock alock(*lock);
-
-    CameraClient* client =
-            static_cast<CameraClient*>(getClientFromCookie(user));
-    if (client == NULL) return;
+    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
     if (dataPtr == 0 && metadata == NULL) {
@@ -778,13 +762,8 @@
         int32_t msgType, const sp<IMemory>& dataPtr, void* user) {
     LOG2("dataCallbackTimestamp(%d)", msgType);
 
-    Mutex* lock = getClientLockFromCookie(user);
-    if (lock == NULL) return;
-    Mutex::Autolock alock(*lock);
-
-    CameraClient* client =
-            static_cast<CameraClient*>(getClientFromCookie(user));
-    if (client == NULL) return;
+    sp<CameraClient> client = static_cast<CameraClient*>(getClientFromCookie(user).get());
+    if (client.get() == nullptr) return;
 
     if (!client->lockIfMessageWanted(msgType)) return;
 
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 63a9d0f..95616b2 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -68,7 +68,7 @@
             bool legacyMode = false);
     ~CameraClient();
 
-    status_t initialize(camera_module_t *module);
+    status_t initialize(CameraModule *module);
 
     status_t dump(int fd, const Vector<String16>& args);
 
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index eadaa00..5f4fb22 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -55,7 +55,7 @@
 }
 
 status_t CallbackProcessor::setCallbackWindow(
-        sp<ANativeWindow> callbackWindow) {
+        sp<Surface> callbackWindow) {
     ATRACE_CALL();
     status_t res;
 
@@ -115,7 +115,7 @@
         BufferQueue::createBufferQueue(&producer, &consumer);
         mCallbackConsumer = new CpuConsumer(consumer, kCallbackHeapCount);
         mCallbackConsumer->setFrameAvailableListener(this);
-        mCallbackConsumer->setName(String8("Camera2Client::CallbackConsumer"));
+        mCallbackConsumer->setName(String8("Camera2-CallbackConsumer"));
         mCallbackWindow = new Surface(producer);
     }
 
@@ -123,7 +123,7 @@
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight, currentFormat;
         res = device->getStreamInfo(mCallbackStreamId,
-                &currentWidth, &currentHeight, &currentFormat);
+                &currentWidth, &currentHeight, &currentFormat, 0);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying callback output stream info: "
                     "%s (%d)", __FUNCTION__, mId,
@@ -154,8 +154,8 @@
                 params.previewWidth, params.previewHeight,
                 callbackFormat, params.previewFormat);
         res = device->createStream(mCallbackWindow,
-                params.previewWidth, params.previewHeight,
-                callbackFormat, &mCallbackStreamId);
+                params.previewWidth, params.previewHeight, callbackFormat,
+                HAL_DATASPACE_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
                     "%s (%d)", __FUNCTION__, mId,
@@ -395,7 +395,7 @@
 
         heapIdx = mCallbackHeapHead;
 
-        mCallbackHeapHead = (mCallbackHeapHead + 1) & kCallbackHeapCount;
+        mCallbackHeapHead = (mCallbackHeapHead + 1) % kCallbackHeapCount;
         mCallbackHeapFree--;
 
         // TODO: Get rid of this copy by passing the gralloc queue all the way
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
index 7fdc329..a290536 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.h
@@ -47,7 +47,7 @@
     void onFrameAvailable(const BufferItem& item);
 
     // Set to NULL to disable the direct-to-app callback window
-    status_t setCallbackWindow(sp<ANativeWindow> callbackWindow);
+    status_t setCallbackWindow(sp<Surface> callbackWindow);
     status_t updateStream(const Parameters &params);
     status_t deleteStream();
     int getStreamId() const;
@@ -73,7 +73,7 @@
     int mCallbackStreamId;
     static const size_t kCallbackHeapCount = 6;
     sp<CpuConsumer>    mCallbackConsumer;
-    sp<ANativeWindow>  mCallbackWindow;
+    sp<Surface>        mCallbackWindow;
     sp<Camera2Heap>    mCallbackHeap;
     int mCallbackHeapId;
     size_t mCallbackHeapHead, mCallbackHeapFree;
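
The one-character fix in CallbackProcessor.cpp above, replacing '& kCallbackHeapCount'
with '% kCallbackHeapCount', matters because the heap count is 6, not a power of
two, so a bitwise mask does not wrap the ring index correctly. A tiny
illustration of the difference:

    #include <cstdio>

    int main() {
        const unsigned count = 6;  // kCallbackHeapCount in the diff above
        for (unsigned head = 0; head < count; ++head) {
            // Masking with 6 confines the result to {0, 2, 4, 6}: odd slots are
            // never reached and index 6 is out of range for a 6-entry heap.
            // Modulo wraps correctly through 0..5.
            std::printf("head=%u  and=%u  mod=%u\n",
                        head, (head + 1) & count, (head + 1) % count);
        }
        return 0;
    }
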
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 9849f4d..5f7fd74 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -43,6 +43,8 @@
         mNewFrameReceived(false),
         mNewCaptureReceived(false),
         mShutterNotified(false),
+        mHalNotifiedShutter(false),
+        mShutterCaptureId(-1),
         mClient(client),
         mCaptureState(IDLE),
         mStateTransitionCount(0),
@@ -106,6 +108,16 @@
     }
 }
 
+void CaptureSequencer::notifyShutter(const CaptureResultExtras& resultExtras,
+                                     nsecs_t timestamp) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mInputMutex);
+    if (!mHalNotifiedShutter && resultExtras.requestId == mShutterCaptureId) {
+        mHalNotifiedShutter = true;
+        mShutterNotifySignal.signal();
+    }
+}
+
 void CaptureSequencer::onResultAvailable(const CaptureResult &result) {
     ATRACE_CALL();
     ALOGV("%s: New result available.", __FUNCTION__);
@@ -335,6 +347,11 @@
     } else {
         nextState = STANDARD_START;
     }
+    {
+        Mutex::Autolock l(mInputMutex);
+        mShutterCaptureId = mCaptureId;
+        mHalNotifiedShutter = false;
+    }
     mShutterNotified = false;
 
     return nextState;
@@ -487,6 +504,17 @@
      *  - recording (if recording enabled)
      */
     outputStreams.push(client->getPreviewStreamId());
+
+    int captureStreamId = client->getCaptureStreamId();
+    if (captureStreamId == Camera2Client::NO_STREAM) {
+        res = client->createJpegStreamL(l.mParameters);
+        if (res != OK || client->getCaptureStreamId() == Camera2Client::NO_STREAM) {
+            ALOGE("%s: Camera %d: cannot create jpeg stream for slowJpeg mode: %s (%d)",
+                  __FUNCTION__, client->getCameraId(), strerror(-res), res);
+            return DONE;
+        }
+    }
+
     outputStreams.push(client->getCaptureStreamId());
 
     if (l.mParameters.previewCallbackFlags &
@@ -541,6 +569,7 @@
             return DONE;
         }
     }
+
     // TODO: Capture should be atomic with setStreamingRequest here
     res = client->getCameraDevice()->capture(captureCopy);
     if (res != OK) {
@@ -560,6 +589,31 @@
     ATRACE_CALL();
     Mutex::Autolock l(mInputMutex);
 
+
+    // Wait for shutter callback
+    while (!mHalNotifiedShutter) {
+        if (mTimeoutCount <= 0) {
+            break;
+        }
+        res = mShutterNotifySignal.waitRelative(mInputMutex, kWaitDuration);
+        if (res == TIMED_OUT) {
+            mTimeoutCount--;
+            return STANDARD_CAPTURE_WAIT;
+        }
+    }
+
+    if (mHalNotifiedShutter) {
+        if (!mShutterNotified) {
+            SharedParameters::Lock l(client->getParameters());
+            /* warning: this also locks a SharedCameraCallbacks */
+            shutterNotifyLocked(l.mParameters, client, mMsgType);
+            mShutterNotified = true;
+        }
+    } else if (mTimeoutCount <= 0) {
+        ALOGW("Timed out waiting for shutter notification");
+        return DONE;
+    }
+
     // Wait for new metadata result (mNewFrame)
     while (!mNewFrameReceived) {
         res = mNewFrameSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -569,15 +623,6 @@
         }
     }
 
-    // Approximation of the shutter being closed
-    // - TODO: use the hal3 exposure callback in Camera3Device instead
-    if (mNewFrameReceived && !mShutterNotified) {
-        SharedParameters::Lock l(client->getParameters());
-        /* warning: this also locks a SharedCameraCallbacks */
-        shutterNotifyLocked(l.mParameters, client, mMsgType);
-        mShutterNotified = true;
-    }
-
     // Wait until jpeg was captured by JpegProcessor
     while (mNewFrameReceived && !mNewCaptureReceived) {
         res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
@@ -591,6 +636,7 @@
         return DONE;
     }
     if (mNewFrameReceived && mNewCaptureReceived) {
+
         if (mNewFrameId != mCaptureId) {
             ALOGW("Mismatched capture frame IDs: Expected %d, got %d",
                     mCaptureId, mNewFrameId);
@@ -667,7 +713,6 @@
         sp<Camera2Client> &/*client*/) {
     status_t res;
     ATRACE_CALL();
-
     while (!mNewCaptureReceived) {
         res = mNewCaptureSignal.waitRelative(mInputMutex, kWaitDuration);
         if (res == TIMED_OUT) {
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index d42ab13..10252fb 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -62,6 +62,10 @@
     // Notifications about AE state changes
     void notifyAutoExposure(uint8_t newState, int triggerId);
 
+    // Notifications about shutter (capture start)
+    void notifyShutter(const CaptureResultExtras& resultExtras,
+                       nsecs_t timestamp);
+
     // Notification from the frame processor
     virtual void onResultAvailable(const CaptureResult &result);
 
@@ -95,7 +99,10 @@
     sp<MemoryBase> mCaptureBuffer;
     Condition mNewCaptureSignal;
 
-    bool mShutterNotified;
+    bool mShutterNotified; // Has CaptureSequencer sent shutter to Client
+    bool mHalNotifiedShutter; // Has HAL sent shutter to CaptureSequencer
+    int32_t mShutterCaptureId; // The captureId which is waiting for shutter notification
+    Condition mShutterNotifySignal;
 
     /**
      * Internal to CaptureSequencer
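
The new shutter wait added in CaptureSequencer.cpp above loops on a timed
condition wait and spends one unit of mTimeoutCount per TIMED_OUT result before
giving up. The same retry-on-timeout shape, reduced to standard C++ (the
duration and retry budget are illustrative, not the sequencer's actual
constants):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    // Returns true if 'flag' became true before the retry budget was exhausted;
    // each timed-out wait consumes one retry, mirroring mTimeoutCount-- above.
    static bool waitForFlag(std::mutex& m, std::condition_variable& cv, bool& flag,
                            int retryBudget, std::chrono::milliseconds waitDuration) {
        std::unique_lock<std::mutex> lock(m);
        while (!flag && retryBudget > 0) {
            if (cv.wait_for(lock, waitDuration) == std::cv_status::timeout) {
                --retryBudget;
            }
        }
        return flag;
    }
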
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index 2772267..bd9786f 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -87,7 +87,7 @@
         BufferQueue::createBufferQueue(&producer, &consumer);
         mCaptureConsumer = new CpuConsumer(consumer, 1);
         mCaptureConsumer->setFrameAvailableListener(this);
-        mCaptureConsumer->setName(String8("Camera2Client::CaptureConsumer"));
+        mCaptureConsumer->setName(String8("Camera2-JpegConsumer"));
         mCaptureWindow = new Surface(producer);
     }
 
@@ -115,7 +115,7 @@
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
         res = device->getStreamInfo(mCaptureStreamId,
-                &currentWidth, &currentHeight, 0);
+                &currentWidth, &currentHeight, 0, 0);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying capture output stream info: "
                     "%s (%d)", __FUNCTION__,
@@ -145,7 +145,8 @@
         // Create stream for HAL production
         res = device->createStream(mCaptureWindow,
                 params.pictureWidth, params.pictureHeight,
-                HAL_PIXEL_FORMAT_BLOB, &mCaptureStreamId);
+                HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_JFIF,
+                CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for capture: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index 2040b30..fbdae11 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -70,8 +70,8 @@
 
     int mCaptureStreamId;
     sp<CpuConsumer>    mCaptureConsumer;
-    sp<ANativeWindow>  mCaptureWindow;
-    sp<MemoryHeapBase>    mCaptureHeap;
+    sp<Surface>        mCaptureWindow;
+    sp<MemoryHeapBase> mCaptureHeap;
 
     virtual bool threadLoop();
 
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 4f4cfb0..442eb75 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -65,15 +65,29 @@
     const Size MAX_PREVIEW_SIZE = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
     // Treat the H.264 max size as the max supported video size.
     MediaProfiles *videoEncoderProfiles = MediaProfiles::getInstance();
-    int32_t maxVideoWidth = videoEncoderProfiles->getVideoEncoderParamByName(
-                            "enc.vid.width.max", VIDEO_ENCODER_H264);
-    int32_t maxVideoHeight = videoEncoderProfiles->getVideoEncoderParamByName(
-                            "enc.vid.height.max", VIDEO_ENCODER_H264);
-    const Size MAX_VIDEO_SIZE = {maxVideoWidth, maxVideoHeight};
+    Vector<video_encoder> encoders = videoEncoderProfiles->getVideoEncoders();
+    int32_t maxVideoWidth = 0;
+    int32_t maxVideoHeight = 0;
+    for (size_t i = 0; i < encoders.size(); i++) {
+        int width = videoEncoderProfiles->getVideoEncoderParamByName(
+                "enc.vid.width.max", encoders[i]);
+        int height = videoEncoderProfiles->getVideoEncoderParamByName(
+                "enc.vid.height.max", encoders[i]);
+        // Treat width and height separately here to handle the case where different
+        // profiles report max sizes with different aspect ratios
+        if (width > maxVideoWidth) {
+            maxVideoWidth = width;
+        }
+        if (height > maxVideoHeight) {
+            maxVideoHeight = height;
+        }
+    }
+    // This is just an upper bound and may not be an actually valid video size
+    const Size VIDEO_SIZE_UPPER_BOUND = {maxVideoWidth, maxVideoHeight};
 
     res = getFilteredSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes);
     if (res != OK) return res;
-    res = getFilteredSizes(MAX_VIDEO_SIZE, &availableVideoSizes);
+    res = getFilteredSizes(VIDEO_SIZE_UPPER_BOUND, &availableVideoSizes);
     if (res != OK) return res;
 
     // Select initial preview and video size that's under the initial bound and
@@ -182,9 +196,9 @@
                 supportedPreviewFormats +=
                     CameraParameters::PIXEL_FORMAT_YUV420SP;
                 break;
-            // Not advertizing JPEG, RAW_SENSOR, etc, for preview formats
+            // Not advertizing JPEG, RAW16, etc, for preview formats
             case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
-            case HAL_PIXEL_FORMAT_RAW_SENSOR:
+            case HAL_PIXEL_FORMAT_RAW16:
             case HAL_PIXEL_FORMAT_BLOB:
                 addComma = false;
                 break;
@@ -875,15 +889,30 @@
     previewCallbackOneShot = false;
     previewCallbackSurface = false;
 
+    Size maxJpegSize = getMaxSize(getAvailableJpegSizes());
+    int64_t minFrameDurationNs = getJpegStreamMinFrameDurationNs(maxJpegSize);
+
+    slowJpegMode = false;
+    if (minFrameDurationNs > kSlowJpegModeThreshold) {
+        slowJpegMode = true;
+        // Slow jpeg devices do not support video snapshot without
+        // slowing down preview.
+        // TODO: support video size video snapshot only?
+        params.set(CameraParameters::KEY_VIDEO_SNAPSHOT_SUPPORTED,
+            CameraParameters::FALSE);
+    }
+
     char value[PROPERTY_VALUE_MAX];
     property_get("camera.disable_zsl_mode", value, "0");
-    if (!strcmp(value,"1")) {
+    if (!strcmp(value,"1") || slowJpegMode) {
         ALOGI("Camera %d: Disabling ZSL mode", cameraId);
         zslMode = false;
     } else {
         zslMode = true;
     }
 
+    ALOGI("%s: zslMode: %d slowJpegMode %d", __FUNCTION__, zslMode, slowJpegMode);
+
     lightFx = LIGHTFX_NONE;
 
     state = STOPPED;
@@ -2086,12 +2115,7 @@
 
     delete[] reqMeteringAreas;
 
-    /* don't include jpeg thumbnail size - it's valid for
-       it to be set to (0,0), meaning 'no thumbnail' */
-    CropRegion crop = calculateCropRegion( (CropRegion::Outputs)(
-            CropRegion::OUTPUT_PREVIEW     |
-            CropRegion::OUTPUT_VIDEO       |
-            CropRegion::OUTPUT_PICTURE    ));
+    CropRegion crop = calculateCropRegion(/*previewOnly*/ false);
     int32_t reqCropRegion[4] = {
         static_cast<int32_t>(crop.left),
         static_cast<int32_t>(crop.top),
@@ -2253,7 +2277,7 @@
         case HAL_PIXEL_FORMAT_RGBA_8888:   // RGBA8888
             fmt = CameraParameters::PIXEL_FORMAT_RGBA8888;
             break;
-        case HAL_PIXEL_FORMAT_RAW_SENSOR:
+        case HAL_PIXEL_FORMAT_RAW16:
             ALOGW("Raw sensor preview format requested.");
             fmt = CameraParameters::PIXEL_FORMAT_BAYER_RGGB;
             break;
@@ -2589,7 +2613,7 @@
     ALOG_ASSERT(x >= 0, "Crop-relative X coordinate = '%d' is out of bounds"
                          "(lower = 0)", x);
 
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     ALOG_ASSERT(x < previewCrop.width, "Crop-relative X coordinate = '%d' "
                     "is out of bounds (upper = %f)", x, previewCrop.width);
 
@@ -2605,7 +2629,7 @@
     ALOG_ASSERT(y >= 0, "Crop-relative Y coordinate = '%d' is out of bounds "
         "(lower = 0)", y);
 
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     ALOG_ASSERT(y < previewCrop.height, "Crop-relative Y coordinate = '%d' is "
                 "out of bounds (upper = %f)", y, previewCrop.height);
 
@@ -2620,12 +2644,12 @@
 }
 
 int Parameters::normalizedXToCrop(int x) const {
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     return (x + 1000) * (previewCrop.width - 1) / 2000;
 }
 
 int Parameters::normalizedYToCrop(int y) const {
-    CropRegion previewCrop = calculateCropRegion(CropRegion::OUTPUT_PREVIEW);
+    CropRegion previewCrop = calculateCropRegion(/*previewOnly*/ true);
     return (y + 1000) * (previewCrop.height - 1) / 2000;
 }
 
@@ -2769,6 +2793,17 @@
     return maxSize;
 }
 
+Parameters::Size Parameters::getMaxSize(const Vector<Parameters::Size> &sizes) {
+    Size maxSize = {-1, -1};
+    for (size_t i = 0; i < sizes.size(); i++) {
+        if (sizes[i].width > maxSize.width ||
+                (sizes[i].width == maxSize.width && sizes[i].height > maxSize.height )) {
+            maxSize = sizes[i];
+        }
+    }
+    return maxSize;
+}
+
 Vector<Parameters::StreamConfiguration> Parameters::getStreamConfigurations() {
     const int STREAM_CONFIGURATION_SIZE = 4;
     const int STREAM_FORMAT_OFFSET = 0;
@@ -2783,7 +2818,7 @@
 
     camera_metadata_ro_entry_t availableStreamConfigs =
                 staticInfo(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
-    for (size_t i=0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+    for (size_t i = 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
         int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
         int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
         int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
@@ -2794,11 +2829,52 @@
     return scs;
 }
 
+int64_t Parameters::getJpegStreamMinFrameDurationNs(Parameters::Size size) {
+    if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
+        const int STREAM_DURATION_SIZE = 4;
+        const int STREAM_FORMAT_OFFSET = 0;
+        const int STREAM_WIDTH_OFFSET = 1;
+        const int STREAM_HEIGHT_OFFSET = 2;
+        const int STREAM_DURATION_OFFSET = 3;
+        camera_metadata_ro_entry_t availableStreamMinDurations =
+                    staticInfo(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
+        for (size_t i = 0; i < availableStreamMinDurations.count; i+= STREAM_DURATION_SIZE) {
+            int64_t format = availableStreamMinDurations.data.i64[i + STREAM_FORMAT_OFFSET];
+            int64_t width = availableStreamMinDurations.data.i64[i + STREAM_WIDTH_OFFSET];
+            int64_t height = availableStreamMinDurations.data.i64[i + STREAM_HEIGHT_OFFSET];
+            int64_t duration = availableStreamMinDurations.data.i64[i + STREAM_DURATION_OFFSET];
+            if (format == HAL_PIXEL_FORMAT_BLOB && width == size.width && height == size.height) {
+                return duration;
+            }
+        }
+    } else {
+        Vector<Size> availableJpegSizes = getAvailableJpegSizes();
+        size_t streamIdx = availableJpegSizes.size();
+        for (size_t i = 0; i < availableJpegSizes.size(); i++) {
+            if (availableJpegSizes[i].width == size.width &&
+                    availableJpegSizes[i].height == size.height) {
+                streamIdx = i;
+                break;
+            }
+        }
+        if (streamIdx != availableJpegSizes.size()) {
+            camera_metadata_ro_entry_t jpegMinDurations =
+                    staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_MIN_DURATIONS);
+            if (streamIdx < jpegMinDurations.count) {
+                return jpegMinDurations.data.i64[streamIdx];
+            }
+        }
+    }
+    ALOGE("%s: cannot find min frame duration for jpeg size %dx%d",
+            __FUNCTION__, size.width, size.height);
+    return -1;
+}
+
 SortedVector<int32_t> Parameters::getAvailableOutputFormats() {
     SortedVector<int32_t> outputFormats; // Non-duplicated output formats
     if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
         Vector<StreamConfiguration> scs = getStreamConfigurations();
-        for (size_t i=0; i < scs.size(); i++) {
+        for (size_t i = 0; i < scs.size(); i++) {
             const StreamConfiguration &sc = scs[i];
             if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT) {
                 outputFormats.add(sc.format);
@@ -2806,7 +2882,7 @@
         }
     } else {
         camera_metadata_ro_entry_t availableFormats = staticInfo(ANDROID_SCALER_AVAILABLE_FORMATS);
-        for (size_t i=0; i < availableFormats.count; i++) {
+        for (size_t i = 0; i < availableFormats.count; i++) {
             outputFormats.add(availableFormats.data.i32[i]);
         }
     }
@@ -2817,7 +2893,7 @@
     Vector<Parameters::Size> jpegSizes;
     if (mDeviceVersion >= CAMERA_DEVICE_API_VERSION_3_2) {
         Vector<StreamConfiguration> scs = getStreamConfigurations();
-        for (size_t i=0; i < scs.size(); i++) {
+        for (size_t i = 0; i < scs.size(); i++) {
             const StreamConfiguration &sc = scs[i];
             if (sc.isInput == ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT &&
                     sc.format == HAL_PIXEL_FORMAT_BLOB) {
@@ -2831,7 +2907,7 @@
         const int HEIGHT_OFFSET = 1;
         camera_metadata_ro_entry_t availableJpegSizes =
             staticInfo(ANDROID_SCALER_AVAILABLE_JPEG_SIZES);
-        for (size_t i=0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
+        for (size_t i = 0; i < availableJpegSizes.count; i+= JPEG_SIZE_ENTRY_COUNT) {
             int width = availableJpegSizes.data.i32[i + WIDTH_OFFSET];
             int height = availableJpegSizes.data.i32[i + HEIGHT_OFFSET];
             Size sz = {width, height};
@@ -2841,8 +2917,7 @@
     return jpegSizes;
 }
 
-Parameters::CropRegion Parameters::calculateCropRegion(
-                            Parameters::CropRegion::Outputs outputs) const {
+Parameters::CropRegion Parameters::calculateCropRegion(bool previewOnly) const {
 
     float zoomLeft, zoomTop, zoomWidth, zoomHeight;
 
@@ -2866,90 +2941,45 @@
           maxDigitalZoom.data.f[0], zoomIncrement, zoomRatio, previewWidth,
           previewHeight, fastInfo.arrayWidth, fastInfo.arrayHeight);
 
-    /*
-     * Assumption: On the HAL side each stream buffer calculates its crop
-     * rectangle as follows:
-     *   cropRect = (zoomLeft, zoomRight,
-     *               zoomWidth, zoomHeight * zoomWidth / outputWidth);
-     *
-     * Note that if zoomWidth > bufferWidth, the new cropHeight > zoomHeight
-     *      (we can then get into trouble if the cropHeight > arrayHeight).
-     * By selecting the zoomRatio based on the smallest outputRatio, we
-     * guarantee this will never happen.
-     */
+    if (previewOnly) {
+        // Calculate a tight crop region for the preview stream only
+        float previewRatio = static_cast<float>(previewWidth) / previewHeight;
 
-    // Enumerate all possible output sizes, select the one with the smallest
-    // aspect ratio
-    float minOutputWidth, minOutputHeight, minOutputRatio;
-    {
-        float outputSizes[][2] = {
-            { static_cast<float>(previewWidth),
-              static_cast<float>(previewHeight) },
-            { static_cast<float>(videoWidth),
-              static_cast<float>(videoHeight) },
-            { static_cast<float>(jpegThumbSize[0]),
-              static_cast<float>(jpegThumbSize[1]) },
-            { static_cast<float>(pictureWidth),
-              static_cast<float>(pictureHeight) },
-        };
+        /* Ensure that the width/height never go out of bounds
+         * by scaling across a different dimension if an out-of-bounds
+         * possibility exists.
+         *
+         * e.g. if previewRatio < arrayRatio and zoomRatio = 1.0, then by
+         * calculating zoomWidth from zoomHeight we'd actually get a
+         * zoomHeight > arrayHeight
+         */
+        float arrayRatio = 1.f * fastInfo.arrayWidth / fastInfo.arrayHeight;
+        if (previewRatio >= arrayRatio) {
+            // Adjust the height based on the width
+            zoomWidth =  fastInfo.arrayWidth / zoomRatio;
+            zoomHeight = zoomWidth *
+                    previewHeight / previewWidth;
 
-        minOutputWidth = outputSizes[0][0];
-        minOutputHeight = outputSizes[0][1];
-        minOutputRatio = minOutputWidth / minOutputHeight;
-        for (unsigned int i = 0;
-             i < sizeof(outputSizes) / sizeof(outputSizes[0]);
-             ++i) {
-
-            // skip over outputs we don't want to consider for the crop region
-            if ( !((1 << i) & outputs) ) {
-                continue;
-            }
-
-            float outputWidth = outputSizes[i][0];
-            float outputHeight = outputSizes[i][1];
-            float outputRatio = outputWidth / outputHeight;
-
-            if (minOutputRatio > outputRatio) {
-                minOutputRatio = outputRatio;
-                minOutputWidth = outputWidth;
-                minOutputHeight = outputHeight;
-            }
-
-            // and then use this output ratio instead of preview output ratio
-            ALOGV("Enumerating output ratio %f = %f / %f, min is %f",
-                  outputRatio, outputWidth, outputHeight, minOutputRatio);
+        } else {
+            // Adjust the width based on the height
+            zoomHeight = fastInfo.arrayHeight / zoomRatio;
+            zoomWidth = zoomHeight *
+                    previewWidth / previewHeight;
         }
-    }
-
-    /* Ensure that the width/height never go out of bounds
-     * by scaling across a diffent dimension if an out-of-bounds
-     * possibility exists.
-     *
-     * e.g. if the previewratio < arrayratio and e.g. zoomratio = 1.0, then by
-     * calculating the zoomWidth from zoomHeight we'll actually get a
-     * zoomheight > arrayheight
-     */
-    float arrayRatio = 1.f * fastInfo.arrayWidth / fastInfo.arrayHeight;
-    if (minOutputRatio >= arrayRatio) {
-        // Adjust the height based on the width
-        zoomWidth =  fastInfo.arrayWidth / zoomRatio;
-        zoomHeight = zoomWidth *
-                minOutputHeight / minOutputWidth;
-
     } else {
-        // Adjust the width based on the height
+        // Calculate the global crop region with a shape matching the active
+        // array.
+        zoomWidth = fastInfo.arrayWidth / zoomRatio;
         zoomHeight = fastInfo.arrayHeight / zoomRatio;
-        zoomWidth = zoomHeight *
-                minOutputWidth / minOutputHeight;
     }
-    // centering the zoom area within the active area
+
+    // center the zoom area within the active area
     zoomLeft = (fastInfo.arrayWidth - zoomWidth) / 2;
     zoomTop = (fastInfo.arrayHeight - zoomHeight) / 2;
 
     ALOGV("Crop region calculated (x=%d,y=%d,w=%f,h=%f) for zoom=%d",
         (int32_t)zoomLeft, (int32_t)zoomTop, zoomWidth, zoomHeight, this->zoom);
 
-
     CropRegion crop = { zoomLeft, zoomTop, zoomWidth, zoomHeight };
     return crop;
 }
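A minimal standalone sketch of the preview-only crop math introduced above, in plain C++: the aspect-ratio comparison decides which active-array dimension the crop is derived from, and the result is centered in the array. The helper name and the example sizes (4000x3000 array, 1920x1080 preview, 2x zoom) are illustrative, not part of the patch.

    #include <cstdio>

    struct CropRegion { float left, top, width, height; };

    // Sketch of the previewOnly branch: pick the dimension to derive from the
    // aspect-ratio comparison so the crop never exceeds the active array.
    CropRegion previewCrop(float arrayW, float arrayH,
                           float previewW, float previewH, float zoomRatio) {
        float previewRatio = previewW / previewH;
        float arrayRatio = arrayW / arrayH;
        float zoomWidth, zoomHeight;
        if (previewRatio >= arrayRatio) {
            zoomWidth = arrayW / zoomRatio;
            zoomHeight = zoomWidth * previewH / previewW;
        } else {
            zoomHeight = arrayH / zoomRatio;
            zoomWidth = zoomHeight * previewW / previewH;
        }
        // Center the zoom area within the active array.
        return { (arrayW - zoomWidth) / 2, (arrayH - zoomHeight) / 2,
                 zoomWidth, zoomHeight };
    }

    int main() {
        CropRegion c = previewCrop(4000, 3000, 1920, 1080, 2.0f);
        printf("crop: x=%.0f y=%.0f w=%.0f h=%.0f\n",
               c.left, c.top, c.width, c.height);
        return 0;
    }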
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index e628a7e..972d007 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -166,6 +166,9 @@
     bool previewCallbackSurface;
 
     bool zslMode;
+    // Whether the jpeg stream is slower than 30FPS and can slow down preview.
+    // When slowJpegMode is true, zslMode must be false to avoid slowing down preview.
+    bool slowJpegMode;
 
     // Overall camera state
     enum State {
@@ -190,6 +193,8 @@
     static const int MAX_INITIAL_PREVIEW_HEIGHT = 1080;
     // Aspect ratio tolerance
     static const CONSTEXPR float ASPECT_RATIO_TOLERANCE = 0.001;
+    // Threshold for slow jpeg mode
+    static const int64_t kSlowJpegModeThreshold = 33400000LL; // 33.4 ms
 
     // Full static camera info, object owned by someone else, such as
     // Camera2Device.
@@ -271,21 +276,16 @@
     // if video snapshot size is currently overridden
     bool isJpegSizeOverridden();
 
-    // Calculate the crop region rectangle based on current stream sizes
+    // Calculate the crop region rectangle, either tightly around the preview
+    // resolution, or based only on the active array; both take the current
+    // zoom level into account.
     struct CropRegion {
         float left;
         float top;
         float width;
         float height;
-
-        enum Outputs {
-            OUTPUT_PREVIEW         = 0x01,
-            OUTPUT_VIDEO           = 0x02,
-            OUTPUT_JPEG_THUMBNAIL  = 0x04,
-            OUTPUT_PICTURE         = 0x08,
-        };
     };
-    CropRegion calculateCropRegion(CropRegion::Outputs outputs) const;
+    CropRegion calculateCropRegion(bool previewOnly) const;
 
     // Calculate the field of view of the high-resolution JPEG capture
     status_t calculatePictureFovs(float *horizFov, float *vertFov) const;
@@ -382,15 +382,23 @@
         int32_t height;
         int32_t isInput;
     };
+
     // Helper function to extract available stream configurations
     // Only valid since device HAL version 3.2
     // returns an empty Vector if device HAL version does not support it
     Vector<StreamConfiguration> getStreamConfigurations();
 
+    // Helper function to get minimum frame duration for a jpeg size
+    // returns -1 if the input jpeg size cannot be found in the supported size list
+    int64_t getJpegStreamMinFrameDurationNs(Parameters::Size size);
+
     // Helper function to get non-duplicated available output formats
     SortedVector<int32_t> getAvailableOutputFormats();
     // Helper function to get available output jpeg sizes
     Vector<Size> getAvailableJpegSizes();
+    // Helper function to get maximum size in input Size vector.
+    // The maximum size is determined by comparing width first, then height when widths are equal.
+    Size getMaxSize(const Vector<Size>& sizes);
 
     int mDeviceVersion;
 };
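The slowJpegMode / kSlowJpegModeThreshold additions above imply a simple decision; the call site is outside this hunk, so the helper below is a hypothetical sketch assuming slowJpegMode is set when the JPEG stream's minimum frame duration exceeds the 33.4 ms threshold (i.e. the stream cannot sustain roughly 30 FPS), which in turn forces zslMode off.

    #include <cstdint>
    #include <cstdio>

    // Mirrors kSlowJpegModeThreshold from Parameters.h: 33.4 ms per frame,
    // i.e. anything slower than roughly 30 FPS.
    static const int64_t kSlowJpegModeThreshold = 33400000LL;

    // Hypothetical helper: decide slowJpegMode from the JPEG stream's minimum
    // frame duration (as returned by getJpegStreamMinFrameDurationNs()); a
    // negative duration means "size not found" and is treated as not slow.
    static bool computeSlowJpegMode(int64_t jpegMinFrameDurationNs) {
        return jpegMinFrameDurationNs > kSlowJpegModeThreshold;
    }

    int main() {
        printf("50 ms jpeg frame -> slowJpegMode=%d (ZSL must be off)\n",
               computeSlowJpegMode(50000000LL));
        printf("20 ms jpeg frame -> slowJpegMode=%d\n",
               computeSlowJpegMode(20000000LL));
        return 0;
    }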
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
index 146d572..66d7b00 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.cpp
@@ -25,11 +25,12 @@
 #define ALOGVV(...) ((void)0)
 #endif
 
+#include <cutils/properties.h>
 #include <utils/Log.h>
 #include <utils/Trace.h>
 #include <gui/BufferItem.h>
 #include <gui/Surface.h>
-#include <media/hardware/MetadataBufferType.h>
+#include <media/hardware/HardwareAPI.h>
 
 #include "common/CameraDeviceBase.h"
 #include "api1/Camera2Client.h"
@@ -51,7 +52,10 @@
         mRecordingStreamId(NO_STREAM),
         mRecordingFrameAvailable(false),
         mRecordingHeapCount(kDefaultRecordingHeapCount),
-        mRecordingHeapFree(kDefaultRecordingHeapCount)
+        mRecordingHeapFree(kDefaultRecordingHeapCount),
+        mRecordingFormat(kDefaultRecordingFormat),
+        mRecordingDataSpace(kDefaultRecordingDataSpace),
+        mRecordingGrallocUsage(kDefaultRecordingGrallocUsage)
 {
 }
 
@@ -60,7 +64,7 @@
     deleteRecordingStream();
 }
 
-status_t StreamingProcessor::setPreviewWindow(sp<ANativeWindow> window) {
+status_t StreamingProcessor::setPreviewWindow(sp<Surface> window) {
     ATRACE_CALL();
     status_t res;
 
@@ -151,7 +155,7 @@
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
         res = device->getStreamInfo(mPreviewStreamId,
-                &currentWidth, &currentHeight, 0);
+                &currentWidth, &currentHeight, 0, 0);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying preview stream info: "
                     "%s (%d)", __FUNCTION__, mId, strerror(-res), res);
@@ -182,7 +186,8 @@
     if (mPreviewStreamId == NO_STREAM) {
         res = device->createStream(mPreviewWindow,
                 params.previewWidth, params.previewHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, &mPreviewStreamId);
+                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, HAL_DATASPACE_UNKNOWN,
+                CAMERA3_STREAM_ROTATION_0, &mPreviewStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Unable to create preview stream: %s (%d)",
                     __FUNCTION__, mId, strerror(-res), res);
@@ -279,6 +284,46 @@
     return OK;
 }
 
+status_t StreamingProcessor::setRecordingFormat(int format,
+        android_dataspace dataSpace) {
+    ATRACE_CALL();
+
+    Mutex::Autolock m(mMutex);
+
+    ALOGV("%s: Camera %d: New recording format/dataspace from encoder: %X, %X",
+            __FUNCTION__, mId, format, dataSpace);
+
+    mRecordingFormat = format;
+    mRecordingDataSpace = dataSpace;
+    int prevGrallocUsage = mRecordingGrallocUsage;
+    if (mRecordingFormat == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
+        mRecordingGrallocUsage = GRALLOC_USAGE_HW_VIDEO_ENCODER;
+    } else {
+        mRecordingGrallocUsage = GRALLOC_USAGE_SW_READ_OFTEN;
+    }
+
+    ALOGV("%s: Camera %d: New recording gralloc usage: %08X", __FUNCTION__, mId,
+            mRecordingGrallocUsage);
+
+    if (prevGrallocUsage != mRecordingGrallocUsage) {
+        ALOGV("%s: Camera %d: Resetting recording consumer for new usage",
+            __FUNCTION__, mId);
+
+        if (isStreamActive(mActiveStreamIds, mRecordingStreamId)) {
+            ALOGE("%s: Camera %d: Changing recording format when "
+                    "recording stream is already active!", __FUNCTION__,
+                    mId);
+            return INVALID_OPERATION;
+        }
+
+        releaseAllRecordingFramesLocked();
+
+        mRecordingConsumer.clear();
+    }
+
+    return OK;
+}
+
 status_t StreamingProcessor::updateRecordingRequest(const Parameters &params) {
     ATRACE_CALL();
     status_t res;
@@ -339,9 +384,10 @@
         return INVALID_OPERATION;
     }
 
-    uint32_t currentWidth, currentHeight;
+    uint32_t currentWidth, currentHeight, currentFormat;
+    android_dataspace currentDataSpace;
     res = device->getStreamInfo(mRecordingStreamId,
-            &currentWidth, &currentHeight, 0);
+            &currentWidth, &currentHeight, &currentFormat, &currentDataSpace);
     if (res != OK) {
         ALOGE("%s: Camera %d: Error querying recording output stream info: "
                 "%s (%d)", __FUNCTION__, mId,
@@ -349,8 +395,11 @@
         return res;
     }
 
-    if (mRecordingConsumer == 0 || currentWidth != (uint32_t)params.videoWidth ||
-            currentHeight != (uint32_t)params.videoHeight) {
+    if (mRecordingConsumer == 0 ||
+            currentWidth != (uint32_t)params.videoWidth ||
+            currentHeight != (uint32_t)params.videoHeight ||
+            currentFormat != (uint32_t)mRecordingFormat ||
+            currentDataSpace != mRecordingDataSpace) {
         *needsUpdate = true;
     }
     *needsUpdate = false;
@@ -379,7 +428,7 @@
         sp<IGraphicBufferConsumer> consumer;
         BufferQueue::createBufferQueue(&producer, &consumer);
         mRecordingConsumer = new BufferItemConsumer(consumer,
-                GRALLOC_USAGE_HW_VIDEO_ENCODER,
+                mRecordingGrallocUsage,
                 mRecordingHeapCount + 1);
         mRecordingConsumer->setFrameAvailableListener(this);
         mRecordingConsumer->setName(String8("Camera2-RecordingConsumer"));
@@ -391,8 +440,11 @@
     if (mRecordingStreamId != NO_STREAM) {
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
+        uint32_t currentFormat;
+        android_dataspace currentDataSpace;
         res = device->getStreamInfo(mRecordingStreamId,
-                &currentWidth, &currentHeight, 0);
+                &currentWidth, &currentHeight,
+                &currentFormat, &currentDataSpace);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying recording output stream info: "
                     "%s (%d)", __FUNCTION__, mId,
@@ -400,7 +452,10 @@
             return res;
         }
         if (currentWidth != (uint32_t)params.videoWidth ||
-                currentHeight != (uint32_t)params.videoHeight || newConsumer) {
+                currentHeight != (uint32_t)params.videoHeight ||
+                currentFormat != (uint32_t)mRecordingFormat ||
+                currentDataSpace != mRecordingDataSpace ||
+                newConsumer) {
             // TODO: Should wait to be sure previous recording has finished
             res = device->deleteStream(mRecordingStreamId);
 
@@ -423,7 +478,8 @@
         mRecordingFrameCount = 0;
         res = device->createStream(mRecordingWindow,
                 params.videoWidth, params.videoHeight,
-                CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, &mRecordingStreamId);
+                mRecordingFormat, mRecordingDataSpace,
+                CAMERA3_STREAM_ROTATION_0, &mRecordingStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for recording: "
                     "%s (%d)", __FUNCTION__, mId,
@@ -718,12 +774,12 @@
         }
 
         if (mRecordingHeap == 0) {
-            const size_t bufferSize = 4 + sizeof(buffer_handle_t);
+            size_t payloadSize = sizeof(VideoNativeMetadata);
             ALOGV("%s: Camera %d: Creating recording heap with %zu buffers of "
                     "size %zu bytes", __FUNCTION__, mId,
-                    mRecordingHeapCount, bufferSize);
+                    mRecordingHeapCount, payloadSize);
 
-            mRecordingHeap = new Camera2Heap(bufferSize, mRecordingHeapCount,
+            mRecordingHeap = new Camera2Heap(payloadSize, mRecordingHeapCount,
                     "Camera2Client::RecordingHeap");
             if (mRecordingHeap->mHeap->getSize() == 0) {
                 ALOGE("%s: Camera %d: Unable to allocate memory for recording",
@@ -746,7 +802,7 @@
             mRecordingHeapFree = mRecordingHeapCount;
         }
 
-        if ( mRecordingHeapFree == 0) {
+        if (mRecordingHeapFree == 0) {
             ALOGE("%s: Camera %d: No free recording buffers, dropping frame",
                     __FUNCTION__, mId);
             mRecordingConsumer->releaseBuffer(imgBuffer);
@@ -766,13 +822,15 @@
                 mRecordingHeap->mBuffers[heapIdx]->getMemory(&offset,
                         &size);
 
-        uint8_t *data = (uint8_t*)heap->getBase() + offset;
-        uint32_t type = kMetadataBufferTypeGrallocSource;
-        *((uint32_t*)data) = type;
-        *((buffer_handle_t*)(data + 4)) = imgBuffer.mGraphicBuffer->handle;
-        ALOGVV("%s: Camera %d: Sending out buffer_handle_t %p",
-                __FUNCTION__, mId,
-                imgBuffer.mGraphicBuffer->handle);
+        VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
+            (uint8_t*)heap->getBase() + offset);
+        payload->eType = kMetadataBufferTypeANWBuffer;
+        payload->pBuffer = imgBuffer.mGraphicBuffer->getNativeBuffer();
+        payload->nFenceFd = -1;
+
+        ALOGVV("%s: Camera %d: Sending out ANWBuffer %p",
+                __FUNCTION__, mId, payload->pBuffer);
+
         mRecordingBuffers.replaceAt(imgBuffer, heapIdx);
         recordingHeap = mRecordingHeap;
     }
@@ -805,42 +863,42 @@
                 heap->getHeapID(), mRecordingHeap->mHeap->getHeapID());
         return;
     }
-    uint8_t *data = (uint8_t*)heap->getBase() + offset;
-    uint32_t type = *(uint32_t*)data;
-    if (type != kMetadataBufferTypeGrallocSource) {
+
+    VideoNativeMetadata *payload = reinterpret_cast<VideoNativeMetadata*>(
+        (uint8_t*)heap->getBase() + offset);
+
+    if (payload->eType != kMetadataBufferTypeANWBuffer) {
         ALOGE("%s: Camera %d: Recording frame type invalid (got %x, expected %x)",
-                __FUNCTION__, mId, type,
-                kMetadataBufferTypeGrallocSource);
+                __FUNCTION__, mId, payload->eType,
+                kMetadataBufferTypeANWBuffer);
         return;
     }
 
     // Release the buffer back to the recording queue
-
-    buffer_handle_t imgHandle = *(buffer_handle_t*)(data + 4);
-
     size_t itemIndex;
     for (itemIndex = 0; itemIndex < mRecordingBuffers.size(); itemIndex++) {
         const BufferItem item = mRecordingBuffers[itemIndex];
         if (item.mBuf != BufferItemConsumer::INVALID_BUFFER_SLOT &&
-                item.mGraphicBuffer->handle == imgHandle) {
-            break;
+                item.mGraphicBuffer->getNativeBuffer() == payload->pBuffer) {
+            break;
         }
     }
+
     if (itemIndex == mRecordingBuffers.size()) {
-        ALOGE("%s: Camera %d: Can't find buffer_handle_t %p in list of "
+        ALOGE("%s: Camera %d: Can't find returned ANW Buffer %p in list of "
                 "outstanding buffers", __FUNCTION__, mId,
-                imgHandle);
+                payload->pBuffer);
         return;
     }
 
-    ALOGVV("%s: Camera %d: Freeing buffer_handle_t %p", __FUNCTION__,
-            mId, imgHandle);
+    ALOGVV("%s: Camera %d: Freeing returned ANW buffer %p index %d", __FUNCTION__,
+            mId, payload->pBuffer, itemIndex);
 
     res = mRecordingConsumer->releaseBuffer(mRecordingBuffers[itemIndex]);
     if (res != OK) {
         ALOGE("%s: Camera %d: Unable to free recording frame "
-                "(buffer_handle_t: %p): %s (%d)", __FUNCTION__,
-                mId, imgHandle, strerror(-res), res);
+                "(Returned ANW buffer: %p): %s (%d)", __FUNCTION__,
+                mId, payload->pBuffer, strerror(-res), res);
         return;
     }
     mRecordingBuffers.replaceAt(itemIndex);
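For reference, the recording path above now fills a VideoNativeMetadata payload per heap slot (eType, pBuffer, nFenceFd) instead of the old 4-byte type tag plus buffer_handle_t. The struct below mirrors only the fields used in this hunk and is an illustrative stand-in for the real definition in media/hardware/HardwareAPI.h; the enum value is likewise a stand-in.

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-ins for the Android types used in the hunk above.
    enum MetadataBufferType : uint32_t { kMetadataBufferTypeANWBuffer = 2 };
    struct ANativeWindowBuffer;  // opaque here

    // Mirrors the fields the processor fills in for each recording frame.
    struct VideoNativeMetadata {
        MetadataBufferType eType;
        ANativeWindowBuffer *pBuffer;
        int nFenceFd;
    };

    int main() {
        // In the service this payload is written at heap->getBase() + offset
        // for each recording heap slot; here we just fill a local instance.
        VideoNativeMetadata payload = {};
        payload.eType = kMetadataBufferTypeANWBuffer;
        payload.pBuffer = nullptr;  // would be imgBuffer.mGraphicBuffer->getNativeBuffer()
        payload.nFenceFd = -1;      // no acquire fence
        printf("per-slot payload size: %zu bytes (was 4 + sizeof(buffer_handle_t))\n",
               sizeof(VideoNativeMetadata));
        return 0;
    }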
diff --git a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
index 2474062..e0cad3a 100644
--- a/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/StreamingProcessor.h
@@ -43,7 +43,7 @@
     StreamingProcessor(sp<Camera2Client> client);
     ~StreamingProcessor();
 
-    status_t setPreviewWindow(sp<ANativeWindow> window);
+    status_t setPreviewWindow(sp<Surface> window);
 
     bool haveValidPreviewWindow() const;
 
@@ -53,6 +53,8 @@
     int getPreviewStreamId() const;
 
     status_t setRecordingBufferCount(size_t count);
+    status_t setRecordingFormat(int format, android_dataspace_t dataspace);
+
     status_t updateRecordingRequest(const Parameters &params);
     // If needsUpdate is set to true, a updateRecordingStream call with params will recreate
     // recording stream
@@ -106,7 +108,7 @@
     int32_t mPreviewRequestId;
     int mPreviewStreamId;
     CameraMetadata mPreviewRequest;
-    sp<ANativeWindow> mPreviewWindow;
+    sp<Surface> mPreviewWindow;
 
     // Recording-related members
     static const nsecs_t kWaitDuration = 50000000; // 50 ms
@@ -115,7 +117,7 @@
     int mRecordingStreamId;
     int mRecordingFrameCount;
     sp<BufferItemConsumer> mRecordingConsumer;
-    sp<ANativeWindow>  mRecordingWindow;
+    sp<Surface>  mRecordingWindow;
     CameraMetadata mRecordingRequest;
     sp<camera2::Camera2Heap> mRecordingHeap;
 
@@ -127,6 +129,18 @@
     Vector<BufferItem> mRecordingBuffers;
     size_t mRecordingHeapHead, mRecordingHeapFree;
 
+    static const int kDefaultRecordingFormat =
+            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    int mRecordingFormat;
+
+    static const android_dataspace kDefaultRecordingDataSpace =
+            HAL_DATASPACE_BT709;
+    android_dataspace mRecordingDataSpace;
+
+    static const int kDefaultRecordingGrallocUsage =
+            GRALLOC_USAGE_HW_VIDEO_ENCODER;
+    int mRecordingGrallocUsage;
+
     virtual bool threadLoop();
 
     status_t processRecordingFrame();
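The defaults added above pair with setRecordingFormat() in the .cpp hunk: gralloc usage is GRALLOC_USAGE_HW_VIDEO_ENCODER only for the opaque implementation-defined format, and GRALLOC_USAGE_SW_READ_OFTEN otherwise. A small sketch of that selection follows; the constants and the helper name are stand-ins rather than the real gralloc/pixel-format headers.

    #include <cstdio>

    // Stand-in constants; the real values come from graphics.h / gralloc.h.
    const int HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED = 0x22;
    const int GRALLOC_USAGE_HW_VIDEO_ENCODER = 0x00010000;
    const int GRALLOC_USAGE_SW_READ_OFTEN    = 0x00000003;

    // Opaque (implementation-defined) buffers go straight to the HW encoder;
    // anything else must stay CPU-readable for the software path.
    int grallocUsageForRecordingFormat(int format) {
        return (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED)
                ? GRALLOC_USAGE_HW_VIDEO_ENCODER
                : GRALLOC_USAGE_SW_READ_OFTEN;
    }

    int main() {
        printf("opaque format   -> usage %#x\n",
               grallocUsageForRecordingFormat(HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED));
        printf("explicit format -> usage %#x\n",
               grallocUsageForRecordingFormat(0x32315659));
        return 0;
    }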
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
index 186ce6c..0b79b31 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.cpp
@@ -139,7 +139,7 @@
             GRALLOC_USAGE_HW_CAMERA_ZSL,
             kZslBufferDepth);
         mZslConsumer->setFrameAvailableListener(this);
-        mZslConsumer->setName(String8("Camera2Client::ZslConsumer"));
+        mZslConsumer->setName(String8("Camera2-ZslConsumer"));
         mZslWindow = new Surface(producer);
     }
 
@@ -147,7 +147,7 @@
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
         res = device->getStreamInfo(mZslStreamId,
-                &currentWidth, &currentHeight, 0);
+                &currentWidth, &currentHeight, 0, 0);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying capture output stream info: "
                     "%s (%d)", __FUNCTION__,
@@ -185,8 +185,8 @@
                 (int)CAMERA2_HAL_PIXEL_FORMAT_ZSL :
                 (int)HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
         res = device->createStream(mZslWindow,
-                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight,
-                streamType, &mZslStreamId);
+                params.fastInfo.arrayWidth, params.fastInfo.arrayHeight, streamType,
+                HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0, &mZslStreamId);
         if (res != OK) {
             ALOGE("%s: Camera %d: Can't create output stream for ZSL: "
                     "%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor.h b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
index 5f50d7b..5870bd3 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor.h
@@ -101,7 +101,7 @@
     int mZslStreamId;
     int mZslReprocessStreamId;
     sp<BufferItemConsumer> mZslConsumer;
-    sp<ANativeWindow>      mZslWindow;
+    sp<Surface>            mZslWindow;
 
     struct ZslPair {
         BufferItem buffer;
diff --git a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
index 470a6d6..69620ac 100644
--- a/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
+++ b/services/camera/libcameraservice/api1/client2/ZslProcessor3.cpp
@@ -150,7 +150,7 @@
         // Check if stream parameters have to change
         uint32_t currentWidth, currentHeight;
         res = device->getStreamInfo(mZslStreamId,
-                &currentWidth, &currentHeight, 0);
+                &currentWidth, &currentHeight, 0, 0);
         if (res != OK) {
             ALOGE("%s: Camera %d: Error querying capture output stream info: "
                     "%s (%d)", __FUNCTION__,
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 6a1ee44..c717a56 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -65,13 +65,14 @@
                                    int servicePid) :
     Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
                 cameraId, cameraFacing, clientPid, clientUid, servicePid),
+    mInputStream(),
     mRequestIdCounter(0) {
 
     ATRACE_CALL();
     ALOGI("CameraDeviceClient %d: Opened", cameraId);
 }
 
-status_t CameraDeviceClient::initialize(camera_module_t *module)
+status_t CameraDeviceClient::initialize(CameraModule *module)
 {
     ATRACE_CALL();
     status_t res;
@@ -134,6 +135,15 @@
             ALOGE("%s: Camera %d: Sent null request.",
                     __FUNCTION__, mCameraId);
             return BAD_VALUE;
+        } else if (request->mIsReprocess) {
+            if (!mInputStream.configured) {
+                ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
+                return BAD_VALUE;
+            } else if (streaming) {
+                ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__,
+                        mCameraId);
+                return BAD_VALUE;
+            }
         }
 
         CameraMetadata metadata(request->mMetadata);
@@ -182,6 +192,10 @@
         metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
                         outputStreamIds.size());
 
+        if (request->mIsReprocess) {
+            metadata.update(ANDROID_REQUEST_INPUT_STREAMS, &mInputStream.id, 1);
+        }
+
         metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
         loopCounter++; // loopCounter starts from 1
         ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
@@ -255,13 +269,33 @@
 
 status_t CameraDeviceClient::beginConfigure() {
     // TODO: Implement this.
-    ALOGE("%s: Not implemented yet.", __FUNCTION__);
+    ALOGV("%s: Not implemented yet.", __FUNCTION__);
     return OK;
 }
 
-status_t CameraDeviceClient::endConfigure() {
-    ALOGV("%s: ending configure (%zu streams)",
-            __FUNCTION__, mStreamMap.size());
+status_t CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
+    ALOGV("%s: ending configure (%d input stream, %zu output streams)",
+            __FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+
+    // Sanity-check the constrained high speed session against the required capability bit.
+    if (isConstrainedHighSpeed) {
+        CameraMetadata staticInfo = mDevice->info();
+        camera_metadata_entry_t entry = staticInfo.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
+        bool isConstrainedHighSpeedSupported = false;
+        for(size_t i = 0; i < entry.count; ++i) {
+            uint8_t capability = entry.data.u8[i];
+            if (capability == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO) {
+                isConstrainedHighSpeedSupported = true;
+                break;
+            }
+        }
+        if (!isConstrainedHighSpeedSupported) {
+            ALOGE("%s: Camera %d: Try to create a constrained high speed configuration on a device"
+                    " that doesn't support it.",
+                          __FUNCTION__, mCameraId);
+            return INVALID_OPERATION;
+        }
+    }
 
     status_t res;
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
@@ -270,7 +304,7 @@
 
     if (!mDevice.get()) return DEAD_OBJECT;
 
-    return mDevice->configureStreams();
+    return mDevice->configureStreams(isConstrainedHighSpeed);
 }
 
 status_t CameraDeviceClient::deleteStream(int streamId) {
@@ -284,19 +318,25 @@
 
     if (!mDevice.get()) return DEAD_OBJECT;
 
-    // Guard against trying to delete non-created streams
+    bool isInput = false;
     ssize_t index = NAME_NOT_FOUND;
-    for (size_t i = 0; i < mStreamMap.size(); ++i) {
-        if (streamId == mStreamMap.valueAt(i)) {
-            index = i;
-            break;
-        }
-    }
 
-    if (index == NAME_NOT_FOUND) {
-        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
-              "created yet", __FUNCTION__, mCameraId, streamId);
-        return BAD_VALUE;
+    if (mInputStream.configured && mInputStream.id == streamId) {
+        isInput = true;
+    } else {
+        // Guard against trying to delete non-created streams
+        for (size_t i = 0; i < mStreamMap.size(); ++i) {
+            if (streamId == mStreamMap.valueAt(i)) {
+                index = i;
+                break;
+            }
+        }
+
+        if (index == NAME_NOT_FOUND) {
+            ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+                  "created yet", __FUNCTION__, mCameraId, streamId);
+            return BAD_VALUE;
+        }
     }
 
     // Also returns BAD_VALUE if stream ID was not valid
@@ -307,24 +347,27 @@
               " already checked and the stream ID (%d) should be valid.",
               __FUNCTION__, mCameraId, streamId);
     } else if (res == OK) {
-        mStreamMap.removeItemsAt(index);
-
+        if (isInput) {
+            mInputStream.configured = false;
+        } else {
+            mStreamMap.removeItemsAt(index);
+        }
     }
 
     return res;
 }
 
-status_t CameraDeviceClient::createStream(int width, int height, int format,
-                      const sp<IGraphicBufferProducer>& bufferProducer)
+status_t CameraDeviceClient::createStream(const OutputConfiguration &outputConfiguration)
 {
     ATRACE_CALL();
-    ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
 
     status_t res;
     if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
 
     Mutex::Autolock icl(mBinderSerializationLock);
 
+
+    sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
     if (bufferProducer == NULL) {
         ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
         return BAD_VALUE;
@@ -368,25 +411,33 @@
             (consumerUsage & allowedFlags) != 0;
 
     sp<IBinder> binder = IInterface::asBinder(bufferProducer);
-    sp<ANativeWindow> anw = new Surface(bufferProducer, useAsync);
+    sp<Surface> surface = new Surface(bufferProducer, useAsync);
+    ANativeWindow *anw = surface.get();
 
-    // TODO: remove w,h,f since we are ignoring them
+    int width, height, format;
+    android_dataspace dataSpace;
 
-    if ((res = anw->query(anw.get(), NATIVE_WINDOW_WIDTH, &width)) != OK) {
+    if ((res = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
         ALOGE("%s: Camera %d: Failed to query Surface width", __FUNCTION__,
               mCameraId);
         return res;
     }
-    if ((res = anw->query(anw.get(), NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+    if ((res = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
         ALOGE("%s: Camera %d: Failed to query Surface height", __FUNCTION__,
               mCameraId);
         return res;
     }
-    if ((res = anw->query(anw.get(), NATIVE_WINDOW_FORMAT, &format)) != OK) {
+    if ((res = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
         ALOGE("%s: Camera %d: Failed to query Surface format", __FUNCTION__,
               mCameraId);
         return res;
     }
+    if ((res = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+                            reinterpret_cast<int*>(&dataSpace))) != OK) {
+        ALOGE("%s: Camera %d: Failed to query Surface dataSpace", __FUNCTION__,
+              mCameraId);
+        return res;
+    }
 
     // FIXME: remove this override since the default format should be
     //       IMPLEMENTATION_DEFINED. b/9487482
@@ -399,14 +450,17 @@
 
     // Round dimensions to the nearest dimensions available for this format
     if (flexibleConsumer && !CameraDeviceClient::roundBufferDimensionNearest(width, height,
-            format, mDevice->info(), /*out*/&width, /*out*/&height)) {
+            format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
         ALOGE("%s: No stream configurations with the format %#x defined, failed to create stream.",
                 __FUNCTION__, format);
         return BAD_VALUE;
     }
 
     int streamId = -1;
-    res = mDevice->createStream(anw, width, height, format, &streamId);
+    res = mDevice->createStream(surface, width, height, format, dataSpace,
+                                static_cast<camera3_stream_rotation_t>
+                                        (outputConfiguration.getRotation()),
+                                &streamId);
 
     if (res == OK) {
         mStreamMap.add(binder, streamId);
@@ -440,11 +494,65 @@
 }
 
 
+status_t CameraDeviceClient::createInputStream(int width, int height,
+        int format) {
+
+    ATRACE_CALL();
+    ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
+
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    if (mInputStream.configured) {
+        ALOGE("%s: Camera %d: Already has an input stream "
+                " configuration. (ID %zd)", __FUNCTION__, mCameraId,
+                mInputStream.id);
+        return ALREADY_EXISTS;
+    }
+
+    int streamId = -1;
+    res = mDevice->createInputStream(width, height, format, &streamId);
+    if (res == OK) {
+        mInputStream.configured = true;
+        mInputStream.width = width;
+        mInputStream.height = height;
+        mInputStream.format = format;
+        mInputStream.id = streamId;
+
+        ALOGV("%s: Camera %d: Successfully created a new input stream ID %d",
+              __FUNCTION__, mCameraId, streamId);
+
+        return streamId;
+    }
+
+    return res;
+}
+
+status_t CameraDeviceClient::getInputBufferProducer(
+        /*out*/sp<IGraphicBufferProducer> *producer) {
+    status_t res;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    if (producer == NULL) {
+        return BAD_VALUE;
+    }
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+    if (!mDevice.get()) return DEAD_OBJECT;
+
+    return mDevice->getInputBufferProducer(producer);
+}
+
 bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
-        int32_t format, const CameraMetadata& info,
+        int32_t format, android_dataspace dataSpace, const CameraMetadata& info,
         /*out*/int32_t* outWidth, /*out*/int32_t* outHeight) {
 
     camera_metadata_ro_entry streamConfigs =
+            (dataSpace == HAL_DATASPACE_DEPTH) ?
+            info.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS) :
             info.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
 
     int32_t bestWidth = -1;
@@ -580,25 +688,92 @@
     return mDevice->flush(lastFrameNumber);
 }
 
+status_t CameraDeviceClient::prepare(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = OK;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    // Guard against trying to prepare non-created streams
+    ssize_t index = NAME_NOT_FOUND;
+    for (size_t i = 0; i < mStreamMap.size(); ++i) {
+        if (streamId == mStreamMap.valueAt(i)) {
+            index = i;
+            break;
+        }
+    }
+
+    if (index == NAME_NOT_FOUND) {
+        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+              "created yet", __FUNCTION__, mCameraId, streamId);
+        return BAD_VALUE;
+    }
+
+    // Also returns BAD_VALUE if stream ID was not valid, or stream already
+    // has been used
+    res = mDevice->prepare(streamId);
+
+    return res;
+}
+
+status_t CameraDeviceClient::tearDown(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s", __FUNCTION__);
+
+    status_t res = OK;
+    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+
+    Mutex::Autolock icl(mBinderSerializationLock);
+
+    // Guard against trying to tear down non-created streams
+    ssize_t index = NAME_NOT_FOUND;
+    for (size_t i = 0; i < mStreamMap.size(); ++i) {
+        if (streamId == mStreamMap.valueAt(i)) {
+            index = i;
+            break;
+        }
+    }
+
+    if (index == NAME_NOT_FOUND) {
+        ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
+              "created yet", __FUNCTION__, mCameraId, streamId);
+        return BAD_VALUE;
+    }
+
+    // Also returns BAD_VALUE if stream ID was not valid or if the stream is in
+    // use
+    res = mDevice->tearDown(streamId);
+
+    return res;
+}
+
+
 status_t CameraDeviceClient::dump(int fd, const Vector<String16>& args) {
     String8 result;
     result.appendFormat("CameraDeviceClient[%d] (%p) dump:\n",
             mCameraId,
             (getRemoteCallback() != NULL ?
                     IInterface::asBinder(getRemoteCallback()).get() : NULL) );
-    result.appendFormat("  Current client: %s (PID %d, UID %u)\n",
-            String8(mClientPackageName).string(),
-            mClientPid, mClientUid);
+    result.appendFormat("  Current client UID %u\n", mClientUid);
 
     result.append("  State:\n");
     result.appendFormat("    Request ID counter: %d\n", mRequestIdCounter);
+    if (mInputStream.configured) {
+        result.appendFormat("    Current input stream ID: %d\n",
+                    mInputStream.id);
+    } else {
+        result.append("    No input stream configured.\n");
+    }
     if (!mStreamMap.isEmpty()) {
-        result.append("    Current stream IDs:\n");
+        result.append("    Current output stream IDs:\n");
         for (size_t i = 0; i < mStreamMap.size(); i++) {
             result.appendFormat("      Stream %d\n", mStreamMap.valueAt(i));
         }
     } else {
-        result.append("    No streams configured.\n");
+        result.append("    No output streams configured.\n");
     }
     write(fd, result.string(), result.size());
     // TODO: print dynamic/request section from most recent requests
@@ -635,8 +810,13 @@
     }
 }
 
-// TODO: refactor the code below this with IProCameraUser.
-// it's 100% copy-pasted, so lets not change it right now to make it easier.
+void CameraDeviceClient::notifyPrepared(int streamId) {
+    // Thread safe. Don't bother locking.
+    sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+    if (remoteCb != 0) {
+        remoteCb->onPrepared(streamId);
+    }
+}
 
 void CameraDeviceClient::detachDevice() {
     if (mDevice == 0) return;
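The endConfigure() change above gates constrained high-speed sessions on the advertised capability. The check reduces to a linear scan of the capabilities list; the sketch below runs over a plain vector instead of a camera_metadata entry, and the capability constant is an illustrative stand-in for ANDROID_REQUEST_AVAILABLE_CAPABILITIES_CONSTRAINED_HIGH_SPEED_VIDEO.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Illustrative stand-in value for the constrained high speed capability.
    const uint8_t kConstrainedHighSpeedVideo = 9;

    // Returns true only if the capability byte appears in the static info list,
    // mirroring the loop over entry.data.u8 in endConfigure().
    bool supportsConstrainedHighSpeed(const std::vector<uint8_t>& capabilities) {
        for (uint8_t capability : capabilities) {
            if (capability == kConstrainedHighSpeedVideo) {
                return true;
            }
        }
        return false;
    }

    int main() {
        std::vector<uint8_t> caps = {0, kConstrainedHighSpeedVideo};
        printf("constrained high speed supported: %d\n",
               supportsConstrainedHighSpeed(caps));
        return 0;
    }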
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 84e46b7..1f8b39d 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -19,6 +19,7 @@
 
 #include <camera/camera2/ICameraDeviceUser.h>
 #include <camera/camera2/ICameraDeviceCallbacks.h>
+#include <camera/camera2/OutputConfiguration.h>
 
 #include "CameraService.h"
 #include "common/FrameProcessorBase.h"
@@ -26,8 +27,7 @@
 
 namespace android {
 
-struct CameraDeviceClientBase :
-        public CameraService::BasicClient, public BnCameraDeviceUser
+struct CameraDeviceClientBase : public CameraService::BasicClient, public BnCameraDeviceUser
 {
     typedef ICameraDeviceCallbacks TCamCallbacks;
 
@@ -78,16 +78,19 @@
 
     virtual status_t beginConfigure();
 
-    virtual status_t endConfigure();
+    virtual status_t endConfigure(bool isConstrainedHighSpeed = false);
 
     // Returns -EBUSY if device is not idle
     virtual status_t      deleteStream(int streamId);
 
-    virtual status_t      createStream(
-            int width,
-            int height,
-            int format,
-            const sp<IGraphicBufferProducer>& bufferProducer);
+    virtual status_t      createStream(const OutputConfiguration &outputConfiguration);
+
+    // Create an input stream of width, height, and format.
+    virtual status_t      createInputStream(int width, int height, int format);
+
+    // Get the buffer producer of the input stream
+    virtual status_t      getInputBufferProducer(
+                                /*out*/sp<IGraphicBufferProducer> *producer);
 
     // Create a request object from a template.
     virtual status_t      createDefaultRequest(int templateId,
@@ -105,6 +108,12 @@
     virtual status_t      flush(/*out*/
                                 int64_t* lastFrameNumber = NULL);
 
+    // Prepare stream by preallocating its buffers
+    virtual status_t      prepare(int streamId);
+
+    // Tear down stream resources by freeing its unused buffers
+    virtual status_t      tearDown(int streamId);
+
     /**
      * Interface used by CameraService
      */
@@ -119,7 +128,7 @@
             int servicePid);
     virtual ~CameraDeviceClient();
 
-    virtual status_t      initialize(camera_module_t *module);
+    virtual status_t      initialize(CameraModule *module);
 
     virtual status_t      dump(int fd, const Vector<String16>& args);
 
@@ -131,6 +140,7 @@
     virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
                              const CaptureResultExtras& resultExtras);
     virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp);
+    virtual void notifyPrepared(int streamId);
 
     /**
      * Interface used by independent components of CameraDeviceClient.
@@ -159,14 +169,23 @@
 
     // Find the closest dimensions for a given format in available stream configurations with
     // a width <= ROUNDING_WIDTH_CAP
-    static const int32_t ROUNDING_WIDTH_CAP = 1080;
+    static const int32_t ROUNDING_WIDTH_CAP = 1920;
     static bool roundBufferDimensionNearest(int32_t width, int32_t height, int32_t format,
-            const CameraMetadata& info, /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
+            android_dataspace dataSpace, const CameraMetadata& info,
+            /*out*/int32_t* outWidth, /*out*/int32_t* outHeight);
 
-    // IGraphicsBufferProducer binder -> Stream ID
+    // IGraphicBufferProducer binder -> Stream ID for output streams
     KeyedVector<sp<IBinder>, int> mStreamMap;
 
-    // Stream ID
+    struct InputStreamConfiguration {
+        bool configured;
+        int32_t width;
+        int32_t height;
+        int32_t format;
+        int32_t id;
+    } mInputStream;
+
+    // Request ID
     Vector<int> mStreamingRequestList;
 
     int32_t mRequestIdCounter;
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp b/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
deleted file mode 100644
index 59e5083..0000000
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.cpp
+++ /dev/null
@@ -1,444 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "ProCamera2Client"
-#define ATRACE_TAG ATRACE_TAG_CAMERA
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-#include <utils/Trace.h>
-
-#include <cutils/properties.h>
-#include <gui/Surface.h>
-#include <gui/Surface.h>
-
-#include "api_pro/ProCamera2Client.h"
-#include "common/CameraDeviceBase.h"
-
-namespace android {
-using namespace camera2;
-
-// Interface used by CameraService
-
-ProCamera2Client::ProCamera2Client(const sp<CameraService>& cameraService,
-                                   const sp<IProCameraCallbacks>& remoteCallback,
-                                   const String16& clientPackageName,
-                                   int cameraId,
-                                   int cameraFacing,
-                                   int clientPid,
-                                   uid_t clientUid,
-                                   int servicePid) :
-    Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
-                cameraId, cameraFacing, clientPid, clientUid, servicePid)
-{
-    ATRACE_CALL();
-    ALOGI("ProCamera %d: Opened", cameraId);
-
-    mExclusiveLock = false;
-}
-
-status_t ProCamera2Client::initialize(camera_module_t *module)
-{
-    ATRACE_CALL();
-    status_t res;
-
-    res = Camera2ClientBase::initialize(module);
-    if (res != OK) {
-        return res;
-    }
-
-    String8 threadName;
-    mFrameProcessor = new FrameProcessorBase(mDevice);
-    threadName = String8::format("PC2-%d-FrameProc", mCameraId);
-    mFrameProcessor->run(threadName.string());
-
-    mFrameProcessor->registerListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
-                                      FRAME_PROCESSOR_LISTENER_MAX_ID,
-                                      /*listener*/this);
-
-    return OK;
-}
-
-ProCamera2Client::~ProCamera2Client() {
-}
-
-status_t ProCamera2Client::exclusiveTryLock() {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (!mDevice.get()) return PERMISSION_DENIED;
-
-    if (!mExclusiveLock) {
-        mExclusiveLock = true;
-
-        if (mRemoteCallback != NULL) {
-            mRemoteCallback->onLockStatusChanged(
-                              IProCameraCallbacks::LOCK_ACQUIRED);
-        }
-
-        ALOGV("%s: exclusive lock acquired", __FUNCTION__);
-
-        return OK;
-    }
-
-    // TODO: have a PERMISSION_DENIED case for when someone else owns the lock
-
-    // don't allow recursive locking
-    ALOGW("%s: exclusive lock already exists - recursive locking is not"
-          "allowed", __FUNCTION__);
-
-    return ALREADY_EXISTS;
-}
-
-status_t ProCamera2Client::exclusiveLock() {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (!mDevice.get()) return PERMISSION_DENIED;
-
-    /**
-     * TODO: this should asynchronously 'wait' until the lock becomes available
-     * if another client already has an exclusive lock.
-     *
-     * once we have proper sharing support this will need to do
-     * more than just return immediately
-     */
-    if (!mExclusiveLock) {
-        mExclusiveLock = true;
-
-        if (mRemoteCallback != NULL) {
-            mRemoteCallback->onLockStatusChanged(IProCameraCallbacks::LOCK_ACQUIRED);
-        }
-
-        ALOGV("%s: exclusive lock acquired", __FUNCTION__);
-
-        return OK;
-    }
-
-    // don't allow recursive locking
-    ALOGW("%s: exclusive lock already exists - recursive locking is not allowed"
-                                                                , __FUNCTION__);
-    return ALREADY_EXISTS;
-}
-
-status_t ProCamera2Client::exclusiveUnlock() {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    // don't allow unlocking if we have no lock
-    if (!mExclusiveLock) {
-        ALOGW("%s: cannot unlock, no lock was held in the first place",
-              __FUNCTION__);
-        return BAD_VALUE;
-    }
-
-    mExclusiveLock = false;
-    if (mRemoteCallback != NULL ) {
-        mRemoteCallback->onLockStatusChanged(
-                                       IProCameraCallbacks::LOCK_RELEASED);
-    }
-    ALOGV("%s: exclusive lock released", __FUNCTION__);
-
-    return OK;
-}
-
-bool ProCamera2Client::hasExclusiveLock() {
-    Mutex::Autolock icl(mBinderSerializationLock);
-    return mExclusiveLock;
-}
-
-void ProCamera2Client::onExclusiveLockStolen() {
-    ALOGV("%s: ProClient lost exclusivity (id %d)",
-          __FUNCTION__, mCameraId);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (mExclusiveLock && mRemoteCallback.get() != NULL) {
-        mRemoteCallback->onLockStatusChanged(
-                                       IProCameraCallbacks::LOCK_STOLEN);
-    }
-
-    mExclusiveLock = false;
-
-    //TODO: we should not need to detach the device, merely reset it.
-    detachDevice();
-}
-
-status_t ProCamera2Client::submitRequest(camera_metadata_t* request,
-                                         bool streaming) {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    if (!mExclusiveLock) {
-        return PERMISSION_DENIED;
-    }
-
-    CameraMetadata metadata(request);
-
-    if (!enforceRequestPermissions(metadata)) {
-        return PERMISSION_DENIED;
-    }
-
-    if (streaming) {
-        return mDevice->setStreamingRequest(metadata);
-    } else {
-        return mDevice->capture(metadata);
-    }
-
-    // unreachable. thx gcc for a useless warning
-    return OK;
-}
-
-status_t ProCamera2Client::cancelRequest(int requestId) {
-    (void)requestId;
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    if (!mExclusiveLock) {
-        return PERMISSION_DENIED;
-    }
-
-    // TODO: implement
-    ALOGE("%s: not fully implemented yet", __FUNCTION__);
-    return INVALID_OPERATION;
-}
-
-status_t ProCamera2Client::deleteStream(int streamId) {
-    ATRACE_CALL();
-    ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
-
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-    mDevice->clearStreamingRequest();
-
-    status_t code;
-    if ((code = mDevice->waitUntilDrained()) != OK) {
-        ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__, code);
-    }
-
-    return mDevice->deleteStream(streamId);
-}
-
-status_t ProCamera2Client::createStream(int width, int height, int format,
-                      const sp<IGraphicBufferProducer>& bufferProducer,
-                      /*out*/
-                      int* streamId)
-{
-    if (streamId) {
-        *streamId = -1;
-    }
-
-    ATRACE_CALL();
-    ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
-
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    sp<IBinder> binder;
-    sp<ANativeWindow> window;
-    if (bufferProducer != 0) {
-        binder = IInterface::asBinder(bufferProducer);
-        window = new Surface(bufferProducer);
-    }
-
-    return mDevice->createStream(window, width, height, format,
-                                 streamId);
-}
-
-// Create a request object from a template.
-// -- Caller owns the newly allocated metadata
-status_t ProCamera2Client::createDefaultRequest(int templateId,
-                             /*out*/
-                              camera_metadata** request)
-{
-    ATRACE_CALL();
-    ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
-
-    if (request) {
-        *request = NULL;
-    }
-
-    status_t res;
-    if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    CameraMetadata metadata;
-    if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK) {
-        *request = metadata.release();
-    }
-
-    return res;
-}
-
-status_t ProCamera2Client::getCameraInfo(int cameraId,
-                                         /*out*/
-                                         camera_metadata** info)
-{
-    if (cameraId != mCameraId) {
-        return INVALID_OPERATION;
-    }
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-
-    if (!mDevice.get()) return DEAD_OBJECT;
-
-    CameraMetadata deviceInfo = mDevice->info();
-    *info = deviceInfo.release();
-
-    return OK;
-}
-
-status_t ProCamera2Client::dump(int fd, const Vector<String16>& args) {
-    String8 result;
-    result.appendFormat("ProCamera2Client[%d] (%p) PID: %d, dump:\n",
-            mCameraId,
-            (getRemoteCallback() != NULL ?
-                    IInterface::asBinder(getRemoteCallback()).get() : NULL),
-            mClientPid);
-    result.append("  State:\n");
-    write(fd, result.string(), result.size());
-
-    // TODO: print dynamic/request section from most recent requests
-    mFrameProcessor->dump(fd, args);
-    return dumpDevice(fd, args);
-}
-
-// IProCameraUser interface
-
-void ProCamera2Client::detachDevice() {
-    if (mDevice == 0) return;
-
-    ALOGV("Camera %d: Stopping processors", mCameraId);
-
-    mFrameProcessor->removeListener(FRAME_PROCESSOR_LISTENER_MIN_ID,
-                                    FRAME_PROCESSOR_LISTENER_MAX_ID,
-                                    /*listener*/this);
-    mFrameProcessor->requestExit();
-    ALOGV("Camera %d: Waiting for threads", mCameraId);
-    mFrameProcessor->join();
-    ALOGV("Camera %d: Disconnecting device", mCameraId);
-
-    // WORKAROUND: HAL refuses to disconnect while there's streams in flight
-    {
-        mDevice->clearStreamingRequest();
-
-        status_t code;
-        if ((code = mDevice->waitUntilDrained()) != OK) {
-            ALOGE("%s: waitUntilDrained failed with code 0x%x", __FUNCTION__,
-                  code);
-        }
-    }
-
-    Camera2ClientBase::detachDevice();
-}
-
-void ProCamera2Client::onResultAvailable(const CaptureResult& result) {
-    ATRACE_CALL();
-    ALOGV("%s", __FUNCTION__);
-
-    Mutex::Autolock icl(mBinderSerializationLock);
-    SharedCameraCallbacks::Lock l(mSharedCameraCallbacks);
-
-    if (mRemoteCallback != NULL) {
-        CameraMetadata tmp(result.mMetadata);
-        camera_metadata_t* meta = tmp.release();
-        ALOGV("%s: meta = %p ", __FUNCTION__, meta);
-        mRemoteCallback->onResultReceived(result.mResultExtras.requestId, meta);
-        tmp.acquire(meta);
-    }
-}
-
-bool ProCamera2Client::enforceRequestPermissions(CameraMetadata& metadata) {
-
-    const int pid = IPCThreadState::self()->getCallingPid();
-    const int selfPid = getpid();
-    camera_metadata_entry_t entry;
-
-    /**
-     * Mixin default important security values
-     * - android.led.transmit = defaulted ON
-     */
-    CameraMetadata staticInfo = mDevice->info();
-    entry = staticInfo.find(ANDROID_LED_AVAILABLE_LEDS);
-    for(size_t i = 0; i < entry.count; ++i) {
-        uint8_t led = entry.data.u8[i];
-
-        switch(led) {
-            case ANDROID_LED_AVAILABLE_LEDS_TRANSMIT: {
-                uint8_t transmitDefault = ANDROID_LED_TRANSMIT_ON;
-                if (!metadata.exists(ANDROID_LED_TRANSMIT)) {
-                    metadata.update(ANDROID_LED_TRANSMIT,
-                                    &transmitDefault, 1);
-                }
-                break;
-            }
-        }
-    }
-
-    // We can do anything!
-    if (pid == selfPid) {
-        return true;
-    }
-
-    /**
-     * Permission check special fields in the request
-     * - android.led.transmit = android.permission.CAMERA_DISABLE_TRANSMIT
-     */
-    entry = metadata.find(ANDROID_LED_TRANSMIT);
-    if (entry.count > 0 && entry.data.u8[0] != ANDROID_LED_TRANSMIT_ON) {
-        String16 permissionString =
-            String16("android.permission.CAMERA_DISABLE_TRANSMIT_LED");
-        if (!checkCallingPermission(permissionString)) {
-            const int uid = IPCThreadState::self()->getCallingUid();
-            ALOGE("Permission Denial: "
-                  "can't disable transmit LED pid=%d, uid=%d", pid, uid);
-            return false;
-        }
-    }
-
-    return true;
-}
-
-} // namespace android
diff --git a/services/camera/libcameraservice/api_pro/ProCamera2Client.h b/services/camera/libcameraservice/api_pro/ProCamera2Client.h
deleted file mode 100644
index 9d83122..0000000
--- a/services/camera/libcameraservice/api_pro/ProCamera2Client.h
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
-#define ANDROID_SERVERS_CAMERA_PROCAMERA2CLIENT_H
-
-#include "CameraService.h"
-#include "common/FrameProcessorBase.h"
-#include "common/Camera2ClientBase.h"
-#include "device2/Camera2Device.h"
-#include "camera/CaptureResult.h"
-
-namespace android {
-
-class IMemory;
-/**
- * Implements the binder IProCameraUser API,
- * meant for HAL2-level private API access.
- */
-class ProCamera2Client :
-        public Camera2ClientBase<CameraService::ProClient>,
-        public camera2::FrameProcessorBase::FilteredListener
-{
-public:
-    /**
-     * IProCameraUser interface (see IProCameraUser for details)
-     */
-    virtual status_t      exclusiveTryLock();
-    virtual status_t      exclusiveLock();
-    virtual status_t      exclusiveUnlock();
-
-    virtual bool          hasExclusiveLock();
-
-    // Note that the callee gets a copy of the metadata.
-    virtual int           submitRequest(camera_metadata_t* metadata,
-                                        bool streaming = false);
-    virtual status_t      cancelRequest(int requestId);
-
-    virtual status_t      deleteStream(int streamId);
-
-    virtual status_t      createStream(
-            int width,
-            int height,
-            int format,
-            const sp<IGraphicBufferProducer>& bufferProducer,
-            /*out*/
-            int* streamId);
-
-    // Create a request object from a template.
-    // -- Caller owns the newly allocated metadata
-    virtual status_t      createDefaultRequest(int templateId,
-                                               /*out*/
-                                               camera_metadata** request);
-
-    // Get the static metadata for the camera
-    // -- Caller owns the newly allocated metadata
-    virtual status_t      getCameraInfo(int cameraId,
-                                        /*out*/
-                                        camera_metadata** info);
-
-    /**
-     * Interface used by CameraService
-     */
-
-    ProCamera2Client(const sp<CameraService>& cameraService,
-            const sp<IProCameraCallbacks>& remoteCallback,
-            const String16& clientPackageName,
-            int cameraId,
-            int cameraFacing,
-            int clientPid,
-            uid_t clientUid,
-            int servicePid);
-    virtual ~ProCamera2Client();
-
-    virtual status_t      initialize(camera_module_t *module);
-
-    virtual status_t      dump(int fd, const Vector<String16>& args);
-
-    // Callbacks from camera service
-    virtual void onExclusiveLockStolen();
-
-    /**
-     * Interface used by independent components of ProCamera2Client.
-     */
-
-protected:
-    /** FilteredListener implementation **/
-    virtual void onResultAvailable(const CaptureResult& result);
-
-    virtual void          detachDevice();
-
-private:
-    /** IProCameraUser interface-related private members */
-
-    /** Preview callback related members */
-    sp<camera2::FrameProcessorBase> mFrameProcessor;
-    static const int32_t FRAME_PROCESSOR_LISTENER_MIN_ID = 0;
-    static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
-
-    /** Utility members */
-    bool enforceRequestPermissions(CameraMetadata& metadata);
-
-    // Whether or not we have an exclusive lock on the device
-    // - if no we can't modify the request queue.
-    // note that creating/deleting streams we own is still OK
-    bool mExclusiveLock;
-};
-
-}; // namespace android
-
-#endif
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 453c8bd..ba0b264 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -78,7 +78,7 @@
 }
 
 template <typename TClientBase>
-status_t Camera2ClientBase<TClientBase>::initialize(camera_module_t *module) {
+status_t Camera2ClientBase<TClientBase>::initialize(CameraModule *module) {
     ATRACE_CALL();
     ALOGV("%s: Initializing client for camera %d", __FUNCTION__,
           TClientBase::mCameraId);
@@ -280,6 +280,14 @@
 }
 
 template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyPrepared(int streamId) {
+    (void)streamId;
+
+    ALOGV("%s: Stream %d now prepared",
+            __FUNCTION__, streamId);
+}
+
+template <typename TClientBase>
 int Camera2ClientBase<TClientBase>::getCameraId() const {
     return TClientBase::mCameraId;
 }
@@ -337,7 +345,6 @@
     mRemoteCallback.clear();
 }
 
-template class Camera2ClientBase<CameraService::ProClient>;
 template class Camera2ClientBase<CameraService::Client>;
 template class Camera2ClientBase<CameraDeviceClientBase>;
 
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index e09c1b5..f1cacdf 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -18,6 +18,7 @@
 #define ANDROID_SERVERS_CAMERA_CAMERA2CLIENT_BASE_H
 
 #include "common/CameraDeviceBase.h"
+#include "common/CameraModule.h"
 #include "camera/CaptureResult.h"
 
 namespace android {
@@ -35,7 +36,7 @@
     typedef typename TClientBase::TCamCallbacks TCamCallbacks;
 
     /**
-     * Base binder interface (see ICamera/IProCameraUser for details)
+     * Base binder interface (see ICamera/ICameraDeviceUser for details)
      */
     virtual status_t      connect(const sp<TCamCallbacks>& callbacks);
     virtual void          disconnect();
@@ -55,7 +56,7 @@
                       int servicePid);
     virtual ~Camera2ClientBase();
 
-    virtual status_t      initialize(camera_module_t *module);
+    virtual status_t      initialize(CameraModule *module);
     virtual status_t      dump(int fd, const Vector<String16>& args);
 
     /**
@@ -71,7 +72,7 @@
     virtual void          notifyAutoExposure(uint8_t newState, int triggerId);
     virtual void          notifyAutoWhitebalance(uint8_t newState,
                                                  int triggerId);
-
+    virtual void          notifyPrepared(int streamId);
 
     int                   getCameraId() const;
     const sp<CameraDeviceBase>&
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index d26e20c..cd25949 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -29,6 +29,8 @@
 #include "hardware/camera3.h"
 #include "camera/CameraMetadata.h"
 #include "camera/CaptureResult.h"
+#include "common/CameraModule.h"
+#include "gui/IGraphicBufferProducer.h"
 
 namespace android {
 
@@ -45,7 +47,7 @@
      */
     virtual int      getId() const = 0;
 
-    virtual status_t initialize(camera_module_t *module) = 0;
+    virtual status_t initialize(CameraModule *module) = 0;
     virtual status_t disconnect() = 0;
 
     virtual status_t dump(int fd, const Vector<String16> &args) = 0;
@@ -99,17 +101,22 @@
             nsecs_t timeout) = 0;
 
     /**
-     * Create an output stream of the requested size and format.
+     * Create an output stream of the requested size, format, rotation, and dataspace.
      *
-     * If format is CAMERA2_HAL_PIXEL_FORMAT_OPAQUE, then the HAL device selects
-     * an appropriate format; it can be queried with getStreamInfo.
-     *
-     * If format is HAL_PIXEL_FORMAT_COMPRESSED, the size parameter must be
-     * equal to the size in bytes of the buffers to allocate for the stream. For
-     * other formats, the size parameter is ignored.
+     * For HAL_PIXEL_FORMAT_BLOB formats, the width and height should be the
+     * logical dimensions of the buffer, not the number of bytes.
      */
-    virtual status_t createStream(sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, int format, int *id) = 0;
+    virtual status_t createStream(sp<Surface> consumer,
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id) = 0;
+
+    /**
+     * Create an input stream of width, height, and format.
+     *
+     * On success, returns OK and sets *id to the new stream's ID; returns an error code otherwise.
+     */
+    virtual status_t createInputStream(uint32_t width, uint32_t height,
+            int32_t format, /*out*/ int32_t *id) = 0;
 
     /**
      * Create an input reprocess stream that uses buffers from an existing
@@ -121,7 +128,8 @@
      * Get information about a given stream.
      */
     virtual status_t getStreamInfo(int id,
-            uint32_t *width, uint32_t *height, uint32_t *format) = 0;
+            uint32_t *width, uint32_t *height,
+            uint32_t *format, android_dataspace *dataSpace) = 0;
 
     /**
      * Set stream gralloc buffer transform
@@ -150,7 +158,11 @@
      * - BAD_VALUE if the set of streams was invalid (e.g. fmts or sizes)
      * - INVALID_OPERATION if the device was in the wrong state
      */
-    virtual status_t configureStreams() = 0;
+    virtual status_t configureStreams(bool isConstrainedHighSpeed = false) = 0;
+
+    // get the buffer producer of the input stream
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer) = 0;
 
     /**
      * Create a metadata buffer with fields that the HAL device believes are
@@ -188,6 +200,7 @@
         virtual void notifyIdle() = 0;
         virtual void notifyShutter(const CaptureResultExtras &resultExtras,
                 nsecs_t timestamp) = 0;
+        virtual void notifyPrepared(int streamId) = 0;
 
         // Required only for API1
         virtual void notifyAutoFocus(uint8_t newState, int triggerId) = 0;
@@ -270,6 +283,17 @@
     virtual status_t flush(int64_t *lastFrameNumber = NULL) = 0;
 
     /**
+     * Prepare stream by preallocating buffers for it asynchronously.
+     * Calls notifyPrepared() once allocation is complete.
+     */
+    virtual status_t prepare(int streamId) = 0;
+
+    /**
+     * Free stream resources by dumping its unused gralloc buffers.
+     */
+    virtual status_t tearDown(int streamId) = 0;
+
+    /**
      * Get the HAL device version.
      */
     virtual uint32_t getDeviceVersion() = 0;
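
The CameraDeviceBase hunk above adds an asynchronous prepare()/tearDown() pair: prepare(streamId) starts buffer preallocation for a stream and returns immediately, and the device later reports completion through the new NotificationListener::notifyPrepared(streamId) callback. As a rough, standalone illustration of that handshake only, here is a minimal C++ sketch; PrepareListener, FakeDevice, and the std-based synchronization are hypothetical stand-ins, not the AOSP types.

// Minimal sketch (not AOSP code) of the prepare()/notifyPrepared() handshake.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

struct PrepareListener {
    std::mutex mLock;
    std::condition_variable mCond;
    int mPreparedStream = -1;

    // Analogous to NotificationListener::notifyPrepared(): called once buffer
    // preallocation for the stream has finished.
    void notifyPrepared(int streamId) {
        std::lock_guard<std::mutex> l(mLock);
        mPreparedStream = streamId;
        mCond.notify_all();
    }

    void waitForPrepared(int streamId) {
        std::unique_lock<std::mutex> l(mLock);
        mCond.wait(l, [&] { return mPreparedStream == streamId; });
    }
};

struct FakeDevice {
    PrepareListener* mListener = nullptr;

    // Analogous to CameraDeviceBase::prepare(): kick off asynchronous
    // preallocation and return right away; completion arrives via the listener.
    int prepare(int streamId) {
        std::thread([this, streamId] {
            // ... preallocate gralloc buffers here ...
            mListener->notifyPrepared(streamId);
        }).detach();
        return 0; // OK
    }
};

int main() {
    PrepareListener listener;
    FakeDevice device;
    device.mListener = &listener;

    device.prepare(/*streamId*/ 0);  // returns immediately
    listener.waitForPrepared(0);     // block until notifyPrepared(0) arrives
    std::printf("stream 0 prepared; it may now be used in capture requests\n");
    return 0;
}

The split lets prepare() return immediately while the potentially slow gralloc allocations run off the caller's thread, which is how Camera3Device hands the work to its PreparerThread later in this patch.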
diff --git a/services/camera/libcameraservice/common/CameraModule.cpp b/services/camera/libcameraservice/common/CameraModule.cpp
new file mode 100644
index 0000000..6a4dfe0
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraModule.cpp
@@ -0,0 +1,283 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "CameraModule"
+//#define LOG_NDEBUG 0
+
+#include "CameraModule.h"
+
+namespace android {
+
+void CameraModule::deriveCameraCharacteristicsKeys(
+        uint32_t deviceVersion, CameraMetadata &chars) {
+    // HAL1 devices should not reach here
+    if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
+        ALOGV("%s: Cannot derive keys for HAL version < 2.0");
+        return;
+    }
+
+    // Keys added in HAL3.3
+    if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_3) {
+        const size_t NUM_DERIVED_KEYS_HAL3_3 = 5;
+        Vector<uint8_t> controlModes;
+        uint8_t data = ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE;
+        chars.update(ANDROID_CONTROL_AE_LOCK_AVAILABLE, &data, /*count*/1);
+        data = ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE;
+        chars.update(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, &data, /*count*/1);
+        controlModes.push(ANDROID_CONTROL_MODE_AUTO);
+        camera_metadata_entry entry = chars.find(ANDROID_CONTROL_AVAILABLE_SCENE_MODES);
+        if (entry.count > 1 || (entry.count == 1 && entry.data.u8[0] != ANDROID_CONTROL_SCENE_MODE_DISABLED)) {
+            controlModes.push(ANDROID_CONTROL_MODE_USE_SCENE_MODE);
+        }
+
+        // Only advertise CONTROL_OFF mode if 3A manual controls are supported.
+        bool isManualAeSupported = false;
+        bool isManualAfSupported = false;
+        bool isManualAwbSupported = false;
+        entry = chars.find(ANDROID_CONTROL_AE_AVAILABLE_MODES);
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.u8[i] == ANDROID_CONTROL_AE_MODE_OFF) {
+                    isManualAeSupported = true;
+                    break;
+                }
+            }
+        }
+        entry = chars.find(ANDROID_CONTROL_AF_AVAILABLE_MODES);
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.u8[i] == ANDROID_CONTROL_AF_MODE_OFF) {
+                    isManualAfSupported = true;
+                    break;
+                }
+            }
+        }
+        entry = chars.find(ANDROID_CONTROL_AWB_AVAILABLE_MODES);
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.u8[i] == ANDROID_CONTROL_AWB_MODE_OFF) {
+                    isManualAwbSupported = true;
+                    break;
+                }
+            }
+        }
+        if (isManualAeSupported && isManualAfSupported && isManualAwbSupported) {
+            controlModes.push(ANDROID_CONTROL_MODE_OFF);
+        }
+
+        chars.update(ANDROID_CONTROL_AVAILABLE_MODES, controlModes);
+
+        entry = chars.find(ANDROID_REQUEST_AVAILABLE_REQUEST_KEYS);
+        // HAL3.2 devices passing existing CTS test should all support all LSC modes and LSC map
+        bool lensShadingModeSupported = false;
+        if (entry.count > 0) {
+            for (size_t i = 0; i < entry.count; i++) {
+                if (entry.data.i32[i] == ANDROID_SHADING_MODE) {
+                    lensShadingModeSupported = true;
+                    break;
+                }
+            }
+        }
+        Vector<uint8_t> lscModes;
+        Vector<uint8_t> lscMapModes;
+        lscModes.push(ANDROID_SHADING_MODE_FAST);
+        lscModes.push(ANDROID_SHADING_MODE_HIGH_QUALITY);
+        lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_OFF);
+        if (lensShadingModeSupported) {
+            lscModes.push(ANDROID_SHADING_MODE_OFF);
+            lscMapModes.push(ANDROID_STATISTICS_LENS_SHADING_MAP_MODE_ON);
+        }
+        chars.update(ANDROID_SHADING_AVAILABLE_MODES, lscModes);
+        chars.update(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, lscMapModes);
+
+        entry = chars.find(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS);
+        Vector<int32_t> availableCharsKeys;
+        availableCharsKeys.setCapacity(entry.count + NUM_DERIVED_KEYS_HAL3_3);
+        for (size_t i = 0; i < entry.count; i++) {
+            availableCharsKeys.push(entry.data.i32[i]);
+        }
+        availableCharsKeys.push(ANDROID_CONTROL_AE_LOCK_AVAILABLE);
+        availableCharsKeys.push(ANDROID_CONTROL_AWB_LOCK_AVAILABLE);
+        availableCharsKeys.push(ANDROID_CONTROL_AVAILABLE_MODES);
+        availableCharsKeys.push(ANDROID_SHADING_AVAILABLE_MODES);
+        availableCharsKeys.push(ANDROID_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES);
+        chars.update(ANDROID_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, availableCharsKeys);
+
+        // Need update android.control.availableHighSpeedVideoConfigurations since HAL3.3
+        // adds batch size to this array.
+        entry = chars.find(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS);
+        if (entry.count > 0) {
+            Vector<int32_t> highSpeedConfig;
+            for (size_t i = 0; i < entry.count; i += 4) {
+                highSpeedConfig.add(entry.data.i32[i]); // width
+                highSpeedConfig.add(entry.data.i32[i + 1]); // height
+                highSpeedConfig.add(entry.data.i32[i + 2]); // fps_min
+                highSpeedConfig.add(entry.data.i32[i + 3]); // fps_max
+                highSpeedConfig.add(1); // batchSize_max. default to 1 for HAL3.2
+            }
+            chars.update(ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS,
+                    highSpeedConfig);
+        }
+    }
+
+    // Always add a default for the pre-correction active array if the vendor chooses to omit this
+    camera_metadata_entry entry = chars.find(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE);
+    if (entry.count == 0) {
+        Vector<int32_t> preCorrectionArray;
+        entry = chars.find(ANDROID_SENSOR_INFO_ACTIVE_ARRAY_SIZE);
+        preCorrectionArray.appendArray(entry.data.i32, entry.count);
+        chars.update(ANDROID_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, preCorrectionArray);
+    }
+
+    return;
+}
+
+CameraModule::CameraModule(camera_module_t *module) {
+    if (module == NULL) {
+        ALOGE("%s: camera hardware module must not be null", __FUNCTION__);
+        assert(0);
+    }
+
+    mModule = module;
+    mCameraInfoMap.setCapacity(getNumberOfCameras());
+}
+
+CameraModule::~CameraModule()
+{
+    while (mCameraInfoMap.size() > 0) {
+        camera_info cameraInfo = mCameraInfoMap.editValueAt(0);
+        if (cameraInfo.static_camera_characteristics != NULL) {
+            free_camera_metadata(
+                    const_cast<camera_metadata_t*>(cameraInfo.static_camera_characteristics));
+        }
+        mCameraInfoMap.removeItemsAt(0);
+    }
+}
+
+int CameraModule::init() {
+    if (getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_4 &&
+            mModule->init != NULL) {
+        return mModule->init();
+    }
+    return OK;
+}
+
+int CameraModule::getCameraInfo(int cameraId, struct camera_info *info) {
+    Mutex::Autolock lock(mCameraInfoLock);
+    if (cameraId < 0) {
+        ALOGE("%s: Invalid camera ID %d", __FUNCTION__, cameraId);
+        return -EINVAL;
+    }
+
+    // Only override static_camera_characteristics for API2 devices
+    int apiVersion = mModule->common.module_api_version;
+    if (apiVersion < CAMERA_MODULE_API_VERSION_2_0) {
+        return mModule->get_camera_info(cameraId, info);
+    }
+
+    ssize_t index = mCameraInfoMap.indexOfKey(cameraId);
+    if (index == NAME_NOT_FOUND) {
+        // Get camera info from raw module and cache it
+        camera_info rawInfo, cameraInfo;
+        int ret = mModule->get_camera_info(cameraId, &rawInfo);
+        if (ret != 0) {
+            return ret;
+        }
+        int deviceVersion = rawInfo.device_version;
+        if (deviceVersion < CAMERA_DEVICE_API_VERSION_2_0) {
+            // static_camera_characteristics is invalid
+            *info = rawInfo;
+            return ret;
+        }
+        CameraMetadata m;
+        m = rawInfo.static_camera_characteristics;
+        deriveCameraCharacteristicsKeys(rawInfo.device_version, m);
+        cameraInfo = rawInfo;
+        cameraInfo.static_camera_characteristics = m.release();
+        index = mCameraInfoMap.add(cameraId, cameraInfo);
+    }
+
+    assert(index != NAME_NOT_FOUND);
+    // return the cached camera info
+    *info = mCameraInfoMap[index];
+    return OK;
+}
+
+int CameraModule::open(const char* id, struct hw_device_t** device) {
+    return filterOpenErrorCode(mModule->common.methods->open(&mModule->common, id, device));
+}
+
+int CameraModule::openLegacy(
+        const char* id, uint32_t halVersion, struct hw_device_t** device) {
+    return mModule->open_legacy(&mModule->common, id, halVersion, device);
+}
+
+int CameraModule::getNumberOfCameras() {
+    return mModule->get_number_of_cameras();
+}
+
+int CameraModule::setCallbacks(const camera_module_callbacks_t *callbacks) {
+    return mModule->set_callbacks(callbacks);
+}
+
+bool CameraModule::isVendorTagDefined() {
+    return mModule->get_vendor_tag_ops != NULL;
+}
+
+void CameraModule::getVendorTagOps(vendor_tag_ops_t* ops) {
+    if (mModule->get_vendor_tag_ops) {
+        mModule->get_vendor_tag_ops(ops);
+    }
+}
+
+int CameraModule::setTorchMode(const char* camera_id, bool enable) {
+    return mModule->set_torch_mode(camera_id, enable);
+}
+
+status_t CameraModule::filterOpenErrorCode(status_t err) {
+    switch(err) {
+        case NO_ERROR:
+        case -EBUSY:
+        case -EINVAL:
+        case -EUSERS:
+            return err;
+        default:
+            break;
+    }
+    return -ENODEV;
+}
+
+uint16_t CameraModule::getModuleApiVersion() {
+    return mModule->common.module_api_version;
+}
+
+const char* CameraModule::getModuleName() {
+    return mModule->common.name;
+}
+
+uint16_t CameraModule::getHalApiVersion() {
+    return mModule->common.hal_api_version;
+}
+
+const char* CameraModule::getModuleAuthor() {
+    return mModule->common.author;
+}
+
+void* CameraModule::getDso() {
+    return mModule->common.dso;
+}
+
+}; // namespace android
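
deriveCameraCharacteristicsKeys() above backfills HAL3.3 keys when running on older HALs; the least obvious rewrite is ANDROID_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS, where every HAL3.2 (width, height, fps_min, fps_max) 4-tuple gains a trailing max batch size of 1. A small standalone sketch of just that transformation, using plain std::vector in place of the metadata types, so it is illustrative only:

// Standalone sketch (not AOSP code): expand HAL3.2 high speed video configs
// (4-tuples) into the HAL3.3 layout (5-tuples) by appending batch_size_max = 1.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<int32_t> expandHighSpeedConfigs(const std::vector<int32_t>& hal32) {
    std::vector<int32_t> hal33;
    for (std::size_t i = 0; i + 3 < hal32.size(); i += 4) {
        hal33.push_back(hal32[i]);      // width
        hal33.push_back(hal32[i + 1]);  // height
        hal33.push_back(hal32[i + 2]);  // fps_min
        hal33.push_back(hal32[i + 3]);  // fps_max
        hal33.push_back(1);             // batch_size_max defaults to 1 for HAL3.2
    }
    return hal33;
}

int main() {
    // One 1920x1080 @ 30-120fps entry in the old 4-tuple layout...
    std::vector<int32_t> old = {1920, 1080, 30, 120};
    // ...becomes {1920, 1080, 30, 120, 1} in the new 5-tuple layout.
    for (int32_t v : expandHighSpeedConfigs(old)) std::printf("%d ", (int) v);
    std::printf("\n");
    return 0;
}

With the defaulted batch size in place, callers can parse the array the same way regardless of whether the underlying HAL is 3.2 or 3.3.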
diff --git a/services/camera/libcameraservice/common/CameraModule.h b/services/camera/libcameraservice/common/CameraModule.h
new file mode 100644
index 0000000..36822c7
--- /dev/null
+++ b/services/camera/libcameraservice/common/CameraModule.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA_CAMERAMODULE_H
+#define ANDROID_SERVERS_CAMERA_CAMERAMODULE_H
+
+#include <hardware/camera.h>
+#include <camera/CameraMetadata.h>
+#include <utils/Mutex.h>
+#include <utils/KeyedVector.h>
+
+namespace android {
+/**
+ * A wrapper class for HAL camera module.
+ *
+ * This class wraps the camera_module_t returned from the HAL and provides a
+ * get_camera_info implementation that derives camera characteristics keys
+ * defined in newer HAL versions when running on top of an older HAL.
+ */
+class CameraModule {
+public:
+    CameraModule(camera_module_t *module);
+    virtual ~CameraModule();
+
+    // Must be called after construction
+    // Returns OK on success, or the error code returned by the HAL module's init() on failure
+    int init();
+
+    int getCameraInfo(int cameraId, struct camera_info *info);
+    int getNumberOfCameras(void);
+    int open(const char* id, struct hw_device_t** device);
+    int openLegacy(const char* id, uint32_t halVersion, struct hw_device_t** device);
+    int setCallbacks(const camera_module_callbacks_t *callbacks);
+    bool isVendorTagDefined();
+    void getVendorTagOps(vendor_tag_ops_t* ops);
+    int setTorchMode(const char* camera_id, bool enable);
+    uint16_t getModuleApiVersion();
+    const char* getModuleName();
+    uint16_t getHalApiVersion();
+    const char* getModuleAuthor();
+    // Only used by CameraModuleFixture native test. Do NOT use elsewhere.
+    void *getDso();
+
+private:
+    // Derive camera characteristics keys defined after HAL device version
+    static void deriveCameraCharacteristicsKeys(uint32_t deviceVersion, CameraMetadata &chars);
+    status_t filterOpenErrorCode(status_t err);
+
+    camera_module_t *mModule;
+    KeyedVector<int, camera_info> mCameraInfoMap;
+    Mutex mCameraInfoLock;
+};
+
+} // namespace android
+
+#endif
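
Per the class comment, CameraModule::getCameraInfo() derives the extra characteristics once per camera id and then answers from mCameraInfoMap. The cache-on-first-use shape of that method can be summarized with the standalone sketch below; ModuleCache, Info, and queryHal() are made-up placeholders rather than the real classes.

// Standalone sketch (not AOSP code): cache-on-first-use pattern mirroring
// CameraModule::getCameraInfo() -- derive once, serve cached copies afterwards.
#include <cstdio>
#include <map>
#include <mutex>
#include <string>

struct Info { std::string characteristics; };

class ModuleCache {
public:
    Info getCameraInfo(int id) {
        std::lock_guard<std::mutex> l(mLock);
        auto it = mCache.find(id);
        if (it == mCache.end()) {
            Info raw = queryHal(id);            // expensive: talks to the HAL
            raw.characteristics += "+derived";  // one-time key derivation
            it = mCache.emplace(id, raw).first;
        }
        return it->second;                      // cached on later calls
    }

private:
    Info queryHal(int id) { return Info{"chars-for-" + std::to_string(id)}; }

    std::mutex mLock;
    std::map<int, Info> mCache;
};

int main() {
    ModuleCache cache;
    std::printf("%s\n", cache.getCameraInfo(0).characteristics.c_str()); // derives
    std::printf("%s\n", cache.getCameraInfo(0).characteristics.c_str()); // cached
    return 0;
}

The real implementation also bypasses the cache for pre-API2 devices, since their static_camera_characteristics pointer is not valid.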
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 1935c2b..7f14cd4 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -89,24 +89,22 @@
         }
     }
 
-    status_t initialize(hw_module_t *module)
+    status_t initialize(CameraModule *module)
     {
         ALOGI("Opening camera %s", mName.string());
-        camera_module_t *cameraModule = reinterpret_cast<camera_module_t *>(module);
         camera_info info;
-        status_t res = cameraModule->get_camera_info(atoi(mName.string()), &info);
+        status_t res = module->getCameraInfo(atoi(mName.string()), &info);
         if (res != OK) return res;
 
         int rc = OK;
-        if (module->module_api_version >= CAMERA_MODULE_API_VERSION_2_3 &&
+        if (module->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_3 &&
             info.device_version > CAMERA_DEVICE_API_VERSION_1_0) {
             // Open higher version camera device as HAL1.0 device.
-            rc = cameraModule->open_legacy(module, mName.string(),
-                                               CAMERA_DEVICE_API_VERSION_1_0,
-                                               (hw_device_t **)&mDevice);
+            rc = module->openLegacy(mName.string(),
+                                     CAMERA_DEVICE_API_VERSION_1_0,
+                                     (hw_device_t **)&mDevice);
         } else {
-            rc = CameraService::filterOpenErrorCode(module->methods->open(
-                module, mName.string(), (hw_device_t **)&mDevice));
+            rc = module->open(mName.string(), (hw_device_t **)&mDevice);
         }
         if (rc != OK) {
             ALOGE("Could not open camera %s: %d", mName.string(), rc);
diff --git a/services/camera/libcameraservice/device2/Camera2Device.cpp b/services/camera/libcameraservice/device2/Camera2Device.cpp
index d1158d6..c9c990c 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.cpp
+++ b/services/camera/libcameraservice/device2/Camera2Device.cpp
@@ -53,7 +53,7 @@
     return mId;
 }
 
-status_t Camera2Device::initialize(camera_module_t *module)
+status_t Camera2Device::initialize(CameraModule *module)
 {
     ATRACE_CALL();
     ALOGV("%s: Initializing device for camera %d", __FUNCTION__, mId);
@@ -68,8 +68,7 @@
 
     camera2_device_t *device;
 
-    res = CameraService::filterOpenErrorCode(module->common.methods->open(
-        &module->common, name, reinterpret_cast<hw_device_t**>(&device)));
+    res = module->open(name, reinterpret_cast<hw_device_t**>(&device));
 
     if (res != OK) {
         ALOGE("%s: Could not open camera %d: %s (%d)", __FUNCTION__,
@@ -87,7 +86,7 @@
     }
 
     camera_info info;
-    res = module->get_camera_info(mId, &info);
+    res = module->getCameraInfo(mId, &info);
     if (res != OK ) return res;
 
     if (info.device_version != device->common.version) {
@@ -241,8 +240,9 @@
     return mRequestQueue.waitForDequeue(requestId, timeout);
 }
 
-status_t Camera2Device::createStream(sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, int format, int *id) {
+status_t Camera2Device::createStream(sp<Surface> consumer,
+        uint32_t width, uint32_t height, int format,
+        android_dataspace /*dataSpace*/, camera3_stream_rotation_t rotation, int *id) {
     ATRACE_CALL();
     status_t res;
     ALOGV("%s: E", __FUNCTION__);
@@ -315,7 +315,8 @@
 
 
 status_t Camera2Device::getStreamInfo(int id,
-        uint32_t *width, uint32_t *height, uint32_t *format) {
+        uint32_t *width, uint32_t *height,
+        uint32_t *format, android_dataspace *dataSpace) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
     bool found = false;
@@ -336,6 +337,7 @@
     if (width) *width = (*streamI)->getWidth();
     if (height) *height = (*streamI)->getHeight();
     if (format) *format = (*streamI)->getFormat();
+    if (dataSpace) *dataSpace = HAL_DATASPACE_UNKNOWN;
 
     return OK;
 }
@@ -415,7 +417,7 @@
     return OK;
 }
 
-status_t Camera2Device::configureStreams() {
+status_t Camera2Device::configureStreams(bool isConstrainedHighSpeed) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
 
@@ -618,6 +620,18 @@
     return waitUntilDrained();
 }
 
+status_t Camera2Device::prepare(int streamId) {
+    ATRACE_CALL();
+    ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
+    return NO_INIT;
+}
+
+status_t Camera2Device::tearDown(int streamId) {
+    ATRACE_CALL();
+    ALOGE("%s: Camera %d: unimplemented", __FUNCTION__, mId);
+    return NO_INIT;
+}
+
 uint32_t Camera2Device::getDeviceVersion() {
     ATRACE_CALL();
     return mDeviceVersion;
@@ -1581,4 +1595,18 @@
     return OK;
 }
 
+// camera 2 devices don't support reprocessing
+status_t Camera2Device::createInputStream(
+    uint32_t width, uint32_t height, int format, int *id) {
+    ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
+// camera 2 devices don't support reprocessing
+status_t Camera2Device::getInputBufferProducer(
+        sp<IGraphicBufferProducer> *producer) {
+    ALOGE("%s: camera 2 devices don't support reprocessing", __FUNCTION__);
+    return INVALID_OPERATION;
+}
+
 }; // namespace android
diff --git a/services/camera/libcameraservice/device2/Camera2Device.h b/services/camera/libcameraservice/device2/Camera2Device.h
index 4def8ae..34c1ded 100644
--- a/services/camera/libcameraservice/device2/Camera2Device.h
+++ b/services/camera/libcameraservice/device2/Camera2Device.h
@@ -43,7 +43,7 @@
      * CameraDevice interface
      */
     virtual int      getId() const;
-    virtual status_t initialize(camera_module_t *module);
+    virtual status_t initialize(CameraModule *module);
     virtual status_t disconnect();
     virtual status_t dump(int fd, const Vector<String16>& args);
     virtual const CameraMetadata& info() const;
@@ -56,16 +56,22 @@
                                              int64_t *lastFrameNumber = NULL);
     virtual status_t clearStreamingRequest(int64_t *lastFrameNumber = NULL);
     virtual status_t waitUntilRequestReceived(int32_t requestId, nsecs_t timeout);
-    virtual status_t createStream(sp<ANativeWindow> consumer,
+    virtual status_t createStream(sp<Surface> consumer,
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
+    virtual status_t createInputStream(
             uint32_t width, uint32_t height, int format, int *id);
     virtual status_t createReprocessStreamFromStream(int outputId, int *id);
     virtual status_t getStreamInfo(int id,
-            uint32_t *width, uint32_t *height, uint32_t *format);
+            uint32_t *width, uint32_t *height,
+            uint32_t *format, android_dataspace *dataSpace);
     virtual status_t setStreamTransform(int id, int transform);
     virtual status_t deleteStream(int id);
     virtual status_t deleteReprocessStream(int id);
     // No-op on HAL2 devices
-    virtual status_t configureStreams();
+    virtual status_t configureStreams(bool isConstrainedHighSpeed = false);
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer);
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
     virtual status_t waitUntilDrained();
     virtual status_t setNotifyCallback(NotificationListener *listener);
@@ -79,6 +85,10 @@
             buffer_handle_t *buffer, wp<BufferReleasedListener> listener);
     // Flush implemented as just a wait
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
+    // Prepare and tearDown are not supported on HAL2 devices (they return NO_INIT)
+    virtual status_t prepare(int streamId);
+    virtual status_t tearDown(int streamId);
+
     virtual uint32_t getDeviceVersion();
     virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 53e6fa9..0c941fb 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -57,11 +57,14 @@
 
 Camera3Device::Camera3Device(int id):
         mId(id),
+        mIsConstrainedHighSpeedConfiguration(false),
         mHal3Device(NULL),
         mStatus(STATUS_UNINITIALIZED),
+        mStatusWaiters(0),
         mUsePartialResult(false),
         mNumPartialResults(1),
         mNextResultFrameNumber(0),
+        mNextReprocessResultFrameNumber(0),
         mNextShutterFrameNumber(0),
         mListener(NULL)
 {
@@ -86,7 +89,7 @@
  * CameraDeviceBase interface
  */
 
-status_t Camera3Device::initialize(camera_module_t *module)
+status_t Camera3Device::initialize(CameraModule *module)
 {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
@@ -106,9 +109,8 @@
     camera3_device_t *device;
 
     ATRACE_BEGIN("camera3->open");
-    res = CameraService::filterOpenErrorCode(module->common.methods->open(
-        &module->common, deviceName.string(),
-        reinterpret_cast<hw_device_t**>(&device)));
+    res = module->open(deviceName.string(),
+            reinterpret_cast<hw_device_t**>(&device));
     ATRACE_END();
 
     if (res != OK) {
@@ -127,7 +129,7 @@
     }
 
     camera_info info;
-    res = CameraService::filterGetInfoErrorCode(module->get_camera_info(
+    res = CameraService::filterGetInfoErrorCode(module->getCameraInfo(
         mId, &info));
     if (res != OK) return res;
 
@@ -163,9 +165,17 @@
         return res;
     }
 
-    /** Start up request queue thread */
+    bool aeLockAvailable = false;
+    camera_metadata_ro_entry aeLockAvailableEntry;
+    res = find_camera_metadata_ro_entry(info.static_camera_characteristics,
+            ANDROID_CONTROL_AE_LOCK_AVAILABLE, &aeLockAvailableEntry);
+    if (res == OK && aeLockAvailableEntry.count > 0) {
+        aeLockAvailable = (aeLockAvailableEntry.data.u8[0] ==
+                ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE);
+    }
 
-    mRequestThread = new RequestThread(this, mStatusTracker, device);
+    /** Start up request queue thread */
+    mRequestThread = new RequestThread(this, mStatusTracker, device, aeLockAvailable);
     res = mRequestThread->run(String8::format("C3Dev-%d-ReqQueue", mId).string());
     if (res != OK) {
         SET_ERR_L("Unable to start request queue thread: %s (%d)",
@@ -175,12 +185,15 @@
         return res;
     }
 
+    mPreparerThread = new PreparerThread();
+
     /** Everything is good to go */
 
     mDeviceVersion = device->common.version;
     mDeviceInfo = info.static_camera_characteristics;
     mHal3Device = device;
-    mStatus = STATUS_UNCONFIGURED;
+
+    internalUpdateStatusLocked(STATUS_UNCONFIGURED);
     mNextStreamId = 0;
     mDummyStreamId = NO_STREAM;
     mNeedConfig = true;
@@ -202,6 +215,17 @@
         }
     }
 
+    camera_metadata_entry configs =
+            mDeviceInfo.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
+    for (uint32_t i = 0; i < configs.count; i += 4) {
+        if (configs.data.i32[i] == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+                configs.data.i32[i + 3] ==
+                ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_INPUT) {
+            mSupportedOpaqueInputSizes.add(Size(configs.data.i32[i + 1],
+                    configs.data.i32[i + 2]));
+        }
+    }
+
     return OK;
 }
 
@@ -274,7 +298,7 @@
             mHal3Device = NULL;
         }
 
-        mStatus = STATUS_UNINITIALIZED;
+        internalUpdateStatusLocked(STATUS_UNINITIALIZED);
     }
 
     ALOGV("%s: X", __FUNCTION__);
@@ -348,7 +372,7 @@
     // Get max jpeg size (area-wise).
     Size maxJpegResolution = getMaxJpegResolution();
     if (maxJpegResolution.width == 0) {
-        ALOGE("%s: Camera %d: Can't find find valid available jpeg sizes in static metadata!",
+        ALOGE("%s: Camera %d: Can't find valid available jpeg sizes in static metadata!",
                 __FUNCTION__, mId);
         return BAD_VALUE;
     }
@@ -375,6 +399,21 @@
     return jpegBufferSize;
 }
 
+ssize_t Camera3Device::getPointCloudBufferSize() const {
+    const int FLOATS_PER_POINT = 4;
+    camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
+    if (maxPointCount.count == 0) {
+        ALOGE("%s: Camera %d: Can't find maximum depth point cloud size in static metadata!",
+                __FUNCTION__, mId);
+        return BAD_VALUE;
+    }
+    ssize_t maxBytesForPointCloud = sizeof(android_depth_points) +
+            maxPointCount.data.i32[0] * sizeof(float) * FLOATS_PER_POINT;
+    return maxBytesForPointCloud;
+}
+
+
+
 status_t Camera3Device::dump(int fd, const Vector<String16> &args) {
     ATRACE_CALL();
     (void)args;
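
For the new HAL_DATASPACE_DEPTH path, getPointCloudBufferSize() above sizes the BLOB as a fixed android_depth_points header plus FLOATS_PER_POINT (4) floats per sample, bounded by ANDROID_DEPTH_MAX_DEPTH_SAMPLES. The same arithmetic as a standalone sketch, with a placeholder header struct because the real android_depth_points definition is not part of this patch:

// Standalone sketch (not AOSP code) of the depth point cloud buffer sizing.
#include <cstddef>
#include <cstdio>

struct FakeDepthPointsHeader { unsigned int num_points; }; // placeholder only

constexpr int kFloatsPerPoint = 4; // x, y, z, confidence per depth sample

std::size_t pointCloudBufferSize(std::size_t maxPoints) {
    return sizeof(FakeDepthPointsHeader) + maxPoints * kFloatsPerPoint * sizeof(float);
}

int main() {
    // A HAL advertising ANDROID_DEPTH_MAX_DEPTH_SAMPLES = 32768 needs
    // 32768 * 4 * 4 = 524288 bytes of payload plus the header size.
    std::printf("%zu bytes\n", pointCloudBufferSize(32768));
    return 0;
}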
@@ -406,6 +445,8 @@
         lines.appendFormat("    Error cause: %s\n", mErrorCause.string());
     }
     lines.appendFormat("    Stream configuration:\n");
+    lines.appendFormat("    Operation mode: %s \n", mIsConstrainedHighSpeedConfiguration ?
+            "CONSTRAINED HIGH SPEED VIDEO" : "NORMAL");
 
     if (mInputStream != NULL) {
         write(fd, lines.string(), lines.size());
@@ -801,13 +842,14 @@
     return OK;
 }
 
-status_t Camera3Device::createStream(sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, int format, int *id) {
+status_t Camera3Device::createStream(sp<Surface> consumer,
+        uint32_t width, uint32_t height, int format, android_dataspace dataSpace,
+        camera3_stream_rotation_t rotation, int *id) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
-    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d",
-            mId, mNextStreamId, width, height, format);
+    ALOGV("Camera %d: Creating new stream %d: %d x %d, format %d, dataspace %d rotation %d",
+            mId, mNextStreamId, width, height, format, dataSpace, rotation);
 
     status_t res;
     bool wasActive = false;
@@ -840,17 +882,25 @@
 
     sp<Camera3OutputStream> newStream;
     if (format == HAL_PIXEL_FORMAT_BLOB) {
-        ssize_t jpegBufferSize = getJpegBufferSize(width, height);
-        if (jpegBufferSize <= 0) {
-            SET_ERR_L("Invalid jpeg buffer size %zd", jpegBufferSize);
-            return BAD_VALUE;
+        ssize_t blobBufferSize;
+        if (dataSpace != HAL_DATASPACE_DEPTH) {
+            blobBufferSize = getJpegBufferSize(width, height);
+            if (blobBufferSize <= 0) {
+                SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
+                return BAD_VALUE;
+            }
+        } else {
+            blobBufferSize = getPointCloudBufferSize();
+            if (blobBufferSize <= 0) {
+                SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
+                return BAD_VALUE;
+            }
         }
-
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, jpegBufferSize, format);
+                width, height, blobBufferSize, format, dataSpace, rotation);
     } else {
         newStream = new Camera3OutputStream(mNextStreamId, consumer,
-                width, height, format);
+                width, height, format, dataSpace, rotation);
     }
     newStream->setStatusTracker(mStatusTracker);
 
@@ -888,7 +938,8 @@
 
 
 status_t Camera3Device::getStreamInfo(int id,
-        uint32_t *width, uint32_t *height, uint32_t *format) {
+        uint32_t *width, uint32_t *height,
+        uint32_t *format, android_dataspace *dataSpace) {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
@@ -919,7 +970,7 @@
     if (width) *width  = mOutputStreams[idx]->getWidth();
     if (height) *height = mOutputStreams[idx]->getHeight();
     if (format) *format = mOutputStreams[idx]->getFormat();
-
+    if (dataSpace) *dataSpace = mOutputStreams[idx]->getDataSpace();
     return OK;
 }
 
@@ -1009,16 +1060,35 @@
     return INVALID_OPERATION;
 }
 
-status_t Camera3Device::configureStreams() {
+status_t Camera3Device::configureStreams(bool isConstrainedHighSpeed) {
     ATRACE_CALL();
     ALOGV("%s: E", __FUNCTION__);
 
     Mutex::Autolock il(mInterfaceLock);
     Mutex::Autolock l(mLock);
 
+    if (mIsConstrainedHighSpeedConfiguration != isConstrainedHighSpeed) {
+        mNeedConfig = true;
+        mIsConstrainedHighSpeedConfiguration = isConstrainedHighSpeed;
+    }
+
     return configureStreamsLocked();
 }
 
+status_t Camera3Device::getInputBufferProducer(
+        sp<IGraphicBufferProducer> *producer) {
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    if (producer == NULL) {
+        return BAD_VALUE;
+    } else if (mInputStream == NULL) {
+        return INVALID_OPERATION;
+    }
+
+    return mInputStream->getInputBufferProducer(producer);
+}
+
 status_t Camera3Device::createDefaultRequest(int templateId,
         CameraMetadata *request) {
     ATRACE_CALL();
@@ -1054,9 +1124,9 @@
         mHal3Device, templateId);
     ATRACE_END();
     if (rawRequest == NULL) {
-        SET_ERR_L("HAL is unable to construct default settings for template %d",
-                templateId);
-        return DEAD_OBJECT;
+        ALOGI("%s: template %d is not supported on this camera device",
+              __FUNCTION__, templateId);
+        return BAD_VALUE;
     }
     *request = rawRequest;
     mRequestTemplateCache[templateId] = rawRequest;
@@ -1098,6 +1168,13 @@
     return res;
 }
 
+
+void Camera3Device::internalUpdateStatusLocked(Status status) {
+    mStatus = status;
+    mRecentStatusUpdates.add(mStatus);
+    mStatusChanged.broadcast();
+}
+
 // Pause to reconfigure
 status_t Camera3Device::internalPauseAndWaitLocked() {
     mRequestThread->setPaused(true);
@@ -1128,23 +1205,40 @@
     return OK;
 }
 
-status_t Camera3Device::waitUntilStateThenRelock(bool active,
-        nsecs_t timeout) {
+status_t Camera3Device::waitUntilStateThenRelock(bool active, nsecs_t timeout) {
     status_t res = OK;
-    if (active == (mStatus == STATUS_ACTIVE)) {
-        // Desired state already reached
-        return res;
+
+    size_t startIndex = 0;
+    if (mStatusWaiters == 0) {
+        // Clear the list of recent statuses if there are no existing threads waiting on updates to
+        // this status list
+        mRecentStatusUpdates.clear();
+    } else {
+        // If other threads are waiting on updates to this status list, set the position of the
+        // first element that this list will check rather than clearing the list.
+        startIndex = mRecentStatusUpdates.size();
     }
 
+    mStatusWaiters++;
+
     bool stateSeen = false;
     do {
-        mRecentStatusUpdates.clear();
+        if (active == (mStatus == STATUS_ACTIVE)) {
+            // Desired state is current
+            break;
+        }
 
         res = mStatusChanged.waitRelative(mLock, timeout);
         if (res != OK) break;
 
-        // Check state change history during wait
-        for (size_t i = 0; i < mRecentStatusUpdates.size(); i++) {
+        // This should never happen, but if it did, it could result in subtle deadlocks and
+        // invalid state transitions.
+        LOG_ALWAYS_FATAL_IF(startIndex > mRecentStatusUpdates.size(),
+                "%s: Skipping status updates in Camera3Device, may result in deadlock.",
+                __FUNCTION__);
+
+        // Encountered desired state since we began waiting
+        for (size_t i = startIndex; i < mRecentStatusUpdates.size(); i++) {
             if (active == (mRecentStatusUpdates[i] == STATUS_ACTIVE) ) {
                 stateSeen = true;
                 break;
@@ -1152,6 +1246,8 @@
         }
     } while (!stateSeen);
 
+    mStatusWaiters--;
+
     return res;
 }
 
@@ -1164,7 +1260,8 @@
         ALOGW("%s: Replacing old callback listener", __FUNCTION__);
     }
     mListener = listener;
-    mRequestThread->setNotifyCallback(listener);
+    mRequestThread->setNotificationListener(listener);
+    mPreparerThread->setNotificationListener(listener);
 
     return OK;
 }
@@ -1310,6 +1407,65 @@
     return res;
 }
 
+status_t Camera3Device::prepare(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: Preparing stream %d", __FUNCTION__, mId, streamId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    sp<Camera3StreamInterface> stream;
+    ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+    if (outputStreamIdx == NAME_NOT_FOUND) {
+        CLOGE("Stream %d does not exist", streamId);
+        return BAD_VALUE;
+    }
+
+    stream = mOutputStreams.editValueAt(outputStreamIdx);
+
+    if (stream->isUnpreparable() || stream->hasOutstandingBuffers()) {
+        CLOGE("Stream %d has already been a request target", streamId);
+        return BAD_VALUE;
+    }
+
+    if (mRequestThread->isStreamPending(stream)) {
+        CLOGE("Stream %d is already a target in a pending request", streamId);
+        return BAD_VALUE;
+    }
+
+    return mPreparerThread->prepare(stream);
+}
+
+status_t Camera3Device::tearDown(int streamId) {
+    ATRACE_CALL();
+    ALOGV("%s: Camera %d: Tearing down stream %d", __FUNCTION__, mId, streamId);
+    Mutex::Autolock il(mInterfaceLock);
+    Mutex::Autolock l(mLock);
+
+    // Teardown can only be accomplished on devices that don't require register_stream_buffers,
+    // since we cannot call register_stream_buffers except right after configure_streams.
+    if (mHal3Device->common.version < CAMERA_DEVICE_API_VERSION_3_2) {
+        ALOGE("%s: Unable to tear down streams on device HAL v%x",
+                __FUNCTION__, mHal3Device->common.version);
+        return NO_INIT;
+    }
+
+    sp<Camera3StreamInterface> stream;
+    ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+    if (outputStreamIdx == NAME_NOT_FOUND) {
+        CLOGE("Stream %d does not exist", streamId);
+        return BAD_VALUE;
+    }
+
+    stream = mOutputStreams.editValueAt(outputStreamIdx);
+
+    if (stream->hasOutstandingBuffers() || mRequestThread->isStreamPending(stream)) {
+        CLOGE("Stream %d is a target of a in-progress request", streamId);
+        return BAD_VALUE;
+    }
+
+    return stream->tearDown();
+}
+
 uint32_t Camera3Device::getDeviceVersion() {
     ATRACE_CALL();
     Mutex::Autolock il(mInterfaceLock);
@@ -1333,9 +1489,7 @@
         }
         ALOGV("%s: Camera %d: Now %s", __FUNCTION__, mId,
                 idle ? "idle" : "active");
-        mStatus = idle ? STATUS_CONFIGURED : STATUS_ACTIVE;
-        mRecentStatusUpdates.add(mStatus);
-        mStatusChanged.signal();
+        internalUpdateStatusLocked(idle ? STATUS_CONFIGURED : STATUS_ACTIVE);
 
         // Skip notifying listener if we're doing some user-transparent
         // state changes
@@ -1383,6 +1537,11 @@
                 return NULL;
             }
         }
+        // Check if stream is being prepared
+        if (mInputStream->isPreparing()) {
+            CLOGE("Request references an input stream that's being prepared!");
+            return NULL;
+        }
 
         newRequest->mInputStream = mInputStream;
         newRequest->mSettings.erase(ANDROID_REQUEST_INPUT_STREAMS);
@@ -1415,6 +1574,11 @@
                 return NULL;
             }
         }
+        // Check if stream is being prepared
+        if (stream->isPreparing()) {
+            CLOGE("Request references an output stream that's being prepared!");
+            return NULL;
+        }
 
         newRequest->mOutputStreams.push(stream);
     }
@@ -1423,6 +1587,17 @@
     return newRequest;
 }
 
+bool Camera3Device::isOpaqueInputSizeSupported(uint32_t width, uint32_t height) {
+    for (uint32_t i = 0; i < mSupportedOpaqueInputSizes.size(); i++) {
+        Size size = mSupportedOpaqueInputSizes[i];
+        if (size.width == width && size.height == height) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
 status_t Camera3Device::configureStreamsLocked() {
     ATRACE_CALL();
     status_t res;
@@ -1450,7 +1625,9 @@
     ALOGV("%s: Camera %d: Starting stream configuration", __FUNCTION__, mId);
 
     camera3_stream_configuration config;
-
+    config.operation_mode = mIsConstrainedHighSpeedConfiguration ?
+            CAMERA3_STREAM_CONFIGURATION_CONSTRAINED_HIGH_SPEED_MODE :
+            CAMERA3_STREAM_CONFIGURATION_NORMAL_MODE;
     config.num_streams = (mInputStream != NULL) + mOutputStreams.size();
 
     Vector<camera3_stream_t*> streams;
@@ -1521,7 +1698,7 @@
 
         // Return state to that at start of call, so that future configures
         // properly clean things up
-        mStatus = STATUS_UNCONFIGURED;
+        internalUpdateStatusLocked(STATUS_UNCONFIGURED);
         mNeedConfig = true;
 
         ALOGV("%s: Camera %d: Stream configuration failed", __FUNCTION__, mId);
@@ -1568,11 +1745,8 @@
 
     mNeedConfig = false;
 
-    if (mDummyStreamId == NO_STREAM) {
-        mStatus = STATUS_CONFIGURED;
-    } else {
-        mStatus = STATUS_UNCONFIGURED;
-    }
+    internalUpdateStatusLocked((mDummyStreamId == NO_STREAM) ?
+            STATUS_CONFIGURED : STATUS_UNCONFIGURED);
 
     ALOGV("%s: Camera %d: Stream configuration complete", __FUNCTION__, mId);
 
@@ -1680,7 +1854,7 @@
     mErrorCause = errorCause;
 
     mRequestThread->setPaused(true);
-    mStatus = STATUS_ERROR;
+    internalUpdateStatusLocked(STATUS_ERROR);
 
     // Notify upstream about a device error
     if (mListener != NULL) {
@@ -1698,12 +1872,14 @@
  */
 
 status_t Camera3Device::registerInFlight(uint32_t frameNumber,
-        int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput) {
+        int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
+        const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
     ATRACE_CALL();
     Mutex::Autolock l(mInFlightLock);
 
     ssize_t res;
-    res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput));
+    res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
+            aeTriggerCancelOverride));
     if (res < 0) return res;
 
     return OK;
@@ -1879,7 +2055,6 @@
     return true;
 }
 
-
 void Camera3Device::returnOutputBuffers(
         const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
         nsecs_t timestamp) {
@@ -1938,8 +2113,12 @@
 
     // Sanity check - if we have too many in-flight frames, something has
     // likely gone wrong
-    if (mInFlightMap.size() > kInFlightWarnLimit) {
+    if (!mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() > kInFlightWarnLimit) {
         CLOGE("In-flight list too large: %zu", mInFlightMap.size());
+    } else if (mIsConstrainedHighSpeedConfiguration && mInFlightMap.size() >
+            kInFlightWarnLimitHighSpeed) {
+        CLOGE("In-flight list too large for high speed configuration: %zu",
+                mInFlightMap.size());
     }
 }
 
@@ -1947,20 +2126,32 @@
 void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata,
         CaptureResultExtras &resultExtras,
         CameraMetadata &collectedPartialResult,
-        uint32_t frameNumber) {
+        uint32_t frameNumber,
+        bool reprocess,
+        const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
     if (pendingMetadata.isEmpty())
         return;
 
     Mutex::Autolock l(mOutputLock);
 
     // TODO: need to track errors for tighter bounds on expected frame number
-    if (frameNumber < mNextResultFrameNumber) {
-        SET_ERR("Out-of-order capture result metadata submitted! "
+    if (reprocess) {
+        if (frameNumber < mNextReprocessResultFrameNumber) {
+            SET_ERR("Out-of-order reprocess capture result metadata submitted! "
                 "(got frame number %d, expecting %d)",
-                frameNumber, mNextResultFrameNumber);
-        return;
+                frameNumber, mNextReprocessResultFrameNumber);
+            return;
+        }
+        mNextReprocessResultFrameNumber = frameNumber + 1;
+    } else {
+        if (frameNumber < mNextResultFrameNumber) {
+            SET_ERR("Out-of-order capture result metadata submitted! "
+                    "(got frame number %d, expecting %d)",
+                    frameNumber, mNextResultFrameNumber);
+            return;
+        }
+        mNextResultFrameNumber = frameNumber + 1;
     }
-    mNextResultFrameNumber = frameNumber + 1;
 
     CaptureResult captureResult;
     captureResult.mResultExtras = resultExtras;
@@ -1992,6 +2183,8 @@
         return;
     }
 
+    overrideResultForPrecaptureCancel(&captureResult.mMetadata, aeTriggerCancelOverride);
+
     // Valid result, insert into queue
     List<CaptureResult>::iterator queuedResult =
             mResultQueue.insert(mResultQueue.end(), CaptureResult(captureResult));
@@ -2170,7 +2363,8 @@
                 CameraMetadata metadata;
                 metadata = result->result;
                 sendCaptureResult(metadata, request.resultExtras,
-                    collectedPartialResult, frameNumber);
+                    collectedPartialResult, frameNumber, hasInputBufferInRequest,
+                    request.aeTriggerCancelOverride);
             }
         }
 
@@ -2332,7 +2526,8 @@
 
             // send pending result and buffers
             sendCaptureResult(r.pendingMetadata, r.resultExtras,
-                r.partialResult.collectedResult, msg.frame_number);
+                r.partialResult.collectedResult, msg.frame_number,
+                r.hasInputBuffer, r.aeTriggerCancelOverride);
             returnOutputBuffers(r.pendingOutputBuffers.array(),
                 r.pendingOutputBuffers.size(), r.shutterTimestamp);
             r.pendingOutputBuffers.clear();
@@ -2366,8 +2561,9 @@
 
 Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent,
         sp<StatusTracker> statusTracker,
-        camera3_device_t *hal3Device) :
-        Thread(false),
+        camera3_device_t *hal3Device,
+        bool aeLockAvailable) :
+        Thread(/*canCallJava*/false),
         mParent(parent),
         mStatusTracker(statusTracker),
         mHal3Device(hal3Device),
@@ -2379,11 +2575,12 @@
         mLatestRequestId(NAME_NOT_FOUND),
         mCurrentAfTriggerId(0),
         mCurrentPreCaptureTriggerId(0),
-        mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES) {
+        mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES),
+        mAeLockAvailable(aeLockAvailable) {
     mStatusId = statusTracker->addComponent();
 }
 
-void Camera3Device::RequestThread::setNotifyCallback(
+void Camera3Device::RequestThread::setNotificationListener(
         NotificationListener *listener) {
     Mutex::Autolock l(mRequestLock);
     mListener = listener;
@@ -2524,6 +2721,21 @@
     if (listener != NULL) {
         for (RequestList::iterator it = mRequestQueue.begin();
                  it != mRequestQueue.end(); ++it) {
+            // Abort the input buffers for reprocess requests.
+            if ((*it)->mInputStream != NULL) {
+                camera3_stream_buffer_t inputBuffer;
+                status_t res = (*it)->mInputStream->getInputBuffer(&inputBuffer);
+                if (res != OK) {
+                    ALOGW("%s: %d: couldn't get input buffer while clearing the request "
+                            "list: %s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
+                } else {
+                    res = (*it)->mInputStream->returnInputBuffer(inputBuffer);
+                    if (res != OK) {
+                        ALOGE("%s: %d: couldn't return input buffer while clearing the request "
+                                "list: %s (%d)", __FUNCTION__, __LINE__, strerror(-res), res);
+                    }
+                }
+            }
             // Set the frame number this request would have had, if it
             // had been submitted; this frame number will not be reused.
             // The requestId and burstId fields were set when the request was
@@ -2572,6 +2784,65 @@
     mRequestSignal.signal();
 }
 
+
+/**
+ * Devices <= CAMERA_DEVICE_API_VERSION_3_2 do not support AE_PRECAPTURE_TRIGGER_CANCEL, so we
+ * override AE_PRECAPTURE_TRIGGER_CANCEL to AE_PRECAPTURE_TRIGGER_IDLE and AE_LOCK_OFF to
+ * AE_LOCK_ON in order to start cancelling the AE precapture. If AE lock is not available, we
+ * still override AE_PRECAPTURE_TRIGGER_CANCEL to AE_PRECAPTURE_TRIGGER_IDLE but do not add
+ * AE_LOCK_ON to the request.
+ */
+void Camera3Device::RequestThread::handleAePrecaptureCancelRequest(sp<CaptureRequest> request) {
+    request->mAeTriggerCancelOverride.applyAeLock = false;
+    request->mAeTriggerCancelOverride.applyAePrecaptureTrigger = false;
+
+    if (mHal3Device->common.version > CAMERA_DEVICE_API_VERSION_3_2) {
+        return;
+    }
+
+    camera_metadata_entry_t aePrecaptureTrigger =
+            request->mSettings.find(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER);
+    if (aePrecaptureTrigger.count > 0 &&
+            aePrecaptureTrigger.data.u8[0] == ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL) {
+        // Always override CANCEL to IDLE
+        uint8_t aePrecaptureTrigger = ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_IDLE;
+        request->mSettings.update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER, &aePrecaptureTrigger, 1);
+        request->mAeTriggerCancelOverride.applyAePrecaptureTrigger = true;
+        request->mAeTriggerCancelOverride.aePrecaptureTrigger =
+                ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL;
+
+        if (mAeLockAvailable == true) {
+            camera_metadata_entry_t aeLock = request->mSettings.find(ANDROID_CONTROL_AE_LOCK);
+            if (aeLock.count == 0 ||  aeLock.data.u8[0] == ANDROID_CONTROL_AE_LOCK_OFF) {
+                uint8_t aeLock = ANDROID_CONTROL_AE_LOCK_ON;
+                request->mSettings.update(ANDROID_CONTROL_AE_LOCK, &aeLock, 1);
+                request->mAeTriggerCancelOverride.applyAeLock = true;
+                request->mAeTriggerCancelOverride.aeLock = ANDROID_CONTROL_AE_LOCK_OFF;
+            }
+        }
+    }
+}
+
+/**
+ * Override result metadata for cancelling AE precapture trigger applied in
+ * handleAePrecaptureCancelRequest().
+ */
+void Camera3Device::overrideResultForPrecaptureCancel(
+        CameraMetadata *result, const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+    if (aeTriggerCancelOverride.applyAeLock) {
+        // Only devices <= v3.2 should have this override
+        assert(mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2);
+        result->update(ANDROID_CONTROL_AE_LOCK, &aeTriggerCancelOverride.aeLock, 1);
+    }
+
+    if (aeTriggerCancelOverride.applyAePrecaptureTrigger) {
+        // Only devices <= v3.2 should have this override
+        assert(mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2);
+        result->update(ANDROID_CONTROL_AE_PRECAPTURE_TRIGGER,
+                &aeTriggerCancelOverride.aePrecaptureTrigger, 1);
+    }
+}
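
Note: handleAePrecaptureCancelRequest() rewrites the outgoing request for <= HAL3.2 devices (CANCEL becomes IDLE, optionally borrowing AE lock), and overrideResultForPrecaptureCancel() undoes that rewrite in the returned result so the client still observes the CANCEL trigger and AE_LOCK_OFF it originally asked for. A minimal, hedged sketch of that round trip, with std::map standing in for CameraMetadata and purely illustrative tag/value constants:

    // Hedged sketch only: std::map stands in for CameraMetadata; the tag and
    // value constants below are illustrative, not the real camera_metadata enums.
    #include <cstdint>
    #include <iostream>
    #include <map>

    enum Tag : uint32_t { AE_PRECAPTURE_TRIGGER, AE_LOCK };
    enum : uint8_t { TRIGGER_IDLE, TRIGGER_CANCEL, LOCK_OFF, LOCK_ON };

    struct AeTriggerCancelOverride {
        bool applyAeLock = false;
        uint8_t aeLock = LOCK_OFF;
        bool applyAePrecaptureTrigger = false;
        uint8_t aePrecaptureTrigger = TRIGGER_IDLE;
    };

    using Settings = std::map<Tag, uint8_t>;

    // Request side: replace CANCEL with IDLE (plus AE lock, if available) and
    // remember what was overridden.
    AeTriggerCancelOverride overrideRequest(Settings& request, bool aeLockAvailable) {
        AeTriggerCancelOverride o;
        auto trigger = request.find(AE_PRECAPTURE_TRIGGER);
        if (trigger != request.end() && trigger->second == TRIGGER_CANCEL) {
            trigger->second = TRIGGER_IDLE;
            o.applyAePrecaptureTrigger = true;
            o.aePrecaptureTrigger = TRIGGER_CANCEL;
            auto lock = request.find(AE_LOCK);
            if (aeLockAvailable && (lock == request.end() || lock->second == LOCK_OFF)) {
                request[AE_LOCK] = LOCK_ON;
                o.applyAeLock = true;
                o.aeLock = LOCK_OFF;
            }
        }
        return o;
    }

    // Result side: restore what the client originally requested.
    void overrideResult(Settings& result, const AeTriggerCancelOverride& o) {
        if (o.applyAePrecaptureTrigger) result[AE_PRECAPTURE_TRIGGER] = o.aePrecaptureTrigger;
        if (o.applyAeLock) result[AE_LOCK] = o.aeLock;
    }

    int main() {
        Settings request{{AE_PRECAPTURE_TRIGGER, TRIGGER_CANCEL}, {AE_LOCK, LOCK_OFF}};
        AeTriggerCancelOverride o = overrideRequest(request, /*aeLockAvailable*/ true);
        Settings result = request;  // pretend the HAL echoes the settings back
        overrideResult(result, o);
        std::cout << "HAL sees trigger " << int(request[AE_PRECAPTURE_TRIGGER])
                  << ", client sees trigger " << int(result[AE_PRECAPTURE_TRIGGER]) << "\n";
    }

The override struct is the only state that has to travel from request submission to result handling, which is why the diff threads AeTriggerCancelOverride_t through registerInFlight() and InFlightRequest.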
+
 bool Camera3Device::RequestThread::threadLoop() {
 
     status_t res;
@@ -2663,29 +2934,11 @@
                __FUNCTION__);
     }
 
-    camera3_stream_buffer_t inputBuffer;
     uint32_t totalNumBuffers = 0;
 
     // Fill in buffers
-
     if (nextRequest->mInputStream != NULL) {
-        request.input_buffer = &inputBuffer;
-        res = nextRequest->mInputStream->getInputBuffer(&inputBuffer);
-        if (res != OK) {
-            // Can't get input buffer from gralloc queue - this could be due to
-            // disconnected queue or other producer misbehavior, so not a fatal
-            // error
-            ALOGE("RequestThread: Can't get input buffer, skipping request:"
-                    " %s (%d)", strerror(-res), res);
-            Mutex::Autolock l(mRequestLock);
-            if (mListener != NULL) {
-                mListener->notifyError(
-                        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
-                        nextRequest->mResultExtras);
-            }
-            cleanUpFailedRequest(request, nextRequest, outputBuffers);
-            return true;
-        }
+        request.input_buffer = &nextRequest->mInputBuffer;
         totalNumBuffers += 1;
     } else {
         request.input_buffer = NULL;
@@ -2703,11 +2956,13 @@
             // error
             ALOGE("RequestThread: Can't get output buffer, skipping request:"
                     " %s (%d)", strerror(-res), res);
-            Mutex::Autolock l(mRequestLock);
-            if (mListener != NULL) {
-                mListener->notifyError(
-                        ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
-                        nextRequest->mResultExtras);
+            {
+                Mutex::Autolock l(mRequestLock);
+                if (mListener != NULL) {
+                    mListener->notifyError(
+                            ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                            nextRequest->mResultExtras);
+                }
             }
             cleanUpFailedRequest(request, nextRequest, outputBuffers);
             return true;
@@ -2727,7 +2982,8 @@
 
     res = parent->registerInFlight(request.frame_number,
             totalNumBuffers, nextRequest->mResultExtras,
-            /*hasInput*/request.input_buffer != NULL);
+            /*hasInput*/request.input_buffer != NULL,
+            nextRequest->mAeTriggerCancelOverride);
     ALOGVV("%s: registered in flight requestId = %" PRId32 ", frameNumber = %" PRId64
            ", burstId = %" PRId32 ".",
             __FUNCTION__,
@@ -2776,6 +3032,12 @@
         nextRequest->mSettings.unlock(request.settings);
     }
 
+    // Unset as current request
+    {
+        Mutex::Autolock l(mRequestLock);
+        mNextRequest.clear();
+    }
+
     // Remove any previously queued triggers (after unlock)
     res = removeTriggers(mPrevRequest);
     if (res != OK) {
@@ -2797,6 +3059,33 @@
     return mLatestRequest;
 }
 
+bool Camera3Device::RequestThread::isStreamPending(
+        sp<Camera3StreamInterface>& stream) {
+    Mutex::Autolock l(mRequestLock);
+
+    if (mNextRequest != nullptr) {
+        for (const auto& s : mNextRequest->mOutputStreams) {
+            if (stream == s) return true;
+        }
+        if (stream == mNextRequest->mInputStream) return true;
+    }
+
+    for (const auto& request : mRequestQueue) {
+        for (const auto& s : request->mOutputStreams) {
+            if (stream == s) return true;
+        }
+        if (stream == request->mInputStream) return true;
+    }
+
+    for (const auto& request : mRepeatingRequests) {
+        for (const auto& s : request->mOutputStreams) {
+            if (stream == s) return true;
+        }
+        if (stream == request->mInputStream) return true;
+    }
+
+    return false;
+}
 
 void Camera3Device::RequestThread::cleanUpFailedRequest(
         camera3_capture_request_t &request,
@@ -2806,15 +3095,18 @@
     if (request.settings != NULL) {
         nextRequest->mSettings.unlock(request.settings);
     }
-    if (request.input_buffer != NULL) {
-        request.input_buffer->status = CAMERA3_BUFFER_STATUS_ERROR;
-        nextRequest->mInputStream->returnInputBuffer(*(request.input_buffer));
+    if (nextRequest->mInputStream != NULL) {
+        nextRequest->mInputBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
+        nextRequest->mInputStream->returnInputBuffer(nextRequest->mInputBuffer);
     }
     for (size_t i = 0; i < request.num_output_buffers; i++) {
         outputBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
         nextRequest->mOutputStreams.editItemAt(i)->returnBuffer(
             outputBuffers[i], 0);
     }
+
+    Mutex::Autolock l(mRequestLock);
+    mNextRequest.clear();
 }
 
 sp<Camera3Device::CaptureRequest>
@@ -2897,7 +3189,31 @@
         nextRequest->mResultExtras.frameNumber = mFrameNumber++;
         nextRequest->mResultExtras.afTriggerId = mCurrentAfTriggerId;
         nextRequest->mResultExtras.precaptureTriggerId = mCurrentPreCaptureTriggerId;
+
+        // Since RequestThread::clear() removes buffers from the input stream,
+        // get the right buffer here before unlocking mRequestLock
+        if (nextRequest->mInputStream != NULL) {
+            res = nextRequest->mInputStream->getInputBuffer(&nextRequest->mInputBuffer);
+            if (res != OK) {
+                // Can't get input buffer from gralloc queue - this could be due to
+                // disconnected queue or other producer misbehavior, so not a fatal
+                // error
+                ALOGE("%s: Can't get input buffer, skipping request:"
+                        " %s (%d)", __FUNCTION__, strerror(-res), res);
+                if (mListener != NULL) {
+                    mListener->notifyError(
+                            ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+                            nextRequest->mResultExtras);
+                }
+                return NULL;
+            }
+        }
     }
+
+    handleAePrecaptureCancelRequest(nextRequest);
+
+    mNextRequest = nextRequest;
+
     return nextRequest;
 }
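
Note: the "get the input buffer before unlocking mRequestLock" step above matters because RequestThread::clear() returns input buffers for queued reprocess requests while holding the same lock; acquiring the buffer under that lock keeps the hand-off ordered with respect to clear(). A generic, hedged sketch of the "acquire the resource under the same lock that clear() uses" pattern (all names hypothetical):

    // Hedged sketch: a generic pattern, not the actual Camera3Device code.
    #include <deque>
    #include <mutex>
    #include <optional>

    struct Work {
        int id;
        bool resourceAcquired = false;
    };

    class WorkQueue {
      public:
        void push(Work w) {
            std::lock_guard<std::mutex> l(mLock);
            mQueue.push_back(w);
        }

        // Pop the next item and acquire its resource *before* dropping the lock,
        // so a concurrent clear() can never strand a half-initialized item.
        std::optional<Work> popAndAcquire() {
            std::lock_guard<std::mutex> l(mLock);
            if (mQueue.empty()) return std::nullopt;
            Work w = mQueue.front();
            mQueue.pop_front();
            w.resourceAcquired = true;  // cf. getInputBuffer() in the diff above
            return w;
        }

        // Abort all queued items; runs under the same lock as popAndAcquire().
        void clear() {
            std::lock_guard<std::mutex> l(mLock);
            for (Work& w : mQueue) {
                w.resourceAcquired = false;  // cf. returnInputBuffer() in the diff above
            }
            mQueue.clear();
        }

      private:
        std::mutex mLock;
        std::deque<Work> mQueue;
    };

    int main() {
        WorkQueue q;
        q.push({1});
        auto w = q.popAndAcquire();  // resource acquired atomically w.r.t. clear()
        q.clear();                   // cannot race with the acquisition above
        return w && w->resourceAcquired ? 0 : 1;
    }

In the sketch, an item either leaves the queue with its resource already attached or is drained by clear(); there is no window in between.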
 

@@ -3144,6 +3460,138 @@
     return OK;
 }
 
+/**
+ * PreparerThread inner class methods
+ */
+
+Camera3Device::PreparerThread::PreparerThread() :
+        Thread(/*canCallJava*/false), mActive(false), mCancelNow(false) {
+}
+
+Camera3Device::PreparerThread::~PreparerThread() {
+    Thread::requestExitAndWait();
+    if (mCurrentStream != nullptr) {
+        mCurrentStream->cancelPrepare();
+        ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId());
+        mCurrentStream.clear();
+    }
+    clear();
+}
+
+status_t Camera3Device::PreparerThread::prepare(sp<Camera3StreamInterface>& stream) {
+    status_t res;
+
+    Mutex::Autolock l(mLock);
+
+    res = stream->startPrepare();
+    if (res == OK) {
+        // No preparation needed, fire listener right off
+        ALOGV("%s: Stream %d already prepared", __FUNCTION__, stream->getId());
+        if (mListener) {
+            mListener->notifyPrepared(stream->getId());
+        }
+        return OK;
+    } else if (res != NOT_ENOUGH_DATA) {
+        return res;
+    }
+
+    // Need to prepare, start up thread if necessary
+    if (!mActive) {
+        // mRunning will change to false before the thread fully shuts down, so wait to be sure it
+        // isn't running
+        Thread::requestExitAndWait();
+        res = Thread::run("C3PrepThread", PRIORITY_BACKGROUND);
+        if (res != OK) {
+            ALOGE("%s: Unable to start preparer stream: %d (%s)", __FUNCTION__, res, strerror(-res));
+            if (mListener) {
+                mListener->notifyPrepared(stream->getId());
+            }
+            return res;
+        }
+        mCancelNow = false;
+        mActive = true;
+        ALOGV("%s: Preparer stream started", __FUNCTION__);
+    }
+
+    // queue up the work
+    mPendingStreams.push_back(stream);
+    ALOGV("%s: Stream %d queued for preparing", __FUNCTION__, stream->getId());
+
+    return OK;
+}
+
+status_t Camera3Device::PreparerThread::clear() {
+    status_t res;
+
+    Mutex::Autolock l(mLock);
+
+    for (const auto& stream : mPendingStreams) {
+        stream->cancelPrepare();
+    }
+    mPendingStreams.clear();
+    mCancelNow = true;
+
+    return OK;
+}
+
+void Camera3Device::PreparerThread::setNotificationListener(NotificationListener *listener) {
+    Mutex::Autolock l(mLock);
+    mListener = listener;
+}
+
+bool Camera3Device::PreparerThread::threadLoop() {
+    status_t res;
+    {
+        Mutex::Autolock l(mLock);
+        if (mCurrentStream == nullptr) {
+            // End thread if done with work
+            if (mPendingStreams.empty()) {
+                ALOGV("%s: Preparer stream out of work", __FUNCTION__);
+                // threadLoop _must not_ re-acquire mLock after it sets mActive to false; would
+                // cause deadlock with prepare()'s requestExitAndWait triggered by !mActive.
+                mActive = false;
+                return false;
+            }
+
+            // Get next stream to prepare
+            auto it = mPendingStreams.begin();
+            mCurrentStream = *it;
+            mPendingStreams.erase(it);
+            ATRACE_ASYNC_BEGIN("stream prepare", mCurrentStream->getId());
+            ALOGV("%s: Preparing stream %d", __FUNCTION__, mCurrentStream->getId());
+        } else if (mCancelNow) {
+            mCurrentStream->cancelPrepare();
+            ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId());
+            ALOGV("%s: Cancelling stream %d prepare", __FUNCTION__, mCurrentStream->getId());
+            mCurrentStream.clear();
+            mCancelNow = false;
+            return true;
+        }
+    }
+
+    res = mCurrentStream->prepareNextBuffer();
+    if (res == NOT_ENOUGH_DATA) return true;
+    if (res != OK) {
+        // Something bad happened; try to recover by cancelling prepare and
+        // signalling listener anyway
+        ALOGE("%s: Stream %d returned error %d (%s) during prepare", __FUNCTION__,
+                mCurrentStream->getId(), res, strerror(-res));
+        mCurrentStream->cancelPrepare();
+    }
+
+    // This stream has finished, notify listener
+    Mutex::Autolock l(mLock);
+    if (mListener) {
+        ALOGV("%s: Stream %d prepare done, signaling listener", __FUNCTION__,
+                mCurrentStream->getId());
+        mListener->notifyPrepared(mCurrentStream->getId());
+    }
+
+    ATRACE_ASYNC_END("stream prepare", mCurrentStream->getId());
+    mCurrentStream.clear();
+
+    return true;
+}
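
Note: PreparerThread is a lazily started worker that drains a FIFO of streams, calls prepareNextBuffer() on the current one until it reports completion, and exits on its own once the list is empty. A hedged, self-contained sketch of that shape using std::thread in place of android::Thread (FakeStream and its completion protocol are simplified stand-ins):

    // Hedged sketch: a lazily started FIFO worker in the spirit of PreparerThread,
    // not the real implementation.
    #include <iostream>
    #include <list>
    #include <mutex>
    #include <thread>

    struct FakeStream {
        int id;
        int buffersLeft;                                  // stand-in for per-buffer progress
        bool prepareNext() { return --buffersLeft > 0; }  // true == more work to do
    };

    class Preparer {
      public:
        ~Preparer() { if (mWorker.joinable()) mWorker.join(); }

        void queue(FakeStream s) {
            std::lock_guard<std::mutex> l(mLock);
            mPending.push_back(s);
            if (!mActive) {                    // lazily start the worker
                if (mWorker.joinable()) mWorker.join();
                mActive = true;
                mWorker = std::thread([this] { loop(); });
            }
        }

      private:
        void loop() {
            for (;;) {
                FakeStream current{};
                {
                    std::lock_guard<std::mutex> l(mLock);
                    if (mPending.empty()) {
                        // Out of work: exit. Must not re-take mLock after this,
                        // since queue() may join this thread while holding it.
                        mActive = false;
                        return;
                    }
                    current = mPending.front();
                    mPending.pop_front();
                }
                while (current.prepareNext()) {
                    // one "buffer allocation" per iteration, done outside the lock
                }
                std::cout << "stream " << current.id << " prepared\n";
            }
        }

        std::mutex mLock;
        std::list<FakeStream> mPending;
        bool mActive = false;
        std::thread mWorker;
    };

    int main() {
        Preparer p;
        p.queue({/*id*/ 1, /*buffersLeft*/ 4});
        p.queue({/*id*/ 2, /*buffersLeft*/ 2});
    }   // ~Preparer joins the worker before returning

The comment in loop() mirrors the constraint documented in threadLoop() above: once the worker clears its active flag it must not touch the lock again, because prepare() may join it while still holding that lock.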
 
 /**
  * Static callback forwarding methods from HAL to instance
@@ -3153,6 +3601,7 @@
         const camera3_capture_result *result) {
     Camera3Device *d =
             const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb));
+
     d->processCaptureResult(result);
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index ec8dc10..5287058 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -73,7 +73,7 @@
     virtual int      getId() const;
 
     // Transitions to idle state on success.
-    virtual status_t initialize(camera_module_t *module);
+    virtual status_t initialize(CameraModule *module);
     virtual status_t disconnect();
     virtual status_t dump(int fd, const Vector<String16> &args);
     virtual const CameraMetadata& info() const;
@@ -94,8 +94,9 @@
     // Actual stream creation/deletion is delayed until first request is submitted
     // If adding streams while actively capturing, will pause device before adding
     // stream, reconfiguring device, and unpausing.
-    virtual status_t createStream(sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, int format, int *id);
+    virtual status_t createStream(sp<Surface> consumer,
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id);
     virtual status_t createInputStream(
             uint32_t width, uint32_t height, int format,
             int *id);
@@ -108,13 +109,16 @@
     virtual status_t createReprocessStreamFromStream(int outputId, int *id);
 
     virtual status_t getStreamInfo(int id,
-            uint32_t *width, uint32_t *height, uint32_t *format);
+            uint32_t *width, uint32_t *height,
+            uint32_t *format, android_dataspace *dataSpace);
     virtual status_t setStreamTransform(int id, int transform);
 
     virtual status_t deleteStream(int id);
     virtual status_t deleteReprocessStream(int id);
 
-    virtual status_t configureStreams();
+    virtual status_t configureStreams(bool isConstraiedHighSpeed = false);
+    virtual status_t getInputBufferProducer(
+            sp<IGraphicBufferProducer> *producer);
 
     virtual status_t createDefaultRequest(int templateId, CameraMetadata *request);
 
@@ -135,9 +139,14 @@
 
     virtual status_t flush(int64_t *lastFrameNumber = NULL);
 
+    virtual status_t prepare(int streamId);
+
+    virtual status_t tearDown(int streamId);
+
     virtual uint32_t getDeviceVersion();
 
     virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
+    ssize_t getPointCloudBufferSize() const;
 
     // Methods called by subclasses
     void             notifyStatus(bool idle); // updates from StatusTracker
@@ -145,9 +154,11 @@
   private:
     static const size_t        kDumpLockAttempts  = 10;
     static const size_t        kDumpSleepDuration = 100000; // 0.10 sec
-    static const size_t        kInFlightWarnLimit = 20;
     static const nsecs_t       kShutdownTimeout   = 5000000000; // 5 sec
     static const nsecs_t       kActiveTimeout     = 500000000;  // 500 ms
+    static const size_t        kInFlightWarnLimit = 20;
+    static const size_t        kInFlightWarnLimitHighSpeed = 256; // batch size 32 * pipe depth 8
+
     struct                     RequestTrigger;
     // minimal jpeg buffer size: 256KB + blob header
     static const ssize_t       kMinJpegBufferSize = 256 * 1024 + sizeof(camera3_jpeg_blob);
@@ -168,6 +179,9 @@
     // Camera device ID
     const int                  mId;
 
+    // Flag indicating whether the current active stream configuration is constrained high speed.
+    bool                       mIsConstrainedHighSpeedConfiguration;
+
     /**** Scope for mLock ****/
 
     camera3_device_t          *mHal3Device;
@@ -178,6 +192,14 @@
 
     uint32_t                   mDeviceVersion;
 
+    struct Size {
+        uint32_t width;
+        uint32_t height;
+        Size(uint32_t w = 0, uint32_t h = 0) : width(w), height(h){}
+    };
+    // Supported input sizes for the opaque (implementation-defined) format.
+    Vector<Size>               mSupportedOpaqueInputSizes;
+
     enum Status {
         STATUS_ERROR,
         STATUS_UNINITIALIZED,
@@ -185,7 +207,11 @@
         STATUS_CONFIGURED,
         STATUS_ACTIVE
     }                          mStatus;
+
+    // mRecentStatusUpdates and mStatusWaiters must only be cleared from waitUntilStateThenRelock
     Vector<Status>             mRecentStatusUpdates;
+    int                        mStatusWaiters;
+
     Condition                  mStatusChanged;
 
     // Tracking cause of fatal errors when in STATUS_ERROR
@@ -217,13 +243,24 @@
 
     /**** End scope for mLock ****/
 
+    typedef struct AeTriggerCancelOverride {
+        bool applyAeLock;
+        uint8_t aeLock;
+        bool applyAePrecaptureTrigger;
+        uint8_t aePrecaptureTrigger;
+    } AeTriggerCancelOverride_t;
+
     class CaptureRequest : public LightRefBase<CaptureRequest> {
       public:
         CameraMetadata                      mSettings;
         sp<camera3::Camera3Stream>          mInputStream;
+        camera3_stream_buffer_t             mInputBuffer;
         Vector<sp<camera3::Camera3OutputStreamInterface> >
                                             mOutputStreams;
         CaptureResultExtras                 mResultExtras;
+        // Used to cancel the AE precapture trigger for devices that don't support
+        // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
+        AeTriggerCancelOverride_t           mAeTriggerCancelOverride;
     };
     typedef List<sp<CaptureRequest> > RequestList;
 
@@ -245,6 +282,13 @@
     virtual CameraMetadata getLatestRequestLocked();
 
     /**
+     * Update the current device status and wake all waiting threads.
+     *
+     * Must be called with mLock held.
+     */
+    void internalUpdateStatusLocked(Status status);
+
+    /**
      * Pause processing and flush everything, but don't tell the clients.
      * This is for reconfiguring outputs transparently when according to the
      * CameraDeviceBase interface we shouldn't need to.
@@ -323,11 +367,11 @@
      */
     bool               tryLockSpinRightRound(Mutex& lock);
 
-    struct Size {
-        int width;
-        int height;
-        Size(int w, int h) : width(w), height(h){}
-    };
+    /**
+     * Helper function to determine if an input size for implementation defined
+     * format is supported.
+     */
+    bool isOpaqueInputSizeSupported(uint32_t width, uint32_t height);
 
     /**
      * Helper function to get the largest Jpeg resolution (in area)
@@ -361,9 +405,10 @@
 
         RequestThread(wp<Camera3Device> parent,
                 sp<camera3::StatusTracker> statusTracker,
-                camera3_device_t *hal3Device);
+                camera3_device_t *hal3Device,
+                bool aeLockAvailable);
 
-        void     setNotifyCallback(NotificationListener *listener);
+        void     setNotificationListener(NotificationListener *listener);
 
         /**
          * Call after stream (re)-configuration is completed.
@@ -427,6 +472,12 @@
          */
         CameraMetadata getLatestRequest() const;
 
+        /**
+         * Returns true if the stream is a target of any queued or repeating
+         * capture request
+         */
+        bool isStreamPending(sp<camera3::Camera3StreamInterface>& stream);
+
       protected:
 
         virtual bool threadLoop();
@@ -468,6 +519,9 @@
         // If the input request is in mRepeatingRequests. Must be called with mRequestLock hold
         bool isRepeatingRequestLocked(const sp<CaptureRequest>);
 
+        // Handle AE precapture trigger cancel for devices <= CAMERA_DEVICE_API_VERSION_3_2.
+        void handleAePrecaptureCancelRequest(sp<CaptureRequest> request);
+
         wp<Camera3Device>  mParent;
         wp<camera3::StatusTracker>  mStatusTracker;
         camera3_device_t  *mHal3Device;
@@ -482,6 +536,10 @@
         Condition          mRequestSignal;
         RequestList        mRequestQueue;
         RequestList        mRepeatingRequests;
+        // The next request being prepped for submission to the HAL, no longer
+        // on the request queue. Outside of threadLoop, this is read-only even
+        // with mRequestLock held.
+        sp<const CaptureRequest> mNextRequest;
 
         bool               mReconfigured;
 
@@ -512,6 +570,9 @@
         uint32_t           mCurrentPreCaptureTriggerId;
 
         int64_t            mRepeatingLastFrameNumber;
+
+        // Whether the device supports AE lock
+        bool               mAeLockAvailable;
     };
     sp<RequestThread> mRequestThread;
 
@@ -534,7 +595,6 @@
         // If this request has any input buffer
         bool hasInputBuffer;
 
-
         // The last metadata that framework receives from HAL and
         // not yet send out because the shutter event hasn't arrived.
         // It's added by process_capture_result and sent when framework
@@ -547,6 +607,9 @@
         // the shutter event.
         Vector<camera3_stream_buffer_t> pendingOutputBuffers;
 
+        // Used to cancel the AE precapture trigger for devices that don't support
+        // CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
+        AeTriggerCancelOverride_t aeTriggerCancelOverride;
 
 
         // Fields used by the partial result only
@@ -568,38 +631,23 @@
                 requestStatus(OK),
                 haveResultMetadata(false),
                 numBuffersLeft(0),
-                hasInputBuffer(false){
+                hasInputBuffer(false),
+                aeTriggerCancelOverride({false, 0, false, 0}){
         }
 
-        InFlightRequest(int numBuffers) :
-                shutterTimestamp(0),
-                sensorTimestamp(0),
-                requestStatus(OK),
-                haveResultMetadata(false),
-                numBuffersLeft(numBuffers),
-                hasInputBuffer(false){
-        }
-
-        InFlightRequest(int numBuffers, CaptureResultExtras extras) :
+        InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
+                AeTriggerCancelOverride aeTriggerCancelOverride) :
                 shutterTimestamp(0),
                 sensorTimestamp(0),
                 requestStatus(OK),
                 haveResultMetadata(false),
                 numBuffersLeft(numBuffers),
                 resultExtras(extras),
-                hasInputBuffer(false){
+                hasInputBuffer(hasInput),
+                aeTriggerCancelOverride(aeTriggerCancelOverride){
         }
+    };
 
-        InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput) :
-                shutterTimestamp(0),
-                sensorTimestamp(0),
-                requestStatus(OK),
-                haveResultMetadata(false),
-                numBuffersLeft(numBuffers),
-                resultExtras(extras),
-                hasInputBuffer(hasInput){
-        }
-};
     // Map from frame number to the in-flight request state
     typedef KeyedVector<uint32_t, InFlightRequest> InFlightMap;
 
@@ -607,7 +655,8 @@
     InFlightMap            mInFlightMap;
 
     status_t registerInFlight(uint32_t frameNumber,
-            int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput);
+            int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
+            const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
 
     /**
      * For the partial result, check if all 3A state fields are available
@@ -625,12 +674,59 @@
     template<typename T>
     bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
             uint32_t frameNumber);
+
+    /**
+     * Override result metadata for cancelling AE precapture trigger applied in
+     * handleAePrecaptureCancelRequest().
+     */
+    void overrideResultForPrecaptureCancel(CameraMetadata* result,
+            const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
     /**
      * Tracking for idle detection
      */
     sp<camera3::StatusTracker> mStatusTracker;
 
     /**
+     * Thread for preparing streams
+     */
+    class PreparerThread : private Thread, public virtual RefBase {
+      public:
+        PreparerThread();
+        ~PreparerThread();
+
+        void setNotificationListener(NotificationListener *listener);
+
+        /**
+         * Queue up a stream to be prepared. Streams are processed by
+         * a background thread in FIFO order
+         */
+        status_t prepare(sp<camera3::Camera3StreamInterface>& stream);
+
+        /**
+         * Cancel all current and pending stream preparation
+         */
+        status_t clear();
+
+      private:
+        Mutex mLock;
+
+        virtual bool threadLoop();
+
+        // Guarded by mLock
+
+        NotificationListener *mListener;
+        List<sp<camera3::Camera3StreamInterface> > mPendingStreams;
+        bool mActive;
+        bool mCancelNow;
+
+        // Only accessed by threadLoop and the destructor
+
+        sp<camera3::Camera3StreamInterface> mCurrentStream;
+    };
+    sp<PreparerThread> mPreparerThread;
+
+    /**
      * Output result queue and current HAL device 3A state
      */
 
@@ -638,8 +734,10 @@
     Mutex                  mOutputLock;
 
     /**** Scope for mOutputLock ****/
-
+    // the minimum frame number expected for the next non-reprocess result
     uint32_t               mNextResultFrameNumber;
+    // the minimum frame number expected for the next reprocess result
+    uint32_t               mNextReprocessResultFrameNumber;
     uint32_t               mNextShutterFrameNumber;
     List<CaptureResult>   mResultQueue;
     Condition              mResultSignal;
@@ -668,7 +766,8 @@
     // partial results, and the frame number to the result queue.
     void sendCaptureResult(CameraMetadata &pendingMetadata,
             CaptureResultExtras &resultExtras,
-            CameraMetadata &collectedPartialResult, uint32_t frameNumber);
+            CameraMetadata &collectedPartialResult, uint32_t frameNumber,
+            bool reprocess, const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
 
     /**** Scope for mInFlightLock ****/
 
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index 6656b09..ecb8ac8 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -28,7 +28,7 @@
 
 Camera3DummyStream::Camera3DummyStream(int id) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, DUMMY_WIDTH, DUMMY_HEIGHT,
-                /*maxSize*/0, DUMMY_FORMAT) {
+                /*maxSize*/0, DUMMY_FORMAT, DUMMY_DATASPACE, DUMMY_ROTATION) {
 
 }
 
@@ -87,7 +87,7 @@
     return OK;
 }
 
-status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) {
+status_t Camera3DummyStream::getEndpointUsage(uint32_t *usage) const {
     *usage = DUMMY_USAGE;
     return OK;
 }
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.h b/services/camera/libcameraservice/device3/Camera3DummyStream.h
index 3e42623..3a3dbf4 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.h
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.h
@@ -75,6 +75,8 @@
     static const int DUMMY_WIDTH = 320;
     static const int DUMMY_HEIGHT = 240;
     static const int DUMMY_FORMAT = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+    static const android_dataspace DUMMY_DATASPACE = HAL_DATASPACE_UNKNOWN;
+    static const camera3_stream_rotation_t DUMMY_ROTATION = CAMERA3_STREAM_ROTATION_0;
     static const uint32_t DUMMY_USAGE = GRALLOC_USAGE_HW_COMPOSER;
 
     /**
@@ -87,7 +89,7 @@
 
     virtual status_t configureQueueLocked();
 
-    virtual status_t getEndpointUsage(uint32_t *usage);
+    virtual status_t getEndpointUsage(uint32_t *usage) const;
 
 }; // class Camera3DummyStream
 
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index cc66459..23b1c45 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -30,9 +30,10 @@
 namespace camera3 {
 
 Camera3IOStreamBase::Camera3IOStreamBase(int id, camera3_stream_type_t type,
-        uint32_t width, uint32_t height, size_t maxSize, int format) :
+        uint32_t width, uint32_t height, size_t maxSize, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
         Camera3Stream(id, type,
-                width, height, maxSize, format),
+                width, height, maxSize, format, dataSpace, rotation),
         mTotalBufferCount(0),
         mHandoutTotalBufferCount(0),
         mHandoutOutputBufferCount(0),
@@ -66,13 +67,18 @@
 void Camera3IOStreamBase::dump(int fd, const Vector<String16> &args) const {
     (void) args;
     String8 lines;
+
+    uint32_t consumerUsage = 0;
+    status_t res = getEndpointUsage(&consumerUsage);
+    if (res != OK) consumerUsage = 0;
+
     lines.appendFormat("      State: %d\n", mState);
-    lines.appendFormat("      Dims: %d x %d, format 0x%x\n",
+    lines.appendFormat("      Dims: %d x %d, format 0x%x, dataspace 0x%x\n",
             camera3_stream::width, camera3_stream::height,
-            camera3_stream::format);
+            camera3_stream::format, camera3_stream::data_space);
     lines.appendFormat("      Max size: %zu\n", mMaxSize);
-    lines.appendFormat("      Usage: %d, max HAL buffers: %d\n",
-            camera3_stream::usage, camera3_stream::max_buffers);
+    lines.appendFormat("      Combined usage: %d, max HAL buffers: %d\n",
+            camera3_stream::usage | consumerUsage, camera3_stream::max_buffers);
     lines.appendFormat("      Frames produced: %d, last timestamp: %" PRId64 " ns\n",
             mFrameCount, mLastTimestamp);
     lines.appendFormat("      Total buffers: %zu, currently dequeued: %zu\n",
@@ -155,13 +161,11 @@
 
     // Inform tracker about becoming busy
     if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG &&
-            mState != STATE_IN_RECONFIG) {
+            mState != STATE_IN_RECONFIG && mState != STATE_PREPARING) {
         /**
          * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers
          * before/after register_stream_buffers during initial configuration
-         * or re-configuration.
-         *
-         * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2
+         * or re-configuration, or during prepare pre-allocation
          */
         sp<StatusTracker> statusTracker = mStatusTracker.promote();
         if (statusTracker != 0) {
@@ -176,9 +180,11 @@
 }
 
 status_t Camera3IOStreamBase::getBufferPreconditionCheckLocked() const {
-    // Allow dequeue during IN_[RE]CONFIG for registration
+    // Allow dequeue during IN_[RE]CONFIG for registration, in
+    // PREPARING for pre-allocation
     if (mState != STATE_CONFIGURED &&
-            mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) {
+            mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG &&
+            mState != STATE_PREPARING) {
         ALOGE("%s: Stream %d: Can't get buffers in unconfigured state %d",
                 __FUNCTION__, mId, mState);
         return INVALID_OPERATION;
@@ -239,13 +245,11 @@
 
     mHandoutTotalBufferCount--;
     if (mHandoutTotalBufferCount == 0 && mState != STATE_IN_CONFIG &&
-            mState != STATE_IN_RECONFIG) {
+            mState != STATE_IN_RECONFIG && mState != STATE_PREPARING) {
         /**
          * Avoid a spurious IDLE->ACTIVE->IDLE transition when using buffers
          * before/after register_stream_buffers during initial configuration
-         * or re-configuration.
-         *
-         * TODO: IN_CONFIG and IN_RECONFIG checks only make sense for <HAL3.2
+         * or re-configuration, or during prepare pre-allocation
          */
         ALOGV("%s: Stream %d: All buffers returned; now idle", __FUNCTION__,
                 mId);
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index a35c290..f5727e8 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -33,7 +33,8 @@
         public Camera3Stream {
   protected:
     Camera3IOStreamBase(int id, camera3_stream_type_t type,
-            uint32_t width, uint32_t height, size_t maxSize, int format);
+            uint32_t width, uint32_t height, size_t maxSize, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
   public:
 
@@ -83,7 +84,7 @@
 
     virtual size_t   getHandoutInputBufferCountLocked();
 
-    virtual status_t getEndpointUsage(uint32_t *usage) = 0;
+    virtual status_t getEndpointUsage(uint32_t *usage) const = 0;
 
     status_t getBufferPreconditionCheckLocked() const;
     status_t returnBufferPreconditionCheckLocked() const;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 9c1e28b..2504bfd 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -29,8 +29,8 @@
 
 Camera3InputStream::Camera3InputStream(int id,
         uint32_t width, uint32_t height, int format) :
-        Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height,
-                            /*maxSize*/0, format) {
+        Camera3IOStreamBase(id, CAMERA3_STREAM_INPUT, width, height, /*maxSize*/0,
+                            format, HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0) {
 
     if (format == HAL_PIXEL_FORMAT_BLOB) {
         ALOGE("%s: Bad format, BLOB not supported", __FUNCTION__);
@@ -65,8 +65,8 @@
     assert(mConsumer != 0);
 
     BufferItem bufferItem;
-    res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
 
+    res = mConsumer->acquireBuffer(&bufferItem, /*waitForFence*/false);
     if (res != OK) {
         ALOGE("%s: Stream %d: Can't acquire next output buffer: %s (%d)",
                 __FUNCTION__, mId, strerror(-res), res);
@@ -162,6 +162,21 @@
     return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false);
 }
 
+status_t Camera3InputStream::getInputBufferProducerLocked(
+            sp<IGraphicBufferProducer> *producer) {
+    ATRACE_CALL();
+
+    if (producer == NULL) {
+        return BAD_VALUE;
+    } else if (mProducer == NULL) {
+        ALOGE("%s: No input stream is configured");
+        return INVALID_OPERATION;
+    }
+
+    *producer = mProducer;
+    return OK;
+}
+
 status_t Camera3InputStream::disconnectLocked() {
 
     status_t res;
@@ -172,6 +187,8 @@
 
     assert(mBuffersInFlight.size() == 0);
 
+    mConsumer->abandon();
+
     /**
      *  no-op since we can't disconnect the producer from the consumer-side
      */
@@ -212,10 +229,17 @@
         res = producer->query(NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBuffers);
         if (res != OK || minUndequeuedBuffers < 0) {
             ALOGE("%s: Stream %d: Could not query min undequeued buffers (error %d, bufCount %d)",
-                  __FUNCTION__, mId, res, minUndequeuedBuffers);
+                    __FUNCTION__, mId, res, minUndequeuedBuffers);
             return res;
         }
         size_t minBufs = static_cast<size_t>(minUndequeuedBuffers);
+
+        if (camera3_stream::max_buffers == 0) {
+            ALOGE("%s: %d: HAL sets max_buffer to 0. Must be at least 1.",
+                    __FUNCTION__, __LINE__);
+            return INVALID_OPERATION;
+        }
+
         /*
          * We promise never to 'acquire' more than camera3_stream::max_buffers
          * at any one time.
@@ -232,6 +256,8 @@
         mConsumer = new BufferItemConsumer(consumer, camera3_stream::usage,
                                            mTotalBufferCount);
         mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
+
+        mProducer = producer;
     }
 
     res = mConsumer->setDefaultBufferSize(camera3_stream::width,
@@ -251,7 +277,7 @@
     return OK;
 }
 
-status_t Camera3InputStream::getEndpointUsage(uint32_t *usage) {
+status_t Camera3InputStream::getEndpointUsage(uint32_t *usage) const {
     // Per HAL3 spec, input streams have 0 for their initial usage field.
     *usage = 0;
     return OK;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index fd17f4f..9f3de10 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -49,6 +49,7 @@
   private:
 
     sp<BufferItemConsumer> mConsumer;
+    sp<IGraphicBufferProducer> mProducer;
     Vector<BufferItem> mBuffersInFlight;
 
     /**
@@ -68,11 +69,13 @@
     virtual status_t getInputBufferLocked(camera3_stream_buffer *buffer);
     virtual status_t returnInputBufferLocked(
             const camera3_stream_buffer &buffer);
+    virtual status_t getInputBufferProducerLocked(
+            sp<IGraphicBufferProducer> *producer);
     virtual status_t disconnectLocked();
 
     virtual status_t configureQueueLocked();
 
-    virtual status_t getEndpointUsage(uint32_t *usage);
+    virtual status_t getEndpointUsage(uint32_t *usage) const;
 
 }; // class Camera3InputStream
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 77ad503..8c611d5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -32,10 +32,11 @@
 namespace camera3 {
 
 Camera3OutputStream::Camera3OutputStream(int id,
-        sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, int format) :
+        sp<Surface> consumer,
+        uint32_t width, uint32_t height, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height,
-                            /*maxSize*/0, format),
+                            /*maxSize*/0, format, dataSpace, rotation),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true) {
@@ -47,10 +48,11 @@
 }
 
 Camera3OutputStream::Camera3OutputStream(int id,
-        sp<ANativeWindow> consumer,
-        uint32_t width, uint32_t height, size_t maxSize, int format) :
+        sp<Surface> consumer,
+        uint32_t width, uint32_t height, size_t maxSize, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
         Camera3IOStreamBase(id, CAMERA3_STREAM_OUTPUT, width, height, maxSize,
-                            format),
+                            format, dataSpace, rotation),
         mConsumer(consumer),
         mTransform(0),
         mTraceFirstBuffer(true) {
@@ -69,10 +71,12 @@
 
 Camera3OutputStream::Camera3OutputStream(int id, camera3_stream_type_t type,
                                          uint32_t width, uint32_t height,
-                                         int format) :
+                                         int format,
+                                         android_dataspace dataSpace,
+                                         camera3_stream_rotation_t rotation) :
         Camera3IOStreamBase(id, type, width, height,
                             /*maxSize*/0,
-                            format),
+                            format, dataSpace, rotation),
         mTransform(0) {
 
     // Subclasses expected to initialize mConsumer themselves
@@ -153,33 +157,9 @@
     ALOG_ASSERT(output, "Expected output to be true");
 
     status_t res;
-    sp<Fence> releaseFence;
 
-    /**
-     * Fence management - calculate Release Fence
-     */
-    if (buffer.status == CAMERA3_BUFFER_STATUS_ERROR) {
-        if (buffer.release_fence != -1) {
-            ALOGE("%s: Stream %d: HAL should not set release_fence(%d) when "
-                  "there is an error", __FUNCTION__, mId, buffer.release_fence);
-            close(buffer.release_fence);
-        }
-
-        /**
-         * Reassign release fence as the acquire fence in case of error
-         */
-        releaseFence = new Fence(buffer.acquire_fence);
-    } else {
-        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
-        if (res != OK) {
-            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
-                  __FUNCTION__, mId, strerror(-res), res);
-            return res;
-        }
-
-        releaseFence = new Fence(buffer.release_fence);
-    }
-
+    // Fence management - always honor release fence from HAL
+    sp<Fence> releaseFence = new Fence(buffer.release_fence);
     int anwReleaseFence = releaseFence->dup();
 
     /**
@@ -213,6 +193,13 @@
             mTraceFirstBuffer = false;
         }
 
+        res = native_window_set_buffers_timestamp(mConsumer.get(), timestamp);
+        if (res != OK) {
+            ALOGE("%s: Stream %d: Error setting timestamp: %s (%d)",
+                  __FUNCTION__, mId, strerror(-res), res);
+            return res;
+        }
+
         res = currentConsumer->queueBuffer(currentConsumer.get(),
                 container_of(buffer.buffer, ANativeWindowBuffer, handle),
                 anwReleaseFence);
@@ -222,6 +209,13 @@
         }
     }
     mLock.lock();
+
+    // Once a valid buffer has been returned to the queue, can no longer
+    // dequeue all buffers for preallocation.
+    if (buffer.status != CAMERA3_BUFFER_STATUS_ERROR) {
+        mStreamUnpreparable = true;
+    }
+
     if (res != OK) {
         close(anwReleaseFence);
     }
@@ -235,6 +229,7 @@
     (void) args;
     String8 lines;
     lines.appendFormat("    Stream[%d]: Output\n", mId);
+    lines.appendFormat("      Consumer name: %s\n", mConsumerName.string());
     write(fd, lines.string(), lines.size());
 
     Camera3IOStreamBase::dump(fd, args);
@@ -284,6 +279,8 @@
         return res;
     }
 
+    mConsumerName = mConsumer->getConsumerName();
+
     res = native_window_set_usage(mConsumer.get(), camera3_stream::usage);
     if (res != OK) {
         ALOGE("%s: Unable to configure usage %08x for stream %d",
@@ -323,8 +320,17 @@
         return res;
     }
 
+    res = native_window_set_buffers_data_space(mConsumer.get(),
+            camera3_stream::data_space);
+    if (res != OK) {
+        ALOGE("%s: Unable to configure stream dataspace %#x for stream %d",
+                __FUNCTION__, camera3_stream::data_space, mId);
+        return res;
+    }
+
     int maxConsumerBuffers;
-    res = mConsumer->query(mConsumer.get(),
+    res = static_cast<ANativeWindow*>(mConsumer.get())->query(
+            mConsumer.get(),
             NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &maxConsumerBuffers);
     if (res != OK) {
         ALOGE("%s: Unable to query consumer undequeued"
@@ -395,14 +401,28 @@
     return OK;
 }
 
-status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) {
+status_t Camera3OutputStream::getEndpointUsage(uint32_t *usage) const {
 
     status_t res;
     int32_t u = 0;
-    res = mConsumer->query(mConsumer.get(),
+    res = static_cast<ANativeWindow*>(mConsumer.get())->query(mConsumer.get(),
             NATIVE_WINDOW_CONSUMER_USAGE_BITS, &u);
-    *usage = u;
 
+    // If an opaque output stream's endpoint is ImageReader, add
+    // GRALLOC_USAGE_HW_CAMERA_ZSL to the usage so HAL knows it will be used
+    // for the ZSL use case.
+    // Assume it's for ImageReader if the consumer usage doesn't have any of these bits set:
+    //     1. GRALLOC_USAGE_HW_TEXTURE
+    //     2. GRALLOC_USAGE_HW_RENDER
+    //     3. GRALLOC_USAGE_HW_COMPOSER
+    //     4. GRALLOC_USAGE_HW_VIDEO_ENCODER
+    if (camera3_stream::format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED &&
+            (u & (GRALLOC_USAGE_HW_TEXTURE | GRALLOC_USAGE_HW_RENDER | GRALLOC_USAGE_HW_COMPOSER |
+            GRALLOC_USAGE_HW_VIDEO_ENCODER)) == 0) {
+        u |= GRALLOC_USAGE_HW_CAMERA_ZSL;
+    }
+
+    *usage = u;
     return res;
 }
 
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index be278c5..941d693 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -38,15 +38,17 @@
     /**
      * Set up a stream for formats that have 2 dimensions, such as RAW and YUV.
      */
-    Camera3OutputStream(int id, sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, int format);
+    Camera3OutputStream(int id, sp<Surface> consumer,
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     /**
      * Set up a stream for formats that have a variable buffer size for the same
      * dimensions, such as compressed JPEG.
      */
-    Camera3OutputStream(int id, sp<ANativeWindow> consumer,
-            uint32_t width, uint32_t height, size_t maxSize, int format);
+    Camera3OutputStream(int id, sp<Surface> consumer,
+            uint32_t width, uint32_t height, size_t maxSize, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     virtual ~Camera3OutputStream();
 
@@ -64,7 +66,8 @@
 
   protected:
     Camera3OutputStream(int id, camera3_stream_type_t type,
-            uint32_t width, uint32_t height, int format);
+            uint32_t width, uint32_t height, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     /**
      * Note that we release the lock briefly in this function
@@ -78,7 +81,7 @@
 
     virtual status_t disconnectLocked();
 
-    sp<ANativeWindow> mConsumer;
+    sp<Surface> mConsumer;
   private:
     int               mTransform;
 
@@ -86,6 +89,9 @@
 
     bool mTraceFirstBuffer;
 
+    // Name of Surface consumer
+    String8           mConsumerName;
+
     /**
      * Internal Camera3Stream interface
      */
@@ -96,7 +102,7 @@
 
     virtual status_t configureQueueLocked();
 
-    virtual status_t getEndpointUsage(uint32_t *usage);
+    virtual status_t getEndpointUsage(uint32_t *usage) const;
 
 }; // class Camera3OutputStream
 
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 3c0e908..2527fd6 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -46,7 +46,8 @@
 
 Camera3Stream::Camera3Stream(int id,
         camera3_stream_type type,
-        uint32_t width, uint32_t height, size_t maxSize, int format) :
+        uint32_t width, uint32_t height, size_t maxSize, int format,
+        android_dataspace dataSpace, camera3_stream_rotation_t rotation) :
     camera3_stream(),
     mId(id),
     mName(String8::format("Camera3Stream[%d]", id)),
@@ -58,6 +59,8 @@
     camera3_stream::width = width;
     camera3_stream::height = height;
     camera3_stream::format = format;
+    camera3_stream::data_space = dataSpace;
+    camera3_stream::rotation = rotation;
     camera3_stream::usage = 0;
     camera3_stream::max_buffers = 0;
     camera3_stream::priv = NULL;
@@ -84,6 +87,10 @@
     return camera3_stream::format;
 }
 
+android_dataspace Camera3Stream::getDataSpace() const {
+    return camera3_stream::data_space;
+}
+
 camera3_stream* Camera3Stream::startConfiguration() {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
@@ -102,11 +109,7 @@
             // oldUsage/oldMaxBuffers
             return this;
         case STATE_CONFIGURED:
-            if (stream_type == CAMERA3_STREAM_INPUT) {
-                ALOGE("%s: Cannot configure an input stream twice",
-                        __FUNCTION__);
-                return NULL;
-            } else if (hasOutstandingBuffersLocked()) {
+            if (hasOutstandingBuffersLocked()) {
                 ALOGE("%s: Cannot configure stream; has outstanding buffers",
                         __FUNCTION__);
                 return NULL;
@@ -187,6 +190,11 @@
         return OK;
     }
 
+    // Reset prepared state, since buffer config has changed, and existing
+    // allocations are no longer valid
+    mPrepared = false;
+    mStreamUnpreparable = false;
+
     status_t res;
     res = configureQueueLocked();
     if (res != OK) {
@@ -237,6 +245,180 @@
     return OK;
 }
 
+bool Camera3Stream::isUnpreparable() {
+    ATRACE_CALL();
+
+    Mutex::Autolock l(mLock);
+    return mStreamUnpreparable;
+}
+
+status_t Camera3Stream::startPrepare() {
+    ATRACE_CALL();
+
+    Mutex::Autolock l(mLock);
+    status_t res = OK;
+
+    // This function should be only called when the stream is configured already.
+    if (mState != STATE_CONFIGURED) {
+        ALOGE("%s: Stream %d: Can't prepare stream if stream is not in CONFIGURED "
+                "state %d", __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // This function can't be called if the stream has already received filled
+    // buffers
+    if (mStreamUnpreparable) {
+        ALOGE("%s: Stream %d: Can't prepare stream that's already in use",
+                __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    if (getHandoutOutputBufferCountLocked() > 0) {
+        ALOGE("%s: Stream %d: Can't prepare stream that has outstanding buffers",
+                __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    if (mPrepared) return OK;
+
+    size_t bufferCount = getBufferCountLocked();
+
+    mPreparedBuffers.insertAt(camera3_stream_buffer_t(), /*index*/0, bufferCount);
+    mPreparedBufferIdx = 0;
+
+    mState = STATE_PREPARING;
+
+    return NOT_ENOUGH_DATA;
+}
+
+bool Camera3Stream::isPreparing() const {
+    Mutex::Autolock l(mLock);
+    return mState == STATE_PREPARING;
+}
+
+status_t Camera3Stream::prepareNextBuffer() {
+    ATRACE_CALL();
+
+    Mutex::Autolock l(mLock);
+    status_t res = OK;
+
+    // This function should be only called when the stream is preparing
+    if (mState != STATE_PREPARING) {
+        ALOGE("%s: Stream %d: Can't prepare buffer if stream is not in PREPARING "
+                "state %d", __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // Get next buffer - this may allocate, and take a while for large buffers
+    res = getBufferLocked( &mPreparedBuffers.editItemAt(mPreparedBufferIdx) );
+    if (res != OK) {
+        ALOGE("%s: Stream %d: Unable to allocate buffer %d during preparation",
+                __FUNCTION__, mId, mPreparedBufferIdx);
+        return NO_INIT;
+    }
+
+    mPreparedBufferIdx++;
+
+    // Check if we still have buffers left to allocate
+    if (mPreparedBufferIdx < mPreparedBuffers.size()) {
+        return NOT_ENOUGH_DATA;
+    }
+
+    // Done with prepare - mark stream as such, and return all buffers
+    // via cancelPrepare
+    mPrepared = true;
+
+    return cancelPrepareLocked();
+}
+
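
Note: startPrepare() and prepareNextBuffer() above form a pull-style protocol: startPrepare() returns OK when nothing needs allocating and NOT_ENOUGH_DATA when the caller should keep invoking prepareNextBuffer(), each call allocating one buffer, until it finally returns OK. A hedged sketch of a caller-side driver loop for that protocol, against a simplified stand-in interface:

    // Hedged sketch of the caller-side loop; Preparable is a stand-in interface,
    // not the real Camera3Stream class.
    #include <cstdio>

    enum Status { OK = 0, NOT_ENOUGH_DATA = 1, INVALID_OPERATION = 2 };

    struct Preparable {
        int remaining = 3;  // pretend three buffers still need pre-allocation
        Status startPrepare() { return remaining == 0 ? OK : NOT_ENOUGH_DATA; }
        Status prepareNextBuffer() {
            if (remaining == 0) return INVALID_OPERATION;   // no longer preparing
            return --remaining == 0 ? OK : NOT_ENOUGH_DATA;
        }
    };

    // Drive preparation to completion, one buffer per iteration.
    Status prepareFully(Preparable& s) {
        Status res = s.startPrepare();
        if (res == OK) return OK;                    // nothing to do
        if (res != NOT_ENOUGH_DATA) return res;      // real error
        while ((res = s.prepareNextBuffer()) == NOT_ENOUGH_DATA) {
            // each call allocated one buffer; keep going
        }
        return res;                                  // OK, or an error code
    }

    int main() {
        Preparable stream;
        std::printf("prepare result: %d\n", prepareFully(stream));  // prints 0 (OK)
    }

PreparerThread above runs this same loop one buffer per threadLoop() iteration instead, so cancellation (mCancelNow) can take effect between allocations.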
+status_t Camera3Stream::cancelPrepare() {
+    ATRACE_CALL();
+
+    Mutex::Autolock l(mLock);
+
+    return cancelPrepareLocked();
+}
+
+status_t Camera3Stream::cancelPrepareLocked() {
+    status_t res = OK;
+
+    // This function should be only called when the stream is mid-preparing.
+    if (mState != STATE_PREPARING) {
+        ALOGE("%s: Stream %d: Can't cancel prepare stream if stream is not in "
+                "PREPARING state %d", __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // Return all valid buffers to stream, in ERROR state to indicate
+    // they weren't filled.
+    for (size_t i = 0; i < mPreparedBufferIdx; i++) {
+        mPreparedBuffers.editItemAt(i).release_fence = -1;
+        mPreparedBuffers.editItemAt(i).status = CAMERA3_BUFFER_STATUS_ERROR;
+        returnBufferLocked(mPreparedBuffers[i], 0);
+    }
+    mPreparedBuffers.clear();
+    mPreparedBufferIdx = 0;
+
+    mState = STATE_CONFIGURED;
+
+    return res;
+}
+
+status_t Camera3Stream::tearDown() {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    status_t res = OK;
+
+    // This function should be only called when the stream is configured.
+    if (mState != STATE_CONFIGURED) {
+        ALOGE("%s: Stream %d: Can't tear down stream if stream is not in "
+                "CONFIGURED state %d", __FUNCTION__, mId, mState);
+        return INVALID_OPERATION;
+    }
+
+    // If any buffers have been handed to the HAL, the stream cannot be torn down.
+    if (getHandoutOutputBufferCountLocked() > 0) {
+        ALOGE("%s: Stream %d: Can't tear down a stream that has outstanding buffers",
+                __FUNCTION__, mId);
+        return INVALID_OPERATION;
+    }
+
+    // Free buffers by disconnecting and then reconnecting to the buffer queue
+    // Only unused buffers will be dropped immediately; buffers that have been filled
+    // and are waiting to be acquired by the consumer and buffers that are currently
+    // acquired will be freed once they are released by the consumer.
+
+    res = disconnectLocked();
+    if (res != OK) {
+        if (res == -ENOTCONN) {
+            // queue has been disconnected, nothing left to do, so exit with success
+            return OK;
+        }
+        ALOGE("%s: Stream %d: Unable to disconnect to tear down buffers: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        return res;
+    }
+
+    mState = STATE_IN_CONFIG;
+
+    res = configureQueueLocked();
+    if (res != OK) {
+        ALOGE("%s: Unable to configure stream %d queue: %s (%d)",
+                __FUNCTION__, mId, strerror(-res), res);
+        mState = STATE_ERROR;
+        return res;
+    }
+
+    // Reset prepared state, since we've reconnected to the queue and can prepare again.
+    mPrepared = false;
+    mStreamUnpreparable = false;
+
+    mState = STATE_CONFIGURED;
+
+    return OK;
+}
+
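
Note: tearDown() and the prepare methods above all drive the same small state machine on mState: startPrepare() moves CONFIGURED to PREPARING, the final prepareNextBuffer() or a cancelPrepare() moves PREPARING back to CONFIGURED, and tearDown() passes briefly through IN_CONFIG while the buffer queue is reconnected. A hedged sketch of just that subset as a standalone enum and checker (not the full Camera3Stream state set):

    // Hedged sketch: only the prepare/tearDown-related subset of the stream
    // state machine, for illustration.
    enum class StreamState { Configured, Preparing, InConfig };

    struct StreamFsm {
        StreamState state = StreamState::Configured;

        bool startPrepare() {
            if (state != StreamState::Configured) return false;  // INVALID_OPERATION
            state = StreamState::Preparing;
            return true;
        }
        bool finishOrCancelPrepare() {
            if (state != StreamState::Preparing) return false;
            state = StreamState::Configured;
            return true;
        }
        bool tearDown() {
            if (state != StreamState::Configured) return false;
            state = StreamState::InConfig;       // disconnect + reconfigure the queue...
            state = StreamState::Configured;     // ...then back to CONFIGURED, unprepared again
            return true;
        }
    };

    int main() {
        StreamFsm s;
        bool ok = s.startPrepare();              // CONFIGURED -> PREPARING
        ok = ok && !s.tearDown();                // rejected: can't tear down mid-prepare
        ok = ok && s.finishOrCancelPrepare();    // PREPARING -> CONFIGURED
        ok = ok && s.tearDown();                 // CONFIGURED -> (IN_CONFIG) -> CONFIGURED
        return ok ? 0 : 1;
    }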
 status_t Camera3Stream::getBuffer(camera3_stream_buffer *buffer) {
     ATRACE_CALL();
     Mutex::Autolock l(mLock);
@@ -339,6 +521,13 @@
     return res;
 }
 
+status_t Camera3Stream::getInputBufferProducer(sp<IGraphicBufferProducer> *producer) {
+    ATRACE_CALL();
+    Mutex::Autolock l(mLock);
+
+    return getInputBufferProducerLocked(producer);
+}
+
 void Camera3Stream::fireBufferListenersLocked(
         const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) {
     List<wp<Camera3StreamBufferListener> >::iterator it, end;
@@ -413,15 +602,13 @@
             ALOGE("%s: register_stream_buffers is deprecated in HAL3.2; "
                     "must be set to NULL in camera3_device::ops", __FUNCTION__);
             return INVALID_OPERATION;
-        } else {
-            ALOGD("%s: Skipping NULL check for deprecated register_stream_buffers", __FUNCTION__);
         }
 
         return OK;
-    } else {
-        ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__);
     }
 
+    ALOGV("%s: register_stream_buffers using deprecated code path", __FUNCTION__);
+
     status_t res;
 
     size_t bufferCount = getBufferCountLocked();
@@ -477,6 +664,8 @@
         returnBufferLocked(streamBuffers[i], 0);
     }
 
+    mPrepared = true;
+
     return res;
 }
 
@@ -498,6 +687,10 @@
     ALOGE("%s: This type of stream does not support input", __FUNCTION__);
     return INVALID_OPERATION;
 }
+status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer) {
+    ALOGE("%s: This type of stream does not support input", __FUNCTION__);
+    return INVALID_OPERATION;
+}
 
 void Camera3Stream::addBufferListener(
         wp<Camera3StreamBufferListener> listener) {
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index d0e1337..bab2177 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -57,8 +57,15 @@
  *    re-registering buffers with HAL.
  *
  *  STATE_CONFIGURED: Stream is configured, and has registered buffers with the
- *    HAL. The stream's getBuffer/returnBuffer work. The priv pointer may still be
- *    modified.
+ *    HAL (if necessary). The stream's getBuffer/returnBuffer work. The priv
+ *    pointer may still be modified.
+ *
+ *  STATE_PREPARING: The stream's buffers are being pre-allocated for use.  On
+ *    older HALs, this is done as part of configuration, but in newer HALs
+ *    buffers may be allocated at time of first use. But some use cases require
+ *    buffer allocation upfront, to minimize disruption due to lengthy allocation
+ *    duration.  In this state, only prepareNextBuffer() and cancelPrepare()
+ *    may be called.
  *
  * Transition table:
  *
@@ -82,6 +89,12 @@
  *    STATE_CONFIGURED     => STATE_CONSTRUCTED:
  *        When disconnect() is called after making sure stream is idle with
  *        waitUntilIdle().
+ *    STATE_CONFIGURED     => STATE_PREPARING:
+ *        When startPrepare is called before the stream has a buffer
+ *        queued back into it for the first time.
+ *    STATE_PREPARING      => STATE_CONFIGURED:
+ *        When sufficient prepareNextBuffer calls have been made to allocate
+ *        all stream buffers, or cancelPrepare is called.
  *
  * Status Tracking:
  *    Each stream is tracked by StatusTracker as a separate component,
@@ -119,9 +132,10 @@
     /**
      * Get the stream's dimensions and format
      */
-    uint32_t         getWidth() const;
-    uint32_t         getHeight() const;
-    int              getFormat() const;
+    uint32_t          getWidth() const;
+    uint32_t          getHeight() const;
+    int               getFormat() const;
+    android_dataspace getDataSpace() const;
 
     /**
      * Start the stream configuration process. Returns a handle to the stream's
@@ -166,6 +180,87 @@
     status_t         cancelConfiguration();
 
     /**
+     * Check whether the stream has already become in-use (has received
+     * a valid filled buffer), which determines if a stream can still have
+     * prepareNextBuffer called on it.
+     */
+    bool             isUnpreparable();
+
+    /**
+     * Start stream preparation. May only be called in the CONFIGURED state,
+     * when no valid buffers have yet been returned to this stream.
+     *
+     * If no preparation is necessary, returns OK and does not transition to
+     * PREPARING state. Otherwise, returns NOT_ENOUGH_DATA and transitions
+     * to PREPARING.
+     *
+     * This call performs no allocation, so is quick to call.
+     *
+     * Returns:
+     *    OK if no more buffers need to be preallocated
+     *    NOT_ENOUGH_DATA if calls to prepareNextBuffer are needed to finish
+     *        buffer pre-allocation, and transitions to the PREPARING state.
+     *    NO_INIT in case of a serious error from the HAL device
+     *    INVALID_OPERATION if called when not in CONFIGURED state, or a
+     *        valid buffer has already been returned to this stream.
+     */
+    status_t         startPrepare();
+
+    /**
+     * Check if the stream is mid-preparing.
+     */
+    bool             isPreparing() const;
+
+    /**
+     * Continue stream buffer preparation by allocating the next
+     * buffer for this stream.  May only be called in the PREPARING state.
+     *
+     * Returns OK and transitions to the CONFIGURED state if all buffers
+     * are allocated after the call concludes. Otherwise returns NOT_ENOUGH_DATA.
+     *
+     * This call allocates one buffer, which may take several milliseconds for
+     * large buffers.
+     *
+     * Returns:
+     *    OK if no more buffers need to be preallocated, and transitions
+     *        to the CONFIGURED state.
+     *    NOT_ENOUGH_DATA if more calls to prepareNextBuffer are needed to finish
+     *        buffer pre-allocation.
+     *    NO_INIT in case of a serious error from the HAL device
+     *    INVALID_OPERATION if called when not in CONFIGURED state, or a
+     *        valid buffer has already been returned to this stream.
+     */
+    status_t         prepareNextBuffer();
+
+    /**
+     * Cancel stream preparation early. In case allocation needs to be
+     * stopped, this method transitions the stream back to the CONFIGURED state.
+     * Buffers that have been allocated with prepareNextBuffer remain that way,
+     * but a later prepare attempt will require just as many prepareNextBuffer
+     * calls as if the earlier attempt had never occurred.
+     *
+     * Returns:
+     *    OK if cancellation succeeded, and transitions to the CONFIGURED state
+     *    INVALID_OPERATION if not in the PREPARING state
+     *    NO_INIT in case of a serious error from the HAL device
+     */
+    status_t        cancelPrepare();
+
+    /**
+     * Tear down memory for this stream. This frees all unused gralloc buffers
+     * allocated for this stream, but leaves it ready for operation afterward.
+     *
+     * May only be called in the CONFIGURED state, and keeps the stream in
+     * the CONFIGURED state.
+     *
+     * Returns:
+     *    OK if teardown succeeded.
+     *    INVALID_OPERATION if not in the CONFIGURED state
+     *    NO_INIT in case of a serious error from the HAL device
+     */
+    status_t       tearDown();
+
+    /**
      * Fill in the camera3_stream_buffer with the next valid buffer for this
      * stream, to hand over to the HAL.
      *
@@ -204,6 +299,10 @@
      */
     status_t         returnInputBuffer(const camera3_stream_buffer &buffer);
 
+    // Get the buffer producer of the input buffer queue.
+    // Only applies to input streams.
+    status_t         getInputBufferProducer(sp<IGraphicBufferProducer> *producer);
+
     /**
      * Whether any of the stream's buffers are currently in use by the HAL,
      * including buffers that have been returned but not yet had their
@@ -258,13 +357,15 @@
         STATE_CONSTRUCTED,
         STATE_IN_CONFIG,
         STATE_IN_RECONFIG,
-        STATE_CONFIGURED
+        STATE_CONFIGURED,
+        STATE_PREPARING
     } mState;
 
     mutable Mutex mLock;
 
     Camera3Stream(int id, camera3_stream_type type,
-            uint32_t width, uint32_t height, size_t maxSize, int format);
+            uint32_t width, uint32_t height, size_t maxSize, int format,
+            android_dataspace dataSpace, camera3_stream_rotation_t rotation);
 
     /**
      * Interface to be implemented by derived classes
@@ -283,6 +384,9 @@
     virtual status_t returnInputBufferLocked(
             const camera3_stream_buffer &buffer);
     virtual bool     hasOutstandingBuffersLocked() const = 0;
+    // Get the buffer producer of the input buffer queue. Only applies to input streams.
+    virtual status_t getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer);
+
     // Can return -ENOTCONN when we are already disconnected (not an error)
     virtual status_t disconnectLocked() = 0;
 
@@ -303,13 +407,17 @@
 
     // Get the usage flags for the other endpoint, or return
     // INVALID_OPERATION if they cannot be obtained.
-    virtual status_t getEndpointUsage(uint32_t *usage) = 0;
+    virtual status_t getEndpointUsage(uint32_t *usage) const = 0;
 
     // Tracking for idle state
     wp<StatusTracker> mStatusTracker;
     // Status tracker component ID
     int mStatusId;
 
+    // Tracking for stream prepare - whether this stream can still have
+    // prepareNextBuffer called on it.
+    bool mStreamUnpreparable;
+
   private:
     uint32_t oldUsage;
     uint32_t oldMaxBuffers;
@@ -324,6 +432,18 @@
                                   bool acquired, bool output);
     List<wp<Camera3StreamBufferListener> > mBufferListenerList;
 
+    status_t        cancelPrepareLocked();
+
+    // Tracking for PREPARING state
+
+    // State of buffer preallocation. Only true if either prepareNextBuffer
+    // has been called a sufficient number of times, or stream configuration
+    // had to register buffers with the HAL
+    bool mPrepared;
+
+    Vector<camera3_stream_buffer_t> mPreparedBuffers;
+    size_t mPreparedBufferIdx;
+
 }; // class Camera3Stream
 
 }; // namespace camera3
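For reference, here is a minimal caller-side sketch of the prepare flow documented above. The helper below is hypothetical and not part of this change; it uses only the startPrepare/prepareNextBuffer/cancelPrepare/isPreparing methods declared in this header, and the include paths are assumed.

    #include <utils/Errors.h>
    #include "device3/Camera3Stream.h"

    using namespace android;
    using namespace android::camera3;

    // Drives a CONFIGURED stream through PREPARING until all buffers are
    // preallocated. Assumes no valid buffer has been returned to the stream yet.
    status_t prepareAllBuffers(Camera3Stream *stream) {
        status_t res = stream->startPrepare();
        if (res == OK) {
            return OK;                  // Nothing to preallocate
        }
        if (res != NOT_ENOUGH_DATA) {
            return res;                 // NO_INIT or INVALID_OPERATION
        }
        // Each call allocates one buffer and may take several milliseconds.
        while ((res = stream->prepareNextBuffer()) == NOT_ENOUGH_DATA) {
        }
        if (res != OK && stream->isPreparing()) {
            stream->cancelPrepare();    // Drop back to CONFIGURED on failure
        }
        return res;
    }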
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index da989cd..c086eaf 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -45,6 +45,7 @@
     virtual uint32_t getWidth() const = 0;
     virtual uint32_t getHeight() const = 0;
     virtual int      getFormat() const = 0;
+    virtual android_dataspace getDataSpace() const = 0;
 
     /**
      * Start the stream configuration process. Returns a handle to the stream's
@@ -89,6 +90,82 @@
     virtual status_t cancelConfiguration() = 0;
 
     /**
+     * Check whether the stream has already become in-use (has received
+     * a valid filled buffer), which determines if a stream can still have
+     * prepareNextBuffer called on it.
+     */
+    virtual bool     isUnpreparable() = 0;
+
+    /**
+     * Start stream preparation. May only be called in the CONFIGURED state,
+     * when no valid buffers have yet been returned to this stream.
+     *
+     * If no preparation is necessary, returns OK and does not transition to
+     * PREPARING state. Otherwise, returns NOT_ENOUGH_DATA and transitions
+     * to PREPARING.
+     *
+     * Returns:
+     *    OK if no more buffers need to be preallocated
+     *    NOT_ENOUGH_DATA if calls to prepareNextBuffer are needed to finish
+     *        buffer pre-allocation, and transitions to the PREPARING state.
+     *    NO_INIT in case of a serious error from the HAL device
+     *    INVALID_OPERATION if called when not in CONFIGURED state, or a
+     *        valid buffer has already been returned to this stream.
+     */
+    virtual status_t startPrepare() = 0;
+
+    /**
+     * Check if the stream is mid-preparing.
+     */
+    virtual bool     isPreparing() const = 0;
+
+    /**
+     * Continue stream buffer preparation by allocating the next
+     * buffer for this stream.  May only be called in the PREPARING state.
+     *
+     * Returns OK and transitions to the CONFIGURED state if all buffers
+     * are allocated after the call concludes. Otherwise returns NOT_ENOUGH_DATA.
+     *
+     * Returns:
+     *    OK if no more buffers need to be preallocated, and transitions
+     *        to the CONFIGURED state.
+     *    NOT_ENOUGH_DATA if more calls to prepareNextBuffer are needed to finish
+     *        buffer pre-allocation.
+     *    NO_INIT in case of a serious error from the HAL device
+     *    INVALID_OPERATION if called when not in CONFIGURED state, or a
+     *        valid buffer has already been returned to this stream.
+     */
+    virtual status_t prepareNextBuffer() = 0;
+
+    /**
+     * Cancel stream preparation early. In case allocation needs to be
+     * stopped, this method transitions the stream back to the CONFIGURED state.
+     * Buffers that have been allocated with prepareNextBuffer remain that way,
+     * but a later prepare attempt will require just as many prepareNextBuffer
+     * calls as if the earlier attempt had never occurred.
+     *
+     * Returns:
+     *    OK if cancellation succeeded, and transitions to the CONFIGURED state
+     *    INVALID_OPERATION if not in the PREPARING state
+     *    NO_INIT in case of a serious error from the HAL device
+     */
+    virtual status_t cancelPrepare() = 0;
+
+    /**
+     * Tear down memory for this stream. This frees all unused gralloc buffers
+     * allocated for this stream, but leaves it ready for operation afterward.
+     *
+     * May only be called in the CONFIGURED state, and keeps the stream in
+     * the CONFIGURED state.
+     *
+     * Returns:
+     *    OK if teardown succeeded.
+     *    INVALID_OPERATION if not in the CONFIGURED state
+     *    NO_INIT in case of a serious error from the HAL device
+     */
+    virtual status_t tearDown() = 0;
+
+    /**
      * Fill in the camera3_stream_buffer with the next valid buffer for this
      * stream, to hand over to the HAL.
      *
@@ -128,6 +205,13 @@
     virtual status_t returnInputBuffer(const camera3_stream_buffer &buffer) = 0;
 
     /**
+     * Get the buffer producer of the input buffer queue.
+     *
+     * This method only applies to input streams.
+     */
+    virtual status_t getInputBufferProducer(sp<IGraphicBufferProducer> *producer) = 0;
+
+    /**
      * Whether any of the stream's buffers are currently in use by the HAL,
      * including buffers that have been returned but not yet had their
      * release fence signaled.
diff --git a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
index 81330ea..eefcb44 100644
--- a/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3ZslStream.cpp
@@ -114,13 +114,15 @@
         int bufferCount) :
         Camera3OutputStream(id, CAMERA3_STREAM_BIDIRECTIONAL,
                             width, height,
-                            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED),
+                            HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+                            HAL_DATASPACE_UNKNOWN, CAMERA3_STREAM_ROTATION_0),
         mDepth(bufferCount) {
 
     sp<IGraphicBufferProducer> producer;
     sp<IGraphicBufferConsumer> consumer;
     BufferQueue::createBufferQueue(&producer, &consumer);
     mProducer = new RingBufferConsumer(consumer, GRALLOC_USAGE_HW_CAMERA_ZSL, bufferCount);
+    mProducer->setName(String8("Camera2-ZslRingBufferConsumer"));
     mConsumer = new Surface(producer);
 }
 
diff --git a/services/camera/libcameraservice/utils/AutoConditionLock.cpp b/services/camera/libcameraservice/utils/AutoConditionLock.cpp
new file mode 100644
index 0000000..c8ee965
--- /dev/null
+++ b/services/camera/libcameraservice/utils/AutoConditionLock.cpp
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "AutoConditionLock.h"
+
+namespace android {
+
+WaitableMutexWrapper::WaitableMutexWrapper(Mutex* mutex) : mMutex{mutex}, mState{false} {}
+
+WaitableMutexWrapper::~WaitableMutexWrapper() {}
+
+// Locks manager-owned mutex
+AutoConditionLock::AutoConditionLock(const std::shared_ptr<WaitableMutexWrapper>& manager) :
+        mManager{manager}, mAutoLock{manager->mMutex} {}
+
+// Unlocks manager-owned mutex
+AutoConditionLock::~AutoConditionLock() {
+    // Unset the condition and wake everyone up before releasing lock
+    mManager->mState = false;
+    mManager->mCondition.broadcast();
+}
+
+std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
+        const std::shared_ptr<WaitableMutexWrapper>& manager, nsecs_t waitTime) {
+
+    if (manager == nullptr || manager->mMutex == nullptr) {
+        // Bad input, return null
+        return std::unique_ptr<AutoConditionLock>{nullptr};
+    }
+
+    // Acquire scoped lock
+    std::unique_ptr<AutoConditionLock> scopedLock(new AutoConditionLock(manager));
+
+    // Figure out what time in the future we should hit the timeout
+    nsecs_t failTime = systemTime(SYSTEM_TIME_MONOTONIC) + waitTime;
+
+    // Wait until we timeout, or success
+    while (manager->mState) {
+        status_t ret = manager->mCondition.waitRelative(*(manager->mMutex), waitTime);
+        if (ret != NO_ERROR) {
+            // Timed out or hit an error, return null
+            return std::unique_ptr<AutoConditionLock>{nullptr};
+        }
+        waitTime = failTime - systemTime(SYSTEM_TIME_MONOTONIC);
+    }
+
+    // Set the condition and return
+    manager->mState = true;
+    return scopedLock;
+}
+
+std::unique_ptr<AutoConditionLock> AutoConditionLock::waitAndAcquire(
+        const std::shared_ptr<WaitableMutexWrapper>& manager) {
+
+    if (manager == nullptr || manager->mMutex == nullptr) {
+        // Bad input, return null
+        return std::unique_ptr<AutoConditionLock>{nullptr};
+    }
+
+    // Acquire scoped lock
+    std::unique_ptr<AutoConditionLock> scopedLock(new AutoConditionLock(manager));
+
+    // Wait until we timeout, or success
+    while (manager->mState) {
+        status_t ret = manager->mCondition.wait(*(manager->mMutex));
+        if (ret != NO_ERROR) {
+            // Got an error while waiting, return null
+            return std::unique_ptr<AutoConditionLock>{nullptr};
+        }
+    }
+
+    // Set the condition and return
+    manager->mState = true;
+    return scopedLock;
+}
+
+}; // namespace android
diff --git a/services/camera/libcameraservice/utils/AutoConditionLock.h b/services/camera/libcameraservice/utils/AutoConditionLock.h
new file mode 100644
index 0000000..9a3eafc
--- /dev/null
+++ b/services/camera/libcameraservice/utils/AutoConditionLock.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#ifndef ANDROID_SERVICE_UTILS_SCOPED_CONDITION_H
+#define ANDROID_SERVICE_UTILS_SCOPED_CONDITION_H
+
+#include <utils/Timers.h>
+#include <utils/Condition.h>
+#include <utils/Errors.h>
+#include <utils/Mutex.h>
+
+#include <memory>
+
+namespace android {
+
+/**
+ * WaitableMutexWrapper can be used with AutoConditionLock to construct scoped locks for the
+ * wrapped Mutex with timeouts for lock acquisition.
+ */
+class WaitableMutexWrapper {
+    friend class AutoConditionLock;
+public:
+    /**
+     * Construct the WaitableMutexWrapper with the given Mutex.
+     */
+    WaitableMutexWrapper(Mutex* mutex);
+
+    virtual ~WaitableMutexWrapper();
+private:
+    Mutex* mMutex;
+    bool mState;
+    Condition mCondition;
+};
+
+/**
+ * AutoConditionLock is a scoped lock similar to Mutex::Autolock, but allows timeouts to be
+ * specified for lock acquisition.
+ *
+ * AutoConditionLock is used with a WaitableMutexWrapper to lock/unlock the WaitableMutexWrapper's
+ * wrapped Mutex, and wait/set/signal the WaitableMutexWrapper's wrapped condition. To use this,
+ * call AutoConditionLock::waitAndAcquire to get an instance.  This will:
+ *      - Lock the given WaitableMutexWrapper's mutex.
+ *      - Wait for the WaitableMutexWrapper's condition to become false, or timeout.
+ *      - Set the WaitableMutexWrapper's condition to true.
+ *
+ * When the AutoConditionLock goes out of scope and is destroyed, this will:
+ *      - Set the WaitableMutexWrapper's condition to false.
+ *      - Signal threads waiting on this condition to wakeup.
+ *      - Release WaitableMutexWrapper's mutex.
+ */
+class AutoConditionLock final {
+public:
+    AutoConditionLock() = delete;
+    AutoConditionLock(const AutoConditionLock& other) = delete;
+    AutoConditionLock & operator=(const AutoConditionLock&) = delete;
+
+    ~AutoConditionLock();
+
+    /**
+     * Make a new AutoConditionLock from a given WaitableMutexWrapper, waiting up to waitTime
+     * nanoseconds to acquire the WaitableMutexWrapper's wrapped lock.
+     *
+     * Return an empty unique_ptr if this fails, or a timeout occurs.
+     */
+    static std::unique_ptr<AutoConditionLock> waitAndAcquire(
+            const std::shared_ptr<WaitableMutexWrapper>& manager, nsecs_t waitTime);
+
+    /**
+     * Make a new AutoConditionLock from a given WaitableMutexWrapper, waiting indefinitely to
+     * acquire the WaitableMutexWrapper's wrapped lock.
+     *
+     * Return an empty unique_ptr if this fails.
+     */
+    static std::unique_ptr<AutoConditionLock> waitAndAcquire(
+            const std::shared_ptr<WaitableMutexWrapper>& manager);
+private:
+    AutoConditionLock(const std::shared_ptr<WaitableMutexWrapper>& manager);
+
+    std::shared_ptr<WaitableMutexWrapper> mManager;
+    Mutex::Autolock mAutoLock;
+};
+
+}; // namespace android
+
+#endif // ANDROID_SERVICE_UTILS_SCOPED_CONDITION_H
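A minimal usage sketch for the timed scoped lock above. The mutex, timeout, and surrounding function are invented for illustration; only WaitableMutexWrapper and AutoConditionLock::waitAndAcquire come from this change.

    #include <memory>
    #include <utils/Mutex.h>
    #include "utils/AutoConditionLock.h"

    using namespace android;

    static Mutex gOpMutex;
    static std::shared_ptr<WaitableMutexWrapper> gOpWrapper =
            std::make_shared<WaitableMutexWrapper>(&gOpMutex);

    bool doExclusiveWork() {
        // Wait up to 100 ms for any other holder to finish.
        std::unique_ptr<AutoConditionLock> lock =
                AutoConditionLock::waitAndAcquire(gOpWrapper, 100000000 /* ns */);
        if (lock == nullptr) {
            return false;  // Timed out; another operation is still in progress
        }
        // ... critical section; the wrapped condition is reset and waiters are
        // woken when `lock` goes out of scope ...
        return true;
    }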
diff --git a/services/camera/libcameraservice/utils/ClientManager.h b/services/camera/libcameraservice/utils/ClientManager.h
new file mode 100644
index 0000000..7ae58d5
--- /dev/null
+++ b/services/camera/libcameraservice/utils/ClientManager.h
@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
+#define ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
+
+#include <utils/Condition.h>
+#include <utils/Mutex.h>
+#include <utils/Timers.h>
+
+#include <algorithm>
+#include <utility>
+#include <vector>
+#include <set>
+#include <map>
+#include <memory>
+
+namespace android {
+namespace resource_policy {
+
+// --------------------------------------------------------------------------------
+
+/**
+ * The ClientDescriptor class is a container for a given key/value pair identifying a shared
+ * resource, and the corresponding cost, priority, owner ID, and conflicting keys list used
+ * in determining eviction behavior.
+ *
+ * Aside from the priority, these values are immutable once the ClientDescriptor has been
+ * constructed.
+ */
+template<class KEY, class VALUE>
+class ClientDescriptor final {
+public:
+    ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
+            const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId);
+    ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost, std::set<KEY>&& conflictingKeys,
+            int32_t priority, int32_t ownerId);
+
+    ~ClientDescriptor();
+
+    /**
+     * Return the key for this descriptor.
+     */
+    const KEY& getKey() const;
+
+    /**
+     * Return the value for this descriptor.
+     */
+    const VALUE& getValue() const;
+
+    /**
+     * Return the cost for this descriptor.
+     */
+    int32_t getCost() const;
+
+    /**
+     * Return the priority for this descriptor.
+     */
+    int32_t getPriority() const;
+
+    /**
+     * Return the owner ID for this descriptor.
+     */
+    int32_t getOwnerId() const;
+
+    /**
+     * Return true if the given key is in this descriptor's conflicting keys list.
+     */
+    bool isConflicting(const KEY& key) const;
+
+    /**
+     * Return the set of all conflicting keys for this descriptor.
+     */
+    std::set<KEY> getConflicting() const;
+
+    /**
+     * Set the priority for this descriptor.
+     */
+    void setPriority(int32_t priority);
+
+    // This class is ordered by key
+    template<class K, class V>
+    friend bool operator < (const ClientDescriptor<K, V>& a, const ClientDescriptor<K, V>& b);
+
+private:
+    KEY mKey;
+    VALUE mValue;
+    int32_t mCost;
+    std::set<KEY> mConflicting;
+    int32_t mPriority;
+    int32_t mOwnerId;
+}; // class ClientDescriptor
+
+template<class K, class V>
+bool operator < (const ClientDescriptor<K, V>& a, const ClientDescriptor<K, V>& b) {
+    return a.mKey < b.mKey;
+}
+
+template<class KEY, class VALUE>
+ClientDescriptor<KEY, VALUE>::ClientDescriptor(const KEY& key, const VALUE& value, int32_t cost,
+        const std::set<KEY>& conflictingKeys, int32_t priority, int32_t ownerId) : mKey{key},
+        mValue{value}, mCost{cost}, mConflicting{conflictingKeys}, mPriority{priority},
+        mOwnerId{ownerId} {}
+
+template<class KEY, class VALUE>
+ClientDescriptor<KEY, VALUE>::ClientDescriptor(KEY&& key, VALUE&& value, int32_t cost,
+        std::set<KEY>&& conflictingKeys, int32_t priority, int32_t ownerId) :
+        mKey{std::forward<KEY>(key)}, mValue{std::forward<VALUE>(value)}, mCost{cost},
+        mConflicting{std::forward<std::set<KEY>>(conflictingKeys)}, mPriority{priority},
+        mOwnerId{ownerId} {}
+
+template<class KEY, class VALUE>
+ClientDescriptor<KEY, VALUE>::~ClientDescriptor() {}
+
+template<class KEY, class VALUE>
+const KEY& ClientDescriptor<KEY, VALUE>::getKey() const {
+    return mKey;
+}
+
+template<class KEY, class VALUE>
+const VALUE& ClientDescriptor<KEY, VALUE>::getValue() const {
+    return mValue;
+}
+
+template<class KEY, class VALUE>
+int32_t ClientDescriptor<KEY, VALUE>::getCost() const {
+    return mCost;
+}
+
+template<class KEY, class VALUE>
+int32_t ClientDescriptor<KEY, VALUE>::getPriority() const {
+    return mPriority;
+}
+
+template<class KEY, class VALUE>
+int32_t ClientDescriptor<KEY, VALUE>::getOwnerId() const {
+    return mOwnerId;
+}
+
+template<class KEY, class VALUE>
+bool ClientDescriptor<KEY, VALUE>::isConflicting(const KEY& key) const {
+    if (key == mKey) return true;
+    for (const auto& x : mConflicting) {
+        if (key == x) return true;
+    }
+    return false;
+}
+
+template<class KEY, class VALUE>
+std::set<KEY> ClientDescriptor<KEY, VALUE>::getConflicting() const {
+    return mConflicting;
+}
+
+template<class KEY, class VALUE>
+void ClientDescriptor<KEY, VALUE>::setPriority(int32_t priority) {
+    mPriority = priority;
+}
+
+// --------------------------------------------------------------------------------
+
+/**
+ * A default class implementing the LISTENER interface used by ClientManager.
+ */
+template<class KEY, class VALUE>
+class DefaultEventListener {
+public:
+    void onClientAdded(const ClientDescriptor<KEY, VALUE>& descriptor);
+    void onClientRemoved(const ClientDescriptor<KEY, VALUE>& descriptor);
+};
+
+template<class KEY, class VALUE>
+void DefaultEventListener<KEY, VALUE>::onClientAdded(
+        const ClientDescriptor<KEY, VALUE>& /*descriptor*/) {}
+
+template<class KEY, class VALUE>
+void DefaultEventListener<KEY, VALUE>::onClientRemoved(
+        const ClientDescriptor<KEY, VALUE>& /*descriptor*/) {}
+
+// --------------------------------------------------------------------------------
+
+/**
+ * The ClientManager class wraps an LRU-ordered list of active clients and implements eviction
+ * behavior for handling shared resource access.
+ *
+ * When adding a new descriptor, eviction behavior is as follows:
+ *   - Keys are unique, adding a descriptor with the same key as an existing descriptor will
+ *     result in the lower-priority of the two being removed.  Priority ties result in the
+ *     LRU descriptor being evicted (this means the incoming descriptor will be added in this case).
+ *   - Any descriptors with keys that are in the incoming descriptor's 'conflicting keys' list
+ *     will be removed if they have an equal or lower priority than the incoming descriptor;
+ *     if any have a higher priority, the incoming descriptor is removed instead.
+ *   - If the sum of all descriptors' costs, including the incoming descriptor's, is more than
+ *     the max cost allowed for this ClientManager, descriptors with non-zero cost, equal or lower
+ *     priority, and a different owner will be evicted in LRU order until either the cost is less
+ *     than the max cost, or all descriptors meeting these criteria have been evicted and the
+ *     incoming descriptor has the highest priority.  Otherwise, the incoming descriptor is
+ *     removed instead.
+ */
+template<class KEY, class VALUE, class LISTENER=DefaultEventListener<KEY, VALUE>>
+class ClientManager {
+public:
+    // The default maximum "cost" allowed before evicting
+    static constexpr int32_t DEFAULT_MAX_COST = 100;
+
+    ClientManager();
+    ClientManager(int32_t totalCost);
+
+    /**
+     * Add a given ClientDescriptor to the managed list.  ClientDescriptors for clients that
+     * are evicted by this action are returned in a vector.
+     *
+     * This may return the ClientDescriptor passed in if it would be evicted.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> addAndEvict(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client);
+
+    /**
+     * Given a map containing owner (pid) -> priority mappings, update the priority of each
+     * ClientDescriptor with an owner in this mapping.
+     */
+    void updatePriorities(const std::map<int32_t,int32_t>& ownerPriorityList);
+
+    /**
+     * Remove all ClientDescriptors.
+     */
+    void removeAll();
+
+    /**
+     * Remove and return the ClientDescriptor with a given key.
+     */
+    std::shared_ptr<ClientDescriptor<KEY, VALUE>> remove(const KEY& key);
+
+    /**
+     * Remove the given ClientDescriptor.
+     */
+    void remove(const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& value);
+
+    /**
+     * Return a vector of the ClientDescriptors that would be evicted by adding the given
+     * ClientDescriptor.
+     *
+     * This may return the ClientDescriptor passed in if it would be evicted.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> wouldEvict(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const;
+
+    /**
+     * Return a vector of active ClientDescriptors that prevent this client from being added.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> getIncompatibleClients(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const;
+
+    /**
+     * Return a vector containing all currently active ClientDescriptors.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> getAll() const;
+
+    /**
+     * Return a vector containing all keys of currently active ClientDescriptors.
+     */
+    std::vector<KEY> getAllKeys() const;
+
+    /**
+     * Return a vector of the owner tags of all currently active ClientDescriptors (duplicates
+     * will be removed).
+     */
+    std::vector<int32_t> getAllOwners() const;
+
+    /**
+     * Return the ClientDescriptor corresponding to the given key, or an empty shared pointer
+     * if none exists.
+     */
+    std::shared_ptr<ClientDescriptor<KEY, VALUE>> get(const KEY& key) const;
+
+    /**
+     * Block until the given client is no longer in the active clients list, or the timeout
+     * expires.
+     *
+     * Returns NO_ERROR if this succeeded, -ETIMEDOUT on a timeout, or a negative error code on
+     * failure.
+     */
+    status_t waitUntilRemoved(const std::shared_ptr<ClientDescriptor<KEY, VALUE>> client,
+            nsecs_t timeout) const;
+
+    /**
+     * Set the current listener for client add/remove events.
+     *
+     * The listener instance must inherit from the LISTENER class and implement the following
+     * methods:
+     *    void onClientRemoved(const ClientDescriptor<KEY, VALUE>& descriptor);
+     *    void onClientAdded(const ClientDescriptor<KEY, VALUE>& descriptor);
+     *
+     * These callback methods will be called with the ClientManager's lock held, and should
+     * not call any further ClientManager methods.
+     *
+     * The onClientRemoved method will be called when the client has been removed or evicted
+     * from the ClientManager that this event listener has been added to. The onClientAdded
+     * method will be called when the client has been added to the ClientManager that this
+     * event listener has been added to.
+     */
+    void setListener(const std::shared_ptr<LISTENER>& listener);
+
+protected:
+    ~ClientManager();
+
+private:
+
+    /**
+     * Return a vector of the ClientDescriptors that would be evicted by adding the given
+     * ClientDescriptor.  If returnIncompatibleClients is set to true, instead, return the
+     * vector of ClientDescriptors that are higher priority than the incoming client and
+     * either conflict with this client, or contribute to the resource cost if that would
+     * prevent the incoming client from being added.
+     *
+     * This may return the ClientDescriptor passed in.
+     */
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> wouldEvictLocked(
+            const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client,
+            bool returnIncompatibleClients = false) const;
+
+    int64_t getCurrentCostLocked() const;
+
+    mutable Mutex mLock;
+    mutable Condition mRemovedCondition;
+    int32_t mMaxCost;
+    // LRU ordered, most recent at end
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> mClients;
+    std::shared_ptr<LISTENER> mListener;
+}; // class ClientManager
+
+template<class KEY, class VALUE, class LISTENER>
+ClientManager<KEY, VALUE, LISTENER>::ClientManager() :
+        ClientManager(DEFAULT_MAX_COST) {}
+
+template<class KEY, class VALUE, class LISTENER>
+ClientManager<KEY, VALUE, LISTENER>::ClientManager(int32_t totalCost) : mMaxCost(totalCost) {}
+
+template<class KEY, class VALUE, class LISTENER>
+ClientManager<KEY, VALUE, LISTENER>::~ClientManager() {}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE, LISTENER>::wouldEvict(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const {
+    Mutex::Autolock lock(mLock);
+    return wouldEvictLocked(client);
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE, LISTENER>::getIncompatibleClients(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) const {
+    Mutex::Autolock lock(mLock);
+    return wouldEvictLocked(client, /*returnIncompatibleClients*/true);
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE, LISTENER>::wouldEvictLocked(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client,
+        bool returnIncompatibleClients) const {
+
+    std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>> evictList;
+
+    // Disallow null clients, return input
+    if (client == nullptr) {
+        evictList.push_back(client);
+        return evictList;
+    }
+
+    const KEY& key = client->getKey();
+    int32_t cost = client->getCost();
+    int32_t priority = client->getPriority();
+    int32_t owner = client->getOwnerId();
+
+    int64_t totalCost = getCurrentCostLocked() + cost;
+
+    // Determine the MRU of the owners tied for having the highest priority
+    int32_t highestPriorityOwner = owner;
+    int32_t highestPriority = priority;
+    for (const auto& i : mClients) {
+        int32_t curPriority = i->getPriority();
+        if (curPriority >= highestPriority) {
+            highestPriority = curPriority;
+            highestPriorityOwner = i->getOwnerId();
+        }
+    }
+
+    if (highestPriority == priority) {
+        // Switch back owner if the incoming client has the highest priority, as it is MRU
+        highestPriorityOwner = owner;
+    }
+
+    // Build eviction list of clients to remove
+    for (const auto& i : mClients) {
+        const KEY& curKey = i->getKey();
+        int32_t curCost = i->getCost();
+        int32_t curPriority = i->getPriority();
+        int32_t curOwner = i->getOwnerId();
+
+        bool conflicting = (curKey == key || i->isConflicting(key) ||
+                client->isConflicting(curKey));
+
+        if (!returnIncompatibleClients) {
+            // Find evicted clients
+
+            if (conflicting && curPriority > priority) {
+                // Pre-existing conflicting client with higher priority exists
+                evictList.clear();
+                evictList.push_back(client);
+                return evictList;
+            } else if (conflicting || ((totalCost > mMaxCost && curCost > 0) &&
+                    (curPriority <= priority) &&
+                    !(highestPriorityOwner == owner && owner == curOwner))) {
+                // Add a pre-existing client to the eviction list if:
+                // - We are adding a client with higher priority that conflicts with this one.
+                // - The total cost including the incoming client's is more than the allowable
+                //   maximum, and the client has a non-zero cost, lower priority, and a different
+                //   owner than the incoming client when the incoming client has the
+                //   highest priority.
+                evictList.push_back(i);
+                totalCost -= curCost;
+            }
+        } else {
+            // Find clients preventing the incoming client from being added
+
+            if (curPriority > priority && (conflicting || (totalCost > mMaxCost && curCost > 0))) {
+                // Higher-priority client that conflicts or contributes to exceeding the max cost
+                evictList.push_back(i);
+            }
+        }
+    }
+
+    // Immediately return the incompatible clients if we are calculating these instead
+    if (returnIncompatibleClients) {
+        return evictList;
+    }
+
+    // If the total cost is too high, return the input unless the input has the highest priority
+    if (totalCost > mMaxCost && highestPriorityOwner != owner) {
+        evictList.clear();
+        evictList.push_back(client);
+        return evictList;
+    }
+
+    return evictList;
+
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE, LISTENER>::addAndEvict(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& client) {
+    Mutex::Autolock lock(mLock);
+    auto evicted = wouldEvictLocked(client);
+    auto it = evicted.begin();
+    if (it != evicted.end() && *it == client) {
+        return evicted;
+    }
+
+    auto iter = evicted.cbegin();
+
+    if (iter != evicted.cend()) {
+
+        if (mListener != nullptr) mListener->onClientRemoved(**iter);
+
+        // Remove evicted clients from list
+        mClients.erase(std::remove_if(mClients.begin(), mClients.end(),
+            [&iter] (std::shared_ptr<ClientDescriptor<KEY, VALUE>>& curClientPtr) {
+                if (curClientPtr->getKey() == (*iter)->getKey()) {
+                    iter++;
+                    return true;
+                }
+                return false;
+            }), mClients.end());
+    }
+
+    if (mListener != nullptr) mListener->onClientAdded(*client);
+    mClients.push_back(client);
+    mRemovedCondition.broadcast();
+
+    return evicted;
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<std::shared_ptr<ClientDescriptor<KEY, VALUE>>>
+ClientManager<KEY, VALUE, LISTENER>::getAll() const {
+    Mutex::Autolock lock(mLock);
+    return mClients;
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<KEY> ClientManager<KEY, VALUE, LISTENER>::getAllKeys() const {
+    Mutex::Autolock lock(mLock);
+    std::vector<KEY> keys; keys.reserve(mClients.size());
+    for (const auto& i : mClients) {
+        keys.push_back(i->getKey());
+    }
+    return keys;
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::vector<int32_t> ClientManager<KEY, VALUE, LISTENER>::getAllOwners() const {
+    Mutex::Autolock lock(mLock);
+    std::set<int32_t> owners;
+    for (const auto& i : mClients) {
+        owners.emplace(i->getOwnerId());
+    }
+    return std::vector<int32_t>(owners.begin(), owners.end());
+}
+
+template<class KEY, class VALUE, class LISTENER>
+void ClientManager<KEY, VALUE, LISTENER>::updatePriorities(
+        const std::map<int32_t,int32_t>& ownerPriorityList) {
+    Mutex::Autolock lock(mLock);
+    for (auto& i : mClients) {
+        auto j = ownerPriorityList.find(i->getOwnerId());
+        if (j != ownerPriorityList.end()) {
+            i->setPriority(j->second);
+        }
+    }
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::shared_ptr<ClientDescriptor<KEY, VALUE>> ClientManager<KEY, VALUE, LISTENER>::get(
+        const KEY& key) const {
+    Mutex::Autolock lock(mLock);
+    for (const auto& i : mClients) {
+        if (i->getKey() == key) return i;
+    }
+    return std::shared_ptr<ClientDescriptor<KEY, VALUE>>(nullptr);
+}
+
+template<class KEY, class VALUE, class LISTENER>
+void ClientManager<KEY, VALUE, LISTENER>::removeAll() {
+    Mutex::Autolock lock(mLock);
+    if (mListener != nullptr) {
+        for (const auto& i : mClients) {
+            mListener->onClientRemoved(*i);
+        }
+    }
+    mClients.clear();
+    mRemovedCondition.broadcast();
+}
+
+template<class KEY, class VALUE, class LISTENER>
+std::shared_ptr<ClientDescriptor<KEY, VALUE>> ClientManager<KEY, VALUE, LISTENER>::remove(
+    const KEY& key) {
+    Mutex::Autolock lock(mLock);
+
+    std::shared_ptr<ClientDescriptor<KEY, VALUE>> ret;
+
+    // Remove evicted clients from list
+    mClients.erase(std::remove_if(mClients.begin(), mClients.end(),
+        [this, &key, &ret] (std::shared_ptr<ClientDescriptor<KEY, VALUE>>& curClientPtr) {
+            if (curClientPtr->getKey() == key) {
+                if (mListener != nullptr) mListener->onClientRemoved(*curClientPtr);
+                ret = curClientPtr;
+                return true;
+            }
+            return false;
+        }), mClients.end());
+
+    mRemovedCondition.broadcast();
+    return ret;
+}
+
+template<class KEY, class VALUE, class LISTENER>
+status_t ClientManager<KEY, VALUE, LISTENER>::waitUntilRemoved(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>> client,
+        nsecs_t timeout) const {
+    status_t ret = NO_ERROR;
+    Mutex::Autolock lock(mLock);
+
+    bool isRemoved = false;
+
+    // Figure out what time in the future we should hit the timeout
+    nsecs_t failTime = systemTime(SYSTEM_TIME_MONOTONIC) + timeout;
+
+    while (!isRemoved) {
+        isRemoved = true;
+        for (const auto& i : mClients) {
+            if (i == client) {
+                isRemoved = false;
+            }
+        }
+
+        if (!isRemoved) {
+            ret = mRemovedCondition.waitRelative(mLock, timeout);
+            if (ret != NO_ERROR) {
+                break;
+            }
+            timeout = failTime - systemTime(SYSTEM_TIME_MONOTONIC);
+        }
+    }
+
+    return ret;
+}
+
+template<class KEY, class VALUE, class LISTENER>
+void ClientManager<KEY, VALUE, LISTENER>::setListener(const std::shared_ptr<LISTENER>& listener) {
+    Mutex::Autolock lock(mLock);
+    mListener = listener;
+}
+
+template<class KEY, class VALUE, class LISTENER>
+void ClientManager<KEY, VALUE, LISTENER>::remove(
+        const std::shared_ptr<ClientDescriptor<KEY, VALUE>>& value) {
+    Mutex::Autolock lock(mLock);
+    // Remove evicted clients from list
+    mClients.erase(std::remove_if(mClients.begin(), mClients.end(),
+        [this, &value] (std::shared_ptr<ClientDescriptor<KEY, VALUE>>& curClientPtr) {
+            if (curClientPtr == value) {
+                if (mListener != nullptr) mListener->onClientRemoved(*curClientPtr);
+                return true;
+            }
+            return false;
+        }), mClients.end());
+    mRemovedCondition.broadcast();
+}
+
+template<class KEY, class VALUE, class LISTENER>
+int64_t ClientManager<KEY, VALUE, LISTENER>::getCurrentCostLocked() const {
+    int64_t totalCost = 0;
+    for (const auto& x : mClients) {
+        totalCost += x->getCost();
+    }
+    return totalCost;
+}
+
+// --------------------------------------------------------------------------------
+
+}; // namespace resource_policy
+}; // namespace android
+
+#endif // ANDROID_SERVICE_UTILS_EVICTION_POLICY_MANAGER_H
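To make the eviction rules above concrete, here is an illustrative sketch with string keys and integer values. The key/value types, costs, priorities, and the ExampleManager subclass are invented for the example (ClientManager's destructor is protected, so it must be derived from); addAndEvict and ClientDescriptor are the APIs defined in this header.

    #include <memory>
    #include <set>
    #include <string>
    #include "utils/ClientManager.h"

    using namespace android::resource_policy;

    // ClientManager is intended to be subclassed (protected destructor).
    class ExampleManager : public ClientManager<std::string, int> {
    public:
        ~ExampleManager() {}
    };

    void evictionExample() {
        ExampleManager manager;  // Uses DEFAULT_MAX_COST == 100
        typedef ClientDescriptor<std::string, int> Desc;

        auto clientA = std::make_shared<Desc>(std::string("0"), 1, /*cost*/ 50,
                std::set<std::string>(), /*priority*/ 10, /*ownerId*/ 1000);
        auto clientB = std::make_shared<Desc>(std::string("1"), 2, /*cost*/ 80,
                std::set<std::string>(), /*priority*/ 20, /*ownerId*/ 1001);

        manager.addAndEvict(clientA);                // Fits in the cost budget
        auto evicted = manager.addAndEvict(clientB); // 50 + 80 > 100
        // clientB has the larger (i.e. higher) priority value and a different
        // owner, so `evicted` contains clientA.
    }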
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 95f2fef..03438bf 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -10,4 +10,6 @@
 
 LOCAL_32_BIT_ONLY := true
 
+LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+
 include $(BUILD_SHARED_LIBRARY)
diff --git a/services/mediaresourcemanager/Android.mk b/services/mediaresourcemanager/Android.mk
new file mode 100644
index 0000000..b72230f
--- /dev/null
+++ b/services/mediaresourcemanager/Android.mk
@@ -0,0 +1,21 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES := ResourceManagerService.cpp ServiceLog.cpp
+
+LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+
+LOCAL_MODULE:= libresourcemanagerservice
+
+LOCAL_32_BIT_ONLY := true
+
+LOCAL_C_INCLUDES += \
+    $(TOPDIR)frameworks/av/include
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+include $(BUILD_SHARED_LIBRARY)
+
+include $(call all-makefiles-under,$(LOCAL_PATH))
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
new file mode 100644
index 0000000..e54cc5a
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -0,0 +1,456 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceManagerService"
+#include <utils/Log.h>
+
+#include <binder/IServiceManager.h>
+#include <dirent.h>
+#include <media/stagefright/ProcessInfo.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <unistd.h>
+
+#include "ResourceManagerService.h"
+#include "ServiceLog.h"
+
+namespace android {
+
+template <typename T>
+static String8 getString(const Vector<T> &items) {
+    String8 itemsStr;
+    for (size_t i = 0; i < items.size(); ++i) {
+        itemsStr.appendFormat("%s ", items[i].toString().string());
+    }
+    return itemsStr;
+}
+
+static bool hasResourceType(String8 type, Vector<MediaResource> resources) {
+    for (size_t i = 0; i < resources.size(); ++i) {
+        if (resources[i].mType == type) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool hasResourceType(String8 type, ResourceInfos infos) {
+    for (size_t i = 0; i < infos.size(); ++i) {
+        if (hasResourceType(type, infos[i].resources)) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static ResourceInfos& getResourceInfosForEdit(
+        int pid,
+        PidResourceInfosMap& map) {
+    ssize_t index = map.indexOfKey(pid);
+    if (index < 0) {
+        // new pid
+        ResourceInfos infosForPid;
+        map.add(pid, infosForPid);
+    }
+
+    return map.editValueFor(pid);
+}
+
+static ResourceInfo& getResourceInfoForEdit(
+        int64_t clientId,
+        const sp<IResourceManagerClient> client,
+        ResourceInfos& infos) {
+    for (size_t i = 0; i < infos.size(); ++i) {
+        if (infos[i].clientId == clientId) {
+            return infos.editItemAt(i);
+        }
+    }
+    ResourceInfo info;
+    info.clientId = clientId;
+    info.client = client;
+    infos.push_back(info);
+    return infos.editItemAt(infos.size() - 1);
+}
+
+status_t ResourceManagerService::dump(int fd, const Vector<String16>& /* args */) {
+    Mutex::Autolock lock(mLock);
+
+    String8 result;
+    const size_t SIZE = 256;
+    char buffer[SIZE];
+
+    snprintf(buffer, SIZE, "ResourceManagerService: %p\n", this);
+    result.append(buffer);
+    result.append("  Policies:\n");
+    snprintf(buffer, SIZE, "    SupportsMultipleSecureCodecs: %d\n", mSupportsMultipleSecureCodecs);
+    result.append(buffer);
+    snprintf(buffer, SIZE, "    SupportsSecureWithNonSecureCodec: %d\n", mSupportsSecureWithNonSecureCodec);
+    result.append(buffer);
+
+    result.append("  Processes:\n");
+    for (size_t i = 0; i < mMap.size(); ++i) {
+        snprintf(buffer, SIZE, "    Pid: %d\n", mMap.keyAt(i));
+        result.append(buffer);
+
+        const ResourceInfos &infos = mMap.valueAt(i);
+        for (size_t j = 0; j < infos.size(); ++j) {
+            result.append("      Client:\n");
+            snprintf(buffer, SIZE, "        Id: %lld\n", (long long)infos[j].clientId);
+            result.append(buffer);
+
+            snprintf(buffer, SIZE, "        Name: %s\n", infos[j].client->getName().string());
+            result.append(buffer);
+
+            Vector<MediaResource> resources = infos[j].resources;
+            result.append("        Resources:\n");
+            for (size_t k = 0; k < resources.size(); ++k) {
+                snprintf(buffer, SIZE, "          %s\n", resources[k].toString().string());
+                result.append(buffer);
+            }
+        }
+    }
+    result.append("  Events logs (most recent at top):\n");
+    result.append(mServiceLog->toString("    " /* linePrefix */));
+
+    write(fd, result.string(), result.size());
+    return OK;
+}
+
+ResourceManagerService::ResourceManagerService()
+    : mProcessInfo(new ProcessInfo()),
+      mServiceLog(new ServiceLog()),
+      mSupportsMultipleSecureCodecs(true),
+      mSupportsSecureWithNonSecureCodec(true) {}
+
+ResourceManagerService::ResourceManagerService(sp<ProcessInfoInterface> processInfo)
+    : mProcessInfo(processInfo),
+      mServiceLog(new ServiceLog()),
+      mSupportsMultipleSecureCodecs(true),
+      mSupportsSecureWithNonSecureCodec(true) {}
+
+ResourceManagerService::~ResourceManagerService() {}
+
+void ResourceManagerService::config(const Vector<MediaResourcePolicy> &policies) {
+    String8 log = String8::format("config(%s)", getString(policies).string());
+    mServiceLog->add(log);
+
+    Mutex::Autolock lock(mLock);
+    for (size_t i = 0; i < policies.size(); ++i) {
+        String8 type = policies[i].mType;
+        String8 value = policies[i].mValue;
+        if (type == kPolicySupportsMultipleSecureCodecs) {
+            mSupportsMultipleSecureCodecs = (value == "true");
+        } else if (type == kPolicySupportsSecureWithNonSecureCodec) {
+            mSupportsSecureWithNonSecureCodec = (value == "true");
+        }
+    }
+}
+
+void ResourceManagerService::addResource(
+        int pid,
+        int64_t clientId,
+        const sp<IResourceManagerClient> client,
+        const Vector<MediaResource> &resources) {
+    String8 log = String8::format("addResource(pid %d, clientId %lld, resources %s)",
+            pid, (long long) clientId, getString(resources).string());
+    mServiceLog->add(log);
+
+    Mutex::Autolock lock(mLock);
+    ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
+    ResourceInfo& info = getResourceInfoForEdit(clientId, client, infos);
+    // TODO: do the merge instead of append.
+    info.resources.appendVector(resources);
+}
+
+void ResourceManagerService::removeResource(int pid, int64_t clientId) {
+    String8 log = String8::format(
+            "removeResource(pid %d, clientId %lld)",
+            pid, (long long) clientId);
+    mServiceLog->add(log);
+
+    Mutex::Autolock lock(mLock);
+    ssize_t index = mMap.indexOfKey(pid);
+    if (index < 0) {
+        ALOGV("removeResource: didn't find pid %d for clientId %lld", pid, (long long) clientId);
+        return;
+    }
+    bool found = false;
+    ResourceInfos &infos = mMap.editValueAt(index);
+    for (size_t j = 0; j < infos.size(); ++j) {
+        if (infos[j].clientId == clientId) {
+            j = infos.removeAt(j);
+            found = true;
+            break;
+        }
+    }
+    if (!found) {
+        ALOGV("didn't find client");
+    }
+}
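As an illustration, a hypothetical codec host could bracket a codec's lifetime with these two calls. The resource types and values below are invented, and the MediaResource include path is assumed; addResource and removeResource are the methods defined above.

    #include <media/MediaResource.h>
    #include <utils/Vector.h>

    #include "ResourceManagerService.h"

    using namespace android;

    void onCodecCreated(const sp<ResourceManagerService> &service, int pid,
            int64_t clientId, const sp<IResourceManagerClient> &client) {
        Vector<MediaResource> resources;
        resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 8 * 1024 * 1024));
        service->addResource(pid, clientId, client, resources);
    }

    void onCodecDestroyed(const sp<ResourceManagerService> &service, int pid,
            int64_t clientId) {
        service->removeResource(pid, clientId);
    }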
+
+void ResourceManagerService::getClientForResource_l(
+        int callingPid, const MediaResource *res, Vector<sp<IResourceManagerClient>> *clients) {
+    if (res == NULL) {
+        return;
+    }
+    sp<IResourceManagerClient> client;
+    if (getLowestPriorityBiggestClient_l(callingPid, res->mType, &client)) {
+        clients->push_back(client);
+    }
+}
+
+bool ResourceManagerService::reclaimResource(
+        int callingPid, const Vector<MediaResource> &resources) {
+    String8 log = String8::format("reclaimResource(callingPid %d, resources %s)",
+            callingPid, getString(resources).string());
+    mServiceLog->add(log);
+
+    Vector<sp<IResourceManagerClient>> clients;
+    {
+        Mutex::Autolock lock(mLock);
+        const MediaResource *secureCodec = NULL;
+        const MediaResource *nonSecureCodec = NULL;
+        const MediaResource *graphicMemory = NULL;
+        for (size_t i = 0; i < resources.size(); ++i) {
+            String8 type = resources[i].mType;
+            if (resources[i].mType == kResourceSecureCodec) {
+                secureCodec = &resources[i];
+            } else if (type == kResourceNonSecureCodec) {
+                nonSecureCodec = &resources[i];
+            } else if (type == kResourceGraphicMemory) {
+                graphicMemory = &resources[i];
+            }
+        }
+
+        // first pass to handle secure/non-secure codec conflict
+        if (secureCodec != NULL) {
+            if (!mSupportsMultipleSecureCodecs) {
+                if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+                    return false;
+                }
+            }
+            if (!mSupportsSecureWithNonSecureCodec) {
+                if (!getAllClients_l(callingPid, String8(kResourceNonSecureCodec), &clients)) {
+                    return false;
+                }
+            }
+        }
+        if (nonSecureCodec != NULL) {
+            if (!mSupportsSecureWithNonSecureCodec) {
+                if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+                    return false;
+                }
+            }
+        }
+
+        if (clients.size() == 0) {
+            // if no secure/non-secure codec conflict, run second pass to handle other resources.
+            getClientForResource_l(callingPid, graphicMemory, &clients);
+        }
+
+        if (clients.size() == 0) {
+            // if we are here, run the third pass to free one codec with the same type.
+            getClientForResource_l(callingPid, secureCodec, &clients);
+            getClientForResource_l(callingPid, nonSecureCodec, &clients);
+        }
+
+        if (clients.size() == 0) {
+            // if we are here, run the fourth pass to free one codec with the different type.
+            if (secureCodec != NULL) {
+                MediaResource temp(String8(kResourceNonSecureCodec), 1);
+                getClientForResource_l(callingPid, &temp, &clients);
+            }
+            if (nonSecureCodec != NULL) {
+                MediaResource temp(String8(kResourceSecureCodec), 1);
+                getClientForResource_l(callingPid, &temp, &clients);
+            }
+        }
+    }
+
+    if (clients.size() == 0) {
+        return false;
+    }
+
+    sp<IResourceManagerClient> failedClient;
+    for (size_t i = 0; i < clients.size(); ++i) {
+        log = String8::format("reclaimResource from client %p", clients[i].get());
+        mServiceLog->add(log);
+        if (!clients[i]->reclaimResource()) {
+            failedClient = clients[i];
+            break;
+        }
+    }
+
+    {
+        Mutex::Autolock lock(mLock);
+        bool found = false;
+        for (size_t i = 0; i < mMap.size(); ++i) {
+            ResourceInfos &infos = mMap.editValueAt(i);
+            for (size_t j = 0; j < infos.size();) {
+                if (infos[j].client == failedClient) {
+                    j = infos.removeAt(j);
+                    found = true;
+                } else {
+                    ++j;
+                }
+            }
+            if (found) {
+                break;
+            }
+        }
+        if (!found) {
+            ALOGV("didn't find failed client");
+        }
+    }
+
+    return (failedClient == NULL);
+}
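And a sketch of how a caller might combine reclaimResource with a retry. The allocation hook below is hypothetical; reclaimResource is the method above, which returns true only when it found eviction candidates and all of them released their resources.

    bool tryAllocateSecureCodec();  // Hypothetical platform allocation hook

    bool allocateWithReclaim(const sp<ResourceManagerService> &service, int pid) {
        if (tryAllocateSecureCodec()) {
            return true;
        }
        // Ask the service to free lower-priority holders, then retry once.
        Vector<MediaResource> needed;
        needed.push_back(MediaResource(String8(kResourceSecureCodec), 1));
        if (!service->reclaimResource(pid, needed)) {
            return false;
        }
        return tryAllocateSecureCodec();
    }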
+
+bool ResourceManagerService::getAllClients_l(
+        int callingPid, const String8 &type, Vector<sp<IResourceManagerClient>> *clients) {
+    Vector<sp<IResourceManagerClient>> temp;
+    for (size_t i = 0; i < mMap.size(); ++i) {
+        ResourceInfos &infos = mMap.editValueAt(i);
+        for (size_t j = 0; j < infos.size(); ++j) {
+            if (hasResourceType(type, infos[j].resources)) {
+                if (!isCallingPriorityHigher_l(callingPid, mMap.keyAt(i))) {
+                    // some higher/equal priority process owns the resource,
+                    // this request can't be fulfilled.
+                    ALOGE("getAllClients_l: can't reclaim resource %s from pid %d",
+                            type.string(), mMap.keyAt(i));
+                    return false;
+                }
+                temp.push_back(infos[j].client);
+            }
+        }
+    }
+    if (temp.size() == 0) {
+        ALOGV("getAllClients_l: didn't find any resource %s", type.string());
+        return true;
+    }
+    clients->appendVector(temp);
+    return true;
+}
+
+bool ResourceManagerService::getLowestPriorityBiggestClient_l(
+        int callingPid, const String8 &type, sp<IResourceManagerClient> *client) {
+    int lowestPriorityPid;
+    int lowestPriority;
+    int callingPriority;
+    if (!mProcessInfo->getPriority(callingPid, &callingPriority)) {
+        ALOGE("getLowestPriorityBiggestClient_l: can't get process priority for pid %d",
+                callingPid);
+        return false;
+    }
+    if (!getLowestPriorityPid_l(type, &lowestPriorityPid, &lowestPriority)) {
+        return false;
+    }
+    if (lowestPriority <= callingPriority) {
+        ALOGE("getLowestPriorityBiggestClient_l: lowest priority %d vs caller priority %d",
+                lowestPriority, callingPriority);
+        return false;
+    }
+
+    if (!getBiggestClient_l(lowestPriorityPid, type, client)) {
+        return false;
+    }
+    return true;
+}
+
+bool ResourceManagerService::getLowestPriorityPid_l(
+        const String8 &type, int *lowestPriorityPid, int *lowestPriority) {
+    int pid = -1;
+    int priority = -1;
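+    // A numerically larger priority value means a lower process priority, so the lowest
+    // priority pid is the one with the largest value reported by ProcessInfoInterface.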
+    for (size_t i = 0; i < mMap.size(); ++i) {
+        if (mMap.valueAt(i).size() == 0) {
+            // no client on this process.
+            continue;
+        }
+        if (!hasResourceType(type, mMap.valueAt(i))) {
+            // doesn't have the requested resource type
+            continue;
+        }
+        int tempPid = mMap.keyAt(i);
+        int tempPriority;
+        if (!mProcessInfo->getPriority(tempPid, &tempPriority)) {
+            ALOGV("getLowestPriorityPid_l: can't get priority of pid %d, skipped", tempPid);
+            // TODO: remove this pid from mMap?
+            continue;
+        }
+        if (pid == -1 || tempPriority > priority) {
+            // initialize the values
+            pid = tempPid;
+            priority = tempPriority;
+        }
+    }
+    if (pid != -1) {
+        *lowestPriorityPid = pid;
+        *lowestPriority = priority;
+    }
+    return (pid != -1);
+}
+
+bool ResourceManagerService::isCallingPriorityHigher_l(int callingPid, int pid) {
+    int callingPidPriority;
+    if (!mProcessInfo->getPriority(callingPid, &callingPidPriority)) {
+        return false;
+    }
+
+    int priority;
+    if (!mProcessInfo->getPriority(pid, &priority)) {
+        return false;
+    }
+
+    return (callingPidPriority < priority);
+}
+
+bool ResourceManagerService::getBiggestClient_l(
+        int pid, const String8 &type, sp<IResourceManagerClient> *client) {
+    ssize_t index = mMap.indexOfKey(pid);
+    if (index < 0) {
+        ALOGE("getBiggestClient_l: can't find resource info for pid %d", pid);
+        return false;
+    }
+
+    sp<IResourceManagerClient> clientTemp;
+    uint64_t largestValue = 0;
+    const ResourceInfos &infos = mMap.valueAt(index);
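+    // Walk every resource entry of every client in this process and remember the client that
+    // holds the largest amount of the requested type.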
+    for (size_t i = 0; i < infos.size(); ++i) {
+        Vector<MediaResource> resources = infos[i].resources;
+        for (size_t j = 0; j < resources.size(); ++j) {
+            if (resources[j].mType == type) {
+                if (resources[j].mValue > largestValue) {
+                    largestValue = resources[j].mValue;
+                    clientTemp = infos[i].client;
+                }
+            }
+        }
+    }
+
+    if (clientTemp == NULL) {
+        ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", type.string(), pid);
+        return false;
+    }
+
+    *client = clientTemp;
+    return true;
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
new file mode 100644
index 0000000..4769373
--- /dev/null
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -0,0 +1,118 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_RESOURCEMANAGERSERVICE_H
+#define ANDROID_RESOURCEMANAGERSERVICE_H
+
+#include <arpa/inet.h>
+#include <binder/BinderService.h>
+#include <utils/Errors.h>
+#include <utils/KeyedVector.h>
+#include <utils/String8.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+#include <media/IResourceManagerService.h>
+
+namespace android {
+
+class ServiceLog;
+struct ProcessInfoInterface;
+
+struct ResourceInfo {
+    int64_t clientId;
+    sp<IResourceManagerClient> client;
+    Vector<MediaResource> resources;
+};
+
+typedef Vector<ResourceInfo> ResourceInfos;
+typedef KeyedVector<int, ResourceInfos> PidResourceInfosMap;
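+// Resources are tracked per process: each pid maps to the clients that process has registered,
+// together with the resources each client currently holds.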
+
+class ResourceManagerService
+    : public BinderService<ResourceManagerService>,
+      public BnResourceManagerService
+{
+public:
+    static char const *getServiceName() { return "media.resource_manager"; }
+
+    virtual status_t dump(int fd, const Vector<String16>& args);
+
+    ResourceManagerService();
+    ResourceManagerService(sp<ProcessInfoInterface> processInfo);
+
+    // IResourceManagerService interface
+    virtual void config(const Vector<MediaResourcePolicy> &policies);
+
+    virtual void addResource(
+            int pid,
+            int64_t clientId,
+            const sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources);
+
+    virtual void removeResource(int pid, int64_t clientId);
+
+    // Tries to reclaim resources from processes with lower priority than the calling process,
+    // according to the requested resources.
+    // Returns true if any resource has been reclaimed, otherwise returns false.
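+    //
+    // Typical flow (sketch, assuming a caller that already holds this binder interface):
+    // register resources with addResource() when a codec is allocated, call reclaimResource()
+    // when a later allocation fails for lack of resources, and retry the allocation if this
+    // returns true.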
+    virtual bool reclaimResource(int callingPid, const Vector<MediaResource> &resources);
+
+protected:
+    virtual ~ResourceManagerService();
+
+private:
+    friend class ResourceManagerServiceTest;
+
+    // Gets the list of all clients that own the specified resource type.
+    // Returns false if any such client belongs to a process with equal or higher priority than
+    // the calling process; in that case the clients vector is left unchanged.
+    bool getAllClients_l(int callingPid, const String8 &type,
+            Vector<sp<IResourceManagerClient>> *clients);
+
+    // Gets the client that owns the biggest piece of the specified resource type within the
+    // lowest priority process that holds it.
+    // Returns false if the calling process does not have strictly higher priority than that
+    // process; in that case the client is left unchanged.
+    bool getLowestPriorityBiggestClient_l(int callingPid, const String8 &type,
+            sp<IResourceManagerClient> *client);
+
+    // Gets the lowest priority process that holds the specified resource type.
+    // Returns false on failure; the output parameters are left unchanged in that case.
+    bool getLowestPriorityPid_l(const String8 &type, int *pid, int *priority);
+
+    // Gets the client that owns the biggest piece of the specified resource type within pid.
+    // Returns false on failure; the client is left unchanged in that case.
+    bool getBiggestClient_l(int pid, const String8 &type, sp<IResourceManagerClient> *client);
+
+    bool isCallingPriorityHigher_l(int callingPid, int pid);
+
+    // A helper that calls getLowestPriorityBiggestClient_l and adds the resulting client,
+    // if any, to the given Vector.
+    void getClientForResource_l(
+            int callingPid, const MediaResource *res, Vector<sp<IResourceManagerClient>> *clients);
+
+    mutable Mutex mLock;
+    sp<ProcessInfoInterface> mProcessInfo;
+    sp<ServiceLog> mServiceLog;
+    PidResourceInfosMap mMap;
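+    // Codec coexistence policies configured via config() with the
+    // kPolicySupportsMultipleSecureCodecs and kPolicySupportsSecureWithNonSecureCodec keys.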
+    bool mSupportsMultipleSecureCodecs;
+    bool mSupportsSecureWithNonSecureCodec;
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_RESOURCEMANAGERSERVICE_H
diff --git a/services/mediaresourcemanager/ServiceLog.cpp b/services/mediaresourcemanager/ServiceLog.cpp
new file mode 100644
index 0000000..791e797
--- /dev/null
+++ b/services/mediaresourcemanager/ServiceLog.cpp
@@ -0,0 +1,63 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ServiceLog"
+#include <utils/Log.h>
+
+#include <time.h>
+
+#include "ServiceLog.h"
+
+static const size_t kDefaultMaxNum = 100;
+
+namespace android {
+
+ServiceLog::ServiceLog() : mMaxNum(kDefaultMaxNum), mLogs(mMaxNum) {}
+ServiceLog::ServiceLog(size_t maxNum) : mMaxNum(maxNum), mLogs(mMaxNum) {}
+
+void ServiceLog::add(const String8 &log) {
+    Mutex::Autolock lock(mLock);
+    time_t now = time(0);
+    char buf[64];
+    strftime(buf, sizeof(buf), "%m-%d %T", localtime(&now));
+    mLogs.add(String8::format("%s %s", buf, log.string()));
+}
+
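+// Because the backing RingBuffer keeps at most mMaxNum entries, a trailing "..." is appended
+// when the buffer is full to indicate that older entries may have been evicted.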
+String8 ServiceLog::toString(const char *linePrefix) const {
+    Mutex::Autolock lock(mLock);
+    String8 result;
+    for (const auto& log : mLogs) {
+        addLine(log.string(), linePrefix, &result);
+    }
+    if (mLogs.size() == mMaxNum) {
+        addLine("...", linePrefix, &result);
+    } else if (mLogs.size() == 0) {
+        addLine("[no events yet]", linePrefix, &result);
+    }
+    return result;
+}
+
+void ServiceLog::addLine(const char *log, const char *prefix, String8 *result) const {
+    if (prefix != NULL) {
+        result->append(prefix);
+    }
+    result->append(log);
+    result->append("\n");
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/ServiceLog.h b/services/mediaresourcemanager/ServiceLog.h
new file mode 100644
index 0000000..a6f16eb
--- /dev/null
+++ b/services/mediaresourcemanager/ServiceLog.h
@@ -0,0 +1,50 @@
+/*
+**
+** Copyright 2015, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+**     http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#ifndef ANDROID_SERVICELOG_H
+#define ANDROID_SERVICELOG_H
+
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <utils/threads.h>
+#include <utils/Vector.h>
+
+#include "media/RingBuffer.h"
+
+namespace android {
+
+class ServiceLog : public RefBase {
+public:
+    ServiceLog();
+    ServiceLog(size_t maxNum);
+
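+    // Both methods are thread-safe; add() and toString() serialize on mLock.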
+    void add(const String8 &log);
+    String8 toString(const char *linePrefix = NULL) const;
+
+private:
+    size_t mMaxNum;
+    mutable Mutex mLock;
+    RingBuffer<String8> mLogs;
+
+    void addLine(const char *log, const char *prefix, String8 *result) const;
+};
+
+// ----------------------------------------------------------------------------
+
+}; // namespace android
+
+#endif // ANDROID_SERVICELOG_H
diff --git a/services/mediaresourcemanager/test/Android.mk b/services/mediaresourcemanager/test/Android.mk
new file mode 100644
index 0000000..3b4ef0d
--- /dev/null
+++ b/services/mediaresourcemanager/test/Android.mk
@@ -0,0 +1,54 @@
+# Build the unit tests.
+LOCAL_PATH:= $(call my-dir)
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := ResourceManagerService_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  ResourceManagerService_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  libbinder \
+  liblog \
+  libmedia \
+  libresourcemanagerservice \
+  libutils \
+
+LOCAL_C_INCLUDES := \
+  frameworks/av/include \
+  frameworks/av/services/mediaresourcemanager \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+LOCAL_32_BIT_ONLY := true
+
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := ServiceLog_test
+
+LOCAL_MODULE_TAGS := tests
+
+LOCAL_SRC_FILES := \
+  ServiceLog_test.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+  liblog \
+  libmedia \
+  libresourcemanagerservice \
+  libutils \
+
+LOCAL_C_INCLUDES := \
+  frameworks/av/include \
+  frameworks/av/services/mediaresourcemanager \
+
+LOCAL_CFLAGS += -Werror -Wall
+LOCAL_CLANG := true
+
+LOCAL_32_BIT_ONLY := true
+
+include $(BUILD_NATIVE_TEST)
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
new file mode 100644
index 0000000..df49ddc
--- /dev/null
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -0,0 +1,542 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ResourceManagerService_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "ResourceManagerService.h"
+#include <media/IResourceManagerService.h>
+#include <media/MediaResource.h>
+#include <media/MediaResourcePolicy.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/ProcessInfoInterface.h>
+
+namespace android {
+
+static int64_t getId(sp<IResourceManagerClient> client) {
+    return (int64_t) client.get();
+}
+
+struct TestProcessInfo : public ProcessInfoInterface {
+    TestProcessInfo() {}
+    virtual ~TestProcessInfo() {}
+
+    virtual bool getPriority(int pid, int *priority) {
+        // For testing, use the pid as the priority.
+        // The lower the value, the higher the priority.
+        *priority = pid;
+        return true;
+    }
+
+private:
+    DISALLOW_EVIL_CONSTRUCTORS(TestProcessInfo);
+};
+
+struct TestClient : public BnResourceManagerClient {
+    TestClient(int pid, sp<ResourceManagerService> service)
+        : mReclaimed(false), mPid(pid), mService(service) {}
+
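+    // Simulates a cooperative client: when asked to reclaim, it unregisters itself from the
+    // service and records that fact so tests can verify which clients were picked.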
+    virtual bool reclaimResource() {
+        sp<IResourceManagerClient> client(this);
+        mService->removeResource(mPid, (int64_t) client.get());
+        mReclaimed = true;
+        return true;
+    }
+
+    virtual String8 getName() {
+        return String8("test_client");
+    }
+
+    bool reclaimed() const {
+        return mReclaimed;
+    }
+
+    void reset() {
+        mReclaimed = false;
+    }
+
+protected:
+    virtual ~TestClient() {}
+
+private:
+    bool mReclaimed;
+    int mPid;
+    sp<ResourceManagerService> mService;
+    DISALLOW_EVIL_CONSTRUCTORS(TestClient);
+};
+
+static const int kTestPid1 = 30;
+static const int kTestPid2 = 20;
+
+static const int kLowPriorityPid = 40;
+static const int kMidPriorityPid = 25;
+static const int kHighPriorityPid = 10;
+
+class ResourceManagerServiceTest : public ::testing::Test {
+public:
+    ResourceManagerServiceTest()
+        : mService(new ResourceManagerService(new TestProcessInfo)),
+          mTestClient1(new TestClient(kTestPid1, mService)),
+          mTestClient2(new TestClient(kTestPid2, mService)),
+          mTestClient3(new TestClient(kTestPid2, mService)) {
+    }
+
+protected:
+    static bool isEqualResources(const Vector<MediaResource> &resources1,
+            const Vector<MediaResource> &resources2) {
+        if (resources1.size() != resources2.size()) {
+            return false;
+        }
+        for (size_t i = 0; i < resources1.size(); ++i) {
+            if (resources1[i] != resources2[i]) {
+                return false;
+            }
+        }
+        return true;
+    }
+
+    static void expectEqResourceInfo(const ResourceInfo &info, sp<IResourceManagerClient> client,
+            const Vector<MediaResource> &resources) {
+        EXPECT_EQ(client, info.client);
+        EXPECT_TRUE(isEqualResources(resources, info.resources));
+    }
+
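+    // Checks which of the three test clients have been asked to reclaim since the last call,
+    // then clears their flags for the next expectation.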
+    void verifyClients(bool c1, bool c2, bool c3) {
+        TestClient *client1 = static_cast<TestClient*>(mTestClient1.get());
+        TestClient *client2 = static_cast<TestClient*>(mTestClient2.get());
+        TestClient *client3 = static_cast<TestClient*>(mTestClient3.get());
+
+        EXPECT_EQ(c1, client1->reclaimed());
+        EXPECT_EQ(c2, client2->reclaimed());
+        EXPECT_EQ(c3, client3->reclaimed());
+
+        client1->reset();
+        client2->reset();
+        client3->reset();
+    }
+
+    // test set up
+    // ---------------------------------------------------------------------------------
+    //   pid                priority         client           type               number
+    // ---------------------------------------------------------------------------------
+    //   kTestPid1(30)      30               mTestClient1     secure codec       1
+    //                                                        graphic memory     200
+    //                                                        graphic memory     200
+    // ---------------------------------------------------------------------------------
+    //   kTestPid2(20)      20               mTestClient2     non-secure codec   1
+    //                                                        graphic memory     300
+    //                                       -------------------------------------------
+    //                                       mTestClient3     secure codec       1
+    //                                                        graphic memory     100
+    // ---------------------------------------------------------------------------------
+    void addResource() {
+        // kTestPid1 mTestClient1
+        Vector<MediaResource> resources1;
+        resources1.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources1);
+        resources1.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+        Vector<MediaResource> resources11;
+        resources11.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+        mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources11);
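+        // Note: resources1 now holds the union of the two addResource() calls above, which is
+        // what the service is expected to have merged for mTestClient1.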
+
+        // kTestPid2 mTestClient2
+        Vector<MediaResource> resources2;
+        resources2.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+        resources2.push_back(MediaResource(String8(kResourceGraphicMemory), 300));
+        mService->addResource(kTestPid2, getId(mTestClient2), mTestClient2, resources2);
+
+        // kTestPid2 mTestClient3
+        Vector<MediaResource> resources3;
+        mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
+        resources3.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        resources3.push_back(MediaResource(String8(kResourceGraphicMemory), 100));
+        mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
+
+        const PidResourceInfosMap &map = mService->mMap;
+        EXPECT_EQ(2u, map.size());
+        ssize_t index1 = map.indexOfKey(kTestPid1);
+        ASSERT_GE(index1, 0);
+        const ResourceInfos &infos1 = map[index1];
+        EXPECT_EQ(1u, infos1.size());
+        expectEqResourceInfo(infos1[0], mTestClient1, resources1);
+
+        ssize_t index2 = map.indexOfKey(kTestPid2);
+        ASSERT_GE(index2, 0);
+        const ResourceInfos &infos2 = map[index2];
+        EXPECT_EQ(2u, infos2.size());
+        expectEqResourceInfo(infos2[0], mTestClient2, resources2);
+        expectEqResourceInfo(infos2[1], mTestClient3, resources3);
+    }
+
+    void testConfig() {
+        EXPECT_TRUE(mService->mSupportsMultipleSecureCodecs);
+        EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
+
+        Vector<MediaResourcePolicy> policies1;
+        policies1.push_back(
+                MediaResourcePolicy(
+                        String8(kPolicySupportsMultipleSecureCodecs),
+                        String8("true")));
+        policies1.push_back(
+                MediaResourcePolicy(
+                        String8(kPolicySupportsSecureWithNonSecureCodec),
+                        String8("false")));
+        mService->config(policies1);
+        EXPECT_TRUE(mService->mSupportsMultipleSecureCodecs);
+        EXPECT_FALSE(mService->mSupportsSecureWithNonSecureCodec);
+
+        Vector<MediaResourcePolicy> policies2;
+        policies2.push_back(
+                MediaResourcePolicy(
+                        String8(kPolicySupportsMultipleSecureCodecs),
+                        String8("false")));
+        policies2.push_back(
+                MediaResourcePolicy(
+                        String8(kPolicySupportsSecureWithNonSecureCodec),
+                        String8("true")));
+        mService->config(policies2);
+        EXPECT_FALSE(mService->mSupportsMultipleSecureCodecs);
+        EXPECT_TRUE(mService->mSupportsSecureWithNonSecureCodec);
+    }
+
+    void testRemoveResource() {
+        addResource();
+
+        mService->removeResource(kTestPid2, getId(mTestClient2));
+
+        const PidResourceInfosMap &map = mService->mMap;
+        EXPECT_EQ(2u, map.size());
+        const ResourceInfos &infos1 = map.valueFor(kTestPid1);
+        const ResourceInfos &infos2 = map.valueFor(kTestPid2);
+        EXPECT_EQ(1u, infos1.size());
+        EXPECT_EQ(1u, infos2.size());
+        // mTestClient2 has been removed.
+        EXPECT_EQ(mTestClient3, infos2[0].client);
+    }
+
+    void testGetAllClients() {
+        addResource();
+
+        String8 type = String8(kResourceSecureCodec);
+        String8 unknownType = String8("unknownType");
+        Vector<sp<IResourceManagerClient> > clients;
+        EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, &clients));
+        // some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
+        // will fail.
+        EXPECT_FALSE(mService->getAllClients_l(kMidPriorityPid, type, &clients));
+        EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, unknownType, &clients));
+        EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, &clients));
+
+        EXPECT_EQ(2u, clients.size());
+        EXPECT_EQ(mTestClient3, clients[0]);
+        EXPECT_EQ(mTestClient1, clients[1]);
+    }
+
+    void testReclaimResourceSecure() {
+        Vector<MediaResource> resources;
+        resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+
+        // ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = false;
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+            EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+
+            // reclaim all secure codecs
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(true /* c1 */, false /* c2 */, true /* c3 */);
+
+            // call again should reclaim the largest graphic memory allocation from the lowest priority process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+        }
+
+        // ### secure codecs can't coexist and secure codec can't coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = false;
+            mService->mSupportsSecureWithNonSecureCodec = false;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+            EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+
+            // reclaim all secure and non-secure codecs
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(true /* c1 */, true /* c2 */, true /* c3 */);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+        }
+
+
+        // ### secure codecs can coexist but secure codec can't coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = true;
+            mService->mSupportsSecureWithNonSecureCodec = false;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+            EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+
+            // reclaim all non-secure codecs
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+
+            // call again should reclaim the largest graphic memory allocation from the lowest priority process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
+
+            // call again should reclaim the next largest graphic memory allocation from the lowest priority process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+        }
+
+        // ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = true;
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            // the largest graphic memory allocation from the lowest priority process got reclaimed
+            verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+        }
+
+        // ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsMultipleSecureCodecs = true;
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            Vector<MediaResource> resources;
+            resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            // secure codec from lowest process got reclaimed
+            verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
+
+            // call again should reclaim another secure codec from lowest process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
+
+            // no more secure codecs; a non-secure codec will be reclaimed.
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+        }
+    }
+
+    void testReclaimResourceNonSecure() {
+        Vector<MediaResource> resources;
+        resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+        resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+
+        // ### secure codec can't coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsSecureWithNonSecureCodec = false;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+            EXPECT_FALSE(mService->reclaimResource(kMidPriorityPid, resources));
+
+            // reclaim all secure codecs
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(true /* c1 */, false /* c2 */, true /* c3 */);
+
+            // call again should reclaim one graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+        }
+
+
+        // ### secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            // priority too low
+            EXPECT_FALSE(mService->reclaimResource(kLowPriorityPid, resources));
+
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            // the largest graphic memory allocation from the lowest priority process got reclaimed
+            verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+
+            // call again should reclaim another graphic memory from lowest process
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(false /* c1 */, false /* c2 */, true /* c3 */);
+
+            // nothing left
+            EXPECT_FALSE(mService->reclaimResource(kHighPriorityPid, resources));
+        }
+
+        // ### secure codec can coexist with non-secure codec ###
+        {
+            addResource();
+            mService->mSupportsSecureWithNonSecureCodec = true;
+
+            Vector<MediaResource> resources;
+            resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            // one non-secure codec from the lowest priority process got reclaimed
+            verifyClients(false /* c1 */, true /* c2 */, false /* c3 */);
+
+            // no more non-secure codecs; the secure codec from the lowest priority process will be reclaimed
+            EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
+            verifyClients(true /* c1 */, false /* c2 */, false /* c3 */);
+
+            // clean up client 3, which is still registered
+            mService->removeResource(kTestPid2, getId(mTestClient3));
+        }
+    }
+
+    void testGetLowestPriorityBiggestClient() {
+        String8 type = String8(kResourceGraphicMemory);
+        sp<IResourceManagerClient> client;
+        EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
+
+        addResource();
+
+        EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kLowPriorityPid, type, &client));
+        EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
+
+        // kTestPid1 is the lowest priority process with kResourceGraphicMemory.
+        // mTestClient1 has the largest kResourceGraphicMemory within kTestPid1.
+        EXPECT_EQ(mTestClient1, client);
+    }
+
+    void testGetLowestPriorityPid() {
+        int pid;
+        int priority;
+        TestProcessInfo processInfo;
+
+        String8 type = String8(kResourceGraphicMemory);
+        EXPECT_FALSE(mService->getLowestPriorityPid_l(type, &pid, &priority));
+
+        addResource();
+
+        EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
+        EXPECT_EQ(kTestPid1, pid);
+        int priority1;
+        processInfo.getPriority(kTestPid1, &priority1);
+        EXPECT_EQ(priority1, priority);
+
+        type = String8(kResourceNonSecureCodec);
+        EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
+        EXPECT_EQ(kTestPid2, pid);
+        int priority2;
+        processInfo.getPriority(kTestPid2, &priority2);
+        EXPECT_EQ(priority2, priority);
+    }
+
+    void testGetBiggestClient() {
+        String8 type = String8(kResourceGraphicMemory);
+        sp<IResourceManagerClient> client;
+        EXPECT_FALSE(mService->getBiggestClient_l(kTestPid2, type, &client));
+
+        addResource();
+
+        EXPECT_TRUE(mService->getBiggestClient_l(kTestPid2, type, &client));
+        EXPECT_EQ(mTestClient2, client);
+    }
+
+    void testIsCallingPriorityHigher() {
+        EXPECT_FALSE(mService->isCallingPriorityHigher_l(101, 100));
+        EXPECT_FALSE(mService->isCallingPriorityHigher_l(100, 100));
+        EXPECT_TRUE(mService->isCallingPriorityHigher_l(99, 100));
+    }
+
+    sp<ResourceManagerService> mService;
+    sp<IResourceManagerClient> mTestClient1;
+    sp<IResourceManagerClient> mTestClient2;
+    sp<IResourceManagerClient> mTestClient3;
+};
+
+TEST_F(ResourceManagerServiceTest, config) {
+    testConfig();
+}
+
+TEST_F(ResourceManagerServiceTest, addResource) {
+    addResource();
+}
+
+TEST_F(ResourceManagerServiceTest, removeResource) {
+    testRemoveResource();
+}
+
+TEST_F(ResourceManagerServiceTest, reclaimResource) {
+    testReclaimResourceSecure();
+    testReclaimResourceNonSecure();
+}
+
+TEST_F(ResourceManagerServiceTest, getAllClients_l) {
+    testGetAllClients();
+}
+
+TEST_F(ResourceManagerServiceTest, getLowestPriorityBiggestClient_l) {
+    testGetLowestPriorityBiggestClient();
+}
+
+TEST_F(ResourceManagerServiceTest, getLowestPriorityPid_l) {
+    testGetLowestPriorityPid();
+}
+
+TEST_F(ResourceManagerServiceTest, getBiggestClient_l) {
+    testGetBiggestClient();
+}
+
+TEST_F(ResourceManagerServiceTest, isCallingPriorityHigher_l) {
+    testIsCallingPriorityHigher();
+}
+
+} // namespace android
diff --git a/services/mediaresourcemanager/test/ServiceLog_test.cpp b/services/mediaresourcemanager/test/ServiceLog_test.cpp
new file mode 100644
index 0000000..9172499
--- /dev/null
+++ b/services/mediaresourcemanager/test/ServiceLog_test.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ServiceLog_test"
+#include <utils/Log.h>
+
+#include <gtest/gtest.h>
+
+#include "ServiceLog.h"
+
+namespace android {
+
+class ServiceLogTest : public ::testing::Test {
+public:
+    ServiceLogTest() : mServiceLog(new ServiceLog(3)) {
+    }
+
+protected:
+    sp<ServiceLog> mServiceLog;
+};
+
+TEST_F(ServiceLogTest, addThenToString) {
+    String8 logString;
+
+    mServiceLog->add(String8("log1"));
+    logString = mServiceLog->toString();
+    EXPECT_TRUE(logString.contains("log1"));
+    ALOGV("toString:\n%s", logString.string());
+
+    static const char kTestLogPrefix[] = "testlogprefix: ";
+    logString = mServiceLog->toString(kTestLogPrefix);
+    EXPECT_TRUE(logString.contains(kTestLogPrefix));
+    EXPECT_TRUE(logString.contains("log1"));
+    ALOGV("toString:\n%s", logString.string());
+
+    mServiceLog->add(String8("log2"));
+    logString = mServiceLog->toString();
+    EXPECT_TRUE(logString.contains("log1"));
+    EXPECT_TRUE(logString.contains("log2"));
+    ALOGV("toString:\n%s", logString.string());
+
+    mServiceLog->add(String8("log3"));
+    logString = mServiceLog->toString();
+    EXPECT_TRUE(logString.contains("log1"));
+    EXPECT_TRUE(logString.contains("log2"));
+    EXPECT_TRUE(logString.contains("log3"));
+    ALOGV("toString:\n%s", logString.string());
+
+    mServiceLog->add(String8("log4"));
+    logString = mServiceLog->toString();
+    EXPECT_FALSE(logString.contains("log1"));
+    EXPECT_TRUE(logString.contains("log2"));
+    EXPECT_TRUE(logString.contains("log3"));
+    EXPECT_TRUE(logString.contains("log4"));
+    ALOGV("toString:\n%s", logString.string());
+
+    mServiceLog->add(String8("log5"));
+    logString = mServiceLog->toString();
+    EXPECT_FALSE(logString.contains("log1"));
+    EXPECT_FALSE(logString.contains("log2"));
+    EXPECT_TRUE(logString.contains("log3"));
+    EXPECT_TRUE(logString.contains("log4"));
+    EXPECT_TRUE(logString.contains("log5"));
+    ALOGV("toString:\n%s", logString.string());
+}
+
+} // namespace android
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
new file mode 100644
index 0000000..9ee5666
--- /dev/null
+++ b/services/radio/Android.mk
@@ -0,0 +1,36 @@
+# Copyright 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+
+LOCAL_SRC_FILES:=               \
+    RadioService.cpp
+
+LOCAL_SHARED_LIBRARIES:= \
+    libui \
+    liblog \
+    libutils \
+    libbinder \
+    libcutils \
+    libmedia \
+    libhardware \
+    libradio \
+    libradio_metadata
+
+LOCAL_MODULE:= libradioservice
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/services/radio/RadioRegions.h b/services/radio/RadioRegions.h
new file mode 100644
index 0000000..3335b8a
--- /dev/null
+++ b/services/radio/RadioRegions.h
@@ -0,0 +1,225 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_REGIONS_H
+#define ANDROID_HARDWARE_RADIO_REGIONS_H
+
+namespace android {
+
+#define RADIO_BAND_LOWER_FM_ITU1    87500
+#define RADIO_BAND_UPPER_FM_ITU1    108000
+#define RADIO_BAND_SPACING_FM_ITU1  100
+
+#define RADIO_BAND_LOWER_FM_ITU2    87900
+#define RADIO_BAND_UPPER_FM_ITU2    107900
+#define RADIO_BAND_SPACING_FM_ITU2  200
+
+#define RADIO_BAND_LOWER_FM_JAPAN    76000
+#define RADIO_BAND_UPPER_FM_JAPAN    90000
+#define RADIO_BAND_SPACING_FM_JAPAN  100
+
+#define RADIO_BAND_LOWER_FM_OIRT    65800
+#define RADIO_BAND_UPPER_FM_OIRT    74000
+#define RADIO_BAND_SPACING_FM_OIRT  10
+
+#define RADIO_BAND_LOWER_LW         153
+#define RADIO_BAND_UPPER_LW         279
+#define RADIO_BAND_SPACING_LW       9
+
+#define RADIO_BAND_LOWER_MW_ITU1    531
+#define RADIO_BAND_UPPER_MW_ITU1    1611
+#define RADIO_BAND_SPACING_MW_ITU1  9
+
+#define RADIO_BAND_LOWER_MW_ITU2    540
+#define RADIO_BAND_UPPER_MW_ITU2    1610
+#define RADIO_BAND_SPACING_MW_ITU2  10
+
+#define RADIO_BAND_LOWER_SW         2300
+#define RADIO_BAND_UPPER_SW         26100
+#define RADIO_BAND_SPACING_SW       5
+
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
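+// Band configurations for the regions the service knows about; convertProperties() advertises
+// only the entries that the HAL-reported bands can satisfy.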
+const radio_band_config_t sKnownRegionConfigs[] = {
+    {   // FM ITU 1
+        RADIO_REGION_ITU_1,
+        {
+            RADIO_BAND_FM,
+            false,
+            RADIO_BAND_LOWER_FM_ITU1,
+            RADIO_BAND_UPPER_FM_ITU1,
+            1,
+            {RADIO_BAND_SPACING_FM_ITU1},
+            {
+            RADIO_DEEMPHASIS_50,
+            true,
+            RADIO_RDS_WORLD,
+            true,
+            true,
+            }
+        }
+    },
+    {   // FM Americas
+        RADIO_REGION_ITU_2,
+        {
+            RADIO_BAND_FM,
+            false,
+            RADIO_BAND_LOWER_FM_ITU2,
+            RADIO_BAND_UPPER_FM_ITU2,
+            1,
+            {RADIO_BAND_SPACING_FM_ITU2},
+            {
+            RADIO_DEEMPHASIS_75,
+            true,
+            RADIO_RDS_US,
+            true,
+            true,
+            }
+        }
+    },
+    {   // FM Japan
+        RADIO_REGION_JAPAN,
+        {
+            RADIO_BAND_FM,
+            false,
+            RADIO_BAND_LOWER_FM_JAPAN,
+            RADIO_BAND_UPPER_FM_JAPAN,
+            1,
+            {RADIO_BAND_SPACING_FM_JAPAN},
+            {
+            RADIO_DEEMPHASIS_50,
+            true,
+            RADIO_RDS_WORLD,
+            true,
+            true,
+            }
+        }
+    },
+    {   // FM Korea
+        RADIO_REGION_KOREA,
+        {
+            RADIO_BAND_FM,
+            false,
+            RADIO_BAND_LOWER_FM_ITU1,
+            RADIO_BAND_UPPER_FM_ITU1,
+            1,
+            {RADIO_BAND_SPACING_FM_ITU1},
+            {
+            RADIO_DEEMPHASIS_75,
+            true,
+            RADIO_RDS_WORLD,
+            true,
+            true,
+            }
+        }
+    },
+    {   // FM OIRT
+        RADIO_REGION_OIRT,
+        {
+            RADIO_BAND_FM,
+            false,
+            RADIO_BAND_LOWER_FM_OIRT,
+            RADIO_BAND_UPPER_FM_OIRT,
+            1,
+            {RADIO_BAND_SPACING_FM_OIRT},
+            {
+            RADIO_DEEMPHASIS_50,
+            true,
+            RADIO_RDS_WORLD,
+            true,
+            true,
+            }
+        }
+    },
+    {   // FM US HD radio
+        RADIO_REGION_ITU_2,
+        {
+            RADIO_BAND_FM_HD,
+            false,
+            RADIO_BAND_LOWER_FM_ITU2,
+            RADIO_BAND_UPPER_FM_ITU2,
+            1,
+            {RADIO_BAND_SPACING_FM_ITU2},
+            {
+            RADIO_DEEMPHASIS_75,
+            true,
+            RADIO_RDS_US,
+            true,
+            true,
+            }
+        }
+    },
+    {   // AM LW
+        RADIO_REGION_ITU_1,
+        {
+            RADIO_BAND_AM,
+            false,
+            RADIO_BAND_LOWER_LW,
+            RADIO_BAND_UPPER_LW,
+            1,
+            {RADIO_BAND_SPACING_LW},
+            {
+            }
+        }
+    },
+    {   // AM SW
+        RADIO_REGION_ITU_1,
+        {
+            RADIO_BAND_AM,
+            false,
+            RADIO_BAND_LOWER_SW,
+            RADIO_BAND_UPPER_SW,
+            1,
+            {RADIO_BAND_SPACING_SW},
+            {
+            }
+        }
+    },
+    {   // AM MW ITU1
+        RADIO_REGION_ITU_1,
+        {
+            RADIO_BAND_AM,
+            false,
+            RADIO_BAND_LOWER_MW_ITU1,
+            RADIO_BAND_UPPER_MW_ITU1,
+            1,
+            {RADIO_BAND_SPACING_MW_ITU1},
+            {
+            }
+        }
+    },
+    {   // AM MW ITU2
+        RADIO_REGION_ITU_2,
+        {
+            RADIO_BAND_AM,
+            false,
+            RADIO_BAND_LOWER_MW_ITU2,
+            RADIO_BAND_UPPER_MW_ITU2,
+            1,
+            {RADIO_BAND_SPACING_MW_ITU2},
+            {
+            }
+        }
+    }
+};
+
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_REGIONS_H
diff --git a/services/radio/RadioService.cpp b/services/radio/RadioService.cpp
new file mode 100644
index 0000000..cd0f5f3
--- /dev/null
+++ b/services/radio/RadioService.cpp
@@ -0,0 +1,900 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "RadioService"
+//#define LOG_NDEBUG 0
+
+#include <stdio.h>
+#include <string.h>
+#include <sys/types.h>
+#include <pthread.h>
+
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <system/radio.h>
+#include <system/radio_metadata.h>
+#include <cutils/atomic.h>
+#include <cutils/properties.h>
+#include <hardware/hardware.h>
+#include <utils/Errors.h>
+#include <utils/Log.h>
+#include <binder/IServiceManager.h>
+#include <binder/MemoryBase.h>
+#include <binder/MemoryHeapBase.h>
+#include <hardware/radio.h>
+#include <media/AudioSystem.h>
+#include "RadioService.h"
+#include "RadioRegions.h"
+
+namespace android {
+
+static const char kRadioTunerAudioDeviceName[] = "Radio tuner source";
+
+RadioService::RadioService()
+    : BnRadioService(), mNextUniqueId(1)
+{
+    ALOGI("%s", __FUNCTION__);
+}
+
+void RadioService::onFirstRef()
+{
+    const hw_module_t *mod;
+    int rc;
+    struct radio_hw_device *dev;
+
+    ALOGI("%s", __FUNCTION__);
+
+    rc = hw_get_module_by_class(RADIO_HARDWARE_MODULE_ID, RADIO_HARDWARE_MODULE_ID_FM, &mod);
+    if (rc != 0) {
+        ALOGE("couldn't load radio module %s.%s (%s)",
+              RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
+        return;
+    }
+    rc = radio_hw_device_open(mod, &dev);
+    if (rc != 0) {
+        ALOGE("couldn't open radio hw device in %s.%s (%s)",
+              RADIO_HARDWARE_MODULE_ID, "primary", strerror(-rc));
+        return;
+    }
+    if (dev->common.version != RADIO_DEVICE_API_VERSION_CURRENT) {
+        ALOGE("wrong radio hw device version %04x", dev->common.version);
+        return;
+    }
+
+    struct radio_hal_properties halProperties;
+    rc = dev->get_properties(dev, &halProperties);
+    if (rc != 0) {
+        ALOGE("could not read implementation properties");
+        return;
+    }
+
+    radio_properties_t properties;
+    // Convert the HAL properties first: convertProperties() zero-initializes the whole struct,
+    // so the handle must be assigned (and logged) afterwards.
+    convertProperties(&properties, &halProperties);
+    properties.handle =
+            (radio_handle_t)android_atomic_inc(&mNextUniqueId);
+
+    ALOGI("loaded default module %s, handle %d", properties.product, properties.handle);
+
+    sp<Module> module = new Module(dev, properties);
+    mModules.add(properties.handle, module);
+}
+
+RadioService::~RadioService()
+{
+    for (size_t i = 0; i < mModules.size(); i++) {
+        radio_hw_device_close(mModules.valueAt(i)->hwDevice());
+    }
+}
+
+status_t RadioService::listModules(struct radio_properties *properties,
+                             uint32_t *numModules)
+{
+    ALOGV("listModules");
+
+    AutoMutex lock(mServiceLock);
+    if (numModules == NULL || (*numModules != 0 && properties == NULL)) {
+        return BAD_VALUE;
+    }
+    size_t maxModules = *numModules;
+    *numModules = mModules.size();
+    for (size_t i = 0; i < mModules.size() && i < maxModules; i++) {
+        properties[i] = mModules.valueAt(i)->properties();
+    }
+    return NO_ERROR;
+}
+
+status_t RadioService::attach(radio_handle_t handle,
+                        const sp<IRadioClient>& client,
+                        const struct radio_band_config *config,
+                        bool withAudio,
+                        sp<IRadio>& radio)
+{
+    ALOGV("%s %d config %p withAudio %d", __FUNCTION__, handle, config, withAudio);
+
+    AutoMutex lock(mServiceLock);
+    radio.clear();
+    if (client == 0) {
+        return BAD_VALUE;
+    }
+    ssize_t index = mModules.indexOfKey(handle);
+    if (index < 0) {
+        return BAD_VALUE;
+    }
+    sp<Module> module = mModules.valueAt(index);
+
+    if (config == NULL) {
+        config = module->getDefaultConfig();
+        if (config == NULL) {
+            return INVALID_OPERATION;
+        }
+    }
+    ALOGV("%s region %d type %d", __FUNCTION__, config->region, config->band.type);
+
+    radio = module->addClient(client, config, withAudio);
+
+    if (radio == 0) {
+        return NO_INIT;
+    }
+    return NO_ERROR;
+}
+
+
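+// dump() must not block forever on a wedged service: try the lock a bounded number of times,
+// sleeping kDumpLockSleep microseconds between attempts.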
+static const int kDumpLockRetries = 50;
+static const int kDumpLockSleep = 60000;
+
+static bool tryLock(Mutex& mutex)
+{
+    bool locked = false;
+    for (int i = 0; i < kDumpLockRetries; ++i) {
+        if (mutex.tryLock() == NO_ERROR) {
+            locked = true;
+            break;
+        }
+        usleep(kDumpLockSleep);
+    }
+    return locked;
+}
+
+status_t RadioService::dump(int fd, const Vector<String16>& args __unused) {
+    String8 result;
+    if (checkCallingPermission(String16("android.permission.DUMP")) == false) {
+        result.appendFormat("Permission Denial: can't dump RadioService");
+        write(fd, result.string(), result.size());
+    } else {
+        bool locked = tryLock(mServiceLock);
+        // failed to lock - RadioService is probably deadlocked
+        if (!locked) {
+            result.append("RadioService may be deadlocked\n");
+            write(fd, result.string(), result.size());
+        }
+
+        if (locked) mServiceLock.unlock();
+    }
+    return NO_ERROR;
+}
+
+status_t RadioService::onTransact(
+    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) {
+    return BnRadioService::onTransact(code, data, reply, flags);
+}
+
+
+// static
+void RadioService::callback(radio_hal_event_t *halEvent, void *cookie)
+{
+    CallbackThread *callbackThread = (CallbackThread *)cookie;
+    if (callbackThread == NULL) {
+        return;
+    }
+    callbackThread->sendEvent(halEvent);
+}
+
+/* static */
+void RadioService::convertProperties(radio_properties_t *properties,
+                                     const radio_hal_properties_t *halProperties)
+{
+    memset(properties, 0, sizeof(struct radio_properties));
+    properties->class_id = halProperties->class_id;
+    strlcpy(properties->implementor, halProperties->implementor,
+            RADIO_STRING_LEN_MAX);
+    strlcpy(properties->product, halProperties->product,
+            RADIO_STRING_LEN_MAX);
+    strlcpy(properties->version, halProperties->version,
+            RADIO_STRING_LEN_MAX);
+    strlcpy(properties->serial, halProperties->serial,
+            RADIO_STRING_LEN_MAX);
+    properties->num_tuners = halProperties->num_tuners;
+    properties->num_audio_sources = halProperties->num_audio_sources;
+    properties->supports_capture = halProperties->supports_capture;
+
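+    // For each known region configuration, look for a HAL band that can satisfy it: same band
+    // type, a frequency range that contains it, a matching channel spacing and, for FM,
+    // compatible de-emphasis and RDS support. Only satisfied configurations are advertised.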
+    for (size_t i = 0; i < ARRAY_SIZE(sKnownRegionConfigs); i++) {
+        const radio_hal_band_config_t *band = &sKnownRegionConfigs[i].band;
+        size_t j;
+        for (j = 0; j < halProperties->num_bands; j++) {
+            const radio_hal_band_config_t *halBand = &halProperties->bands[j];
+            size_t k;
+            if (band->type != halBand->type) continue;
+            if (band->lower_limit < halBand->lower_limit) continue;
+            if (band->upper_limit > halBand->upper_limit) continue;
+            for (k = 0; k < halBand->num_spacings; k++) {
+                if (band->spacings[0] == halBand->spacings[k]) break;
+            }
+            if (k == halBand->num_spacings) continue;
+            if (band->type == RADIO_BAND_AM) break;
+            if ((band->fm.deemphasis & halBand->fm.deemphasis) == 0) continue;
+            if (halBand->fm.rds == 0) break;
+            if ((band->fm.rds & halBand->fm.rds) != 0) break;
+        }
+        if (j == halProperties->num_bands) continue;
+
+        ALOGI("convertProperties() Adding band type %d region %d",
+              sKnownRegionConfigs[i].band.type , sKnownRegionConfigs[i].region);
+
+        memcpy(&properties->bands[properties->num_bands++],
+               &sKnownRegionConfigs[i],
+               sizeof(radio_band_config_t));
+    }
+}
+
+#undef LOG_TAG
+#define LOG_TAG "RadioService::CallbackThread"
+
+RadioService::CallbackThread::CallbackThread(const wp<ModuleClient>& moduleClient)
+    : mModuleClient(moduleClient), mMemoryDealer(new MemoryDealer(1024 * 1024, "RadioService"))
+{
+}
+
+RadioService::CallbackThread::~CallbackThread()
+{
+    mEventQueue.clear();
+}
+
+void RadioService::CallbackThread::onFirstRef()
+{
+    run("RadioService cbk", ANDROID_PRIORITY_URGENT_AUDIO);
+}
+
+bool RadioService::CallbackThread::threadLoop()
+{
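+    // Consumer loop: wait for events queued by sendEvent() and deliver them to the client.
+    // The event is handed to the ModuleClient outside of mCallbackLock so that the callback
+    // cannot deadlock against new events being queued.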
+    while (!exitPending()) {
+        sp<IMemory> eventMemory;
+        sp<ModuleClient> moduleClient;
+        {
+            Mutex::Autolock _l(mCallbackLock);
+            while (mEventQueue.isEmpty() && !exitPending()) {
+                ALOGV("CallbackThread::threadLoop() sleep");
+                mCallbackCond.wait(mCallbackLock);
+                ALOGV("CallbackThread::threadLoop() wake up");
+            }
+            if (exitPending()) {
+                break;
+            }
+            eventMemory = mEventQueue[0];
+            mEventQueue.removeAt(0);
+            moduleClient = mModuleClient.promote();
+        }
+        if (moduleClient != 0) {
+            moduleClient->onCallbackEvent(eventMemory);
+            eventMemory.clear();
+        }
+    }
+    return false;
+}
+
+void RadioService::CallbackThread::exit()
+{
+    Mutex::Autolock _l(mCallbackLock);
+    requestExit();
+    mCallbackCond.broadcast();
+}
+
+sp<IMemory> RadioService::CallbackThread::prepareEvent(radio_hal_event_t *halEvent)
+{
+    sp<IMemory> eventMemory;
+
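+    // Serialize the HAL event into shared memory so it can be delivered to the client process;
+    // variable-sized metadata, when present, is appended after the fixed-size header, which is
+    // rounded up to a whole number of unsigned ints to keep the metadata word aligned.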
+    size_t headerSize =
+            ((sizeof(struct radio_event) + sizeof(unsigned int) - 1) / sizeof(unsigned int)) *
+            sizeof(unsigned int);
+    size_t metadataSize = 0;
+    switch (halEvent->type) {
+    case RADIO_EVENT_TUNED:
+    case RADIO_EVENT_AF_SWITCH:
+        if (radio_metadata_check(halEvent->info.metadata) == 0) {
+            metadataSize = radio_metadata_get_size(halEvent->info.metadata);
+        }
+        break;
+    case RADIO_EVENT_METADATA:
+        if (radio_metadata_check(halEvent->metadata) != 0) {
+            return eventMemory;
+        }
+        metadataSize = radio_metadata_get_size(halEvent->metadata);
+        break;
+    default:
+        break;
+    }
+    size_t size = headerSize + metadataSize;
+    eventMemory = mMemoryDealer->allocate(size);
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        eventMemory.clear();
+        return eventMemory;
+    }
+    struct radio_event *event = (struct radio_event *)eventMemory->pointer();
+    event->type = halEvent->type;
+    event->status = halEvent->status;
+
+    switch (event->type) {
+    case RADIO_EVENT_CONFIG:
+        event->config.band = halEvent->config;
+        break;
+    case RADIO_EVENT_TUNED:
+    case RADIO_EVENT_AF_SWITCH:
+        event->info = halEvent->info;
+        if (metadataSize != 0) {
+            memcpy((char *)event + headerSize, halEvent->info.metadata, metadataSize);
+            // Replace the metadata pointer with its byte offset inside the shared
+            // memory block so that the receiving process can rebuild a valid pointer
+            // on its side (see the sketch after this function).
+            event->info.metadata = (radio_metadata_t *)headerSize;
+        }
+        break;
+    case RADIO_EVENT_TA:
+    case RADIO_EVENT_ANTENNA:
+    case RADIO_EVENT_CONTROL:
+        event->on = halEvent->on;
+        break;
+    case RADIO_EVENT_METADATA:
+        memcpy((char *)event + headerSize, halEvent->metadata, metadataSize);
+        // Replace the metadata pointer with its byte offset inside the shared
+        // memory block so that the receiving process can rebuild a valid pointer
+        // on its side.
+        event->metadata = (radio_metadata_t *)headerSize;
+        break;
+    case RADIO_EVENT_HW_FAILURE:
+    default:
+        break;
+    }
+
+    return eventMemory;
+}
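prepareEvent() stores the copied metadata at a byte offset inside the shared IMemory block and overwrites the pointer field with that offset. A minimal sketch of the receiving side, assuming the same struct radio_event layout (this helper is illustrative and not part of the patch):

// Illustrative only -- not part of this patch: once the IMemory has been
// mapped in the receiving process, the offset stored above can be turned
// back into a pointer (shown here for the RADIO_EVENT_METADATA case).
static radio_metadata_t *restoreMetadataPointer(const sp<IMemory>& eventMemory) {
    struct radio_event *event = (struct radio_event *)eventMemory->pointer();
    if (event->type != RADIO_EVENT_METADATA) {
        return NULL;
    }
    // event->metadata currently holds the byte offset written by prepareEvent().
    return (radio_metadata_t *)((char *)event + (size_t)event->metadata);
}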
+
+void RadioService::CallbackThread::sendEvent(radio_hal_event_t *event)
+{
+    sp<IMemory> eventMemory = prepareEvent(event);
+    if (eventMemory == 0) {
+        return;
+    }
+
+    AutoMutex lock(mCallbackLock);
+    mEventQueue.add(eventMemory);
+    mCallbackCond.signal();
+    ALOGV("%s DONE", __FUNCTION__);
+}
+
+
+#undef LOG_TAG
+#define LOG_TAG "RadioService::Module"
+
+RadioService::Module::Module(radio_hw_device* hwDevice, radio_properties properties)
+ : mHwDevice(hwDevice), mProperties(properties), mMute(true)
+{
+}
+
+RadioService::Module::~Module() {
+    mModuleClients.clear();
+}
+
+status_t RadioService::Module::dump(int fd __unused, const Vector<String16>& args __unused) {
+    String8 result;
+    return NO_ERROR;
+}
+
+sp<RadioService::ModuleClient> RadioService::Module::addClient(const sp<IRadioClient>& client,
+                                    const struct radio_band_config *config,
+                                    bool audio)
+{
+    ALOGV("addClient() %p config %p product %s", this, config, mProperties.product);
+    AutoMutex lock(mLock);
+    sp<ModuleClient> moduleClient;
+    int ret;
+
+    for (size_t i = 0; i < mModuleClients.size(); i++) {
+        if (mModuleClients[i]->client() == client) {
+            // client already connected: reject
+            return moduleClient;
+        }
+    }
+    moduleClient = new ModuleClient(this, client, config, audio);
+
+    struct radio_hal_band_config halConfig;
+    halConfig = config->band;
+
+    // Tuner preemption logic:
+    // There is a limited amount of tuners and a limited amount of radio audio sources per module.
+    // The minimum is one tuner and one audio source.
+    // The numbers of tuners and sources are indicated in the module properties.
+    // NOTE: current framework implementation only supports one radio audio source.
+    // It is possible to open more than one tuner at a time but only one tuner can be connected
+    // to the radio audio source (AUDIO_DEVICE_IN_FM_TUNER).
+    // The base rule is that a newly connected tuner always wins, i.e. always gets a tuner
+    // and can use the audio source if requested.
+    // If another client is preempted, it is notified by a callback with RADIO_EVENT_CONTROL
+    // indicating loss of control.
+    // - If the newly connected client requests the audio source (audio == true):
+    //    - if an audio source is available
+    //          no problem
+    //    - if not:
+    //          the oldest client in the list using audio is preempted.
+    // - If the newly connected client does not request the audio source (audio == false):
+    //    - if a tuner is available
+    //          no problem
+    //    - if not:
+    //          The oldest client not using audio is preempted first and, if none is found,
+    //          the oldest client using audio is preempted.
+    // Each time a tuner using the audio source is opened or closed, the audio policy manager is
+    // notified of the connection or disconnection of AUDIO_DEVICE_IN_FM_TUNER.
+    // (A standalone sketch of this preemption policy follows this function.)
+
+    sp<ModuleClient> oldestTuner;
+    sp<ModuleClient> oldestAudio;
+    size_t allocatedTuners = 0;
+    size_t allocatedAudio = 0;
+    for (size_t i = 0; i < mModuleClients.size(); i++) {
+        if (mModuleClients[i]->getTuner() != NULL) {
+            if (mModuleClients[i]->audio()) {
+                if (oldestAudio == 0) {
+                    oldestAudio = mModuleClients[i];
+                }
+                allocatedAudio++;
+            } else {
+                if (oldestTuner == 0) {
+                    oldestTuner = mModuleClients[i];
+                }
+                allocatedTuners++;
+            }
+        }
+    }
+
+    const struct radio_tuner *halTuner;
+    sp<ModuleClient> preemptedClient;
+    if (audio) {
+        if (allocatedAudio >= mProperties.num_audio_sources) {
+            ALOG_ASSERT(oldestAudio != 0, "addClient() allocatedAudio/oldestAudio mismatch");
+            preemptedClient = oldestAudio;
+        }
+    } else {
+        if (allocatedAudio + allocatedTuners >= mProperties.num_tuners) {
+            if (allocatedTuners != 0) {
+                ALOG_ASSERT(oldestTuner != 0, "addClient() allocatedTuners/oldestTuner mismatch");
+                preemptedClient = oldestTuner;
+            } else {
+                ALOG_ASSERT(oldestAudio != 0, "addClient() allocatedAudio/oldestAudio mismatch");
+                preemptedClient = oldestAudio;
+            }
+        }
+    }
+    if (preemptedClient != 0) {
+        halTuner = preemptedClient->getTuner();
+        preemptedClient->setTuner(NULL);
+        mHwDevice->close_tuner(mHwDevice, halTuner);
+        if (preemptedClient->audio()) {
+            notifyDeviceConnection(false, "");
+        }
+    }
+
+    ret = mHwDevice->open_tuner(mHwDevice, &halConfig, audio,
+                                RadioService::callback, moduleClient->callbackThread().get(),
+                                &halTuner);
+    if (ret == 0) {
+        ALOGV("addClient() setTuner %p", halTuner);
+        moduleClient->setTuner(halTuner);
+        mModuleClients.add(moduleClient);
+        if (audio) {
+            notifyDeviceConnection(true, "");
+        }
+        ALOGV("addClient() DONE moduleClient %p", moduleClient.get());
+    } else {
+        ALOGW("%s open_tuner failed with error %d", __FUNCTION__, ret);
+        moduleClient.clear();
+    }
+
+    return moduleClient;
+}
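The preemption rules in addClient() reduce to one decision: given what is already allocated, which client (if any) must give up its tuner. A standalone restatement of that policy as an illustrative sketch only; the helper name is hypothetical and the patch itself implements this inline:

// Illustrative sketch, not part of the patch: the preemption decision made
// inline by Module::addClient(), factored out for readability.
static sp<RadioService::ModuleClient> pickClientToPreempt(
        bool wantsAudio,
        size_t allocatedAudio, size_t allocatedTuners,
        const struct radio_properties &properties,
        const sp<RadioService::ModuleClient> &oldestAudio,
        const sp<RadioService::ModuleClient> &oldestTuner) {
    if (wantsAudio) {
        if (allocatedAudio < properties.num_audio_sources) {
            return NULL;          // an audio source is still free
        }
        return oldestAudio;       // the oldest audio client loses its tuner
    }
    if (allocatedAudio + allocatedTuners < properties.num_tuners) {
        return NULL;              // a tuner is still free, nobody is preempted
    }
    if (allocatedTuners != 0) {
        return oldestTuner;       // prefer preempting a non-audio client
    }
    return oldestAudio;           // fall back to the oldest audio client
}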
+
+void RadioService::Module::removeClient(const sp<ModuleClient>& moduleClient) {
+    ALOGV("removeClient()");
+    AutoMutex lock(mLock);
+    int ret;
+    ssize_t index = -1;
+
+    for (size_t i = 0; i < mModuleClients.size(); i++) {
+        if (mModuleClients[i] == moduleClient) {
+            index = i;
+            break;
+        }
+    }
+    if (index == -1) {
+        return;
+    }
+
+    mModuleClients.removeAt(index);
+    const struct radio_tuner *halTuner = moduleClient->getTuner();
+    if (halTuner == NULL) {
+        return;
+    }
+
+    mHwDevice->close_tuner(mHwDevice, halTuner);
+    if (moduleClient->audio()) {
+        notifyDeviceConnection(false, "");
+    }
+
+    mMute = true;
+
+    if (mModuleClients.isEmpty()) {
+        return;
+    }
+
+    // Tuner reallocation logic:
+    // When a client is removed and was controlling a tuner, this tuner will be allocated to a
+    // previously preempted client. This client will be notified by a callback with
+    // RADIO_EVENT_CONTROL indicating gain of control.
+    // - If a preempted client is waiting for an audio source and one becomes available:
+    //    Allocate the tuner to the most recently added client waiting for an audio source
+    // - If not:
+    //    Allocate the tuner to the most recently added client.
+    // Each time a tuner using the audio source is opened or closed, the audio policy manager is
+    // notified of the connection or disconnection of AUDIO_DEVICE_IN_FM_TUNER.
+    // (A standalone sketch of this reallocation choice follows this function.)
+
+    sp<ModuleClient> youngestClient;
+    sp<ModuleClient> youngestClientAudio;
+    size_t allocatedTuners = 0;
+    size_t allocatedAudio = 0;
+    for (ssize_t i = mModuleClients.size() - 1; i >= 0; i--) {
+        if (mModuleClients[i]->getTuner() == NULL) {
+            if (mModuleClients[i]->audio()) {
+                if (youngestClientAudio == 0) {
+                    youngestClientAudio = mModuleClients[i];
+                }
+            } else {
+                if (youngestClient == 0) {
+                    youngestClient = mModuleClients[i];
+                }
+            }
+        } else {
+            if (mModuleClients[i]->audio()) {
+                allocatedAudio++;
+            } else {
+                allocatedTuners++;
+            }
+        }
+    }
+
+    ALOG_ASSERT(allocatedTuners + allocatedAudio < mProperties.num_tuners,
+                "removeClient() removed client but no tuner available");
+
+    ALOG_ASSERT(!moduleClient->audio() || allocatedAudio < mProperties.num_audio_sources,
+                "removeClient() removed audio client but no tuner with audio available");
+
+    if (allocatedAudio < mProperties.num_audio_sources && youngestClientAudio != 0) {
+        youngestClient = youngestClientAudio;
+    }
+
+    ALOG_ASSERT(youngestClient != 0, "removeClient() removed client no candidate found for tuner");
+
+    struct radio_hal_band_config halConfig = youngestClient->halConfig();
+    ret = mHwDevice->open_tuner(mHwDevice, &halConfig, youngestClient->audio(),
+                                RadioService::callback, youngestClient->callbackThread().get(),
+                                &halTuner);
+
+    if (ret == 0) {
+        youngestClient->setTuner(halTuner);
+        if (youngestClient->audio()) {
+            notifyDeviceConnection(true, "");
+        }
+    }
+}
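As with preemption, the reallocation choice in removeClient() can be read as a small standalone selection over the clients that are currently without a tuner. An illustrative sketch only (hypothetical helper, not part of the patch):

// Illustrative sketch, not part of the patch: how Module::removeClient()
// picks the waiting client that gets the freed tuner.
static sp<RadioService::ModuleClient> pickClientToResume(
        const Vector< sp<RadioService::ModuleClient> > &clients,
        size_t allocatedAudio, size_t numAudioSources) {
    sp<RadioService::ModuleClient> youngest;       // most recent, no audio
    sp<RadioService::ModuleClient> youngestAudio;  // most recent, wants audio
    for (ssize_t i = clients.size() - 1; i >= 0; i--) {
        if (clients[i]->getTuner() != NULL) {
            continue;                              // already has a tuner
        }
        if (clients[i]->audio()) {
            if (youngestAudio == 0) youngestAudio = clients[i];
        } else {
            if (youngest == 0) youngest = clients[i];
        }
    }
    // Give priority to a client waiting for audio if an audio source is free.
    if (allocatedAudio < numAudioSources && youngestAudio != 0) {
        return youngestAudio;
    }
    return youngest;                               // may be NULL
}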
+
+status_t RadioService::Module::setMute(bool mute)
+{
+    Mutex::Autolock _l(mLock);
+    if (mute != mMute) {
+        mMute = mute;
+        // TODO: notify audio policy manager of media activity on radio audio device
+    }
+    return NO_ERROR;
+}
+
+status_t RadioService::Module::getMute(bool *mute)
+{
+    Mutex::Autolock _l(mLock);
+    *mute = mMute;
+    return NO_ERROR;
+}
+
+
+const struct radio_band_config *RadioService::Module::getDefaultConfig() const
+{
+    if (mProperties.num_bands == 0) {
+        return NULL;
+    }
+    return &mProperties.bands[0];
+}
+
+void RadioService::Module::notifyDeviceConnection(bool connected,
+                                                  const char *address) {
+    int64_t token = IPCThreadState::self()->clearCallingIdentity();
+    AudioSystem::setDeviceConnectionState(AUDIO_DEVICE_IN_FM_TUNER,
+                                          connected ? AUDIO_POLICY_DEVICE_STATE_AVAILABLE :
+                                                  AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+                                          address, kRadioTunerAudioDeviceName);
+    IPCThreadState::self()->restoreCallingIdentity(token);
+}
+
+#undef LOG_TAG
+#define LOG_TAG "RadioService::ModuleClient"
+
+RadioService::ModuleClient::ModuleClient(const sp<Module>& module,
+                                         const sp<IRadioClient>& client,
+                                         const struct radio_band_config *config,
+                                         bool audio)
+ : mModule(module), mClient(client), mConfig(*config), mAudio(audio), mTuner(NULL)
+{
+}
+
+void RadioService::ModuleClient::onFirstRef()
+{
+    mCallbackThread = new CallbackThread(this);
+    IInterface::asBinder(mClient)->linkToDeath(this);
+}
+
+RadioService::ModuleClient::~ModuleClient() {
+    if (mClient != 0) {
+        IInterface::asBinder(mClient)->unlinkToDeath(this);
+        mClient.clear();
+    }
+    if (mCallbackThread != 0) {
+        mCallbackThread->exit();
+    }
+}
+
+status_t RadioService::ModuleClient::dump(int fd __unused,
+                                             const Vector<String16>& args __unused) {
+    String8 result;
+    return NO_ERROR;
+}
+
+void RadioService::ModuleClient::detach() {
+    ALOGV("%s", __FUNCTION__);
+    sp<ModuleClient> strongMe = this;
+    {
+        AutoMutex lock(mLock);
+        if (mClient != 0) {
+            IInterface::asBinder(mClient)->unlinkToDeath(this);
+            mClient.clear();
+        }
+    }
+    sp<Module> module = mModule.promote();
+    if (module == 0) {
+        return;
+    }
+    module->removeClient(this);
+}
+
+radio_hal_band_config_t RadioService::ModuleClient::halConfig() const
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    return mConfig.band;
+}
+
+const struct radio_tuner *RadioService::ModuleClient::getTuner() const
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    return mTuner;
+}
+
+void RadioService::ModuleClient::setTuner(const struct radio_tuner *tuner)
+{
+    ALOGV("%s %p", __FUNCTION__, this);
+
+    AutoMutex lock(mLock);
+    mTuner = tuner;
+    ALOGV("%s locked", __FUNCTION__);
+
+    radio_hal_event_t event;
+    event.type = RADIO_EVENT_CONTROL;
+    event.status = 0;
+    event.on = mTuner != NULL;
+    mCallbackThread->sendEvent(&event);
+    ALOGV("%s DONE", __FUNCTION__);
+
+}
+
+status_t RadioService::ModuleClient::setConfiguration(const struct radio_band_config *config)
+{
+    AutoMutex lock(mLock);
+    status_t status = NO_ERROR;
+    ALOGV("%s locked", __FUNCTION__);
+
+    if (mTuner != NULL) {
+        struct radio_hal_band_config halConfig;
+        halConfig = config->band;
+        status = (status_t)mTuner->set_configuration(mTuner, &halConfig);
+        if (status == NO_ERROR) {
+            mConfig = *config;
+        }
+    } else {
+        mConfig = *config;
+        status = INVALID_OPERATION;
+    }
+
+    return status;
+}
+
+status_t RadioService::ModuleClient::getConfiguration(struct radio_band_config *config)
+{
+    AutoMutex lock(mLock);
+    status_t status = NO_ERROR;
+    ALOGV("%s locked", __FUNCTION__);
+
+    if (mTuner != NULL) {
+        struct radio_hal_band_config halConfig;
+        status = (status_t)mTuner->get_configuration(mTuner, &halConfig);
+        if (status == NO_ERROR) {
+            mConfig.band = halConfig;
+        }
+    }
+    *config = mConfig;
+
+    return status;
+}
+
+status_t RadioService::ModuleClient::setMute(bool mute)
+{
+    sp<Module> module;
+    {
+        Mutex::Autolock _l(mLock);
+        ALOGV("%s locked", __FUNCTION__);
+        if (mTuner == NULL || !mAudio) {
+            return INVALID_OPERATION;
+        }
+        module = mModule.promote();
+        if (module == 0) {
+            return NO_INIT;
+        }
+    }
+    module->setMute(mute);
+    return NO_ERROR;
+}
+
+status_t RadioService::ModuleClient::getMute(bool *mute)
+{
+    sp<Module> module;
+    {
+        Mutex::Autolock _l(mLock);
+        ALOGV("%s locked", __FUNCTION__);
+        module = mModule.promote();
+        if (module == 0) {
+            return NO_INIT;
+        }
+    }
+    return module->getMute(mute);
+}
+
+status_t RadioService::ModuleClient::scan(radio_direction_t direction, bool skipSubChannel)
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    status_t status;
+    if (mTuner != NULL) {
+        status = (status_t)mTuner->scan(mTuner, direction, skipSubChannel);
+    } else {
+        status = INVALID_OPERATION;
+    }
+    return status;
+}
+
+status_t RadioService::ModuleClient::step(radio_direction_t direction, bool skipSubChannel)
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    status_t status;
+    if (mTuner != NULL) {
+        status = (status_t)mTuner->step(mTuner, direction, skipSubChannel);
+    } else {
+        status = INVALID_OPERATION;
+    }
+    return status;
+}
+
+status_t RadioService::ModuleClient::tune(unsigned int channel, unsigned int subChannel)
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    status_t status;
+    if (mTuner != NULL) {
+        status = (status_t)mTuner->tune(mTuner, channel, subChannel);
+    } else {
+        status = INVALID_OPERATION;
+    }
+    return status;
+}
+
+status_t RadioService::ModuleClient::cancel()
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    status_t status;
+    if (mTuner != NULL) {
+        status = (status_t)mTuner->cancel(mTuner);
+    } else {
+        status = INVALID_OPERATION;
+    }
+    return status;
+}
+
+status_t RadioService::ModuleClient::getProgramInformation(struct radio_program_info *info)
+{
+    AutoMutex lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    status_t status;
+    if (mTuner != NULL) {
+        status = (status_t)mTuner->get_program_information(mTuner, info);
+    } else {
+        status = INVALID_OPERATION;
+    }
+    return status;
+}
+
+status_t RadioService::ModuleClient::hasControl(bool *hasControl)
+{
+    Mutex::Autolock lock(mLock);
+    ALOGV("%s locked", __FUNCTION__);
+    *hasControl = mTuner != NULL;
+    return NO_ERROR;
+}
+
+void RadioService::ModuleClient::onCallbackEvent(const sp<IMemory>& eventMemory)
+{
+    if (eventMemory == 0 || eventMemory->pointer() == NULL) {
+        return;
+    }
+
+    sp<IRadioClient> client;
+    {
+        AutoMutex lock(mLock);
+        ALOGV("%s locked", __FUNCTION__);
+        radio_event_t *event = (radio_event_t *)eventMemory->pointer();
+        switch (event->type) {
+        case RADIO_EVENT_CONFIG:
+            mConfig.band = event->config.band;
+            event->config.region = mConfig.region;
+            break;
+        default:
+            break;
+        }
+
+        client = mClient;
+    }
+    if (client != 0) {
+        client->onEvent(eventMemory);
+    }
+}
+
+
+void RadioService::ModuleClient::binderDied(
+    const wp<IBinder> &who __unused) {
+    ALOGW("client binder died for client %p", this);
+    detach();
+}
+
+}; // namespace android
diff --git a/services/radio/RadioService.h b/services/radio/RadioService.h
new file mode 100644
index 0000000..49feda6
--- /dev/null
+++ b/services/radio/RadioService.h
@@ -0,0 +1,211 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_RADIO_SERVICE_H
+#define ANDROID_HARDWARE_RADIO_SERVICE_H
+
+#include <utils/Vector.h>
+//#include <binder/AppOpsManager.h>
+#include <binder/MemoryDealer.h>
+#include <binder/BinderService.h>
+#include <binder/IAppOpsCallback.h>
+#include <radio/IRadioService.h>
+#include <radio/IRadio.h>
+#include <radio/IRadioClient.h>
+#include <system/radio.h>
+#include <hardware/radio.h>
+
+namespace android {
+
+class MemoryHeapBase;
+
+class RadioService :
+    public BinderService<RadioService>,
+    public BnRadioService
+{
+    friend class BinderService<RadioService>;
+
+public:
+    class ModuleClient;
+    class Module;
+
+    static char const* getServiceName() { return "media.radio"; }
+
+                        RadioService();
+    virtual             ~RadioService();
+
+    // IRadioService
+    virtual status_t listModules(struct radio_properties *properties,
+                                 uint32_t *numModules);
+
+    virtual status_t attach(radio_handle_t handle,
+                            const sp<IRadioClient>& client,
+                            const struct radio_band_config *config,
+                            bool withAudio,
+                            sp<IRadio>& radio);
+
+    virtual status_t    onTransact(uint32_t code, const Parcel& data,
+                                   Parcel* reply, uint32_t flags);
+
+    virtual status_t    dump(int fd, const Vector<String16>& args);
+
+
+    class Module : public virtual RefBase {
+    public:
+
+       Module(radio_hw_device* hwDevice,
+              struct radio_properties properties);
+
+       virtual ~Module();
+
+               sp<ModuleClient> addClient(const sp<IRadioClient>& client,
+                                  const struct radio_band_config *config,
+                                  bool audio);
+
+               void removeClient(const sp<ModuleClient>& moduleClient);
+
+               status_t setMute(bool mute);
+
+               status_t getMute(bool *mute);
+
+       virtual status_t dump(int fd, const Vector<String16>& args);
+
+       const struct radio_hw_device *hwDevice() const { return mHwDevice; }
+       const struct radio_properties properties() const { return mProperties; }
+       const struct radio_band_config *getDefaultConfig() const;
+
+    private:
+
+       void notifyDeviceConnection(bool connected, const char *address);
+
+        Mutex                         mLock;          // protects mModuleClients
+        const struct radio_hw_device  *mHwDevice;     // HAL hardware device
+        const struct radio_properties mProperties;    // cached hardware module properties
+        Vector< sp<ModuleClient> >    mModuleClients; // list of attached clients
+        bool                          mMute;          // radio audio source state
+                                                      // when unmuted, audio is routed to the
+                                                      // output device selected for media use case.
+    }; // class Module
+
+    class CallbackThread : public Thread {
+    public:
+
+        CallbackThread(const wp<ModuleClient>& moduleClient);
+
+        virtual ~CallbackThread();
+
+
+        // Thread virtuals
+        virtual bool threadLoop();
+
+        // RefBase
+        virtual void onFirstRef();
+
+                void exit();
+
+                void sendEvent(radio_hal_event_t *halEvent);
+                sp<IMemory> prepareEvent(radio_hal_event_t *halEvent);
+
+    private:
+        wp<ModuleClient>      mModuleClient;    // client module the thread belongs to
+        Condition             mCallbackCond;    // condition signaled when a new event is posted
+        Mutex                 mCallbackLock;    // protects mEventQueue
+        Vector< sp<IMemory> > mEventQueue;      // pending callback events
+        sp<MemoryDealer>      mMemoryDealer;    // shared memory for callback event
+    }; // class CallbackThread
+
+    class ModuleClient : public BnRadio,
+                   public IBinder::DeathRecipient {
+    public:
+
+       ModuleClient(const sp<Module>& module,
+              const sp<IRadioClient>& client,
+              const struct radio_band_config *config,
+              bool audio);
+
+       virtual ~ModuleClient();
+
+       // IRadio
+       virtual void detach();
+
+       virtual status_t setConfiguration(const struct radio_band_config *config);
+
+       virtual status_t getConfiguration(struct radio_band_config *config);
+
+       virtual status_t setMute(bool mute);
+
+       virtual status_t getMute(bool *mute);
+
+       virtual status_t scan(radio_direction_t direction, bool skipSubChannel);
+
+       virtual status_t step(radio_direction_t direction, bool skipSubChannel);
+
+       virtual status_t tune(unsigned int channel, unsigned int subChannel);
+
+       virtual status_t cancel();
+
+       virtual status_t getProgramInformation(struct radio_program_info *info);
+
+       virtual status_t hasControl(bool *hasControl);
+
+       virtual status_t dump(int fd, const Vector<String16>& args);
+
+               sp<IRadioClient> client() const { return mClient; }
+               wp<Module> module() const { return mModule; }
+               radio_hal_band_config_t halConfig() const;
+               sp<CallbackThread> callbackThread() const { return mCallbackThread; }
+               void setTuner(const struct radio_tuner *tuner);
+               const struct radio_tuner *getTuner() const;
+               bool audio() const { return mAudio; }
+
+               void onCallbackEvent(const sp<IMemory>& event);
+
+       virtual void onFirstRef();
+
+
+       // IBinder::DeathRecipient implementation
+       virtual void        binderDied(const wp<IBinder> &who);
+
+    private:
+
+        mutable Mutex               mLock;           // protects mClient, mConfig and mTuner
+        wp<Module>                  mModule;         // The module this client is attached to
+        sp<IRadioClient>            mClient;         // event callback binder interface
+        radio_band_config_t         mConfig;         // current band configuration
+        sp<CallbackThread>          mCallbackThread; // event callback thread
+        const bool                  mAudio;
+        const struct radio_tuner    *mTuner;        // HAL tuner interface. NULL indicates that
+                                                    // this client does not have control on any
+                                                    // tuner
+    }; // class ModuleClient
+
+
+    static void callback(radio_hal_event_t *halEvent, void *cookie);
+
+private:
+
+    virtual void onFirstRef();
+
+    static void convertProperties(radio_properties_t *properties,
+                                  const radio_hal_properties_t *halProperties);
+    Mutex               mServiceLock;   // protects mModules
+    volatile int32_t    mNextUniqueId;  // for module ID allocation
+    DefaultKeyedVector< radio_handle_t, sp<Module> > mModules;
+};
+
+} // namespace android
+
+#endif // ANDROID_HARDWARE_RADIO_SERVICE_H
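For orientation, a minimal sketch of how a native client could exercise the interfaces declared above. It is illustrative only: the IRadioClient callback signature and the radio_properties handle/bands fields are assumed from radio/IRadioClient.h and system/radio.h rather than shown in this patch.

// Illustrative client-side sketch, not part of the patch.
#include <binder/IServiceManager.h>
#include <radio/IRadioService.h>
#include <radio/IRadio.h>
#include <radio/IRadioClient.h>

using namespace android;

// Assumed callback interface: the service delivers serialized radio_event
// structures over shared memory (see ModuleClient::onCallbackEvent()).
class EventSink : public BnRadioClient {
public:
    virtual void onEvent(const sp<IMemory>& eventMemory) {
        (void)eventMemory; // decode struct radio_event here
    }
};

static void attachToFirstModule() {
    sp<IBinder> binder =
            defaultServiceManager()->getService(String16("media.radio"));
    sp<IRadioService> service = interface_cast<IRadioService>(binder);

    struct radio_properties properties;
    uint32_t numModules = 1;
    if (service->listModules(&properties, &numModules) != NO_ERROR || numModules == 0) {
        return;
    }

    sp<IRadio> radio;
    // handle and bands[] are assumed fields of radio_properties (system/radio.h).
    struct radio_band_config config = properties.bands[0];
    service->attach(properties.handle, new EventSink(), &config,
                    true /*withAudio*/, radio);
    if (radio != 0) {
        radio->tune(106100 /*channel, kHz (assumed unit)*/, 0 /*subChannel*/);
    }
}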
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 081aff7..9de6fe2 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -787,6 +787,7 @@
                 mHwDevice->stop_recognition(mHwDevice, model->mHandle);
                 // keep model in ACTIVE state so that event is processed by onCallbackEvent()
                 struct sound_trigger_phrase_recognition_event phraseEvent;
+                memset(&phraseEvent, 0, sizeof(struct sound_trigger_phrase_recognition_event));
                 switch (model->mType) {
                 case SOUND_MODEL_TYPE_KEYPHRASE:
                     phraseEvent.num_phrases = model->mConfig.num_phrases;
diff --git a/soundtrigger/ISoundTriggerHwService.cpp b/soundtrigger/ISoundTriggerHwService.cpp
index 75f68b8..e14a771 100644
--- a/soundtrigger/ISoundTriggerHwService.cpp
+++ b/soundtrigger/ISoundTriggerHwService.cpp
@@ -40,6 +40,8 @@
     SET_CAPTURE_STATE,
 };
 
+#define MAX_ITEMS_PER_LIST 1024
+
 class BpSoundTriggerHwService: public BpInterface<ISoundTriggerHwService>
 {
 public:
@@ -116,10 +118,18 @@
         case LIST_MODULES: {
             CHECK_INTERFACE(ISoundTriggerHwService, data, reply);
             unsigned int numModulesReq = data.readInt32();
+            if (numModulesReq > MAX_ITEMS_PER_LIST) {
+                numModulesReq = MAX_ITEMS_PER_LIST;
+            }
             unsigned int numModules = numModulesReq;
             struct sound_trigger_module_descriptor *modules =
                     (struct sound_trigger_module_descriptor *)calloc(numModulesReq,
                                                    sizeof(struct sound_trigger_module_descriptor));
+            if (modules == NULL) {
+                reply->writeInt32(NO_MEMORY);
+                reply->writeInt32(0);
+                return NO_ERROR;
+            }
             status_t status = listModules(modules, &numModules);
             reply->writeInt32(status);
             reply->writeInt32(numModules);
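The clamp on numModulesReq and the calloc failure check above follow a general rule for onTransact() handlers: never size a server-side allocation directly from a caller-supplied count. A generic illustrative sketch of the same pattern (names are hypothetical, not part of the patch):

// Illustrative sketch: bounding a caller-controlled count before allocating
// in a Binder server.
#include <stdint.h>
#include <stdlib.h>

static const uint32_t kMaxItemsPerList = 1024;

template <typename T>
static T *allocBoundedArray(uint32_t &count) {
    if (count > kMaxItemsPerList) {
        count = kMaxItemsPerList;          // clamp untrusted request size
    }
    T *items = (T *)calloc(count, sizeof(T));
    return items;                          // caller must check for NULL
}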
diff --git a/tools/resampler_tools/Android.mk b/tools/resampler_tools/Android.mk
index e8cbe39..b58e4cd 100644
--- a/tools/resampler_tools/Android.mk
+++ b/tools/resampler_tools/Android.mk
@@ -1,6 +1,6 @@
 # Copyright 2005 The Android Open Source Project
 #
-# Android.mk for resampler_tools 
+# Android.mk for resampler_tools
 #
 
 
diff --git a/tools/resampler_tools/fir.cpp b/tools/resampler_tools/fir.cpp
index 62eddca..fe4d212 100644
--- a/tools/resampler_tools/fir.cpp
+++ b/tools/resampler_tools/fir.cpp
@@ -66,19 +66,20 @@
 
 static void usage(char* name) {
     fprintf(stderr,
-            "usage: %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]"
+            "usage: %s [-h] [-d] [-D] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]"
             " [-f {float|fixed|fixed16}] [-b beta] [-v dBFS] [-l lerp]\n"
-            "       %s [-h] [-d] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]"
+            "       %s [-h] [-d] [-D] [-s sample_rate] [-c cut-off_frequency] [-n half_zero_crossings]"
             " [-f {float|fixed|fixed16}] [-b beta] [-v dBFS] -p M/N\n"
             "    -h    this help message\n"
             "    -d    debug, print comma-separated coefficient table\n"
+            "    -D    generate extra declarations\n"
             "    -p    generate poly-phase filter coefficients, with sample increment M/N\n"
             "    -s    sample rate (48000)\n"
             "    -c    cut-off frequency (20478)\n"
             "    -n    number of zero-crossings on one side (8)\n"
             "    -l    number of lerping bits (4)\n"
             "    -m    number of polyphases (related to -l, default 16)\n"
-            "    -f    output format, can be fixed-point or floating-point (fixed)\n"
+            "    -f    output format, can be fixed, fixed16, or float (fixed)\n"
             "    -b    kaiser window parameter beta (7.865 [-80dB])\n"
             "    -v    attenuation in dBFS (0)\n",
             name, name
@@ -97,7 +98,8 @@
     double Fs = 48000;
     double Fc = 20478;
     double atten = 1;
-    int format = 0;
+    int format = 0;     // 0=fixed, 1=float
+    bool declarations = false;
 
     // in order to keep the errors associated with the linear
     // interpolation of the coefficients below the quantization error
@@ -158,11 +160,14 @@
 
     int M = 1 << 4; // number of phases for interpolation
     int ch;
-    while ((ch = getopt(argc, argv, ":hds:c:n:f:l:m:b:p:v:z:")) != -1) {
+    while ((ch = getopt(argc, argv, ":hds:c:n:f:l:m:b:p:v:z:D")) != -1) {
         switch (ch) {
             case 'd':
                 debug = true;
                 break;
+            case 'D':
+                declarations = true;
+                break;
             case 'p':
                 if (sscanf(optarg, "%u/%u", &polyM, &polyN) != 2) {
                     usage(argv[0]);
@@ -225,24 +230,26 @@
     for (int i = M-1 ; i; i>>=1, nz++);
     // generate the right half of the filter
     if (!debug) {
-        printf("// cmd-line: ");
-        for (int i=1 ; i<argc ; i++) {
-            printf("%s ", argv[i]);
+        printf("// cmd-line:");
+        for (int i=0 ; i<argc ; i++) {
+            printf(" %s", argv[i]);
         }
         printf("\n");
-        if (!polyphase) {
-            printf("const int32_t RESAMPLE_FIR_SIZE           = %d;\n", N);
-            printf("const int32_t RESAMPLE_FIR_INT_PHASES     = %d;\n", M);
-            printf("const int32_t RESAMPLE_FIR_NUM_COEF       = %d;\n", nzc);
-        } else {
-            printf("const int32_t RESAMPLE_FIR_SIZE           = %d;\n", 2*nzc*polyN);
-            printf("const int32_t RESAMPLE_FIR_NUM_COEF       = %d;\n", 2*nzc);
+        if (declarations) {
+            if (!polyphase) {
+                printf("const int32_t RESAMPLE_FIR_SIZE           = %d;\n", N);
+                printf("const int32_t RESAMPLE_FIR_INT_PHASES     = %d;\n", M);
+                printf("const int32_t RESAMPLE_FIR_NUM_COEF       = %d;\n", nzc);
+            } else {
+                printf("const int32_t RESAMPLE_FIR_SIZE           = %d;\n", 2*nzc*polyN);
+                printf("const int32_t RESAMPLE_FIR_NUM_COEF       = %d;\n", 2*nzc);
+            }
+            if (!format) {
+                printf("const int32_t RESAMPLE_FIR_COEF_BITS      = %d;\n", nc);
+            }
+            printf("\n");
+            printf("static %s resampleFIR[] = {", !format ? "int32_t" : "float");
         }
-        if (!format) {
-            printf("const int32_t RESAMPLE_FIR_COEF_BITS      = %d;\n", nc);
-        }
-        printf("\n");
-        printf("static %s resampleFIR[] = {", !format ? "int32_t" : "float");
     }
 
     if (!polyphase) {
@@ -260,12 +267,15 @@
                 if (!format) {
                     int64_t yi = toint(y, 1ULL<<(nc-1));
                     if (nc > 16) {
-                        printf("0x%08x, ", int32_t(yi));
+                        printf("0x%08x,", int32_t(yi));
                     } else {
-                        printf("0x%04x, ", int32_t(yi)&0xffff);
+                        printf("0x%04x,", int32_t(yi)&0xffff);
                     }
                 } else {
-                    printf("%.9g%s ", y, debug ? "," : "f,");
+                    printf("%.9g%s", y, debug ? "," : "f,");
+                }
+                if (j != nzc-1) {
+                    printf(" ");
                 }
             }
         }
@@ -283,23 +293,22 @@
                 if (!format) {
                     int64_t yi = toint(y, 1ULL<<(nc-1));
                     if (nc > 16) {
-                        printf("0x%08x, ", int32_t(yi));
+                        printf("0x%08x,", int32_t(yi));
                     } else {
-                        printf("0x%04x, ", int32_t(yi)&0xffff);
+                        printf("0x%04x,", int32_t(yi)&0xffff);
                     }
                 } else {
-                    printf("%.9g%s", y, debug ? "" : "f");
+                    printf("%.9g%s", y, debug ? "," : "f,");
                 }
 
-                if (debug && (i==nzc-1)) {
-                } else {
-                    printf(", ");
+                if (i != nzc-1) {
+                    printf(" ");
                 }
             }
         }
     }
 
-    if (!debug) {
+    if (!debug && declarations) {
         printf("\n};");
     }
     printf("\n");