Modified: trunk/LayoutTests/ChangeLog (266165 => 266166)
--- trunk/LayoutTests/ChangeLog 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/ChangeLog 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,3 +1,19 @@
+2020-08-26 Youenn Fablet <[email protected]>
+
+ enumerateDevices should expose audiooutput devices that are tied to an audio input device
+ https://bugs.webkit.org/show_bug.cgi?id=215806
+
+ Reviewed by Eric Carlson.
+
+ * fast/mediastream/enumerate-speaker-expected.txt: Added.
+ * fast/mediastream/enumerate-speaker.html: Added.
+ * fast/mediastream/get-user-media-device-id.html:
+ * http/tests/media/media-stream/enumerate-devices-source-id.html:
+ * http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute-expected.txt:
+ * http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute.html:
+ Rewrite the test to use testharness.js and serialize the iframe subtests so that console messages appear in a deterministic order; a sketch of this pattern follows this entry.
+ * http/tests/media/media-stream/resources/enumerate-devices-iframe.html:
+
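The rewritten test relies on testharness.js running promise_test() subtests sequentially, which is what keeps the per-iframe console messages from interleaving. A minimal sketch of that pattern, with illustrative frame ids rather than the real ones from the test:

    // Each frame gets its own promise_test; testharness runs them one after another,
    // so console messages triggered by each frame stay in a deterministic order.
    ['none', 'microphone', 'camera'].forEach((frameId) => {
        promise_test(async () => {
            const reply = new Promise((resolve, reject) => {
                window.onmessage = resolve;
                setTimeout(() => reject('timed out waiting for ' + frameId), 5000);
            });
            document.getElementById(frameId).contentWindow.postMessage('', '*');
            const event = await reply;
            assert_equals(typeof event.data, 'string');
        }, frameId);
    });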
2020-08-26 Diego Pino Garcia <[email protected]>
[GTK] Unreviewed test gardening. Emit baseline after r266129.
Added: trunk/LayoutTests/fast/mediastream/enumerate-speaker-expected.txt (0 => 266166)
--- trunk/LayoutTests/fast/mediastream/enumerate-speaker-expected.txt (rev 0)
+++ trunk/LayoutTests/fast/mediastream/enumerate-speaker-expected.txt 2020-08-26 11:16:03 UTC (rev 266166)
@@ -0,0 +1,4 @@
+
+PASS Before gum, no audiooutput is exposed
+PASS audiooutput devices
+
Added: trunk/LayoutTests/fast/mediastream/enumerate-speaker.html (0 => 266166)
--- trunk/LayoutTests/fast/mediastream/enumerate-speaker.html (rev 0)
+++ trunk/LayoutTests/fast/mediastream/enumerate-speaker.html 2020-08-26 11:16:03 UTC (rev 266166)
@@ -0,0 +1,57 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <meta charset="utf-8">
+ <title>Test passing capture device IDs to getUserMedia</title>
+ <script src=""
+ <script src=""
+ <script>
+
+ function deviceFromLabel(devices, label)
+ {
+ for (let device of devices) {
+ if (device.label === label)
+ return device;
+ }
+ }
+
+ promise_test(async (test) => {
+ const devices = await navigator.mediaDevices.enumerateDevices();
+ devices.forEach((device) => {
+ assert_not_equals(device.kind, "audiooutput", "device.kind");
+ });
+ }, "Before gum, no audiooutput is exposed");
+
+ promise_test(async (test) => {
+ await navigator.mediaDevices.getUserMedia({ audio: true, video: true });
+ const devices = await navigator.mediaDevices.enumerateDevices();
+ assert_true(devices.length > 2, "after getting permission, more devices than one camera and one microphone are exposed");
+ devices.forEach((device) => {
+ assert_not_equals(device.deviceId.length, 0, "device.deviceId should not be empty after permission to capture");
+ });
+
+ if (!window.internals)
+ return;
+
+ const mic1 = deviceFromLabel(devices, "Mock audio device 1");
+ const mic2 = deviceFromLabel(devices, "Mock audio device 2");
+ const speaker1 = deviceFromLabel(devices, "Mock speaker device 1");
+ const speaker2 = deviceFromLabel(devices, "Mock speaker device 2");
+ const speaker3 = deviceFromLabel(devices, "Mock speaker device 3");
+
+ assert_equals(speaker1.kind, "audiooutput", "speaker1");
+ assert_not_equals(speaker1.groupId, "", "speaker1 groupId");
+
+ assert_equals(speaker2.kind, "audiooutput", "speaker2");
+ assert_not_equals(speaker2.groupId, "", "speaker2 groupId");
+
+ assert_equals(speaker3, undefined, "speaker3");
+
+ assert_equals(speaker1.groupId, mic1.groupId, "device 1");
+ assert_equals(speaker2.groupId, mic2.groupId, "device 2");
+ }, "audiooutput devices");
+ </script>
+</head>
+<body>
+</body>
+</html>
Modified: trunk/LayoutTests/fast/mediastream/get-user-media-device-id.html (266165 => 266166)
--- trunk/LayoutTests/fast/mediastream/get-user-media-device-id.html 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/fast/mediastream/get-user-media-device-id.html 2020-08-26 11:16:03 UTC (rev 266166)
@@ -85,7 +85,7 @@
if (device.kind === "audioinput") {
const stream = await navigator.mediaDevices.getUserMedia({audio: {deviceId: device.deviceId}});
assert_equals(stream.getAudioTracks()[0].getSettings().deviceId, device.deviceId, "Matching audio device id");
- } else {
+ } else if (device.kind === "videoinput") {
const stream = await navigator.mediaDevices.getUserMedia({video: {deviceId: device.deviceId}});
assert_equals(stream.getVideoTracks()[0].getSettings().deviceId, device.deviceId, "Matching video device id");
}
Modified: trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute-expected.txt (266165 => 266166)
--- trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute-expected.txt 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute-expected.txt 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,5 +1,30 @@
CONSOLE MESSAGE: Feature policy 'Camera' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
CONSOLE MESSAGE: Feature policy 'Microphone' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
CONSOLE MESSAGE: Not allowed to call enumerateDevices.
-
-PASS: "allow" attribute respected in all iframes
+CONSOLE MESSAGE: Feature policy 'Camera' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
+CONSOLE MESSAGE: Feature policy 'Microphone' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
+CONSOLE MESSAGE: Not allowed to call enumerateDevices.
+CONSOLE MESSAGE: Feature policy 'Microphone' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
+CONSOLE MESSAGE: Not allowed to call getUserMedia.
+CONSOLE MESSAGE: Feature policy 'Camera' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
+CONSOLE MESSAGE: Not allowed to call getUserMedia.
+CONSOLE MESSAGE: Feature policy 'Camera' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
+CONSOLE MESSAGE: Feature policy 'Microphone' check failed for iframe with origin 'http://localhost:8000' and allow attribute ''.
+CONSOLE MESSAGE: Not allowed to call enumerateDevices.
+CONSOLE MESSAGE: Feature policy 'Camera' check failed for iframe with origin 'http://localhost:8000' and allow attribute 'microphone'.
+CONSOLE MESSAGE: Not allowed to call getUserMedia.
+CONSOLE MESSAGE: Feature policy 'Microphone' check failed for iframe with origin 'http://localhost:8000' and allow attribute 'camera'.
+CONSOLE MESSAGE: Not allowed to call getUserMedia.
+CONSOLE MESSAGE: Feature policy 'Camera' check failed for iframe with origin 'http://localhost:8000' and allow attribute 'microphone;speaker-selection'.
+CONSOLE MESSAGE: Not allowed to call getUserMedia.
+
+
+PASS Wait for frames to be loaded
+PASS none
+PASS microphone
+PASS camera
+PASS camera+microphone
+PASS microphone+speakerselection
+PASS camera+microphone+speakerselection
+PASS same-origin
+
Modified: trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute.html (266165 => 266166)
--- trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute.html 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-iframe-allow-attribute.html 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,49 +1,44 @@
<!doctype html>
<html>
-<script src=""
+<script src=""
+<script src=""
<body>
- <iframe id=none src=""
+ <iframe id="none" src=""
<iframe id="microphone" allow="microphone" src=""
<iframe id="camera" allow="camera" src=""
<iframe id="camera+microphone" allow="camera;microphone" src=""
+ <iframe id="microphone+speakerselection" allow="microphone;speaker-selection" src=""
+ <iframe id="camera+microphone+speakerselection" allow="camera;microphone;speaker-selection" src=""
<iframe id="same-origin" src=""
-
<script>
+ const iFrameTests = [
+ ['none', ''],
+ ['microphone', 'microphone'],
+ ['camera', 'camera'],
+ ['camera+microphone', 'camera+microphone'],
+ ['microphone+speakerselection', 'microphone+speakerselection'],
+ ['camera+microphone+speakerselection', 'camera+microphone+speakerselection'],
+ ['same-origin', 'camera+microphone+speakerselection']
+ ];
- if (window.testRunner) {
- testRunner.dumpAsText();
- testRunner.waitUntilDone();
- testRunner.setUserMediaPermission(true);
- }
+ promise_test(() => {
+ return Promise.all(iFrameTests.map((iFrameTest) => {
+ const frame = document.getElementById(iFrameTest[0]);
+ return new Promise(resolve => frame.onload = resolve);
+ }));
+ }, "Wait for frames to be loaded");
- let expected = {
- 'microphone' : 'microphone',
- 'none' : '',
- 'camera+microphone' : 'camera+microphone',
- 'camera' : 'camera',
- 'same-origin' : 'camera+microphone'
- };
- let count = 0;
- let success = true;
- window.addEventListener("message", (event) => {
- let visible = event.data.trim().split(':');
- if (expected[visible[0]] != visible[1]) {
- result.innerHTML += `FAIL: <iframe id=${visible[0]}> saw "${visible[1]}", should have seen "${expected[visible[0]]}"<br>`;
- success = false;
- }
- if (++count == 5) {
- if (success)
- result.innerHTML = 'PASS: "allow" attribute respected in all iframes';
- if (window.testRunner)
- testRunner.notifyDone();
- }
-
- }, false);
-
- let enumerate = (evt) => { evt.target.contentWindow.postMessage(evt.target.id, '*'); }
- Array.from(document.getElementsByTagName('iframe')).forEach(element => element.onload = enumerate);
+ iFrameTests.forEach((iFrameTest) => {
+ promise_test(async () => {
+ const promise = new Promise((resolve, reject) => {
+ window.onmessage = resolve;
+ setTimeout(() => reject('test timed out'), 5000);
+ });
+ document.getElementById(iFrameTest[0]).contentWindow.postMessage('', '*');
+ const event = await promise;
+ assert_equals(event.data, iFrameTest[1]);
+ }, iFrameTest[0]);
+ });
</script>
-
- <div id="result"></div>
</body>
</html>
Modified: trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-source-id.html (266165 => 266166)
--- trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-source-id.html 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/http/tests/media/media-stream/enumerate-devices-source-id.html 2020-08-26 11:16:03 UTC (rev 266166)
@@ -34,7 +34,7 @@
if (originInfo[device.deviceId] != device.kind)
testFailed(`: duplicate device IDs for ${device.kind} and ${originInfo[device.deviceId]} in ${origin}/${self.origin}`);
- if (Object.keys(originInfo).length > 4)
+ if (Object.keys(originInfo).length > 6)
testFailed(`: more than six unique device IDs in ${origin}/${self.origin}`);
}
Modified: trunk/LayoutTests/http/tests/media/media-stream/resources/enumerate-devices-iframe.html (266165 => 266166)
--- trunk/LayoutTests/http/tests/media/media-stream/resources/enumerate-devices-iframe.html 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/http/tests/media/media-stream/resources/enumerate-devices-iframe.html 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,14 +1,47 @@
<script>
- async function enumerate(event)
+ async function checkDeviceKind(kind)
{
- let devices = await navigator.mediaDevices.enumerateDevices();
- let visible = devices.map(device => device.kind.indexOf('video') == 0 ? 'camera' : 'microphone').sort();
+ try {
+ let devices = await navigator.mediaDevices.enumerateDevices();
+ for (let device of devices) {
+ if (device.kind === kind)
+ return true;
+ }
+ } catch (e) {
+ }
+ return false;
+ }
+
+ async function checkSpeakerSelection()
+ {
+ // Speakers are currently only exposed after getUserMedia.
+ try {
+ await navigator.mediaDevices.getUserMedia({ audio : true });
+ } catch (e) {
+ }
+ try {
+ await navigator.mediaDevices.getUserMedia({ video : true });
+ } catch (e) {
+ }
+ return checkDeviceKind('audiooutput');
+ }
+
+ async function enumerate()
+ {
+ let visible = [];
+ if (await checkDeviceKind('videoinput'))
+ visible.push('camera');
+ if (await checkDeviceKind('audioinput'))
+ visible.push('microphone');
+ if (await checkSpeakerSelection())
+ visible.push('speakerselection');
+
visible = visible.join('+');
- parent.postMessage(`${event.data}:${visible}`, '*');
- result.innerHTML = visible;
+ parent.postMessage(visible, '*');
+ result.innerHTML = 'result: "' + visible + '"';
}
- window.addEventListener("message", (id) => enumerate(id));
+ window.addEventListener("message", enumerate);
</script>
<div id='result'></div>
Modified: trunk/LayoutTests/imported/w3c/ChangeLog (266165 => 266166)
--- trunk/LayoutTests/imported/w3c/ChangeLog 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/imported/w3c/ChangeLog 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,5 +1,15 @@
2020-08-26 Youenn Fablet <[email protected]>
+ enumerateDevices should expose audiooutput devices that are tied to an audio input device
+ https://bugs.webkit.org/show_bug.cgi?id=215806
+
+ Reviewed by Eric Carlson.
+
+ * web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https-expected.txt:
+ * web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https.html:
+
+2020-08-26 Youenn Fablet <[email protected]>
+
pipeThrough should check for readableStream type
https://bugs.webkit.org/show_bug.cgi?id=215497
Modified: trunk/LayoutTests/imported/w3c/web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https-expected.txt (266165 => 266166)
--- trunk/LayoutTests/imported/w3c/web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https-expected.txt 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/imported/w3c/web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https-expected.txt 2020-08-26 11:16:03 UTC (rev 266166)
@@ -3,7 +3,7 @@
PASS A device can be opened twice and have the same device ID
PASS A device can be opened twice with different resolutions requested
-FAIL groupId is correctly reported by getSettings() for all input devices assert_equals: expected (string) "" but got (undefined) undefined
+FAIL groupId is correctly reported by getSettings() for all input devices assert_true: device groupId expected true got false
PASS deviceId is reported by getSettings() for getUserMedia() audio tracks
FAIL groupId is reported by getSettings() for getUserMedia() audio tracks assert_equals: groupId should exist and it should be a string. expected "string" but got "undefined"
PASS sampleRate is reported by getSettings() for getUserMedia() audio tracks
Modified: trunk/LayoutTests/imported/w3c/web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https.html (266165 => 266166)
--- trunk/LayoutTests/imported/w3c/web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https.html 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/LayoutTests/imported/w3c/web-platform-tests/mediacapture-streams/MediaStreamTrack-getSettings.https.html 2020-08-26 11:16:03 UTC (rev 266166)
@@ -73,8 +73,7 @@
: {video: device_id_constraint};
const stream = await navigator.mediaDevices.getUserMedia(constraints);
- assert_equals(stream.getTracks()[0].getSettings().groupId,
- device.groupId);
+ assert_true(stream.getTracks()[0].getSettings().groupId === device.groupId, "device groupId");
assert_greater_than(device.groupId.length, 0);
}
}, 'groupId is correctly reported by getSettings() for all input devices');
Modified: trunk/Source/WebCore/ChangeLog (266165 => 266166)
--- trunk/Source/WebCore/ChangeLog 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/ChangeLog 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,5 +1,84 @@
2020-08-26 Youenn Fablet <[email protected]>
+ enumerateDevices should expose audiooutput devices that are tied to an audio input device
+ https://bugs.webkit.org/show_bug.cgi?id=215806
+
+ Reviewed by Eric Carlson.
+
+ Add support for a speaker selection feature policy, as per https://w3c.github.io/mediacapture-output/#permissions-policy-integration.
+ Add support for speaker mock devices and for listing speakers on macOS in CoreAudioCaptureDeviceManager.
+ Add support for groupId to group microphones and speakers, for both the mock audio device manager and CoreAudioCaptureDeviceManager.
+ For mock speakers, we add two audiooutput devices that are related to a microphone and one audiooutput device that is not related to any microphone.
+
+ Test: fast/mediastream/enumerate-speaker.html
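A sketch of the behavior the new test covers, assuming the mock capture devices and the speaker exposure setting are enabled: audiooutput entries only appear after getUserMedia() has been granted, and each exposed speaker shares a groupId with its related microphone. The labels below follow the mock device names; everything else is illustrative.

    async function checkSpeakerExposure() {
        // Before capture is granted, no audiooutput device should be listed.
        let devices = await navigator.mediaDevices.enumerateDevices();
        console.assert(devices.every(d => d.kind !== 'audiooutput'));

        await navigator.mediaDevices.getUserMedia({ audio: true, video: true });

        // After capture, speakers tied to microphones are exposed and grouped with them.
        devices = await navigator.mediaDevices.enumerateDevices();
        const mic = devices.find(d => d.label === 'Mock audio device 1');
        const speaker = devices.find(d => d.label === 'Mock speaker device 1');
        console.assert(speaker && speaker.kind === 'audiooutput');
        console.assert(mic && speaker.groupId === mic.groupId);
    }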
+
+ * Modules/mediastream/MediaDevices.cpp:
+ (WebCore::MediaDevices::MediaDevices):
+ (WebCore::checkSpeakerAccess):
+ (WebCore::toMediaDeviceInfoKind):
+ (WebCore::MediaDevices::refreshDevices):
+ * Modules/mediastream/MediaDevices.h:
+ * Modules/mediastream/MediaStreamTrack.cpp:
+ (WebCore::sourceCaptureState):
+ (WebCore::MediaStreamTrack::updateToPageMutedState):
+ * html/FeaturePolicy.cpp:
+ (WebCore::policyTypeName):
+ (WebCore::FeaturePolicy::parse):
+ (WebCore::FeaturePolicy::allows const):
+ * html/FeaturePolicy.h:
+ * page/Settings.yaml:
+ * platform/mediastream/CaptureDevice.h:
+ (WebCore::CaptureDevice::label const):
+ (WebCore::CaptureDevice::setGroupId):
+ * platform/mediastream/RealtimeMediaSourceCenter.cpp:
+ (WebCore::RealtimeMediaSourceCenter::getMediaStreamDevices):
+ * platform/mediastream/RealtimeMediaSourceFactory.h:
+ * platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp:
+ (WebCore::GStreamerAudioCaptureSourceFactory::speakerDevices const):
+ * platform/mediastream/ios/AVAudioSessionCaptureDeviceManager.h:
+ * platform/mediastream/mac/CoreAudioCaptureDevice.cpp:
+ (WebCore::CoreAudioCaptureDevice::create):
+ (WebCore::CoreAudioCaptureDevice::CoreAudioCaptureDevice):
+ (WebCore::CoreAudioCaptureDevice::relatedAudioDeviceIDs):
+ * platform/mediastream/mac/CoreAudioCaptureDevice.h:
+ * platform/mediastream/mac/CoreAudioCaptureDeviceManager.cpp:
+ (WebCore::CoreAudioCaptureDeviceManager::captureDevices):
+ (WebCore::deviceHasOutputStreams):
+ (WebCore::getDefaultCaptureInputDevice):
+ (WebCore::hasDevice):
+ (WebCore::CoreAudioCaptureDeviceManager::refreshAudioCaptureDevices):
+ * platform/mediastream/mac/CoreAudioCaptureDeviceManager.h:
+ * platform/mediastream/mac/CoreAudioCaptureSource.cpp:
+ (WebCore::CoreAudioCaptureSourceFactory::speakerDevices const):
+ * platform/mediastream/mac/CoreAudioCaptureSource.h:
+ * platform/mediastream/mac/DisplayCaptureManagerCocoa.cpp:
+ (WebCore::DisplayCaptureManagerCocoa::captureDeviceWithPersistentID):
+ * platform/mediastream/mac/DisplayCaptureSourceCocoa.cpp:
+ (WebCore::DisplayCaptureSourceCocoa::create):
+ * platform/mock/MockMediaDevice.h:
+ (WebCore::MockSpeakerProperties::encode const):
+ (WebCore::MockSpeakerProperties::decode):
+ (WebCore::MockMediaDevice::isSpeaker const):
+ (WebCore::MockMediaDevice::captureDevice const):
+ (WebCore::MockMediaDevice::type const):
+ (WebCore::MockMediaDevice::speakerProperties const):
+ (WebCore::MockMediaDevice::encode const):
+ (WebCore::MockMediaDevice::decode):
+ * platform/mock/MockRealtimeMediaSourceCenter.cpp:
+ (WebCore::defaultDevices):
+ (WebCore::deviceListForDevice):
+ (WebCore::toCaptureDevice):
+ (WebCore::createMockDevice):
+ (WebCore::MockRealtimeMediaSourceCenter::setDevices):
+ (WebCore::MockRealtimeMediaSourceCenter::addDevice):
+ (WebCore::MockRealtimeMediaSourceCenter::captureDeviceWithPersistentID):
+ (WebCore::MockRealtimeMediaSourceCenter::microphoneDevices):
+ (WebCore::MockRealtimeMediaSourceCenter::speakerDevices):
+ (WebCore::MockRealtimeMediaSourceCenter::videoDevices):
+ * platform/mock/MockRealtimeMediaSourceCenter.h:
+
+2020-08-26 Youenn Fablet <[email protected]>
+
pipeThrough should check for readableStream type
https://bugs.webkit.org/show_bug.cgi?id=215497
Modified: trunk/Source/WebCore/Modules/mediastream/MediaDevices.cpp (266165 => 266166)
--- trunk/Source/WebCore/Modules/mediastream/MediaDevices.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/Modules/mediastream/MediaDevices.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -37,10 +37,12 @@
#include "Document.h"
#include "Event.h"
#include "EventNames.h"
+#include "Frame.h"
#include "JSDOMPromiseDeferred.h"
#include "JSMediaDeviceInfo.h"
#include "MediaTrackSupportedConstraints.h"
#include "RealtimeMediaSourceSettings.h"
+#include "Settings.h"
#include "UserGestureIndicator.h"
#include "UserMediaController.h"
#include "UserMediaRequest.h"
@@ -55,6 +57,7 @@
: ActiveDOMObject(document)
, m_scheduledEventTimer(*this, &MediaDevices::scheduledEventTimerFired)
, m_eventNames(eventNames())
+ , m_idHashSalt(createCanonicalUUIDString())
{
suspendIfNeeded();
@@ -165,6 +168,30 @@
return isFeaturePolicyAllowedByDocumentAndAllOwners(FeaturePolicy::Type::Microphone, document, LogFeaturePolicyFailure::No);
}
+static inline bool checkSpeakerAccess(const Document& document)
+{
+ return document.frame()
+ && document.frame()->settings().exposeSpeakersEnabled()
+ && isFeaturePolicyAllowedByDocumentAndAllOwners(FeaturePolicy::Type::SpeakerSelection, document, LogFeaturePolicyFailure::No);
+}
+
+static inline MediaDeviceInfo::Kind toMediaDeviceInfoKind(CaptureDevice::DeviceType type)
+{
+ switch (type) {
+ case CaptureDevice::DeviceType::Microphone:
+ return MediaDeviceInfo::Kind::Audioinput;
+ case CaptureDevice::DeviceType::Speaker:
+ return MediaDeviceInfo::Kind::Audiooutput;
+ case CaptureDevice::DeviceType::Camera:
+ case CaptureDevice::DeviceType::Screen:
+ case CaptureDevice::DeviceType::Window:
+ return MediaDeviceInfo::Kind::Videoinput;
+ case CaptureDevice::DeviceType::Unknown:
+ ASSERT_NOT_REACHED();
+ }
+ return MediaDeviceInfo::Kind::Audioinput;
+}
+
void MediaDevices::refreshDevices(const Vector<CaptureDevice>& newDevices)
{
auto* document = this->document();
@@ -173,6 +200,7 @@
bool canAccessCamera = checkCameraAccess(*document);
bool canAccessMicrophone = checkMicrophoneAccess(*document);
+ bool canAccessSpeaker = checkSpeakerAccess(*document);
Vector<Ref<MediaDeviceInfo>> devices;
for (auto& newDevice : newDevices) {
@@ -180,8 +208,10 @@
continue;
if (!canAccessCamera && newDevice.type() == CaptureDevice::DeviceType::Camera)
continue;
+ if (!canAccessSpeaker && newDevice.type() == CaptureDevice::DeviceType::Speaker)
+ continue;
- auto deviceKind = newDevice.type() == CaptureDevice::DeviceType::Microphone ? MediaDeviceInfo::Kind::Audioinput : MediaDeviceInfo::Kind::Videoinput;
+ auto deviceKind = toMediaDeviceInfoKind(newDevice.type());
auto index = m_devices.findMatching([deviceKind, &newDevice](auto& oldDevice) {
return oldDevice->deviceId() == newDevice.persistentId() && oldDevice->kind() == deviceKind;
});
@@ -190,7 +220,8 @@
continue;
}
- devices.append(MediaDeviceInfo::create(newDevice.label(), newDevice.persistentId(), newDevice.groupId(), deviceKind));
+ auto groupId = RealtimeMediaSourceCenter::singleton().hashStringWithSalt(newDevice.groupId(), m_idHashSalt);
+ devices.append(MediaDeviceInfo::create(newDevice.label(), newDevice.persistentId(), WTFMove(groupId), deviceKind));
}
m_devices = WTFMove(devices);
}
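Because groupId is now hashed with a per-MediaDevices salt, the strings handed to script are opaque: they can correlate a microphone with its speaker inside one document, but they are not expected to be stable across documents or sessions. A small sketch of the intended use, with a hypothetical helper name:

    // Find the audiooutput device that belongs to the same hardware as a given microphone.
    async function speakerForMicrophone(microphoneLabel) {
        const devices = await navigator.mediaDevices.enumerateDevices();
        const mic = devices.find(d => d.kind === 'audioinput' && d.label === microphoneLabel);
        if (!mic)
            return undefined;
        return devices.find(d => d.kind === 'audiooutput' && d.groupId === mic.groupId);
    }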
Modified: trunk/Source/WebCore/Modules/mediastream/MediaDevices.h (266165 => 266166)
--- trunk/Source/WebCore/Modules/mediastream/MediaDevices.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/Modules/mediastream/MediaDevices.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -127,6 +127,7 @@
bool m_listeningForDeviceChanges { false };
Vector<Ref<MediaDeviceInfo>> m_devices;
+ String m_idHashSalt;
OptionSet<GestureAllowedRequest> m_requestTypesForCurrentGesture;
WeakPtr<UserGestureToken> m_currentGestureToken;
Modified: trunk/Source/WebCore/Modules/mediastream/MediaStreamTrack.cpp (266165 => 266166)
--- trunk/Source/WebCore/Modules/mediastream/MediaStreamTrack.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/Modules/mediastream/MediaStreamTrack.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -429,6 +429,7 @@
if (source.isProducingData())
return MediaProducer::HasActiveDisplayCaptureDevice;
break;
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
}
@@ -503,6 +504,7 @@
case CaptureDevice::DeviceType::Window:
m_private->setMuted(page->mutedState() & MediaProducer::ScreenCaptureIsMuted);
break;
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
break;
Modified: trunk/Source/WebCore/html/FeaturePolicy.cpp (266165 => 266166)
--- trunk/Source/WebCore/html/FeaturePolicy.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/html/FeaturePolicy.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -44,6 +44,8 @@
return "Camera";
case FeaturePolicy::Type::Microphone:
return "Microphone";
+ case FeaturePolicy::Type::SpeakerSelection:
+ return "SpeakerSelection";
case FeaturePolicy::Type::DisplayCapture:
return "DisplayCapture";
case FeaturePolicy::Type::SyncXHR:
@@ -155,6 +157,7 @@
FeaturePolicy policy;
bool isCameraInitialized = false;
bool isMicrophoneInitialized = false;
+ bool isSpeakerSelectionInitialized = false;
bool isDisplayCaptureInitialized = false;
bool isSyncXHRInitialized = false;
bool isFullscreenInitialized = false;
@@ -173,6 +176,11 @@
updateList(document, policy.m_microphoneRule, item.substring(11));
continue;
}
+ if (item.startsWith("speaker-selection")) {
+ isSpeakerSelectionInitialized = true;
+ updateList(document, policy.m_speakerSelectionRule, item.substring(18));
+ continue;
+ }
if (item.startsWith("display-capture")) {
isDisplayCaptureInitialized = true;
updateList(document, policy.m_displayCaptureRule, item.substring(16));
@@ -180,7 +188,7 @@
}
if (item.startsWith("sync-xhr")) {
isSyncXHRInitialized = true;
- updateList(document, policy.m_syncXHRRule, item.substring(8));
+ updateList(document, policy.m_syncXHRRule, item.substring(9));
continue;
}
if (item.startsWith("fullscreen")) {
@@ -197,12 +205,13 @@
#endif
}
- // By default, camera, microphone, display-capture, fullscreen and
- // xr-spatial-tracking policy is 'self'.
+ // By default, camera, microphone, speaker-selection, display-capture, fullscreen and xr-spatial-tracking policy is 'self'.
if (!isCameraInitialized)
policy.m_cameraRule.allowedList.add(document.securityOrigin().data());
if (!isMicrophoneInitialized)
policy.m_microphoneRule.allowedList.add(document.securityOrigin().data());
+ if (!isSpeakerSelectionInitialized)
+ policy.m_speakerSelectionRule.allowedList.add(document.securityOrigin().data());
if (!isDisplayCaptureInitialized)
policy.m_displayCaptureRule.allowedList.add(document.securityOrigin().data());
#if ENABLE(WEBXR)
@@ -238,6 +247,8 @@
return isAllowedByFeaturePolicy(m_cameraRule, origin);
case Type::Microphone:
return isAllowedByFeaturePolicy(m_microphoneRule, origin);
+ case Type::SpeakerSelection:
+ return isAllowedByFeaturePolicy(m_speakerSelectionRule, origin);
case Type::DisplayCapture:
return isAllowedByFeaturePolicy(m_displayCaptureRule, origin);
case Type::SyncXHR:
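Since speaker-selection defaults to 'self' like camera and microphone, a cross-origin iframe only sees audiooutput devices when its allow attribute grants the feature, which is what the updated layout test exercises. A hedged sketch of granting it from script (the URL is illustrative):

    // Grant microphone capture plus speaker enumeration to a cross-origin frame.
    const frame = document.createElement('iframe');
    frame.allow = 'microphone;speaker-selection';
    frame.src = 'https://example.com/devices.html';
    document.body.appendChild(frame);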
Modified: trunk/Source/WebCore/html/FeaturePolicy.h (266165 => 266166)
--- trunk/Source/WebCore/html/FeaturePolicy.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/html/FeaturePolicy.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -41,6 +41,7 @@
enum class Type {
Camera,
Microphone,
+ SpeakerSelection,
DisplayCapture,
SyncXHR,
Fullscreen,
@@ -59,6 +60,7 @@
private:
AllowRule m_cameraRule;
AllowRule m_microphoneRule;
+ AllowRule m_speakerSelectionRule;
AllowRule m_displayCaptureRule;
AllowRule m_syncXHRRule;
AllowRule m_fullscreenRule;
Modified: trunk/Source/WebCore/page/Settings.yaml (266165 => 266166)
--- trunk/Source/WebCore/page/Settings.yaml 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/page/Settings.yaml 2020-08-26 11:16:03 UTC (rev 266166)
@@ -824,6 +824,9 @@
mediaKeysStorageDirectory:
type: String
+exposeSpeakersEnabled:
+ initial: false
+
mediaDeviceIdentifierStorageDirectory:
type: String
conditional: MEDIA_STREAM
Modified: trunk/Source/WebCore/platform/mediastream/CaptureDevice.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/CaptureDevice.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/CaptureDevice.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -31,7 +31,7 @@
class CaptureDevice {
public:
- enum class DeviceType { Unknown, Microphone, Camera, Screen, Window };
+ enum class DeviceType { Unknown, Microphone, Speaker, Camera, Screen, Window };
CaptureDevice(const String& persistentId, DeviceType type, const String& label, const String& groupId = emptyString())
: m_persistentId(persistentId)
@@ -49,12 +49,13 @@
{
static NeverDestroyed<String> airPods(MAKE_STATIC_STRING_IMPL("AirPods"));
- if (m_type == DeviceType::Microphone && m_label.contains(airPods))
+ if ((m_type == DeviceType::Microphone || m_type == DeviceType::Speaker) && m_label.contains(airPods))
return airPods;
return m_label;
}
+ void setGroupId(const String& groupId) { m_groupId = groupId; }
const String& groupId() const { return m_groupId; }
DeviceType type() const { return m_type; }
@@ -127,6 +128,7 @@
WebCore::CaptureDevice::DeviceType,
WebCore::CaptureDevice::DeviceType::Unknown,
WebCore::CaptureDevice::DeviceType::Microphone,
+ WebCore::CaptureDevice::DeviceType::Speaker,
WebCore::CaptureDevice::DeviceType::Camera,
WebCore::CaptureDevice::DeviceType::Screen,
WebCore::CaptureDevice::DeviceType::Window
Modified: trunk/Source/WebCore/platform/mediastream/RealtimeMediaSourceCenter.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/RealtimeMediaSourceCenter.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/RealtimeMediaSourceCenter.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -119,6 +119,10 @@
if (device.enabled())
result.append(device);
}
+ for (auto& device : audioCaptureFactory().speakerDevices()) {
+ if (device.enabled())
+ result.append(device);
+ }
for (auto& device : videoCaptureFactory().videoCaptureDeviceManager().captureDevices()) {
if (device.enabled())
result.append(device);
Modified: trunk/Source/WebCore/platform/mediastream/RealtimeMediaSourceFactory.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/RealtimeMediaSourceFactory.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/RealtimeMediaSourceFactory.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -60,6 +60,7 @@
virtual ~AudioCaptureFactory() = default;
virtual CaptureSourceOrError createAudioCaptureSource(const CaptureDevice&, String&&, const MediaConstraints*) = 0;
virtual CaptureDeviceManager& audioCaptureDeviceManager() = 0;
+ virtual const Vector<CaptureDevice>& speakerDevices() const = 0;
protected:
AudioCaptureFactory() = default;
Modified: trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/gstreamer/GStreamerAudioCaptureSource.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -59,6 +59,9 @@
}
private:
CaptureDeviceManager& audioCaptureDeviceManager() final { return GStreamerAudioCaptureDeviceManager::singleton(); }
+ const Vector<CaptureDevice>& speakerDevices() const { return m_speakerDevices; }
+
+ Vector<CaptureDevice> m_speakerDevices;
};
static GStreamerAudioCaptureSourceFactory& libWebRTCAudioCaptureSourceFactory()
Modified: trunk/Source/WebCore/platform/mediastream/ios/AVAudioSessionCaptureDeviceManager.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/ios/AVAudioSessionCaptureDeviceManager.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/ios/AVAudioSessionCaptureDeviceManager.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -45,6 +45,7 @@
static AVAudioSessionCaptureDeviceManager& singleton();
const Vector<CaptureDevice>& captureDevices() final;
+ const Vector<CaptureDevice>& speakerDevices() const { return m_speakerDevices; }
Optional<CaptureDevice> captureDeviceWithPersistentID(CaptureDevice::DeviceType, const String&);
Vector<AVAudioSessionCaptureDevice>& audioSessionCaptureDevices();
@@ -57,6 +58,7 @@
void refreshAudioCaptureDevices();
Optional<Vector<CaptureDevice>> m_devices;
+ Vector<CaptureDevice> m_speakerDevices;
Optional<Vector<AVAudioSessionCaptureDevice>> m_audioSessionCaptureDevices;
RetainPtr<WebAVAudioSessionAvailableInputsListener> m_listener;
};
Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDevice.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDevice.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDevice.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -65,22 +65,38 @@
return true;
}
-Optional<CoreAudioCaptureDevice> CoreAudioCaptureDevice::create(uint32_t deviceID)
+Optional<CoreAudioCaptureDevice> CoreAudioCaptureDevice::create(uint32_t deviceID, DeviceType type, const String& groupID)
{
+ ASSERT(type == CaptureDevice::DeviceType::Microphone || type == CaptureDevice::DeviceType::Speaker);
String persistentID;
String label;
if (!getDeviceInfo(deviceID, persistentID, label))
return WTF::nullopt;
- return CoreAudioCaptureDevice(deviceID, persistentID, label);
+ return CoreAudioCaptureDevice(deviceID, persistentID, type, label, groupID.isNull() ? persistentID : groupID);
}
-CoreAudioCaptureDevice::CoreAudioCaptureDevice(uint32_t deviceID, const String& persistentID, const String& label)
- : CaptureDevice(persistentID, CaptureDevice::DeviceType::Microphone, label)
+CoreAudioCaptureDevice::CoreAudioCaptureDevice(uint32_t deviceID, const String& persistentID, DeviceType deviceType, const String& label, const String& groupID)
+ : CaptureDevice(persistentID, deviceType, label, groupID)
, m_deviceID(deviceID)
{
}
+Vector<AudioDeviceID> CoreAudioCaptureDevice::relatedAudioDeviceIDs(AudioDeviceID deviceID)
+{
+ UInt32 size = 0;
+ AudioObjectPropertyAddress property = { kAudioDevicePropertyRelatedDevices, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMaster };
+ OSStatus error = AudioObjectGetPropertyDataSize(deviceID, &property, 0, 0, &size);
+ if (error || !size)
+ return { };
+
+ Vector<AudioDeviceID> devices(size / sizeof(AudioDeviceID));
+ error = AudioObjectGetPropertyData(deviceID, &property, 0, nullptr, &size, devices.data());
+ if (error)
+ return { };
+ return devices;
+}
+
RetainPtr<CMClockRef> CoreAudioCaptureDevice::deviceClock()
{
if (m_deviceClock)
Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDevice.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDevice.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDevice.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -37,8 +37,7 @@
class CoreAudioCaptureDevice : public CaptureDevice {
public:
-
- static Optional<CoreAudioCaptureDevice> create(uint32_t);
+ static Optional<CoreAudioCaptureDevice> create(uint32_t, DeviceType, const String& groupID);
virtual ~CoreAudioCaptureDevice() = default;
uint32_t deviceID() const { return m_deviceID; }
@@ -45,8 +44,10 @@
RetainPtr<CMClockRef> deviceClock();
bool isAlive();
+ static Vector<AudioDeviceID> relatedAudioDeviceIDs(AudioDeviceID);
+
private:
- CoreAudioCaptureDevice(uint32_t, const String& persistentID, const String& label);
+ CoreAudioCaptureDevice(uint32_t, const String& persistentID, DeviceType, const String& label, const String& groupID);
uint32_t m_deviceID { 0 };
RetainPtr<CMClockRef> m_deviceClock;
Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDeviceManager.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDeviceManager.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDeviceManager.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -52,7 +52,7 @@
const Vector<CaptureDevice>& CoreAudioCaptureDeviceManager::captureDevices()
{
coreAudioCaptureDevices();
- return m_devices;
+ return m_captureDevices;
}
Optional<CaptureDevice> CoreAudioCaptureDeviceManager::captureDeviceWithPersistentID(CaptureDevice::DeviceType type, const String& deviceID)
@@ -65,10 +65,9 @@
return WTF::nullopt;
}
-static bool deviceHasInputStreams(AudioObjectID deviceID)
+static bool deviceHasStreams(AudioObjectID deviceID, const AudioObjectPropertyAddress& address)
{
UInt32 dataSize = 0;
- AudioObjectPropertyAddress address = { kAudioDevicePropertyStreamConfiguration, kAudioDevicePropertyScopeInput, kAudioObjectPropertyElementMaster };
auto err = AudioObjectGetPropertyDataSize(deviceID, &address, 0, nullptr, &dataSize);
if (err || !dataSize)
return false;
@@ -80,6 +79,18 @@
return !err && bufferList->mNumberBuffers;
}
+static bool deviceHasInputStreams(AudioObjectID deviceID)
+{
+ AudioObjectPropertyAddress address = { kAudioDevicePropertyStreamConfiguration, kAudioDevicePropertyScopeInput, kAudioObjectPropertyElementMaster };
+ return deviceHasStreams(deviceID, address);
+}
+
+static bool deviceHasOutputStreams(AudioObjectID deviceID)
+{
+ AudioObjectPropertyAddress address = { kAudioDevicePropertyStreamConfiguration, kAudioDevicePropertyScopeOutput, kAudioObjectPropertyElementMaster };
+ return deviceHasStreams(deviceID, address);
+}
+
static bool isValidCaptureDevice(const CoreAudioCaptureDevice& device)
{
// Ignore output devices that have input only for echo cancellation.
@@ -126,7 +137,7 @@
if (err != noErr || deviceID == kAudioDeviceUnknown)
return { };
- return CoreAudioCaptureDevice::create(deviceID);
+ return CoreAudioCaptureDevice::create(deviceID, CaptureDevice::DeviceType::Microphone, { });
}
Vector<CoreAudioCaptureDevice>& CoreAudioCaptureDeviceManager::coreAudioCaptureDevices()
@@ -168,6 +179,13 @@
return WTF::nullopt;
}
+static inline bool hasDevice(const Vector<CoreAudioCaptureDevice>& devices, uint32_t deviceID, CaptureDevice::DeviceType deviceType)
+{
+ return std::any_of(devices.begin(), devices.end(), [&deviceID, deviceType](auto& device) {
+ return device.deviceID() == deviceID && device.type() == deviceType;
+ });
+}
+
void CoreAudioCaptureDeviceManager::refreshAudioCaptureDevices(NotifyIfDevicesHaveChanged notify)
{
ASSERT(isMainThread());
@@ -195,20 +213,51 @@
haveDeviceChanges = true;
}
+ // Microphones
for (size_t i = 0; i < deviceCount; i++) {
AudioObjectID deviceID = deviceIDs[i];
- if (!deviceHasInputStreams(deviceID))
- continue;
- if (std::any_of(m_coreAudioCaptureDevices.begin(), m_coreAudioCaptureDevices.end(), [deviceID](auto& device) { return device.deviceID() == deviceID; }))
+ if (!deviceHasInputStreams(deviceID) || hasDevice(m_coreAudioCaptureDevices, deviceID, CaptureDevice::DeviceType::Microphone))
continue;
- auto device = CoreAudioCaptureDevice::create(deviceID);
- if (!device || !isValidCaptureDevice(device.value()))
+ auto microphoneDevice = CoreAudioCaptureDevice::create(deviceID, CaptureDevice::DeviceType::Microphone, { });
+ if (microphoneDevice && isValidCaptureDevice(microphoneDevice.value())) {
+ m_coreAudioCaptureDevices.append(WTFMove(microphoneDevice.value()));
+ haveDeviceChanges = true;
+ }
+ }
+
+ // Speakers
+ for (size_t i = 0; i < deviceCount; i++) {
+ AudioObjectID deviceID = deviceIDs[i];
+
+ if (!deviceHasOutputStreams(deviceID) || hasDevice(m_coreAudioCaptureDevices, deviceID, CaptureDevice::DeviceType::Speaker))
continue;
- m_coreAudioCaptureDevices.append(WTFMove(device.value()));
- haveDeviceChanges = true;
+ String groupID;
+ for (auto relatedDeviceID : CoreAudioCaptureDevice::relatedAudioDeviceIDs(deviceID)) {
+ for (auto& device : m_coreAudioCaptureDevices) {
+ if (device.deviceID() == relatedDeviceID && device.type() == CaptureDevice::DeviceType::Microphone) {
+ groupID = device.persistentId();
+ break;
+ }
+ }
+ }
+
+ auto device = CoreAudioCaptureDevice::create(deviceID, CaptureDevice::DeviceType::Speaker, groupID);
+ if (device) {
+ // If there is no groupID, relate devices if the label is matching.
+ if (groupID.isNull()) {
+ for (auto& existingDevice : m_coreAudioCaptureDevices) {
+ if (existingDevice.label() == device->label() && existingDevice.type() == CaptureDevice::DeviceType::Microphone) {
+ device->setGroupId(existingDevice.persistentId());
+ break;
+ }
+ }
+ }
+ m_coreAudioCaptureDevices.append(WTFMove(device.value()));
+ haveDeviceChanges = true;
+ }
}
for (auto& device : m_coreAudioCaptureDevices) {
@@ -222,17 +271,20 @@
if (!haveDeviceChanges)
return;
- m_devices = Vector<CaptureDevice>();
-
- for (auto &device : m_coreAudioCaptureDevices) {
- CaptureDevice captureDevice(device.persistentId(), CaptureDevice::DeviceType::Microphone, device.label());
+ m_captureDevices.clear();
+ m_speakerDevices.clear();
+ for (auto& device : m_coreAudioCaptureDevices) {
+ CaptureDevice captureDevice { device.persistentId(), device.type(), device.label(), device.groupId() };
captureDevice.setEnabled(device.enabled());
- m_devices.append(captureDevice);
+ if (device.type() == CaptureDevice::DeviceType::Microphone)
+ m_captureDevices.append(WTFMove(captureDevice));
+ else
+ m_speakerDevices.append(WTFMove(captureDevice));
}
if (notify == NotifyIfDevicesHaveChanged::Notify) {
deviceChanged();
- CoreAudioCaptureSourceFactory::singleton().devicesChanged(m_devices);
+ CoreAudioCaptureSourceFactory::singleton().devicesChanged(m_captureDevices);
}
}
Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDeviceManager.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDeviceManager.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureDeviceManager.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -47,6 +47,7 @@
Optional<CaptureDevice> captureDeviceWithPersistentID(CaptureDevice::DeviceType, const String&);
Optional<CoreAudioCaptureDevice> coreAudioDeviceWithUID(const String&);
+ const Vector<CaptureDevice>& speakerDevices() const { return m_speakerDevices; }
private:
CoreAudioCaptureDeviceManager() = default;
@@ -57,7 +58,8 @@
enum class NotifyIfDevicesHaveChanged { Notify, DoNotNotify };
void refreshAudioCaptureDevices(NotifyIfDevicesHaveChanged);
- Vector<CaptureDevice> m_devices;
+ Vector<CaptureDevice> m_captureDevices;
+ Vector<CaptureDevice> m_speakerDevices;
Vector<CoreAudioCaptureDevice> m_coreAudioCaptureDevices;
};
Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -691,6 +691,15 @@
#endif
}
+const Vector<CaptureDevice>& CoreAudioCaptureSourceFactory::speakerDevices() const
+{
+#if PLATFORM(MAC)
+ return CoreAudioCaptureDeviceManager::singleton().speakerDevices();
+#else
+ return AVAudioSessionCaptureDeviceManager::singleton().speakerDevices();
+#endif
+}
+
void CoreAudioCaptureSourceFactory::devicesChanged(const Vector<CaptureDevice>& devices)
{
CoreAudioSharedUnit::unit().devicesChanged(devices);
Modified: trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/CoreAudioCaptureSource.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -123,6 +123,7 @@
}
CaptureDeviceManager& audioCaptureDeviceManager() final;
+ const Vector<CaptureDevice>& speakerDevices() const final;
};
} // namespace WebCore
Modified: trunk/Source/WebCore/platform/mediastream/mac/DisplayCaptureManagerCocoa.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/DisplayCaptureManagerCocoa.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/DisplayCaptureManagerCocoa.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -105,6 +105,7 @@
case CaptureDevice::DeviceType::Camera:
case CaptureDevice::DeviceType::Microphone:
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
break;
Modified: trunk/Source/WebCore/platform/mediastream/mac/DisplayCaptureSourceCocoa.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mediastream/mac/DisplayCaptureSourceCocoa.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mediastream/mac/DisplayCaptureSourceCocoa.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -67,6 +67,7 @@
case CaptureDevice::DeviceType::Window:
return create(WindowDisplayCapturerMac::create(device.persistentId()), device, constraints);
case CaptureDevice::DeviceType::Microphone:
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Camera:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
Modified: trunk/Source/WebCore/platform/mock/MockMediaDevice.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mock/MockMediaDevice.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mock/MockMediaDevice.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -55,6 +55,33 @@
int defaultSampleRate { 44100 };
};
+struct MockSpeakerProperties {
+ template<class Encoder>
+ void encode(Encoder& encoder) const
+ {
+ encoder << relatedMicrophoneId << static_cast<int32_t>(defaultSampleRate);
+ }
+
+ template <class Decoder>
+ static Optional<MockSpeakerProperties> decode(Decoder& decoder)
+ {
+ Optional<String> relatedMicrophoneId;
+ decoder >> relatedMicrophoneId;
+ if (!relatedMicrophoneId)
+ return WTF::nullopt;
+
+ Optional<int32_t> defaultSampleRate;
+ decoder >> defaultSampleRate;
+ if (!defaultSampleRate)
+ return WTF::nullopt;
+
+ return MockSpeakerProperties { WTFMove(*relatedMicrophoneId), *defaultSampleRate };
+ }
+
+ String relatedMicrophoneId;
+ int defaultSampleRate { 44100 };
+};
+
// FIXME: Add support for other properties.
struct MockCameraProperties {
template<class Encoder>
@@ -134,13 +161,29 @@
struct MockMediaDevice {
bool isMicrophone() const { return WTF::holds_alternative<MockMicrophoneProperties>(properties); }
+ bool isSpeaker() const { return WTF::holds_alternative<MockSpeakerProperties>(properties); }
bool isCamera() const { return WTF::holds_alternative<MockCameraProperties>(properties); }
bool isDisplay() const { return WTF::holds_alternative<MockDisplayProperties>(properties); }
+ CaptureDevice captureDevice() const
+ {
+ if (isMicrophone())
+ return CaptureDevice { persistentId, CaptureDevice::DeviceType::Microphone, label, persistentId };
+ if (isSpeaker())
+ return CaptureDevice { persistentId, CaptureDevice::DeviceType::Speaker, label, speakerProperties()->relatedMicrophoneId };
+ if (isCamera())
+ return CaptureDevice { persistentId, CaptureDevice::DeviceType::Camera, label, persistentId };
+
+ ASSERT(isDisplay());
+ return CaptureDevice { persistentId, CaptureDevice::DeviceType::Screen, label, persistentId };
+ }
+
CaptureDevice::DeviceType type() const
{
if (isMicrophone())
return CaptureDevice::DeviceType::Microphone;
+ if (isSpeaker())
+ return CaptureDevice::DeviceType::Speaker;
if (isCamera())
return CaptureDevice::DeviceType::Camera;
@@ -148,6 +191,11 @@
return WTF::get<MockDisplayProperties>(properties).type;
}
+ const MockSpeakerProperties* speakerProperties() const
+ {
+ return isSpeaker() ? &WTF::get<MockSpeakerProperties>(properties) : nullptr;
+ }
+
template<class Encoder>
void encode(Encoder& encoder) const
{
@@ -156,12 +204,15 @@
switchOn(properties, [&](const MockMicrophoneProperties& properties) {
encoder << (uint8_t)1;
encoder << properties;
- }, [&](const MockCameraProperties& properties) {
+ }, [&](const MockSpeakerProperties& properties) {
encoder << (uint8_t)2;
encoder << properties;
- }, [&](const MockDisplayProperties& properties) {
+ }, [&](const MockCameraProperties& properties) {
encoder << (uint8_t)3;
encoder << properties;
+ }, [&](const MockDisplayProperties& properties) {
+ encoder << (uint8_t)4;
+ encoder << properties;
});
}
@@ -197,8 +248,10 @@
case 1:
return decodeMockMediaDevice<MockMicrophoneProperties>(decoder, WTFMove(*persistentId), WTFMove(*label));
case 2:
+ return decodeMockMediaDevice<MockSpeakerProperties>(decoder, WTFMove(*persistentId), WTFMove(*label));
+ case 3:
return decodeMockMediaDevice<MockCameraProperties>(decoder, WTFMove(*persistentId), WTFMove(*label));
- case 3:
+ case 4:
return decodeMockMediaDevice<MockDisplayProperties>(decoder, WTFMove(*persistentId), WTFMove(*label));
}
return WTF::nullopt;
@@ -206,7 +259,7 @@
String persistentId;
String label;
- Variant<MockMicrophoneProperties, MockCameraProperties, MockDisplayProperties> properties;
+ Variant<MockMicrophoneProperties, MockSpeakerProperties, MockCameraProperties, MockDisplayProperties> properties;
};
} // namespace WebCore
Modified: trunk/Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.cpp (266165 => 266166)
--- trunk/Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -55,6 +55,10 @@
MockMediaDevice { "239c24b0-2b15-11e3-8224-0800200c9a66"_s, "Mock audio device 1"_s, MockMicrophoneProperties { 44100 } },
MockMediaDevice { "239c24b1-2b15-11e3-8224-0800200c9a66"_s, "Mock audio device 2"_s, MockMicrophoneProperties { 48000 } },
+ MockMediaDevice { "239c24b0-2b15-11e3-8224-0800200c9a67"_s, "Mock speaker device 1"_s, MockSpeakerProperties { "239c24b0-2b15-11e3-8224-0800200c9a66"_s, 44100 } },
+ MockMediaDevice { "239c24b1-2b15-11e3-8224-0800200c9a67"_s, "Mock speaker device 2"_s, MockSpeakerProperties { "239c24b1-2b15-11e3-8224-0800200c9a66"_s, 48000 } },
+ MockMediaDevice { "239c24b2-2b15-11e3-8224-0800200c9a67"_s, "Mock speaker device 3"_s, MockSpeakerProperties { String { }, 48000 } },
+
MockMediaDevice { "239c24b2-2b15-11e3-8224-0800200c9a66"_s, "Mock video device 1"_s,
MockCameraProperties {
30,
@@ -161,6 +165,7 @@
#endif
break;
case CaptureDevice::DeviceType::Microphone:
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Camera:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
@@ -190,6 +195,7 @@
RealtimeMediaSource* activeSource() final { return CoreAudioCaptureSourceFactory::singleton().activeSource(); }
#endif
CaptureDeviceManager& audioCaptureDeviceManager() final { return MockRealtimeMediaSourceCenter::singleton().audioCaptureDeviceManager(); }
+ const Vector<CaptureDevice>& speakerDevices() const final { return MockRealtimeMediaSourceCenter::speakerDevices(); }
};
static Vector<MockMediaDevice>& devices()
@@ -215,7 +221,9 @@
static inline Vector<CaptureDevice>& deviceListForDevice(const MockMediaDevice& device)
{
if (device.isMicrophone())
- return MockRealtimeMediaSourceCenter::audioDevices();
+ return MockRealtimeMediaSourceCenter::microphoneDevices();
+ if (device.isSpeaker())
+ return MockRealtimeMediaSourceCenter::speakerDevices();
if (device.isCamera())
return MockRealtimeMediaSourceCenter::videoDevices();
@@ -262,11 +270,18 @@
return singleton().m_isEnabled;
}
-static void createCaptureDevice(const MockMediaDevice& device)
+static CaptureDevice toCaptureDevice(const MockMediaDevice& device)
{
- deviceListForDevice(device).append(MockRealtimeMediaSourceCenter::captureDeviceWithPersistentID(device.type(), device.persistentId).value());
+ auto captureDevice = device.captureDevice();
+ captureDevice.setEnabled(true);
+ return captureDevice;
}
+static void createMockDevice(const MockMediaDevice& device)
+{
+ deviceListForDevice(device).append(toCaptureDevice(device));
+}
+
void MockRealtimeMediaSourceCenter::resetDevices()
{
setDevices(defaultDevices());
@@ -275,7 +290,8 @@
void MockRealtimeMediaSourceCenter::setDevices(Vector<MockMediaDevice>&& newMockDevices)
{
- audioDevices().clear();
+ microphoneDevices().clear();
+ speakerDevices().clear();
videoDevices().clear();
displayDevices().clear();
@@ -287,7 +303,7 @@
for (const auto& device : mockDevices) {
map.add(device.persistentId, device);
- createCaptureDevice(device);
+ createMockDevice(device);
}
RealtimeMediaSourceCenter::singleton().captureDevicesChanged();
}
@@ -296,7 +312,7 @@
{
devices().append(device);
deviceMap().set(device.persistentId, device);
- createCaptureDevice(device);
+ createMockDevice(device);
RealtimeMediaSourceCenter::singleton().captureDevicesChanged();
}
@@ -340,25 +356,37 @@
if (iterator == map.end() || iterator->value.type() != type)
return WTF::nullopt;
- CaptureDevice device { iterator->value.persistentId, type, iterator->value.label };
- device.setEnabled(true);
- return device;
+ return toCaptureDevice(iterator->value);
}
-Vector<CaptureDevice>& MockRealtimeMediaSourceCenter::audioDevices()
+Vector<CaptureDevice>& MockRealtimeMediaSourceCenter::microphoneDevices()
{
- static auto audioDevices = makeNeverDestroyed([] {
- Vector<CaptureDevice> audioDevices;
+ static auto microphoneDevices = makeNeverDestroyed([] {
+ Vector<CaptureDevice> microphoneDevices;
for (const auto& device : devices()) {
if (device.isMicrophone())
- audioDevices.append(captureDeviceWithPersistentID(CaptureDevice::DeviceType::Microphone, device.persistentId).value());
+ microphoneDevices.append(toCaptureDevice(device));
}
- return audioDevices;
+ return microphoneDevices;
}());
- return audioDevices;
+ return microphoneDevices;
}
+Vector<CaptureDevice>& MockRealtimeMediaSourceCenter::speakerDevices()
+{
+ static auto speakerDevices = makeNeverDestroyed([] {
+ Vector<CaptureDevice> speakerDevices;
+ for (const auto& device : devices()) {
+ if (device.isSpeaker())
+ speakerDevices.append(toCaptureDevice(device));
+ }
+ return speakerDevices;
+ }());
+
+ return speakerDevices;
+}
+
Vector<CaptureDevice>& MockRealtimeMediaSourceCenter::videoDevices()
{
static auto videoDevices = makeNeverDestroyed([] {
@@ -365,7 +393,7 @@
Vector<CaptureDevice> videoDevices;
for (const auto& device : devices()) {
if (device.isCamera())
- videoDevices.append(captureDeviceWithPersistentID(CaptureDevice::DeviceType::Camera, device.persistentId).value());
+ videoDevices.append(toCaptureDevice(device));
}
return videoDevices;
}());
Modified: trunk/Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.h (266165 => 266166)
--- trunk/Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebCore/platform/mock/MockRealtimeMediaSourceCenter.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -52,7 +52,8 @@
void setMockVideoCaptureEnabled(bool isEnabled) { m_isMockVideoCaptureEnabled = isEnabled; }
void setMockDisplayCaptureEnabled(bool isEnabled) { m_isMockDisplayCaptureEnabled = isEnabled; }
- static Vector<CaptureDevice>& audioDevices();
+ static Vector<CaptureDevice>& microphoneDevices();
+ static Vector<CaptureDevice>& speakerDevices();
static Vector<CaptureDevice>& videoDevices();
static Vector<CaptureDevice>& displayDevices();
@@ -73,7 +74,7 @@
class MockAudioCaptureDeviceManager final : public CaptureDeviceManager {
private:
- const Vector<CaptureDevice>& captureDevices() final { return MockRealtimeMediaSourceCenter::audioDevices(); }
+ const Vector<CaptureDevice>& captureDevices() final { return MockRealtimeMediaSourceCenter::microphoneDevices(); }
Optional<CaptureDevice> captureDeviceWithPersistentID(CaptureDevice::DeviceType type, const String& id) final { return MockRealtimeMediaSourceCenter::captureDeviceWithPersistentID(type, id); }
};
class MockVideoCaptureDeviceManager final : public CaptureDeviceManager {
Modified: trunk/Source/WebKit/ChangeLog (266165 => 266166)
--- trunk/Source/WebKit/ChangeLog 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/ChangeLog 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,3 +1,33 @@
+2020-08-26 Youenn Fablet <[email protected]>
+
+ enumerateDevices should expose audiooutput devices that are tied to an audio input device
+ https://bugs.webkit.org/show_bug.cgi?id=215806
+
+ Reviewed by Eric Carlson.
+
+ Introduce an experimental flag (off by default) to cover speaker selection, including exposing speaker devices to web pages.
+ Update UserMediaPermissionRequestManagerProxy::computeFilteredDeviceList to only expose speakers that are tied to a microphone that is exposed.
+ This aligns with https://w3c.github.io/mediacapture-output/#privacy-obtaining-consent.
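A sketch of how a page would observe the filtering described above, with the experimental flag turned on and the mock devices installed: speakers that are not tied to an exposed microphone never show up in enumerateDevices(). The expected labels come from MockRealtimeMediaSourceCenter.cpp; the helper name is illustrative.

    async function exposedSpeakerLabels() {
        // Speakers are only listed once audio capture has been granted.
        await navigator.mediaDevices.getUserMedia({ audio: true });
        const devices = await navigator.mediaDevices.enumerateDevices();
        return devices.filter(d => d.kind === 'audiooutput').map(d => d.label);
    }
    // Expected to contain "Mock speaker device 1" and "Mock speaker device 2",
    // but not "Mock speaker device 3", which has no related microphone.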
+
+ * GPUProcess/GPUConnectionToWebProcess.cpp:
+ * Shared/WebPreferences.yaml:
+ * UIProcess/API/C/WKMockMediaDevice.cpp:
+ (WKAddMockMediaDevice):
+ * UIProcess/API/C/WKPreferences.cpp:
+ (WKPreferencesSetExposeSpeakersEnabled):
+ (WKPreferencesGetExposeSpeakersEnabled):
+ * UIProcess/API/C/WKPreferencesRefPrivate.h:
+ * UIProcess/Cocoa/UserMediaCaptureManagerProxy.cpp:
+ (WebKit::UserMediaCaptureManagerProxy::createMediaSourceForCaptureDeviceWithConstraints):
+ * UIProcess/UserMediaPermissionRequestManagerProxy.cpp:
+ (WebKit::isMicrophoneDevice):
+ (WebKit::UserMediaPermissionRequestManagerProxy::computeFilteredDeviceList):
+ * WebProcess/cocoa/RemoteRealtimeMediaSource.cpp:
+ (WebKit::sourceTypeFromDeviceType):
+ (WebKit::RemoteRealtimeMediaSource::RemoteRealtimeMediaSource):
+ (WebKit::RemoteRealtimeMediaSource::~RemoteRealtimeMediaSource):
+ * WebProcess/cocoa/UserMediaCaptureManager.h:
+
2020-08-25 Tim Horton <[email protected]>
Web Share API Level 2 functions even when its experimental feature flag is disabled
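
The ChangeLog above describes the exposure policy implemented later in this patch (see the UserMediaPermissionRequestManagerProxy hunk): a speaker is listed only when its groupId matches the persistent ID of an exposed microphone. The following standalone sketch illustrates that policy outside of WebKit; the Device struct and filterExposedDevices() helper are illustrative names, not part of the patch, which operates on WebCore::CaptureDevice.

    #include <algorithm>
    #include <cstdio>
    #include <string>
    #include <vector>

    enum class DeviceType { Camera, Microphone, Speaker };

    struct Device {
        DeviceType type;
        std::string persistentId;
        std::string groupId;
    };

    // Mirrors the haveMicrophoneDevice() helper added in this patch: a speaker's
    // groupId must match the persistentId of a microphone in the same list.
    static bool hasMicrophoneWithId(const std::vector<Device>& devices, const std::string& deviceId)
    {
        return std::any_of(devices.begin(), devices.end(), [&](const Device& device) {
            return device.persistentId == deviceId && device.type == DeviceType::Microphone;
        });
    }

    static std::vector<Device> filterExposedDevices(const std::vector<Device>& devices)
    {
        std::vector<Device> exposed;
        for (const auto& device : devices) {
            // Hide speakers that are not tied to an exposed microphone.
            if (device.type == DeviceType::Speaker && !hasMicrophoneWithId(devices, device.groupId))
                continue;
            exposed.push_back(device);
        }
        return exposed;
    }

    int main()
    {
        std::vector<Device> devices {
            { DeviceType::Microphone, "mic-1", "mic-1" },
            { DeviceType::Speaker, "speaker-1", "mic-1" }, // tied to mic-1: exposed
            { DeviceType::Speaker, "speaker-3", "" },      // no matching microphone: hidden
        };
        std::printf("%zu of %zu devices exposed\n", filterExposedDevices(devices).size(), devices.size());
        return 0;
    }
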
Modified: trunk/Source/WebKit/GPUProcess/GPUConnectionToWebProcess.cpp (266165 => 266166)
--- trunk/Source/WebKit/GPUProcess/GPUConnectionToWebProcess.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/GPUProcess/GPUConnectionToWebProcess.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -123,6 +123,7 @@
{
switch (type) {
case CaptureDevice::DeviceType::Unknown:
+ case CaptureDevice::DeviceType::Speaker:
return false;
case CaptureDevice::DeviceType::Microphone:
return m_process.allowsAudioCapture();
Modified: trunk/Source/WebKit/Shared/WebPreferences.yaml (266165 => 266166)
--- trunk/Source/WebKit/Shared/WebPreferences.yaml 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/Shared/WebPreferences.yaml 2020-08-26 11:16:03 UTC (rev 266166)
@@ -668,6 +668,14 @@
category: internal
condition: ENABLE(WEB_RTC)
+ExposeSpeakersEnabled:
+ type: bool
+ defaultValue: false
+ humanReadableName: "Allow speaker device selection"
+ humanReadableDescription: "Allow speaker device selection"
+ category: experimental
+ condition: ENABLE(WEB_RTC)
+
VP9DecoderEnabled:
type: bool
defaultValue: defaultVP9DecoderEnabled()
Modified: trunk/Source/WebKit/UIProcess/API/C/WKMockMediaDevice.cpp (266165 => 266166)
--- trunk/Source/WebKit/UIProcess/API/C/WKMockMediaDevice.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/UIProcess/API/C/WKMockMediaDevice.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -38,11 +38,13 @@
{
#if ENABLE(MEDIA_STREAM)
String typeString = WebKit::toImpl(type)->string();
- Variant<WebCore::MockMicrophoneProperties, WebCore::MockCameraProperties, WebCore::MockDisplayProperties> properties;
+ Variant<WebCore::MockMicrophoneProperties, WebCore::MockSpeakerProperties, WebCore::MockCameraProperties, WebCore::MockDisplayProperties> properties;
if (typeString == "camera")
properties = WebCore::MockCameraProperties { };
else if (typeString == "screen")
properties = WebCore::MockDisplayProperties { };
+ else if (typeString == "speaker")
+ properties = WebCore::MockSpeakerProperties { };
else if (typeString != "microphone")
return;
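
With the WKMockMediaDevice change above, the C SPI now accepts "speaker" as a device type string. A minimal test-harness call might look like the sketch below; the exact WKAddMockMediaDevice signature (context, persistent ID, label, type) is assumed from how the function is used by WebKitTestRunner and should be checked against WKMockMediaDevice.h.

    // Sketch only: registers a mock speaker with the UI process so that
    // enumerateDevices() in layout tests can report an "audiooutput" entry.
    // Assumed signature: WKAddMockMediaDevice(WKContextRef, WKStringRef persistentID, WKStringRef label, WKStringRef type).
    #include <WebKit/WKMockMediaDevice.h>
    #include <WebKit/WKRetainPtr.h>
    #include <WebKit/WKString.h>

    void addMockSpeaker(WKContextRef context)
    {
        WKRetainPtr<WKStringRef> persistentID = adoptWK(WKStringCreateWithUTF8CString("mock-speaker-1"));
        WKRetainPtr<WKStringRef> label = adoptWK(WKStringCreateWithUTF8CString("Mock speaker device 1"));
        WKRetainPtr<WKStringRef> type = adoptWK(WKStringCreateWithUTF8CString("speaker"));
        WKAddMockMediaDevice(context, persistentID.get(), label.get(), type.get());
    }
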
Modified: trunk/Source/WebKit/UIProcess/API/C/WKPreferences.cpp (266165 => 266166)
--- trunk/Source/WebKit/UIProcess/API/C/WKPreferences.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/UIProcess/API/C/WKPreferences.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1930,6 +1930,16 @@
return toImpl(preferencesRef)->mediaPreloadingEnabled();
}
+void WKPreferencesSetExposeSpeakersEnabled(WKPreferencesRef preferencesRef, bool flag)
+{
+ toImpl(preferencesRef)->setExposeSpeakersEnabled(flag);
+}
+
+bool WKPreferencesGetExposeSpeakersEnabled(WKPreferencesRef preferencesRef)
+{
+ return toImpl(preferencesRef)->exposeSpeakersEnabled();
+}
+
void WKPreferencesSetLargeImageAsyncDecodingEnabled(WKPreferencesRef preferencesRef, bool flag)
{
toImpl(preferencesRef)->setLargeImageAsyncDecodingEnabled(flag);
Modified: trunk/Source/WebKit/UIProcess/API/C/WKPreferencesRefPrivate.h (266165 => 266166)
--- trunk/Source/WebKit/UIProcess/API/C/WKPreferencesRefPrivate.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/UIProcess/API/C/WKPreferencesRefPrivate.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -540,6 +540,10 @@
WK_EXPORT bool WKPreferencesGetMediaPreloadingEnabled(WKPreferencesRef);
// Defaults to false
+WK_EXPORT void WKPreferencesSetExposeSpeakersEnabled(WKPreferencesRef, bool flag);
+WK_EXPORT bool WKPreferencesGetExposeSpeakersEnabled(WKPreferencesRef);
+
+// Defaults to false
WK_EXPORT void WKPreferencesSetWebAuthenticationEnabled(WKPreferencesRef, bool flag);
WK_EXPORT bool WKPreferencesGetWebAuthenticationEnabled(WKPreferencesRef);
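
The new preference can be toggled through the C SPI declared above; WebKitTestRunner does exactly this in the TestController change at the end of this patch. A minimal sketch, assuming a WKPreferencesRef obtained from an existing page group or configuration:

    #include <WebKit/WKPreferencesRefPrivate.h>

    // Opt in to speaker enumeration; the flag defaults to false, so pages only
    // see "audiooutput" devices when the embedder (or test runner) enables it.
    void enableSpeakerEnumeration(WKPreferencesRef preferences)
    {
        WKPreferencesSetExposeSpeakersEnabled(preferences, true);
    }

The matching getter, WKPreferencesGetExposeSpeakersEnabled(), reads the current value back.
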
Modified: trunk/Source/WebKit/UIProcess/Cocoa/UserMediaCaptureManagerProxy.cpp (266165 => 266166)
--- trunk/Source/WebKit/UIProcess/Cocoa/UserMediaCaptureManagerProxy.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/UIProcess/Cocoa/UserMediaCaptureManagerProxy.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -260,6 +260,7 @@
case WebCore::CaptureDevice::DeviceType::Window:
sourceOrError = RealtimeMediaSourceCenter::singleton().displayCaptureFactory().createDisplayCaptureSource(device, &constraints);
break;
+ case WebCore::CaptureDevice::DeviceType::Speaker:
case WebCore::CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
break;
Modified: trunk/Source/WebKit/UIProcess/UserMediaPermissionRequestManagerProxy.cpp (266165 => 266166)
--- trunk/Source/WebKit/UIProcess/UserMediaPermissionRequestManagerProxy.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/UIProcess/UserMediaPermissionRequestManagerProxy.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -598,6 +598,13 @@
return false;
}
+static inline bool haveMicrophoneDevice(const Vector<WebCore::CaptureDevice>& devices, const String& deviceID)
+{
+ return std::any_of(devices.begin(), devices.end(), [&deviceID](auto& device) {
+ return device.persistentId() == deviceID && device.type() == CaptureDevice::DeviceType::Microphone;
+ });
+}
+
Vector<CaptureDevice> UserMediaPermissionRequestManagerProxy::computeFilteredDeviceList(bool revealIdsAndLabels, const String& deviceIDHashSalt)
{
static const int defaultMaximumCameraCount = 1;
@@ -609,7 +616,7 @@
Vector<CaptureDevice> filteredDevices;
for (const auto& device : devices) {
- if (!device.enabled() || (device.type() != WebCore::CaptureDevice::DeviceType::Camera && device.type() != WebCore::CaptureDevice::DeviceType::Microphone))
+ if (!device.enabled() || (device.type() != WebCore::CaptureDevice::DeviceType::Camera && device.type() != WebCore::CaptureDevice::DeviceType::Microphone && device.type() != WebCore::CaptureDevice::DeviceType::Speaker))
continue;
if (!revealIdsAndLabels) {
@@ -617,6 +624,12 @@
continue;
if (device.type() == WebCore::CaptureDevice::DeviceType::Microphone && ++microphoneCount > defaultMaximumMicrophoneCount)
continue;
+ if (device.type() != WebCore::CaptureDevice::DeviceType::Camera && device.type() != WebCore::CaptureDevice::DeviceType::Microphone)
+ continue;
+ } else {
+ // We only expose speakers tied to a microphone for the moment.
+ if (device.type() == WebCore::CaptureDevice::DeviceType::Speaker && !haveMicrophoneDevice(devices, device.groupId()))
+ continue;
}
auto label = emptyString();
Modified: trunk/Source/WebKit/WebProcess/cocoa/RemoteRealtimeMediaSource.cpp (266165 => 266166)
--- trunk/Source/WebKit/WebProcess/cocoa/RemoteRealtimeMediaSource.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/WebProcess/cocoa/RemoteRealtimeMediaSource.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -70,6 +70,7 @@
case CaptureDevice::DeviceType::Screen:
case CaptureDevice::DeviceType::Window:
return RealtimeMediaSource::Type::Video;
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
}
@@ -97,6 +98,7 @@
case CaptureDevice::DeviceType::Screen:
case CaptureDevice::DeviceType::Window:
break;
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
}
@@ -118,6 +120,7 @@
case CaptureDevice::DeviceType::Screen:
case CaptureDevice::DeviceType::Window:
break;
+ case CaptureDevice::DeviceType::Speaker:
case CaptureDevice::DeviceType::Unknown:
ASSERT_NOT_REACHED();
}
Modified: trunk/Source/WebKit/WebProcess/cocoa/UserMediaCaptureManager.h (266165 => 266166)
--- trunk/Source/WebKit/WebProcess/cocoa/UserMediaCaptureManager.h 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Source/WebKit/WebProcess/cocoa/UserMediaCaptureManager.h 2020-08-26 11:16:03 UTC (rev 266166)
@@ -70,9 +70,11 @@
private:
WebCore::CaptureSourceOrError createAudioCaptureSource(const WebCore::CaptureDevice&, String&& hashSalt, const WebCore::MediaConstraints*) final;
WebCore::CaptureDeviceManager& audioCaptureDeviceManager() final { return m_manager.m_noOpCaptureDeviceManager; }
+ const Vector<WebCore::CaptureDevice>& speakerDevices() const final { return m_speakerDevices; }
UserMediaCaptureManager& m_manager;
bool m_shouldCaptureInGPUProcess { false };
+ Vector<WebCore::CaptureDevice> m_speakerDevices;
};
class VideoFactory : public WebCore::VideoCaptureFactory {
public:
Modified: trunk/Tools/ChangeLog (266165 => 266166)
--- trunk/Tools/ChangeLog 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Tools/ChangeLog 2020-08-26 11:16:03 UTC (rev 266166)
@@ -1,3 +1,15 @@
+2020-08-26 Youenn Fablet <[email protected]>
+
+ enumerateDevices should expose audiooutput devices that are tied to an audio input device
+ https://bugs.webkit.org/show_bug.cgi?id=215806
+
+ Reviewed by Eric Carlson.
+
+ Enable the ExposeSpeakersEnabled experimental flag in WebKitTestRunner so layout tests can exercise speaker enumeration.
+
+ * WebKitTestRunner/TestController.cpp:
+ (WTR::TestController::resetPreferencesToConsistentValues):
+
2020-08-26 Diego Pino Garcia <[email protected]>
[ews] Split JSC ARMv7 queue into separate builder and tester queue
Modified: trunk/Tools/WebKitTestRunner/TestController.cpp (266165 => 266166)
--- trunk/Tools/WebKitTestRunner/TestController.cpp 2020-08-26 09:08:05 UTC (rev 266165)
+++ trunk/Tools/WebKitTestRunner/TestController.cpp 2020-08-26 11:16:03 UTC (rev 266166)
@@ -962,6 +962,7 @@
WKPreferencesSetResourceTimingEnabled(preferences, true);
WKPreferencesSetUserTimingEnabled(preferences, true);
WKPreferencesSetMediaPreloadingEnabled(preferences, true);
+ WKPreferencesSetExposeSpeakersEnabled(preferences, true);
WKPreferencesSetMediaPlaybackAllowsInline(preferences, true);
WKPreferencesSetInlineMediaPlaybackRequiresPlaysInlineAttribute(preferences, false);
WKPreferencesSetRemotePlaybackEnabled(preferences, true);