Feature/sound system (#287)

* Replaced the old device push/pull-based sound-system implementation with a new lightweight one

* media: fixed WebRtcAudioPlayer double call to stop when source is drained

* media: Reset reading flag in WebRtcAudioPlayer

* native: delete avdev library
This commit is contained in:
Alex Andres 2021-11-29 18:01:44 +01:00 committed by GitHub
parent 3b12ee8a3e
commit ea552bf282
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
119 changed files with 2773 additions and 4472 deletions

View file

@ -51,7 +51,7 @@ public abstract class ExecutableBase implements Executable {
/**
* Add a ExecutableStateListener listener to this component.
* Adds an {@code ExecutableStateListener} to this component.
*
* @param listener The listener to add.
*/
@ -60,7 +60,7 @@ public abstract class ExecutableBase implements Executable {
}
/**
* Remove a ExecutableStateListener listener from this component.
* Removes an {@code ExecutableStateListener} from this component.
*
* @param listener The listener to remove.
*/

View file

@ -19,6 +19,7 @@
package org.lecturestudio.core.app.configuration;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioProcessingSettings;
import org.lecturestudio.core.beans.DoubleProperty;
import org.lecturestudio.core.beans.FloatProperty;
import org.lecturestudio.core.beans.ObjectProperty;
@ -40,9 +41,6 @@ public class AudioConfiguration {
/** The playback device name. */
private final StringProperty playbackDeviceName = new StringProperty();
/** The sound system name. */
private final StringProperty soundSystem = new StringProperty();
/** The path where the recordings are stored at. */
private final StringProperty recordingPath = new StringProperty();
@ -61,6 +59,9 @@ public class AudioConfiguration {
/** The audio format of the recording. */
private final ObjectProperty<AudioFormat> recordingFormat = new ObjectProperty<>();
/** The audio processing settings for recording. */
private final ObjectProperty<AudioProcessingSettings> recordingProcessingSettings = new ObjectProperty<>();
/**
* Obtain the capture device name.
@ -116,33 +117,6 @@ public class AudioConfiguration {
return playbackDeviceName;
}
/**
* Obtain the sound system name.
*
* @return the sound system name.
*/
public String getSoundSystem() {
return soundSystem.get();
}
/**
* Set the sound system name.
*
* @param soundSystem sound system name to set.
*/
public void setSoundSystem(String soundSystem) {
this.soundSystem.set(soundSystem);
}
/**
* Obtain the sound system property.
*
* @return the sound system property.
*/
public StringProperty soundSystemProperty() {
return soundSystem;
}
/**
* Obtain the recording path.
*
@ -314,4 +288,32 @@ public class AudioConfiguration {
return recordingFormat;
}
/**
* Obtain the {@code AudioProcessingSettings} for audio recording.
*
* @return the {@code AudioProcessingSettings} for recording.
*/
public AudioProcessingSettings getRecordingProcessingSettings() {
return recordingProcessingSettings.get();
}
/**
* Set the {@code AudioProcessingSettings} to be applied when recording
* audio.
*
* @param settings The new {@code AudioProcessingSettings}.
*/
public void setRecordingProcessingSettings(AudioProcessingSettings settings) {
this.recordingProcessingSettings.set(settings);
}
/**
* Get the {@code AudioProcessingSettings} property which holds the settings
* applied for audio recordings.
*
* @return The recording processing settings property.
*/
public ObjectProperty<AudioProcessingSettings> recordingProcessingSettingsProperty() {
return recordingProcessingSettings;
}
}

View file

@ -21,8 +21,8 @@ package org.lecturestudio.core.audio;
import org.lecturestudio.core.model.Time;
/**
* The {@link AudioPlaybackProgressListener} is notified each time an audio player has
* processed and pushed audio samples to a audio playback device.
* The {@link AudioPlaybackProgressListener} is notified each time an audio
* player has processed and pushed audio samples to an audio playback device.
*
* @author Alex Andres
*/
@ -30,11 +30,11 @@ import org.lecturestudio.core.model.Time;
public interface AudioPlaybackProgressListener {
/**
* Called when progress of the current playing audio changes.
* Called when progress of the playing audio changes.
*
* @param progressMs The current progress in milliseconds.
* @param durationMs The total duration of audio in milliseconds.
*/
void onAudioProgress(Time progressMs, Time durationMs);
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
@ -18,257 +18,70 @@
package org.lecturestudio.core.audio;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.Executable;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.source.AudioSource;
import org.lecturestudio.core.model.Time;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* Default audio player implementation.
* An AudioPlayer manages the audio resources to play audio on an audio output
* device, e.g. a speaker or headset.
*
* @author Alex Andres
*/
public class AudioPlayer extends ExecutableBase implements Player {
/** Logger for {@link AudioPlayer} */
private static final Logger LOG = LogManager.getLogger(AudioPlayer.class);
/** The sync state that is shared with other media players. */
private final SyncState syncState;
/** The audio playback device. */
private final AudioOutputDevice playbackDevice;
/** The audio source. */
private final AudioSource audioSource;
/** The audio source size. */
private final long inputSize;
/** The playback progress listener. */
private AudioPlaybackProgressListener progressListener;
/** The player state listener. */
private ExecutableStateListener stateListener;
/** The playback thread. */
private Thread thread;
/** The current audio source reading position. */
private long inputPos;
public interface AudioPlayer extends Executable {
/**
* Create an {@link AudioPlayer} with the specified playback device and source. The
* sync state is shared with other media players to keep different media
* sources in sync while playing.
* Sets the device name of the audio playback device which will play audio
* for this player.
*
* @param device The audio playback device.
* @param source The audio source.
* @param syncState The shared sync state.
*
* @throws Exception If the audio player failed to initialize.
* @param deviceName The audio output device name.
*/
public AudioPlayer(AudioOutputDevice device, AudioSource source, SyncState syncState) throws Exception {
if (isNull(device)) {
throw new NullPointerException("Missing audio playback device.");
}
if (isNull(source)) {
throw new NullPointerException("Missing audio source.");
}
this.playbackDevice = device;
this.audioSource = source;
this.syncState = syncState;
this.inputSize = source.getInputSize();
}
void setAudioDeviceName(String deviceName);
@Override
public void setVolume(float volume) {
if (volume < 0 || volume > 1) {
return;
}
/**
* Sets the {@code AudioSource} that will read the audio samples to play.
*
* @param source The audio source to set.
*/
void setAudioSource(AudioSource source);
playbackDevice.setVolume(volume);
}
/**
* Sets the playback audio volume. The value must be in the range of
* [0,1].
*
* @param volume The playback audio volume.
*/
void setAudioVolume(double volume);
@Override
public void seek(int timeMs) throws Exception {
AudioFormat format = audioSource.getAudioFormat();
/**
* Set the playback progress listener.
*
* @param listener The listener to set.
*/
void setAudioProgressListener(AudioPlaybackProgressListener listener);
float bytesPerSecond = AudioUtils.getBytesPerSecond(format);
int skipBytes = Math.round(bytesPerSecond * timeMs / 1000F);
/**
* Jump to the specified time position in the audio playback stream.
*
* @param timeMs The absolute time in milliseconds to jump to.
*
* @throws Exception If the playback stream failed to read the start of the
* specified position.
*/
void seek(int timeMs) throws Exception;
audioSource.reset();
audioSource.skip(skipBytes);
/**
* Add an {@code ExecutableStateListener} to this player.
*
* @param listener The listener to add.
*/
void addStateListener(ExecutableStateListener listener);
inputPos = skipBytes;
syncState.setAudioTime((long) (inputPos / (bytesPerSecond / 1000f)));
}
@Override
public void setProgressListener(AudioPlaybackProgressListener listener) {
this.progressListener = listener;
}
@Override
public void setStateListener(ExecutableStateListener listener) {
this.stateListener = listener;
}
@Override
protected void initInternal() throws ExecutableException {
try {
audioSource.reset();
}
catch (Exception e) {
throw new ExecutableException("Audio device could not be initialized.", e);
}
if (!playbackDevice.supportsAudioFormat(audioSource.getAudioFormat())) {
throw new ExecutableException("Audio device does not support the needed audio format.");
}
try {
playbackDevice.setAudioFormat(audioSource.getAudioFormat());
playbackDevice.open();
playbackDevice.start();
}
catch (Exception e) {
throw new ExecutableException("Audio device could not be initialized.", e);
}
}
@Override
protected void startInternal() throws ExecutableException {
if (getPreviousState() == ExecutableState.Suspended) {
synchronized (thread) {
thread.notify();
}
}
else {
thread = new Thread(new AudioReaderTask(), getClass().getSimpleName());
thread.start();
}
}
@Override
protected void stopInternal() throws ExecutableException {
try {
audioSource.reset();
}
catch (Exception e) {
throw new ExecutableException(e);
}
inputPos = 0;
syncState.reset();
}
@Override
protected void destroyInternal() throws ExecutableException {
try {
playbackDevice.close();
audioSource.close();
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void fireStateChanged() {
if (nonNull(stateListener)) {
stateListener.onExecutableStateChange(getPreviousState(), getState());
}
}
private void onProgress(Time progress, Time duration, long progressMs) {
if (nonNull(syncState)) {
syncState.setAudioTime(progressMs);
}
if (nonNull(progressListener) && started()) {
progress.setMillis(progressMs);
progressListener.onAudioProgress(progress, duration);
}
}
private class AudioReaderTask implements Runnable {
@Override
public void run() {
byte[] buffer = new byte[playbackDevice.getBufferSize()];
int bytesRead;
// Calculate bytes per millisecond.
float bpms = AudioUtils.getBytesPerSecond(audioSource.getAudioFormat()) / 1000f;
Time progress = new Time(0);
Time duration = new Time((long) (inputSize / bpms));
ExecutableState state;
while (true) {
state = getState();
if (state == ExecutableState.Started) {
try {
bytesRead = audioSource.read(buffer, 0, buffer.length);
if (bytesRead > 0) {
playbackDevice.write(buffer, 0, bytesRead);
inputPos += bytesRead;
onProgress(progress, duration, (long) (inputPos / bpms));
}
else if (bytesRead == -1) {
// EOM
break;
}
}
catch (Exception e) {
LOG.error("Play audio failed.", e);
break;
}
}
else if (state == ExecutableState.Suspended) {
synchronized (thread) {
try {
thread.wait();
}
catch (Exception e) {
// Ignore
}
}
}
else if (state == ExecutableState.Stopped) {
return;
}
}
// EOM
try {
stop();
}
catch (ExecutableException e) {
LOG.error("Stop " + getClass().getName() + " failed.", e);
}
}
}
/**
* Removes an {@code ExecutableStateListener} from this player.
*
* @param listener The listener to remove.
*/
void removeStateListener(ExecutableStateListener listener);
}

View file

@ -1,225 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.security.InvalidParameterException;
import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.io.AudioPlaybackBuffer;
import org.lecturestudio.core.io.PlaybackData;
import org.lecturestudio.core.net.Synchronizer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* Extended audio player implementation.
*
* @author Alex Andres
*/
public class AudioPlayerExt extends ExecutableBase implements Player {
/** Logger for {@link AudioPlayerExt} */
private static final Logger LOG = LogManager.getLogger(AudioPlayerExt.class);
/** The audio playback device. */
private AudioOutputDevice playbackDevice;
/** The audio source. */
private AudioPlaybackBuffer audioSource;
/** The player state listener. */
private ExecutableStateListener stateListener;
/** The playback thread. */
private Thread thread;
/**
* Create an {@link AudioPlayerExt} with the specified playback device and audio buffer.
*
* @param device The audio playback device.
* @param buffer The audio source.
*
* @throws NullPointerException if the device or the buffer is {@code null}.
*/
public AudioPlayerExt(AudioOutputDevice device, AudioPlaybackBuffer buffer) {
if (isNull(device)) {
throw new NullPointerException("Missing audio playback device.");
}
if (isNull(buffer)) {
throw new NullPointerException("Missing audio buffer source.");
}
this.playbackDevice = device;
this.audioSource = buffer;
}
@Override
public void setVolume(float volume) {
// NOTE(review): IllegalArgumentException would be the conventional type
// for a bad argument; InvalidParameterException comes from java.security.
if (volume < 0 || volume > 1) {
throw new InvalidParameterException("Volume value should be within 0 and 1.");
}
playbackDevice.setVolume(volume);
}
@Override
public void seek(int time) {
// Delegates to the buffer; the unit of 'time' is defined by
// AudioPlaybackBuffer.skip() — presumably milliseconds, TODO confirm.
audioSource.skip(time);
}
@Override
public void setProgressListener(AudioPlaybackProgressListener listener) {
// Progress reporting is not supported by this player implementation.
}
@Override
public void setStateListener(ExecutableStateListener listener) {
this.stateListener = listener;
}
@Override
protected void initInternal() throws ExecutableException {
// Drop any stale buffered samples before (re-)configuring the device.
audioSource.reset();
AudioFormat format = audioSource.getAudioFormat();
if (!playbackDevice.supportsAudioFormat(format)) {
throw new ExecutableException("Audio device does not support the needed audio format.");
}
try {
playbackDevice.setAudioFormat(format);
playbackDevice.open();
playbackDevice.start();
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void startInternal() throws ExecutableException {
if (getPreviousState() == ExecutableState.Suspended) {
// Resume: wake the playback thread parked in AudioReaderTask.run().
synchronized (thread) {
thread.notify();
}
}
else {
// Fresh start: spawn a new playback thread.
thread = new Thread(new AudioReaderTask(), getClass().getSimpleName());
thread.start();
}
}
@Override
protected void stopInternal() throws ExecutableException {
// NOTE(review): 'thread' is null if stop() is reached without a prior
// start(); the synchronized block below would then throw an NPE —
// confirm the Executable lifecycle rules out that ordering.
try {
synchronized (thread) {
thread.interrupt();
}
audioSource.reset();
}
catch (Exception e) {
throw new ExecutableException("Audio source could not be reset.", e);
}
}
@Override
protected void destroyInternal() throws ExecutableException {
try {
playbackDevice.close();
audioSource.reset();
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void fireStateChanged() {
// Forward state transitions to the registered listener, if any.
if (nonNull(stateListener)) {
stateListener.onExecutableStateChange(getPreviousState(), getState());
}
}
/**
* Playback loop: takes buffered samples and writes them to the playback
* device until the device is closed or the player is stopped.
*/
private class AudioReaderTask implements Runnable {
@Override
public void run() {
int bytesRead;
ExecutableState state;
while (playbackDevice.isOpen()) {
state = getState();
if (state == ExecutableState.Started) {
try {
// take() presumably blocks until samples arrive — TODO confirm.
PlaybackData<byte[]> samples = audioSource.take();
if (nonNull(samples)) {
bytesRead = samples.getData().length;
Synchronizer.setAudioTime(samples.getTimestamp());
if (bytesRead > 0 && playbackDevice.isOpen()) {
playbackDevice.write(samples.getData(), 0, bytesRead); // TODO check deviceBufferSize
}
}
}
catch (Exception e) {
LOG.error("Play audio failed.", e);
break;
}
}
else if (state == ExecutableState.Suspended) {
// Park until startInternal() notifies on resume.
synchronized (thread) {
try {
thread.wait();
}
catch (Exception e) {
// Ignore
}
}
}
else if (state == ExecutableState.Stopped) {
return;
}
}
try {
playbackDevice.stop();
playbackDevice.close();
}
catch (Exception e) {
LOG.error("Stop audio playback device failed.", e);
}
}
}
}

View file

@ -0,0 +1,117 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio;
import org.lecturestudio.core.beans.BooleanProperty;
import org.lecturestudio.core.beans.ObjectProperty;
/**
* Specifies software audio processing filters to be applied to audio samples
* coming from an audio input device, e.g. a microphone, or being provided to an
* audio output device, e.g. speakers. If the hardware has activated such
* filters, then the corresponding setting should be disabled here.
*
* @author Alex Andres
*/
public class AudioProcessingSettings {

	/** Available strengths for the noise suppression filter. */
	public enum NoiseSuppressionLevel {
		LOW,
		MODERATE,
		HIGH,
		VERY_HIGH
	}

	/** Whether acoustic echo cancellation is applied. */
	private final BooleanProperty echoCanceller = new BooleanProperty();

	/** Whether automatic gain control is applied. */
	private final BooleanProperty gainControl = new BooleanProperty();

	/** Whether a high-pass filter is applied. */
	private final BooleanProperty highpassFilter = new BooleanProperty();

	/** Whether noise suppression is applied. */
	private final BooleanProperty noiseSuppression = new BooleanProperty();

	/** Whether the audio level is estimated. */
	private final BooleanProperty levelEstimation = new BooleanProperty();

	/** Whether voice activity detection is performed. */
	private final BooleanProperty voiceDetection = new BooleanProperty();

	/** The strength of the noise suppression, if enabled. */
	private final ObjectProperty<NoiseSuppressionLevel> noiseSuppressionLevel = new ObjectProperty<>();


	/**
	 * @return {@code true} if echo cancellation is enabled.
	 */
	public boolean isEchoCancellerEnabled() {
		return echoCanceller.get();
	}

	/**
	 * Enable or disable echo cancellation.
	 *
	 * @param enable {@code true} to enable echo cancellation.
	 */
	public void setEchoCancellerEnabled(boolean enable) {
		echoCanceller.set(enable);
	}

	/**
	 * @return {@code true} if automatic gain control is enabled.
	 */
	public boolean isGainControlEnabled() {
		return gainControl.get();
	}

	/**
	 * Enable or disable automatic gain control.
	 *
	 * @param enable {@code true} to enable gain control.
	 */
	public void setGainControlEnabled(boolean enable) {
		gainControl.set(enable);
	}

	/**
	 * @return {@code true} if the high-pass filter is enabled.
	 */
	public boolean isHighpassFilterEnabled() {
		return highpassFilter.get();
	}

	/**
	 * Enable or disable the high-pass filter.
	 *
	 * @param enable {@code true} to enable the high-pass filter.
	 */
	public void setHighpassFilterEnabled(boolean enable) {
		highpassFilter.set(enable);
	}

	/**
	 * @return {@code true} if noise suppression is enabled.
	 */
	public boolean isNoiseSuppressionEnabled() {
		return noiseSuppression.get();
	}

	/**
	 * Enable or disable noise suppression.
	 *
	 * @param enable {@code true} to enable noise suppression.
	 */
	public void setNoiseSuppressionEnabled(boolean enable) {
		noiseSuppression.set(enable);
	}

	/**
	 * @return {@code true} if audio level estimation is enabled.
	 */
	public boolean isLevelEstimationEnabled() {
		return levelEstimation.get();
	}

	/**
	 * Enable or disable audio level estimation.
	 *
	 * @param enable {@code true} to enable level estimation.
	 */
	public void setLevelEstimationEnabled(boolean enable) {
		levelEstimation.set(enable);
	}

	/**
	 * @return {@code true} if voice activity detection is enabled.
	 */
	public boolean isVoiceDetectionEnabled() {
		return voiceDetection.get();
	}

	/**
	 * Enable or disable voice activity detection.
	 *
	 * @param enable {@code true} to enable voice detection.
	 */
	public void setVoiceDetectionEnabled(boolean enable) {
		voiceDetection.set(enable);
	}

	/**
	 * @return The configured {@code NoiseSuppressionLevel}.
	 */
	public NoiseSuppressionLevel getNoiseSuppressionLevel() {
		return noiseSuppressionLevel.get();
	}

	/**
	 * Set the strength of the noise suppression filter.
	 *
	 * @param level The {@code NoiseSuppressionLevel} to set.
	 */
	public void setNoiseSuppressionLevel(NoiseSuppressionLevel level) {
		noiseSuppressionLevel.set(level);
	}

	/**
	 * @return The noise suppression level property.
	 */
	public ObjectProperty<NoiseSuppressionLevel> noiseSuppressionLevelProperty() {
		return noiseSuppressionLevel;
	}
}

View file

@ -0,0 +1,57 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio;
/**
* Audio statistics produced by an audio processing module.
*
* @author Alex Andres
*/
public class AudioProcessingStats {
// Plain mutable data holder: fields are written by the producing audio
// processing module and read by consumers.
/**
* The root-mean-square (RMS) level in dBFS (decibels from digital
* full-scale) of the last capture frame, after processing. It is
* constrained to [-127, 0].
* <p>
* The computation follows: https://tools.ietf.org/html/rfc6465 with the
* intent that it can provide the RTP audio level indication.
* <p>
* Only reported if level estimation is enabled via {@code
* AudioProcessingSettings}.
*/
public int outputRmsDbfs;
/**
* True if voice is detected in the last capture frame, after processing.
* <p>
* It is conservative in flagging audio as speech, with low likelihood of
* incorrectly flagging a frame as voice. Only reported if voice detection
* is enabled via {@code AudioProcessingSettings}.
*/
public boolean voiceDetected;
/**
* The instantaneous delay estimate produced in the AEC. The unit is in
* milliseconds and the value is the instantaneous value at the time of the
* call to getStatistics().
* <p>
* NOTE(review): presumably only meaningful while echo cancellation (AEC)
* is enabled — confirm with the producing module.
*/
public int delayMs;
}

View file

@ -0,0 +1,87 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio;
import org.lecturestudio.core.Executable;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.audio.sink.AudioSink;
/**
* An AudioRecorder manages the audio resources to record audio from an audio
* input device, e.g. a microphone.
*
* @author Alex Andres
*/
public interface AudioRecorder extends Executable {
/**
* Sets the device name of the audio recording device which will capture
* audio for this recorder.
*
* @param deviceName The audio capture device name.
*/
void setAudioDeviceName(String deviceName);
/**
* Sets the {@code AudioSink} that will receive the captured audio samples.
*
* @param sink The audio sink to set.
*/
void setAudioSink(AudioSink sink);
/**
* Sets the recording audio volume. The value must be in the range of
* [0,1].
*
* @param volume The recording audio volume.
*/
void setAudioVolume(double volume);
/**
* Get audio processing statistics. This method will only return valid
* statistics if {@link #setAudioProcessingSettings} has been called prior
* to recording.
*
* @return The audio processing statistics.
*/
AudioProcessingStats getAudioProcessingStats();
/**
* Sets which software audio processing filters are to be applied to
* recorded audio samples.
*
* @param settings The {@code AudioProcessingSettings} to be applied.
*/
void setAudioProcessingSettings(AudioProcessingSettings settings);
/**
* Adds an {@code ExecutableStateListener} to this recorder.
*
* @param listener The listener to add.
*/
void addStateListener(ExecutableStateListener listener);
/**
* Removes an {@code ExecutableStateListener} from this recorder.
*
* @param listener The listener to remove.
*/
void removeStateListener(ExecutableStateListener listener);
}

View file

@ -0,0 +1,82 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio;
import org.lecturestudio.core.audio.device.AudioDevice;
/**
* Audio system provider interface. An {@code AudioSystemProvider} gives
* access to all connected audio devices, such as microphones and
* speakers, and creates the players and recorders that use them.
*
* @author Alex Andres
*/
public interface AudioSystemProvider {
/**
* Get the system's default audio recording device.
*
* @return The default audio recording device.
*/
AudioDevice getDefaultRecordingDevice();
/**
* Get the system's default audio playback device.
*
* @return The default audio playback device.
*/
AudioDevice getDefaultPlaybackDevice();
/**
* Get all available audio recording devices.
*
* @return An array of all audio recording devices.
*/
AudioDevice[] getRecordingDevices();
/**
* Get all available audio playback devices.
*
* @return An array of all audio playback devices.
*/
AudioDevice[] getPlaybackDevices();
/**
* Creates an audio player based on this provider's internal
* implementation.
*
* @return A new audio player.
*/
AudioPlayer createAudioPlayer();
/**
* Creates an audio recorder based on this provider's internal
* implementation.
*
* @return A new audio recorder.
*/
AudioRecorder createAudioRecorder();
/**
* Get the implementing service provider's name.
*
* @return the name of the service provider.
*/
String getProviderName();
}

View file

@ -18,15 +18,10 @@
package org.lecturestudio.core.audio;
import static java.util.Objects.isNull;
import java.util.ArrayList;
import java.util.List;
import java.util.Arrays;
import org.lecturestudio.core.audio.codec.AudioCodecLoader;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.system.AudioSystemLoader;
import org.lecturestudio.core.audio.system.AudioSystemProvider;
import org.lecturestudio.core.audio.AudioFormat.Encoding;
/**
* Audio-related utility methods.
@ -35,165 +30,31 @@ import org.lecturestudio.core.audio.system.AudioSystemProvider;
*/
public class AudioUtils {
/** the singleton instance of {@link AudioSystemLoader} */
private static final AudioSystemLoader LOADER = AudioSystemLoader.getInstance();
/** An array of all available sample rates to support. */
public static final int[] SUPPORTED_SAMPLE_RATES = new int[] {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000
};
/**
* Get default audio capture device of the {@link AudioSystemProvider} with the specified name.
* Returns all supported {@code AudioFormat}s which can be used for audio
* playback or recording.
*
* @param providerName The name of the {@link AudioSystemProvider}.
* @return The default audio capture device of the {@link AudioSystemProvider}
* or null if the {@link AudioSystemProvider} could not be found.
* @return A list of all supported {@code AudioFormat}s.
*/
public static AudioInputDevice getDefaultAudioCaptureDevice(String providerName) {
AudioSystemProvider provider = LOADER.getProvider(providerName);
public static List<AudioFormat> getAudioFormats() {
List<AudioFormat> formats = new ArrayList<>();
return isNull(provider) ? null : provider.getDefaultInputDevice();
}
/**
* Get default audio playback device of the {@link AudioSystemProvider} with the specified name.
*
* @param providerName The name of the {@link AudioSystemProvider}.
* @return The default audio playback device of the {@link AudioSystemProvider}
* or null if the {@link AudioSystemProvider} could not be found.
*/
public static AudioOutputDevice getDefaultAudioPlaybackDevice(String providerName) {
AudioSystemProvider provider = LOADER.getProvider(providerName);
return isNull(provider) ? null : provider.getDefaultOutputDevice();
}
/**
* Get all available audio capture devices of the {@link AudioSystemProvider} with the specified name.
*
* @param providerName The name of the {@link AudioSystemProvider}.
* @return All available audio capture devices of the {@link AudioSystemProvider}.
*/
public static AudioInputDevice[] getAudioCaptureDevices(String providerName) {
AudioSystemProvider provider = LOADER.getProvider(providerName);
return isNull(provider) ? new AudioInputDevice[0] : provider.getInputDevices();
}
/**
* Get all available audio playback devices of the {@link AudioSystemProvider} with the specified name.
*
* @param providerName The name of the {@link AudioSystemProvider}.
* @return All available audio playback devices of the {@link AudioSystemProvider}.
*/
public static AudioOutputDevice[] getAudioPlaybackDevices(String providerName) {
AudioSystemProvider provider = LOADER.getProvider(providerName);
return isNull(provider) ? new AudioOutputDevice[0] : provider.getOutputDevices();
}
/**
* Checks if an available audio capture device of the {@link
* AudioSystemProvider} with the {@code providerName} has the same name as
* the specified {@code deviceName}.
*
* @param providerName The name of the {@link AudioSystemProvider}.
* @param deviceName The name of the device.
*
* @return {@code true} if an available audio capture device has the same
* name as the specified {@code deviceName}, otherwise {@code false}.
*/
public static boolean hasCaptureDevice(String providerName, String deviceName) {
if (isNull(deviceName)) {
return false;
for (int sampleRate : SUPPORTED_SAMPLE_RATES) {
formats.add(new AudioFormat(Encoding.S16LE, sampleRate, 1));
}
return Arrays.stream(getAudioCaptureDevices(providerName))
.anyMatch(device -> device.getName().equals(deviceName));
return formats;
}
/**
* Checks if an available audio playback device of the {@link
* AudioSystemProvider} with the {@code providerName} has the same name as
* the specified {@code deviceName}.
*
* @param providerName The name of the {@link AudioSystemProvider}.
* @param deviceName The name of the device.
*
* @return {@code true} if an available audio playback device has the same
* name as the specified {@code deviceName}, otherwise {@code false}.
*/
public static boolean hasPlaybackDevice(String providerName, String deviceName) {
if (isNull(deviceName)) {
return false;
}
return Arrays.stream(getAudioPlaybackDevices(providerName))
.anyMatch(device -> device.getName().equals(deviceName));
}
/**
 * Get an {@link AudioInputDevice} with the specified device name that is registered
 * with the given audio system provider.
 *
 * @param providerName The audio system provider name.
 * @param deviceName   The audio capture device name.
 *
 * @return the retrieved {@link AudioInputDevice}, never {@code null}.
 *
 * @throws NullPointerException If the audio provider or the capture device
 *                              with the given name is not available.
 */
public static AudioInputDevice getAudioInputDevice(String providerName, String deviceName) {
AudioSystemProvider provider = LOADER.getProvider(providerName);
// Fail fast if the provider is not registered with the loader.
if (isNull(provider)) {
throw new NullPointerException("Audio provider is not available: " + providerName);
}
AudioInputDevice inputDevice = provider.getInputDevice(deviceName);
if (isNull(inputDevice)) {
throw new NullPointerException("Audio device is not available: " + deviceName);
}
return inputDevice;
}
/**
 * Get an {@link AudioOutputDevice} with the specified device name that is
 * registered with the given audio system provider. If the requested
 * provider is not available, the "Java Sound" provider is used as a
 * fallback; if the named device is not found, the first available playback
 * device is returned instead.
 *
 * @param providerName The audio system provider name.
 * @param deviceName   The audio playback device name.
 *
 * @return the retrieved {@link AudioOutputDevice}, a fallback device, or
 *         {@code null} if no playback device is available at all.
 *
 * @throws NullPointerException If neither the requested nor the fallback
 *                              audio provider is available.
 */
public static AudioOutputDevice getAudioOutputDevice(String providerName, String deviceName) {
	AudioSystemProvider provider = LOADER.getProvider(providerName);

	if (isNull(provider)) {
		provider = LOADER.getProvider("Java Sound");
	}
	if (isNull(provider)) {
		// Fixed: the fallback provider may be missing as well; the old code
		// dereferenced it unconditionally and threw an undocumented NPE.
		throw new NullPointerException("Audio provider is not available: " + providerName);
	}

	AudioOutputDevice outputDevice = provider.getOutputDevice(deviceName);

	if (isNull(outputDevice)) {
		// Get next best device.
		for (AudioOutputDevice device : provider.getOutputDevices()) {
			if (device != null) {
				return device;
			}
		}
	}

	return outputDevice;
}
/**
 * Retrieve the names of all audio codecs supported by the system.
 *
 * @return an array of names of supported audio codecs.
 */
public static String[] getSupportedAudioCodecs() {
	AudioCodecLoader codecLoader = AudioCodecLoader.getInstance();
	return codecLoader.getProviderNames();
}
/**
* Compute the number of bytes per second that the specified audio format will require.
* Computes the number of bytes per second that the specified audio format
* will require.
*
* @param audioFormat The audio format.
*
@ -205,7 +66,8 @@ public class AudioUtils {
}
/**
* Pack two sequential bytes into a {@code short} value according to the specified endianness.
* Pack two sequential bytes into a {@code short} value according to the
* specified endianness.
*
* @param bytes The bytes to pack, must of size 2.
* @param bigEndian True to pack with big-endian order, false to pack with
@ -238,15 +100,17 @@ public class AudioUtils {
}
/**
* Convert the specified integer value to a normalized float value in the range of [0,1].
* Convert the specified integer value to a normalized float value in the
* range of [0,1].
*
* @param value The integer value to convert.
* @param value The integer value to convert.
* @param frameSize The sample size in bytes.
* @param signed True to respect the sign bit, false otherwise.
* @param signed True to respect the sign bit, false otherwise.
*
* @return a normalized float value.
*/
public static float getNormalizedSampleValue(int value, int frameSize, boolean signed) {
public static float getNormalizedSampleValue(int value, int frameSize,
boolean signed) {
float relValue;
int maxValue;

View file

@ -1,62 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio;
import org.lecturestudio.core.Executable;
import org.lecturestudio.core.ExecutableStateListener;
/**
 * Common interface to provide a consistent mechanism for media players.
 * Extends {@link Executable}, so the player life-cycle (init/start/stop/
 * destroy) is controlled through the inherited executable methods.
 *
 * @author Alex Andres
 */
public interface Player extends Executable {
/**
* Set the audio volume for playback. The volume value must be in the range
* of [0,1].
*
* @param volume The new volume value, in the range of [0,1].
*/
void setVolume(float volume);
/**
* Jump to the specified time position in the audio playback stream.
*
* @param timeMs The absolute time in milliseconds to jump to.
*
* @throws Exception If the playback stream failed to read the start of the
*                   specified position.
*/
void seek(int timeMs) throws Exception;
/**
* Set the listener which is notified about the playback progress.
*
* @param listener The listener to set.
*/
void setProgressListener(AudioPlaybackProgressListener listener);
/**
* Set the listener which is notified about state changes of this player.
*
* @param listener The listener to set.
*/
void setStateListener(ExecutableStateListener listener);
}

View file

@ -205,6 +205,11 @@ public class RingBuffer implements AudioSink, AudioSource {
return 0;
}
// Seek requests are ignored by the ring buffer; 0 is always returned.
@Override
public int seekMs(int timeMs) {
return 0;
}
@Override
public long getInputSize() {
return 0;

View file

@ -1,64 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.codec;
import com.github.javaffmpeg.CodecID;
import org.lecturestudio.core.audio.codec.ffmpeg.FFmpegAudioDecoder;
import org.lecturestudio.core.audio.codec.ffmpeg.FFmpegAudioEncoder;
import org.lecturestudio.core.audio.codec.ffmpeg.FFmpegRtpDepacketizer;
import org.lecturestudio.core.audio.codec.ffmpeg.FFmpegRtpPacketizer;
import org.lecturestudio.core.net.rtp.RtpDepacketizer;
import org.lecturestudio.core.net.rtp.RtpPacketizer;
/**
 * OPUS audio codec provider implementation. Supplies FFmpeg-backed OPUS
 * encoder/decoder instances together with the matching RTP (de)packetizers.
 *
 * @link http://opus-codec.org
 *
 * @author Alex Andres
 */
public class OpusCodecProvider implements AudioCodecProvider {
// Creates a new FFmpeg-based OPUS encoder on each call.
@Override
public AudioEncoder getAudioEncoder() {
return new FFmpegAudioEncoder(CodecID.OPUS);
}
// Creates a new FFmpeg-based OPUS decoder on each call.
@Override
public AudioDecoder getAudioDecoder() {
return new FFmpegAudioDecoder(CodecID.OPUS);
}
@Override
public RtpPacketizer getRtpPacketizer() {
return new FFmpegRtpPacketizer();
}
@Override
public RtpDepacketizer getRtpDepacketizer() {
return new FFmpegRtpDepacketizer();
}
// The name under which this provider is registered.
@Override
public String getProviderName() {
return "OPUS";
}
}

View file

@ -1,186 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.codec.ffmpeg;
import com.github.javaffmpeg.Audio;
import com.github.javaffmpeg.AudioFrame;
import com.github.javaffmpeg.AudioResampler;
import com.github.javaffmpeg.Codec;
import com.github.javaffmpeg.CodecID;
import com.github.javaffmpeg.Decoder;
import com.github.javaffmpeg.JavaFFmpegException;
import com.github.javaffmpeg.MediaPacket;
import com.github.javaffmpeg.MediaType;
import com.github.javaffmpeg.SampleFormat;
import java.nio.ByteBuffer;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.codec.AudioDecoder;
import org.bytedeco.javacpp.BytePointer;
/**
 * FFmpeg audio decoder implementation. Decodes compressed audio packets and
 * emits interleaved 16-bit PCM samples via {@code fireAudioDecoded}.
 *
 * @link https://ffmpeg.org
 *
 * @author Alex Andres
 */
public class FFmpegAudioDecoder extends AudioDecoder {

	/** The sample size in bytes. */
	private static final int SAMPLE_SIZE = 2;

	/** The internal FFmpeg decoder. */
	private Decoder decoder;

	/**
	 * The internal audio resampler. Remains {@code null} when the decoder
	 * output format already matches the requested format (see
	 * {@link #startInternal()}).
	 */
	private AudioResampler resampler;

	/**
	 * Create a FFmpegAudioDecoder with the specified codec ID. Based on the ID
	 * the corresponding FFmpeg decoder will be created.
	 *
	 * @param codecID The ID of the codec to use.
	 */
	public FFmpegAudioDecoder(CodecID codecID) {
		try {
			decoder = new Decoder(Codec.getDecoderById(codecID));
			decoder.setMediaType(MediaType.AUDIO);
		}
		catch (JavaFFmpegException e) {
			// NOTE(review): 'decoder' stays null after this failure and
			// process()/startInternal() will then throw an NPE — consider
			// propagating the exception instead of only printing it.
			e.printStackTrace();
		}
	}

	@Override
	public void process(byte[] input, int length, long timestamp) throws Exception {
		ByteBuffer buffer = ByteBuffer.wrap(input, 0, length);
		MediaPacket packet = new MediaPacket(buffer);

		AudioFrame frame = decoder.decodeAudio(packet);

		if (frame != null) {
			if (resampler != null) {
				// Decoder output differs from the requested format.
				AudioFrame[] frames = resampler.resample(frame);

				for (AudioFrame resFrame : frames) {
					processAudioFrame(resFrame, timestamp);
					resFrame.clear();
				}
			}
			else {
				processAudioFrame(frame, timestamp);
			}

			frame.clear();
		}

		packet.clear();
	}

	@Override
	protected void initInternal() throws ExecutableException {
	}

	@Override
	protected void startInternal() throws ExecutableException {
		AudioFormat inputFormat = getFormat();

		int sampleRate = inputFormat.getSampleRate();
		int channels = inputFormat.getChannels();

		try {
			decoder.setSampleRate(sampleRate);
			decoder.setSampleFormat(SampleFormat.S16);
			decoder.setAudioChannels(channels);
			decoder.open(null);
		}
		catch (Exception e) {
			throw new ExecutableException(e);
		}

		// Requested format.
		com.github.javaffmpeg.AudioFormat reqFormat = new com.github.javaffmpeg.AudioFormat();
		reqFormat.setChannelLayout(Audio.getChannelLayout(channels));
		reqFormat.setChannels(channels);
		reqFormat.setSampleFormat(SampleFormat.S16);
		reqFormat.setSampleRate(sampleRate);

		// Decoder format.
		com.github.javaffmpeg.AudioFormat decFormat = new com.github.javaffmpeg.AudioFormat();
		decFormat.setChannelLayout(decoder.getChannelLayout());
		decFormat.setChannels(decoder.getAudioChannels());
		decFormat.setSampleFormat(decoder.getSampleFormat());
		decFormat.setSampleRate(decoder.getSampleRate());

		// In some cases the decoder chooses its own parameters, e.g. OPUS.
		if (!reqFormat.equals(decFormat)) {
			int samples = sampleRate / 50; // 20 ms audio

			resampler = new AudioResampler();

			try {
				resampler.open(decFormat, reqFormat, samples);
			}
			catch (Exception e) {
				throw new ExecutableException(e);
			}
		}
	}

	@Override
	protected void stopInternal() throws ExecutableException {
		decoder.close();

		// Fixed: the resampler is only created when the decoder output
		// format differs from the requested one; the old code called
		// resampler.close() unconditionally and threw an NPE otherwise.
		if (resampler != null) {
			resampler.close();
		}
	}

	@Override
	protected void destroyInternal() throws ExecutableException {
	}

	/**
	 * Interleaves the planes of a decoded frame into a single 16-bit PCM
	 * byte array and forwards the result via {@code fireAudioDecoded}.
	 *
	 * @param frame     The decoded audio frame.
	 * @param timestamp The timestamp of the decoded samples.
	 */
	private void processAudioFrame(AudioFrame frame, long timestamp) {
		int planes = frame.getPlaneCount();
		int size = planes * frame.getPlane(0).limit();

		byte[] samples = new byte[size];

		// Interleave planes.
		// NOTE(review): 'offset = i * planes' equals 'i * SAMPLE_SIZE' only
		// for planes == 2 — confirm intent for other plane counts.
		for (int i = 0; i < planes; i++) {
			BytePointer plane = frame.getPlane(i);
			ByteBuffer pBuffer = plane.asByteBuffer();
			int pLength = plane.limit();
			int offset = i * planes;

			for (int j = 0, k = offset; j < pLength; j += SAMPLE_SIZE) {
				samples[k++] = (byte) (pBuffer.get() & 0xFF);
				samples[k++] = (byte) (pBuffer.get() & 0xFF);

				k += SAMPLE_SIZE * (planes - 1);
			}
		}

		fireAudioDecoded(samples, samples.length, timestamp);
	}
}

View file

@ -1,188 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.codec.ffmpeg;
import com.github.javaffmpeg.Audio;
import com.github.javaffmpeg.AudioFrame;
import com.github.javaffmpeg.Codec;
import com.github.javaffmpeg.CodecID;
import com.github.javaffmpeg.Encoder;
import com.github.javaffmpeg.JavaFFmpegException;
import com.github.javaffmpeg.MediaPacket;
import com.github.javaffmpeg.MediaType;
import com.github.javaffmpeg.SampleFormat;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.codec.AudioEncoder;
/**
 * FFmpeg audio encoder implementation. Encodes raw 16-bit PCM samples and
 * forwards the encoded packets via {@code fireAudioEncoded}.
 *
 * @link https://ffmpeg.org
 *
 * @author Alex Andres
 */
public class FFmpegAudioEncoder extends AudioEncoder {

	/** An array of supported audio formats. */
	private AudioFormat[] supportedFormats;

	/** The internal FFmpeg encoder. */
	private Encoder encoder;

	/** The internal encoding format. */
	private com.github.javaffmpeg.AudioFormat format;

	/** The sample size in bytes: two bytes per sample and channel. */
	private int sampleSize;

	/**
	 * Create a {@link FFmpegAudioEncoder} with the specified codec ID. Based on the ID
	 * the corresponding FFmpeg encoder will be created.
	 *
	 * @param codecId The ID of the codec to use.
	 */
	public FFmpegAudioEncoder(CodecID codecId) {
		try {
			encoder = new Encoder(Codec.getEncoderById(codecId));
			encoder.setMediaType(MediaType.AUDIO);

			setBitrate(128000);

			getInputFormats();
		}
		catch (JavaFFmpegException e) {
			// NOTE(review): 'encoder' stays null after this failure and
			// process()/startInternal() will then throw an NPE — consider
			// propagating the exception instead of only printing it.
			e.printStackTrace();
		}
	}

	@Override
	public AudioFormat[] getSupportedFormats() {
		return supportedFormats;
	}

	@Override
	public void process(byte[] input, int length, long timestamp) throws Exception {
		// Fixed: derive the frame size from the 'length' parameter and copy
		// only 'length' bytes into the frame. The old code used
		// 'input.length' and put the whole array, encoding stale buffer
		// content whenever the caller passed a partially filled buffer.
		int samples = length / sampleSize;

		AudioFrame frame = new AudioFrame(format, samples);

		// Mono stream.
		if (format.getChannels() == 1) {
			frame.getPlane(0).asByteBuffer().put(input, 0, length);
		}
		else {
			throw new Exception("Frame input only for mono audio implemented.");
		}

		MediaPacket[] packets = encoder.encodeAudio(frame);

		if (packets != null) {
			for (MediaPacket packet : packets) {
				if (packet == null) {
					continue;
				}

				byte[] outputData = new byte[packet.getData().limit()];
				packet.getData().get(outputData);

				fireAudioEncoded(outputData, outputData.length, timestamp);

				packet.clear();
			}
		}

		frame.clear();
	}

	@Override
	protected void initInternal() throws ExecutableException {
	}

	@Override
	protected void startInternal() throws ExecutableException {
		AudioFormat inputFormat = getFormat();

		int sampleRate = inputFormat.getSampleRate();
		int channels = inputFormat.getChannels();

		try {
			encoder.setMediaType(MediaType.AUDIO);
			encoder.setBitrate(getBitrate());
			encoder.setSampleRate(sampleRate);
			encoder.setSampleFormat(SampleFormat.S16);
			encoder.setAudioChannels(channels);
			encoder.setQuality(0); // Quality-based encoding not supported.
			encoder.open(null);
		}
		catch (JavaFFmpegException e) {
			throw new ExecutableException(e);
		}

		format = new com.github.javaffmpeg.AudioFormat();
		format.setChannelLayout(Audio.getChannelLayout(channels));
		format.setChannels(channels);
		format.setSampleFormat(SampleFormat.S16);
		format.setSampleRate(sampleRate);

		// 16-bit (S16) samples: two bytes per sample and channel.
		sampleSize = 2 * format.getChannels();
	}

	@Override
	protected void stopInternal() throws ExecutableException {
		encoder.close();
	}

	@Override
	protected void destroyInternal() throws ExecutableException {
	}

	/**
	 * Assigns all supported audio formats to {@link #supportedFormats}.
	 * Queries the codec for its supported sample rates; mono S16LE is
	 * assumed for each rate.
	 */
	private void getInputFormats() {
		if (encoder == null) {
			return;
		}

		Integer[] sampleRates = encoder.getCodec().getSupportedSampleRates();

		if (sampleRates == null) {
			return;
		}

		supportedFormats = new AudioFormat[sampleRates.length];

		AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
		int channels = 1;

		for (int i = 0; i < sampleRates.length; i++) {
			int sampleRate = sampleRates[i];
			supportedFormats[i] = new AudioFormat(encoding, sampleRate, channels);
		}
	}
}

View file

@ -1,96 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.codec.ffmpeg;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
import org.lecturestudio.core.net.rtp.RtpPacket;
import org.lecturestudio.core.net.rtp.RtpPacketizer;
/**
 * FFmpeg RTP packetizer implementation. Wraps encoded audio payloads into
 * RTP packets with an increasing sequence number.
 *
 * @author Alex Andres
 */
public class FFmpegRtpPacketizer implements RtpPacketizer {

	/** The RTP packet which should be sent. */
	private final RtpPacket rtpPacket;

	/**
	 * Create a new FFmpegRtpPacketizer instance and set the default RTP
	 * packet header values. Sequence number, timestamp and SSRC are seeded
	 * with random values.
	 */
	public FFmpegRtpPacketizer() {
		Random rand = new Random();

		rtpPacket = new RtpPacket();
		rtpPacket.setVersion(2);
		rtpPacket.setPadding(0);
		rtpPacket.setExtension(0);
		rtpPacket.setMarker(0);
		rtpPacket.setPayloadType(97); // dynamic
		rtpPacket.setSeqNumber(rand.nextInt());
		rtpPacket.setTimestamp(rand.nextInt());
		rtpPacket.setSsrc(rand.nextInt());
	}

	@Override
	public List<RtpPacket> processPacket(byte[] payload, int payloadLength, long timestamp) {
		// Advance the RTP header for this packet.
		rtpPacket.setSeqNumber(rtpPacket.getSeqNumber() + 1);
		rtpPacket.setTimestamp(timestamp);

		// Copy the payload into the packet.
		byte[] packetPayload = new byte[payloadLength];
		System.arraycopy(payload, 0, packetPayload, 0, payloadLength);

		rtpPacket.setPayload(packetPayload);

		List<RtpPacket> packets = new ArrayList<>();
		packets.add(rtpPacket.clone());

		return packets;
	}
}

View file

@ -18,260 +18,33 @@
package org.lecturestudio.core.audio.device;
import java.util.Arrays;
import java.util.List;
import org.lecturestudio.core.audio.AudioFormat;
/**
* Common class to provide a consistent mechanism for audio devices.
* Common class to provide a consistent interface to audio devices.
*
* @author Alex Andres
*/
public abstract class AudioDevice {
public class AudioDevice {
/** An array of all available sample rates to support. */
public static final int[] SUPPORTED_SAMPLE_RATES = new int[] {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
};
/** The name of this audio device. */
private final String name;
/** The audio format to be used by the audio device. */
private AudioFormat audioFormat;
/** The audio volume for playback or recording. */
private double volume = 1;
/** The measured audio signal power level of the last processed audio chunk. */
private double signalPowerLevel = 0;
/** Indicated whether the audio is muted or not. */
private boolean mute = false;
/**
* Creates a new {@code AudioDevice} with the specified name.
*
* @param name The name of the device.
*/
public AudioDevice(String name) {
this.name = name;
}
/**
* Get the name of the audio device assigned by the operating system.
*
* @return the name of the audio device.
*/
abstract public String getName();
/**
* Open the audio device and prepare the device to capture or play audio.
*
* @throws Exception If the audio device failed to open.
*/
abstract public void open() throws Exception;
/**
* Close the audio device and release all previously assigned resources.
*
* @throws Exception If the audio device could not be closed.
*/
abstract public void close() throws Exception;
/**
* Start capturing or playing audio by the device.
*
* @throws Exception If the device failed to start.
*/
abstract public void start() throws Exception;
/**
* Stop capturing or playing audio by the device.
*
* @throws Exception If the device failed to stop.
*/
abstract public void stop() throws Exception;
/**
* Check if the audio device is opened.
*
* @return {@code true} if the device is opened, otherwise {@code false}.
*/
abstract public boolean isOpen();
/**
* Get a list of all supported audio formats by this device.
*
* @return a list of all supported audio formats.
*/
abstract public List<AudioFormat> getSupportedFormats();
/**
* Get the current audio buffer size. The buffer size reflects the latency
* of the audio signal.
*
* @return the current audio buffer size.
*/
abstract public int getBufferSize();
/**
 * Check if the specified audio format is supported by the device.
 *
 * @param format The audio format to check.
 *
 * @return {@code true} if the audio format is supported, otherwise {@code false}.
 */
public boolean supportsAudioFormat(AudioFormat format) {
	List<AudioFormat> formats = getSupportedFormats();
	return formats.contains(format);
}
/**
* Set the audio format to be used by the audio device for playback or
* recording.
*
* @param format The audio format to be used.
*/
public void setAudioFormat(AudioFormat format) {
this.audioFormat = format;
}
/**
* Get the audio format of the device.
*
* @return the audio format.
*/
public AudioFormat getAudioFormat() {
return audioFormat;
}
/**
* Get the volume of the device with which it plays or records audio.
*
* @return the volume of audio.
*/
public double getVolume() {
return volume;
}
/**
 * Set the audio volume for playback or recording. The volume value must be
 * in the range of [0,1]; values outside this range are silently ignored
 * and the current volume is kept.
 *
 * @param volume The new volume value, in the range of [0,1].
 */
public void setVolume(double volume) {
	// Fixed idiom: braced the guard clause (the body was an unbraced 'if').
	if (volume < 0 || volume > 1) {
		return;
	}
	this.volume = volume;
}
/**
* Check whether audio signal is muted or not.
*
* @return {@code true} if the audio signal is muted, otherwise {@code false}.
*/
public boolean isMuted() {
return mute;
}
/**
* Set whether to mute the audio signal of this device.
*
* @param mute True to mute the audio signal, false otherwise.
*/
public void setMute(boolean mute) {
this.mute = mute;
}
/**
 * Get the signal power level of the last processed audio data chunk. A
 * muted device always reports a level of zero.
 *
 * @return the current signal power level of audio.
 */
public double getSignalPowerLevel() {
	return isMuted() ? 0 : signalPowerLevel;
}
/**
* Set the signal power level of the last processed audio data chunk.
*
* @param level the current signal power level of audio.
*/
protected void setSignalPowerLevel(float level) {
this.signalPowerLevel = level;
}
/**
 * AGC algorithm to adjust the speech level of an audio signal to a
 * specified value in dBFS.
 *
 * @param input       Audio input samples with values in range [-1, 1].
 * @param output      Audio output samples with values in range [-1, 1].
 * @param gainLevel   Output power level in dBFS.
 * @param sampleCount Number of samples.
 */
protected void AGC(float[] input, float[] output, float gainLevel, int sampleCount) {
	// Convert power gain level into normal power.
	float power = (float) Math.pow(10, (gainLevel / 10));
	// Calculate the energy of the input signal.
	float energy = 0;
	for (int i = 0; i < sampleCount; i++) {
		energy += input[i] * input[i];
	}
	// Fixed: a silent input (energy == 0) previously produced an infinite
	// amplification factor and NaN output samples; emit silence instead.
	if (energy == 0) {
		Arrays.fill(output, 0, sampleCount, 0);
		return;
	}
	// Calculate the amplification factor.
	float amp = (float) Math.sqrt((power * sampleCount) / energy);
	// Scale the input signal to achieve the required output power.
	for (int i = 0; i < sampleCount; i++) {
		output[i] = input[i] * amp;
	}
}
/**
 * Scales the 16-bit little-endian samples in {@code buffer} by the current
 * volume and updates {@code signalPowerLevel} from the scaled samples.
 */
void applyGain(byte[] buffer, int offset, int length) {
	if (volume == 1) {
		// Unity gain: nothing to scale, just measure the signal power.
		// NOTE(review): power is measured over the whole buffer here since
		// getSignalPowerLevel(byte[]) takes no range — confirm callers
		// always pass offset 0 and a fully used buffer.
		signalPowerLevel = getSignalPowerLevel(buffer);
		return;
	}
	if (volume == 0) {
		signalPowerLevel = 0;
		// Fixed: Arrays.fill takes (fromIndex, toIndex); the old call passed
		// (offset, length - offset), which zeroed the wrong range whenever
		// offset > 0.
		Arrays.fill(buffer, offset, offset + length, (byte) 0);
		return;
	}
	float energy = 0;
	// Fixed: scale only the requested [offset, offset + length) range; the
	// old loop always processed the whole buffer and ignored offset/length.
	int end = offset + length;
	for (int i = offset; i < end; i += 2) {
		int value = (short) ((buffer[i + 1] << 8) | (buffer[i] & 0xFF));
		value = (int) (volume * value);
		if (value > Short.MAX_VALUE) {
			value = Short.MAX_VALUE;
		}
		else if (value < Short.MIN_VALUE) {
			value = Short.MIN_VALUE;
		}
		float norm = (float) value / Short.MAX_VALUE;
		energy += norm * norm;
		buffer[i] = (byte) value;
		buffer[i + 1] = (byte) (value >> 8);
	}
	// Normalize the average power (dBFS, 96 dB range) to [0, 1].
	signalPowerLevel = (float) (10 * Math.log10(energy / (length / 2f)) + 96) / 96;
}
private float getSignalPowerLevel(byte[] buffer) {
float energy = 0;
for (int i = 0; i < buffer.length; i += 2) {
int value = (short) ((buffer[i + 1] << 8) | (buffer[i] & 0xFF));
float norm = (float) value / Short.MAX_VALUE;
energy += norm * norm;
}
return (float) (10 * Math.log10(energy / (buffer.length / 2f)) + 96) / 96;
public String getName() {
return name;
}
}

View file

@ -1,64 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.device;
/**
 * Common base class for audio capture devices. Adds gain handling on top of
 * the device specific read implementation.
 *
 * @author Alex Andres
 */
public abstract class AudioInputDevice extends AudioDevice {

	/**
	 * Device specific method to be implemented to read captured audio data from
	 * the device into the specified audio buffer.
	 *
	 * @param buffer The audio buffer into which to write the captured audio.
	 * @param offset The offset at which to start to write the audio buffer.
	 * @param length The length of the audio buffer.
	 *
	 * @return the number of bytes written to the audio buffer.
	 *
	 * @throws Exception If captured audio could not be written to the buffer.
	 */
	abstract protected int readInput(byte[] buffer, int offset, int length) throws Exception;

	/**
	 * Read captured audio data from the device into the specified audio
	 * buffer. Unless the device is muted, the configured gain is applied to
	 * the captured samples in place.
	 *
	 * @param buffer The audio buffer into which to write the captured audio.
	 * @param offset The offset at which to start to write the audio buffer.
	 * @param length The length of the audio buffer.
	 *
	 * @return the number of bytes written to the audio buffer.
	 *
	 * @throws Exception If captured audio could not be written to the buffer.
	 */
	public synchronized int read(byte[] buffer, int offset, int length) throws Exception {
		final int bytesRead = readInput(buffer, offset, length);

		if (isMuted()) {
			return bytesRead;
		}

		applyGain(buffer, offset, length);

		return bytesRead;
	}
}

View file

@ -1,64 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.device;
/**
 * Common base class for audio playback devices. Adds gain handling on top of
 * the device specific write implementation.
 *
 * @author Alex Andres
 */
public abstract class AudioOutputDevice extends AudioDevice {

	/**
	 * Device specific method to be implemented to write audio data for playback
	 * to the device from the specified audio buffer.
	 *
	 * @param buffer The audio buffer containing the samples for playback.
	 * @param offset The offset from which to start to read the audio buffer.
	 * @param length The length of the audio buffer.
	 *
	 * @return the number of bytes written to the playback buffer of the device.
	 *
	 * @throws Exception If the playback device did not accept the audio buffer.
	 */
	abstract public int writeOutput(byte[] buffer, int offset, int length) throws Exception;

	/**
	 * Write audio samples for playback. When the device is muted, nothing is
	 * written and the full chunk length is reported as consumed.
	 *
	 * @param buffer The audio buffer containing the samples for playback.
	 * @param offset The offset from which to start to read the audio buffer.
	 * @param length The length of the audio buffer.
	 *
	 * @return the number of bytes written to the playback buffer of the device.
	 *
	 * @throws Exception If the playback device did not accept the audio buffer.
	 */
	public int write(byte[] buffer, int offset, int length) throws Exception {
		if (isMuted()) {
			return length;
		}

		applyGain(buffer, offset, length);

		return writeOutput(buffer, offset, length);
	}
}

View file

@ -1,128 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.device;
import com.github.javaffmpeg.Audio;
import com.github.javaffmpeg.AudioFrame;
import java.util.ArrayList;
import java.util.List;
import org.lecturestudio.core.audio.AudioFormat;
/**
 * FFmpeg audio capture device implementation. Thin adapter that exposes a
 * native {@code com.github.javaffmpeg.FFmpegAudioInputDevice} through the
 * {@link AudioInputDevice} interface.
 *
 * @author Alex Andres
 */
public class FFmpegAudioInputDevice extends AudioInputDevice {
/** Internal capture device. */
private final com.github.javaffmpeg.FFmpegAudioInputDevice device;
/**
* Create a new {@link FFmpegAudioInputDevice} instance with the specified FFmpeg
* capture device.
*
* @param device The FFmpeg capture device.
*/
public FFmpegAudioInputDevice(com.github.javaffmpeg.FFmpegAudioInputDevice device) {
this.device = device;
}
// Copies one native audio frame into the given buffer.
// NOTE(review): 'offset' is ignored and 'length' is returned regardless of
// how many bytes the frame actually contained — confirm callers always
// request exactly one frame starting at offset 0.
@Override
protected synchronized int readInput(byte[] buffer, int offset, int length) {
if (isOpen()) {
AudioFrame samples = device.getSamples();
// TODO get by sample format
Audio.getAudio16(samples, buffer);
samples.clear();
return length;
}
return 0;
}
@Override
public String getName() {
return device.getName();
}
// Opens the native device with the configured audio format; no-op when the
// device is already open.
@Override
public synchronized void open() throws Exception {
if (isOpen()) {
return;
}
AudioFormat audioFormat = getAudioFormat();
com.github.javaffmpeg.AudioFormat format = new com.github.javaffmpeg.AudioFormat();
format.setChannelLayout(Audio.getChannelLayout(audioFormat.getChannels()));
format.setChannels(audioFormat.getChannels());
format.setSampleFormat(Audio.getSampleFormat(audioFormat.getBytesPerSample(), false, false));
format.setSampleRate(audioFormat.getSampleRate());
// Configure 20 ms of audio per captured frame before opening the device.
device.setBufferMilliseconds(20);
device.open(format);
}
@Override
public synchronized void close() throws Exception {
device.close();
}
// Capturing runs as soon as the device is open.
@Override
public synchronized void start() {
// nothing to do
}
@Override
public synchronized void stop() {
// nothing to do
}
@Override
public synchronized boolean isOpen() {
return device.isOpen();
}
// Advertises mono S16LE at every sample rate in SUPPORTED_SAMPLE_RATES;
// the native device is not queried for its actual capabilities.
@Override
public List<AudioFormat> getSupportedFormats() {
AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
int channels = 1;
List<AudioFormat> formats = new ArrayList<AudioFormat>();
for (int sampleRate : AudioDevice.SUPPORTED_SAMPLE_RATES) {
AudioFormat audioFormat = new AudioFormat(encoding, sampleRate, channels);
formats.add(audioFormat);
}
return formats;
}
@Override
public int getBufferSize() {
return device.getBufferSize();
}
}

View file

@ -1,170 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.device;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.util.ArrayList;
import java.util.List;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.Mixer;
import javax.sound.sampled.TargetDataLine;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.util.OsInfo;
/**
* Java-based audio capture device implementation.
*
* @author Alex Andres
*/
public class JavaSoundInputDevice extends AudioInputDevice {

	/** Minimal audio buffer size to use with Java. */
	private static final int BUFFER_SIZE = 4096;

	/** Internal {@link Mixer.Info} that represents information about an audio mixer. */
	private final Mixer.Info mixerInfo;

	/** Internal capture source; non-null only between open() and close(). */
	private TargetDataLine line;


	/**
	 * Create a new {@link JavaSoundInputDevice instance} with the specified {@link
	 * Mixer.Info} that contains information about an audio mixer.
	 *
	 * @param mixerInfo The audio mixer info.
	 */
	public JavaSoundInputDevice(Mixer.Info mixerInfo) {
		this.mixerInfo = mixerInfo;
	}

	@Override
	public String getName() {
		return mixerInfo.getName();
	}

	/**
	 * Acquires a {@link TargetDataLine} from the configured mixer and opens it
	 * with the currently set audio format.
	 *
	 * @throws Exception If no mixer info is set, the mixer cannot be acquired,
	 *                   or the capture line cannot be opened.
	 */
	@Override
	public void open() throws Exception {
		if (isNull(mixerInfo)) {
			throw new Exception("Invalid audio mixer set.");
		}

		Mixer mixer = AudioSystem.getMixer(mixerInfo);

		if (mixer == null) {
			throw new Exception("Could not acquire specified mixer: " + getName());
		}

		AudioFormat audioFormat = getAudioFormat();

		javax.sound.sampled.AudioFormat format = createAudioFormat(
				audioFormat.getSampleRate(), audioFormat.getChannels());

		DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);

		line = (TargetDataLine) mixer.getLine(info);
		line.open(format, BUFFER_SIZE);
	}

	/**
	 * Stops and flushes the capture line and releases the reference.
	 * NOTE(review): the line is deliberately not closed on macOS —
	 * presumably a workaround for a JavaSound hang/crash on that platform;
	 * confirm before changing.
	 *
	 * @throws Exception If closing the line fails.
	 */
	@Override
	public void close() throws Exception {
		if (nonNull(line)) {
			line.stop();
			line.flush();

			if (!OsInfo.isMac()) {
				line.close();
			}

			line = null;
		}
	}

	@Override
	public void start() {
		if (nonNull(line)) {
			line.start();
		}
	}

	@Override
	public void stop() {
		if (nonNull(line)) {
			line.stop();
			line.flush();
		}
	}

	/**
	 * Reads captured audio into the provided buffer. Blocks until the
	 * requested amount of data is available. NOTE(review): assumes open() has
	 * been called — will throw an NPE if the line is null.
	 */
	@Override
	public int readInput(byte[] buffer, int offset, int length) {
		return line.read(buffer, offset, length);
	}

	/**
	 * Probes the mixer for supported capture formats: signed 16-bit
	 * little-endian, mono, one entry per supported sample rate that the mixer
	 * accepts.
	 *
	 * @return the list of supported audio formats.
	 */
	@Override
	public List<AudioFormat> getSupportedFormats() {
		AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
		int channels = 1;

		List<AudioFormat> formats = new ArrayList<>();

		Mixer mixer = AudioSystem.getMixer(mixerInfo);
		DataLine.Info info;

		for (int sampleRate : AudioDevice.SUPPORTED_SAMPLE_RATES) {
			AudioFormat audioFormat = new AudioFormat(encoding, sampleRate, channels);

			javax.sound.sampled.AudioFormat format = createAudioFormat(sampleRate, channels);

			info = new DataLine.Info(TargetDataLine.class, format);

			if (mixer.isLineSupported(info)) {
				formats.add(audioFormat);
			}
		}

		return formats;
	}

	/**
	 * @return the buffer size of the open capture line, or {@code -1} if the
	 *         device has not been opened.
	 */
	@Override
	public int getBufferSize() {
		if (nonNull(line)) {
			return line.getBufferSize();
		}

		return -1;
	}

	@Override
	public boolean isOpen() {
		return nonNull(line) && line.isOpen();
	}

	/**
	 * Builds a JavaSound audio format: signed PCM, 16 bits per sample,
	 * little-endian, with matching frame and sample rates.
	 *
	 * @param sampleRate The sample rate in Hz.
	 * @param channels   The number of channels.
	 *
	 * @return the corresponding JavaSound audio format.
	 */
	private javax.sound.sampled.AudioFormat createAudioFormat(int sampleRate, int channels) {
		javax.sound.sampled.AudioFormat.Encoding encoding = javax.sound.sampled.AudioFormat.Encoding.PCM_SIGNED;
		int sampleSizeInBits = 16;
		int frameSize = (sampleSizeInBits / 8) * channels;

		return new javax.sound.sampled.AudioFormat(
				encoding, sampleRate, sampleSizeInBits, channels, frameSize,
				sampleRate, false);
	}
}

View file

@ -1,165 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.device;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.util.ArrayList;
import java.util.List;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.Mixer;
import javax.sound.sampled.SourceDataLine;
import org.lecturestudio.core.audio.AudioFormat;
/**
* Java-based audio playback device implementation.
*
* @author Alex Andres
*/
public class JavaSoundOutputDevice extends AudioOutputDevice {

	/** Minimal audio buffer size to use with Java. */
	private static final int BUFFER_SIZE = 4096;

	/** Internal {@link Mixer.Info} that represents information about an audio mixer. */
	private final Mixer.Info mixerInfo;

	/** Internal playback sink; non-null only after a successful open(). */
	private SourceDataLine line;


	/**
	 * Create a new {@link JavaSoundOutputDevice} instance with the specified {@link
	 * Mixer.Info} that contains information about an audio mixer.
	 *
	 * @param mixerInfo The audio mixer info.
	 */
	public JavaSoundOutputDevice(Mixer.Info mixerInfo) {
		this.mixerInfo = mixerInfo;
	}

	@Override
	public String getName() {
		return mixerInfo.getName();
	}

	/**
	 * Acquires a {@link SourceDataLine} from the configured mixer and opens it
	 * with the currently set audio format.
	 *
	 * @throws Exception If no mixer info is set, the mixer cannot be acquired,
	 *                   or the playback line cannot be opened.
	 */
	@Override
	public void open() throws Exception {
		if (isNull(mixerInfo)) {
			throw new Exception("Invalid audio mixer set.");
		}

		Mixer mixer = AudioSystem.getMixer(mixerInfo);

		if (mixer == null) {
			throw new Exception("Could not acquire specified mixer: " + getName());
		}

		AudioFormat audioFormat = getAudioFormat();

		javax.sound.sampled.AudioFormat format = createAudioFormat(
				audioFormat.getSampleRate(), audioFormat.getChannels());

		DataLine.Info info = new DataLine.Info(SourceDataLine.class, format);

		line = (SourceDataLine) mixer.getLine(info);
		line.open(format, BUFFER_SIZE);
	}

	/**
	 * Stops, flushes and closes the playback line. NOTE(review): unlike the
	 * input device, the line reference is not nulled here, so isOpen() relies
	 * solely on {@code line.isOpen()} after closing — confirm intended.
	 *
	 * @throws Exception If closing the line fails.
	 */
	@Override
	public void close() throws Exception {
		if (nonNull(line)) {
			line.stop();
			line.flush();
			line.close();
		}
	}

	@Override
	public void start() {
		if (nonNull(line)) {
			line.start();
		}
	}

	@Override
	public void stop() {
		if (nonNull(line)) {
			line.stop();
		}
	}

	/**
	 * Writes audio data to the playback line. Blocks until the data has been
	 * handed over. NOTE(review): assumes open() has been called — will throw
	 * an NPE if the line is null.
	 */
	@Override
	public int writeOutput(byte[] buffer, int offset, int length) {
		return line.write(buffer, offset, length);
	}

	/**
	 * Probes the mixer for supported playback formats: signed 16-bit
	 * little-endian, mono and stereo, one entry per supported sample rate
	 * that the mixer accepts.
	 *
	 * @return the list of supported audio formats.
	 */
	@Override
	public List<AudioFormat> getSupportedFormats() {
		AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
		int[] supportedChannels = { 1, 2 };

		List<AudioFormat> formats = new ArrayList<>();

		Mixer mixer = AudioSystem.getMixer(mixerInfo);
		DataLine.Info info;

		for (int channels : supportedChannels) {
			for (int sampleRate : AudioDevice.SUPPORTED_SAMPLE_RATES) {
				AudioFormat audioFormat = new AudioFormat(encoding, sampleRate, channels);

				javax.sound.sampled.AudioFormat format = createAudioFormat(sampleRate, channels);

				info = new DataLine.Info(SourceDataLine.class, format);

				if (mixer.isLineSupported(info)) {
					formats.add(audioFormat);
				}
			}
		}

		return formats;
	}

	/**
	 * @return the buffer size of the open playback line, or {@code -1} if the
	 *         device has not been opened.
	 */
	@Override
	public int getBufferSize() {
		if (nonNull(line)) {
			return line.getBufferSize();
		}

		return -1;
	}

	@Override
	public boolean isOpen() {
		return nonNull(line) && line.isOpen();
	}

	/**
	 * Builds a JavaSound audio format: signed PCM, 16 bits per sample,
	 * little-endian, with matching frame and sample rates.
	 *
	 * @param sampleRate The sample rate in Hz.
	 * @param channels   The number of channels.
	 *
	 * @return the corresponding JavaSound audio format.
	 */
	private javax.sound.sampled.AudioFormat createAudioFormat(int sampleRate, int channels) {
		javax.sound.sampled.AudioFormat.Encoding encoding = javax.sound.sampled.AudioFormat.Encoding.PCM_SIGNED;
		int sampleSizeInBits = 16;
		int frameSize = (sampleSizeInBits / 8) * channels;

		return new javax.sound.sampled.AudioFormat(
				encoding, sampleRate, sampleSizeInBits, channels, frameSize,
				sampleRate, false);
	}
}

View file

@ -70,9 +70,17 @@ public interface AudioSink {
int write(byte[] data, int offset, int length) throws IOException;
/**
* Set the audio format of audio samples the sink is ready to receive.
* Get the audio format of audio samples for this sink.
*
* @param format The audio format of samples to receive.
* @return The audio format of samples to write.
*/
AudioFormat getAudioFormat();
/**
* Sets the {@code AudioFormat} of samples which will be provided to this
* {@code AudioSink}.
*
* @param format The audio format of audio samples.
*/
void setAudioFormat(AudioFormat format);

View file

@ -0,0 +1,78 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.sink;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.lecturestudio.core.audio.AudioFormat;
/**
* AudioSink implementation which is backed by a {@code ByteArrayOutputStream}.
*
* @author Alex Andres
*/
/**
 * AudioSink implementation which is backed by a {@code ByteArrayOutputStream}.
 * Not thread-safe; callers must serialize access.
 *
 * @author Alex Andres
 */
public class ByteArrayAudioSink implements AudioSink {

	/** Accumulates all written audio samples; created in {@link #open()}. */
	private ByteArrayOutputStream outputStream;

	/** The audio format of samples provided to this sink. */
	private AudioFormat format;


	@Override
	public void open() throws IOException {
		outputStream = new ByteArrayOutputStream();
	}

	@Override
	public void reset() throws IOException {
		outputStream.reset();
	}

	@Override
	public void close() throws IOException {
		outputStream.close();
	}

	/**
	 * Appends {@code length} bytes of {@code data}, starting at
	 * {@code offset}, to the internal buffer.
	 *
	 * @param data   The audio sample buffer.
	 * @param offset The offset within the buffer at which the data starts.
	 * @param length The number of bytes to write.
	 *
	 * @return the number of bytes written.
	 *
	 * @throws IOException If the data could not be written, e.g. on an
	 *                     invalid offset/length combination.
	 */
	@Override
	public int write(byte[] data, int offset, int length) throws IOException {
		try {
			// Bug fix: honor the provided offset. Previously the write always
			// started at index 0, ignoring the offset parameter.
			outputStream.write(data, offset, length);
		}
		catch (Exception e) {
			throw new IOException(e);
		}

		return length;
	}

	@Override
	public AudioFormat getAudioFormat() {
		return format;
	}

	@Override
	public void setAudioFormat(AudioFormat format) {
		this.format = format;
	}

	/**
	 * Returns a copy of all audio data written to this sink so far.
	 *
	 * @return the accumulated audio samples as a byte array.
	 */
	public byte[] toByteArray() {
		return outputStream.toByteArray();
	}
}

View file

@ -0,0 +1,69 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.sink;
import java.io.IOException;
import org.lecturestudio.core.audio.AudioFormat;
/**
* AudioSink proxy implementation which redirects all calls to a specified
* sink.
*
* @author Alex Andres
*/
/**
 * AudioSink proxy implementation which redirects all calls to a specified
 * sink.
 *
 * @author Alex Andres
 */
public class ProxyAudioSink implements AudioSink {

	/** The sink receiving all redirected calls. */
	private final AudioSink proxy;


	/**
	 * Create a new {@code ProxyAudioSink} forwarding all calls to the
	 * provided sink.
	 *
	 * @param proxy The sink to delegate to. Must not be {@code null}.
	 *
	 * @throws NullPointerException If the provided sink is {@code null}.
	 */
	public ProxyAudioSink(AudioSink proxy) {
		if (proxy == null) {
			// Fail fast at construction instead of with a delayed NPE on the
			// first delegated call.
			throw new NullPointerException("AudioSink must not be null");
		}

		this.proxy = proxy;
	}

	@Override
	public void open() throws IOException {
		proxy.open();
	}

	@Override
	public void reset() throws IOException {
		proxy.reset();
	}

	@Override
	public void close() throws IOException {
		proxy.close();
	}

	@Override
	public int write(byte[] data, int offset, int length) throws IOException {
		return proxy.write(data, offset, length);
	}

	@Override
	public AudioFormat getAudioFormat() {
		return proxy.getAudioFormat();
	}

	@Override
	public void setAudioFormat(AudioFormat format) {
		proxy.setAudioFormat(format);
	}
}

View file

@ -88,6 +88,11 @@ public class WavFileSink implements AudioSink {
return stream.write(data, offset, length);
}
@Override
public AudioFormat getAudioFormat() {
return format;
}
@Override
public void setAudioFormat(AudioFormat format) {
this.format = format;

View file

@ -22,6 +22,7 @@ import java.io.IOException;
import java.io.InputStream;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioUtils;
/**
* Audio input stream implementation of {@link AudioSource}.
@ -79,6 +80,17 @@ public class AudioInputStreamSource implements AudioSource {
return audioStream.skip(n);
}
@Override
public int seekMs(int timeMs) throws IOException {
float bytesPerSecond = AudioUtils.getBytesPerSecond(getAudioFormat());
int skipBytes = Math.round(bytesPerSecond * timeMs / 1000F);
reset();
skip(skipBytes);
return skipBytes;
}
@Override
public AudioFormat getAudioFormat() {
return audioFormat;

View file

@ -70,6 +70,16 @@ public interface AudioSource {
*/
long skip(long n) throws IOException;
/**
* Jump to the specified time position in the audio playback stream.
*
* @param timeMs The absolute time in milliseconds to jump to.
*
* @throws IOException If the playback stream failed to read the start of
* the specified position.
*/
int seekMs(int timeMs) throws IOException;
/**
* Get the number of bytes the audio source has available to read.
*

View file

@ -0,0 +1,79 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.source;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioUtils;
/**
 * AudioSource implementation backed by an in-memory
 * {@code ByteArrayInputStream}.
 *
 * @author Alex Andres
 */
public class ByteArrayAudioSource implements AudioSource {

	/** The in-memory stream providing the audio samples. */
	private final ByteArrayInputStream stream;

	/** The format of the provided audio samples. */
	private final AudioFormat audioFormat;


	/**
	 * Create a new {@code ByteArrayAudioSource} reading from the given stream.
	 *
	 * @param inputStream The stream providing the audio samples.
	 * @param format      The format of the audio samples.
	 */
	public ByteArrayAudioSource(ByteArrayInputStream inputStream, AudioFormat format) {
		this.stream = inputStream;
		this.audioFormat = format;
	}

	@Override
	public int read(byte[] data, int offset, int length) {
		return stream.read(data, offset, length);
	}

	@Override
	public void close() throws IOException {
		stream.close();
	}

	@Override
	public void reset() {
		stream.reset();
	}

	@Override
	public long skip(long n) {
		return stream.skip(n);
	}

	/**
	 * Jumps to the given absolute time position by rewinding the stream and
	 * skipping the corresponding number of bytes.
	 *
	 * @param timeMs The absolute time in milliseconds to jump to.
	 *
	 * @return the number of bytes skipped from the beginning of the stream.
	 */
	@Override
	public int seekMs(int timeMs) {
		final float bytesPerSecond = AudioUtils.getBytesPerSecond(audioFormat);
		final int bytesToSkip = Math.round(timeMs * bytesPerSecond / 1000F);

		reset();
		skip(bytesToSkip);

		return bytesToSkip;
	}

	@Override
	public long getInputSize() {
		return stream.available();
	}

	@Override
	public AudioFormat getAudioFormat() {
		return audioFormat;
	}
}

View file

@ -21,6 +21,7 @@ package org.lecturestudio.core.audio.source;
import java.io.IOException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.io.RandomAccessAudioStream;
import org.lecturestudio.core.model.Interval;
@ -50,6 +51,17 @@ public class RandomAccessAudioSource implements AudioSource {
return stream.skip(n);
}
@Override
public int seekMs(int timeMs) throws IOException {
float bytesPerSecond = AudioUtils.getBytesPerSecond(getAudioFormat());
int skipBytes = Math.round(bytesPerSecond * timeMs / 1000F);
reset();
skip(skipBytes);
return skipBytes;
}
@Override
public void reset() throws IOException {
stream.reset();

View file

@ -1,52 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.system;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
/**
* Base {@link AudioSystemProvider} implementation that implements the default retrieval
* of audio devices.
*
* @author Alex Andres
*/
/**
 * Base {@link AudioSystemProvider} implementation that implements the default
 * retrieval of audio devices by name.
 *
 * @author Alex Andres
 */
public abstract class AbstractSoundSystemProvider implements AudioSystemProvider {

	/**
	 * Looks up a capture device by its name.
	 *
	 * @param deviceName The name of the device to retrieve.
	 *
	 * @return the matching capture device, or {@code null} if none matches.
	 */
	@Override
	public AudioInputDevice getInputDevice(String deviceName) {
		AudioInputDevice[] devices = getInputDevices();

		for (int i = 0; i < devices.length; i++) {
			if (devices[i].getName().equals(deviceName)) {
				return devices[i];
			}
		}

		return null;
	}

	/**
	 * Looks up a playback device by its name.
	 *
	 * @param deviceName The name of the device to retrieve.
	 *
	 * @return the matching playback device, or {@code null} if none matches.
	 */
	@Override
	public AudioOutputDevice getOutputDevice(String deviceName) {
		AudioOutputDevice[] devices = getOutputDevices();

		for (int i = 0; i < devices.length; i++) {
			if (devices[i].getName().equals(deviceName)) {
				return devices[i];
			}
		}

		return null;
	}
}

View file

@ -1,88 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.system;
import java.io.File;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import org.lecturestudio.core.spi.SpiLoader;
/**
* SPI audio system loader. To provide different audio system implementations
* the system provider must implement the {@link AudioSystemProvider} interface
* and be registered via the SPI.
*
* @author Alex Andres
* @see <a href= "https://docs.oracle.com/javase/tutorial/ext/basics/spi.html">
* https://docs.oracle.com/javase/tutorial/ext/basics/spi.html</a>
*/
public class AudioSystemLoader extends SpiLoader<AudioSystemProvider> {

	/** The lazily created singleton instance of this loader. */
	private static AudioSystemLoader serviceLoader;

	/** The built-in Java-based sound system provider, always available without SPI registration. */
	private final AudioSystemProvider javaSoundProvider = new JavaSoundProvider();


	/**
	 * Retrieve the singleton instance of {@link AudioSystemLoader}.
	 * Synchronized so concurrent callers cannot create two instances.
	 */
	public static synchronized AudioSystemLoader getInstance() {
		if (serviceLoader == null) {
			serviceLoader = new AudioSystemLoader();
		}

		return serviceLoader;
	}

	/**
	 * Get an {@link AudioSystemProvider} with the specified name. The built-in
	 * Java sound provider takes precedence over SPI-registered providers with
	 * the same name.
	 *
	 * @param providerName The name of the {@link AudioSystemProvider} to retrieve.
	 *
	 * @return the {@link AudioSystemProvider} or null, if no such provider exists.
	 */
	public AudioSystemProvider getProvider(String providerName) {
		if (javaSoundProvider.getProviderName().equals(providerName)) {
			return javaSoundProvider;
		}

		return super.getProvider(providerName);
	}

	/**
	 * Retrieve the names of all registered audio system providers, including
	 * the built-in Java sound provider.
	 *
	 * @return an array of names of all registered audio system providers.
	 */
	public String[] getProviderNames() {
		List<String> names = new ArrayList<>();

		Collections.addAll(names, super.getProviderNames());

		names.add(javaSoundProvider.getProviderName());

		return names.toArray(new String[0]);
	}

	/**
	 * Private constructor enforcing the singleton; scans the "lib"
	 * directories for SPI-registered {@link AudioSystemProvider}
	 * implementations.
	 */
	private AudioSystemLoader() {
		super(new File[] { new File("lib"), new File("../../lib") },
				"org.lecturestudio.core.audio.system", AudioSystemProvider.class);
	}
}

View file

@ -1,79 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.system;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.spi.ServiceProvider;
/**
* Audio system provider implementation. The {@link AudioSystemProvider} provides access
* implementation specific audio devices.
*
* @author Alex Andres
*/
public interface AudioSystemProvider extends ServiceProvider {

	/**
	 * Get the system's default audio capture device.
	 *
	 * @return The default audio capture device.
	 */
	AudioInputDevice getDefaultInputDevice();

	/**
	 * Get the system's default audio playback device.
	 *
	 * @return The default audio playback device.
	 */
	AudioOutputDevice getDefaultOutputDevice();

	/**
	 * Get all available audio capture devices.
	 *
	 * @return an array of all audio capture devices.
	 */
	AudioInputDevice[] getInputDevices();

	/**
	 * Get all available audio playback devices.
	 *
	 * @return an array of all audio playback devices.
	 */
	AudioOutputDevice[] getOutputDevices();

	/**
	 * Get an audio capture device with the specified device name.
	 *
	 * @param deviceName The name of the device to retrieve.
	 *
	 * @return an audio capture device, or {@code null} if no device with the
	 *         given name exists.
	 */
	AudioInputDevice getInputDevice(String deviceName);

	/**
	 * Get an audio playback device with the specified device name.
	 *
	 * @param deviceName The name of the device to retrieve.
	 *
	 * @return an audio playback device, or {@code null} if no device with the
	 *         given name exists.
	 */
	AudioOutputDevice getOutputDevice(String deviceName);
}

View file

@ -1,99 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.system;
import static java.util.Objects.isNull;
import java.util.ArrayList;
import java.util.List;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Line;
import javax.sound.sampled.Mixer;
import javax.sound.sampled.SourceDataLine;
import javax.sound.sampled.TargetDataLine;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.device.JavaSoundInputDevice;
import org.lecturestudio.core.audio.device.JavaSoundOutputDevice;
/**
* Java-based sound system provider implementation.
*
* @author Alex Andres
*/
/**
 * Java-based sound system provider implementation that enumerates devices
 * through the JavaSound mixer API.
 *
 * @author Alex Andres
 */
public class JavaSoundProvider extends AbstractSoundSystemProvider {

	/**
	 * Returns the first enumerated capture device as the default one.
	 *
	 * @return the default capture device, or {@code null} if none exists.
	 */
	@Override
	public AudioInputDevice getDefaultInputDevice() {
		AudioInputDevice[] captureDevices = getInputDevices();

		if (captureDevices == null || captureDevices.length == 0) {
			return null;
		}

		return captureDevices[0];
	}

	/**
	 * Returns the first enumerated playback device as the default one.
	 *
	 * @return the default playback device, or {@code null} if none exists.
	 */
	@Override
	public AudioOutputDevice getDefaultOutputDevice() {
		AudioOutputDevice[] playbackDevices = getOutputDevices();

		if (playbackDevices == null || playbackDevices.length == 0) {
			return null;
		}

		return playbackDevices[0];
	}

	/**
	 * Enumerates all mixers that support a capture line.
	 *
	 * @return an array of all available capture devices.
	 */
	@Override
	public AudioInputDevice[] getInputDevices() {
		Line.Info captureLineInfo = new Line.Info(TargetDataLine.class);
		List<AudioInputDevice> result = new ArrayList<>();

		for (Mixer.Info info : AudioSystem.getMixerInfo()) {
			if (AudioSystem.getMixer(info).isLineSupported(captureLineInfo)) {
				result.add(new JavaSoundInputDevice(info));
			}
		}

		return result.toArray(new AudioInputDevice[0]);
	}

	/**
	 * Enumerates all mixers that support a playback line.
	 *
	 * @return an array of all available playback devices.
	 */
	@Override
	public AudioOutputDevice[] getOutputDevices() {
		Line.Info playbackLineInfo = new Line.Info(SourceDataLine.class);
		List<AudioOutputDevice> result = new ArrayList<>();

		for (Mixer.Info info : AudioSystem.getMixerInfo()) {
			if (AudioSystem.getMixer(info).isLineSupported(playbackLineInfo)) {
				result.add(new JavaSoundOutputDevice(info));
			}
		}

		return result.toArray(new AudioOutputDevice[0]);
	}

	@Override
	public String getProviderName() {
		return "Java Sound";
	}
}

View file

@ -1 +0,0 @@
org.lecturestudio.core.audio.codec.OpusCodecProvider

View file

@ -199,7 +199,6 @@ class ConfigurationTest {
config.getAudioConfig().setPlaybackDeviceName("Speakers");
config.getAudioConfig().setRecordingFormat(new AudioFormat(AudioFormat.Encoding.S16LE, 44100, 1));
config.getAudioConfig().setRecordingPath("/home/tmp");
config.getAudioConfig().setSoundSystem("Java");
config.getAudioConfig().setRecordingVolume("Microphone", 0.7f);
manager.save(configFile, config);
@ -211,7 +210,6 @@ class ConfigurationTest {
assertEquals("Speakers", audioConfig.getPlaybackDeviceName());
assertEquals(audioConfig.getRecordingFormat(), new AudioFormat(AudioFormat.Encoding.S16LE, 44100, 1));
assertEquals("/home/tmp", audioConfig.getRecordingPath());
assertEquals("Java", audioConfig.getSoundSystem());
assertEquals(Double.valueOf(0.7f), audioConfig.getRecordingVolume("Microphone"));
}

View file

@ -81,7 +81,6 @@ public class DefaultConfiguration extends EditorConfiguration {
getToolConfig().getPresetColors().addAll(new ArrayList<>(6));
getAudioConfig().setSoundSystem("Java Sound");
getAudioConfig().setPlaybackVolume(1);
}

View file

@ -35,11 +35,6 @@ import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.Configuration;
import org.lecturestudio.core.app.dictionary.Dictionary;
import org.lecturestudio.core.audio.AudioPlayer;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.Player;
import org.lecturestudio.core.audio.SyncState;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.effect.DenoiseEffectRunner;
import org.lecturestudio.core.audio.effect.NoiseReductionParameters;
import org.lecturestudio.core.audio.sink.AudioSink;
@ -57,6 +52,7 @@ import org.lecturestudio.core.presenter.Presenter;
import org.lecturestudio.core.recording.Recording;
import org.lecturestudio.core.view.NotificationType;
import org.lecturestudio.editor.api.context.EditorContext;
import org.lecturestudio.media.webrtc.WebRtcAudioPlayer;
import org.lecturestudio.media.recording.RecordingEvent;
import org.lecturestudio.editor.api.presenter.command.NoiseReductionProgressCommand;
import org.lecturestudio.editor.api.service.RecordingFileService;
@ -77,7 +73,7 @@ public class NoiseReductionSettingsPresenter extends Presenter<NoiseReductionSet
private BooleanProperty playAudioSnippet;
private Player audioPlayer;
private WebRtcAudioPlayer audioPlayer;
@Inject
@ -163,7 +159,7 @@ public class NoiseReductionSettingsPresenter extends Presenter<NoiseReductionSet
}
private void startAudioSnippetPlayback() {
if (audioPlayer.getState() != ExecutableState.Started) {
if (!audioPlayer.started()) {
try {
audioPlayer.start();
}
@ -174,7 +170,7 @@ public class NoiseReductionSettingsPresenter extends Presenter<NoiseReductionSet
}
private void stopAudioSnippetPlayback() {
if (audioPlayer.getState() == ExecutableState.Started) {
if (audioPlayer.started()) {
try {
audioPlayer.stop();
}
@ -263,18 +259,18 @@ public class NoiseReductionSettingsPresenter extends Presenter<NoiseReductionSet
private void initAudioPlayer(RandomAccessAudioStream stream) {
shutdownPlayback();
AudioInputStreamSource audioSource = new AudioInputStreamSource(stream, stream.getAudioFormat());
AudioInputStreamSource audioSource = new AudioInputStreamSource(stream,
stream.getAudioFormat());
try {
Configuration config = context.getConfiguration();
String providerName = config.getAudioConfig().getSoundSystem();
String outputDeviceName = config.getAudioConfig().getPlaybackDeviceName();
AudioOutputDevice outputDevice = AudioUtils.getAudioOutputDevice(providerName, outputDeviceName);
outputDevice.setVolume(1);
audioPlayer = new AudioPlayer(outputDevice, audioSource, new SyncState());
audioPlayer.setStateListener(this::onAudioStateChange);
audioPlayer = new WebRtcAudioPlayer();
audioPlayer.setAudioVolume(1.0);
audioPlayer.setAudioDeviceName(outputDeviceName);
audioPlayer.setAudioSource(audioSource);
audioPlayer.addStateListener(this::onAudioStateChange);
// audioPlayer.setProgressListener(this::onAudioProgress);
}
catch (Exception e) {

View file

@ -0,0 +1,86 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.editor.api.presenter;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.util.Arrays;
import java.util.stream.Collectors;
import javax.inject.Inject;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.app.configuration.Configuration;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.presenter.Presenter;
import org.lecturestudio.editor.api.config.DefaultConfiguration;
import org.lecturestudio.editor.api.view.SoundSettingsView;
public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {

	/** The audio configuration holding the playback device selection. */
	private final AudioConfiguration audioConfig;

	/** Provides access to the audio devices available on the system. */
	private final AudioSystemProvider audioSystemProvider;


	@Inject
	SoundSettingsPresenter(ApplicationContext context, SoundSettingsView view,
			AudioSystemProvider audioSystemProvider) {
		super(context, view);

		this.audioConfig = context.getConfiguration().getAudioConfig();
		this.audioSystemProvider = audioSystemProvider;
	}

	@Override
	public void initialize() throws Exception {
		// Fall back to the system default device if nothing is configured yet.
		if (isNull(audioConfig.getPlaybackDeviceName())) {
			setDefaultPlaybackDevice();
		}

		var devices = Arrays.stream(audioSystemProvider.getPlaybackDevices())
				.map(AudioDevice::getName).collect(Collectors.toList());

		view.setAudioPlaybackDevices(devices);
		view.setAudioPlaybackDevice(audioConfig.playbackDeviceNameProperty());
		view.setOnClose(this::close);
		view.setOnReset(this::reset);
	}

	/**
	 * Resets the playback device selection to the application default.
	 */
	private void reset() {
		DefaultConfiguration defaultConfig = new DefaultConfiguration();

		// Use the injected audioConfig field for consistency with the rest
		// of this presenter (was: context.getConfiguration().getAudioConfig()).
		audioConfig.setPlaybackDeviceName(
				defaultConfig.getAudioConfig().getPlaybackDeviceName());
	}

	/**
	 * Selects the system's default playback device, if one is available.
	 */
	private void setDefaultPlaybackDevice() {
		AudioDevice playbackDevice = audioSystemProvider.getDefaultPlaybackDevice();

		if (nonNull(playbackDevice)) {
			audioConfig.setPlaybackDeviceName(playbackDevice.getName());
		}
	}
}

View file

@ -32,6 +32,7 @@ import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.filter.AudioFilter;
import org.lecturestudio.core.model.Interval;
import org.lecturestudio.core.model.Page;
@ -45,6 +46,8 @@ public class RecordingPlaybackService extends ExecutableBase {
private final static Logger LOG = LogManager.getLogger(RecordingPlaybackService.class);
private final AudioSystemProvider audioSystemProvider;
private final EditorContext context;
private final ExecutableStateListener playbackStateListener = (oldState, newState) -> {
@ -62,7 +65,8 @@ public class RecordingPlaybackService extends ExecutableBase {
@Inject
RecordingPlaybackService(ApplicationContext context) {
RecordingPlaybackService(ApplicationContext context, AudioSystemProvider audioSystemProvider) {
this.audioSystemProvider = audioSystemProvider;
this.context = (EditorContext) context;
this.context.primarySelectionProperty().addListener((o, oldValue, newValue) -> {
if (initialized() || suspended()) {
@ -93,7 +97,9 @@ public class RecordingPlaybackService extends ExecutableBase {
closeRecording();
}
recordingPlayer = new RecordingPlayer(context, context.getConfiguration().getAudioConfig());
recordingPlayer = new RecordingPlayer(context,
context.getConfiguration().getAudioConfig(),
audioSystemProvider);
recordingPlayer.setRecording(recording);
try {

View file

@ -0,0 +1,31 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.editor.api.view;
import java.util.List;
import org.lecturestudio.core.beans.StringProperty;
/**
 * View contract for the sound settings tab.
 */
public interface SoundSettingsView extends SettingsBaseView {

	/**
	 * Binds the playback device selection shown by the view to the given
	 * device name property.
	 *
	 * @param deviceName The playback device name property.
	 */
	void setAudioPlaybackDevice(StringProperty deviceName);

	/**
	 * Sets the list of playback device names the user can choose from.
	 *
	 * @param devices The available playback device names.
	 */
	void setAudioPlaybackDevices(List<String> devices);

}

View file

@ -18,6 +18,8 @@
package org.lecturestudio.editor.javafx;
import dev.onvoid.webrtc.logging.Logging;
import org.lecturestudio.core.app.ApplicationFactory;
import org.lecturestudio.javafx.app.JavaFxApplication;
@ -30,6 +32,8 @@ public class EditorFxApplication extends JavaFxApplication {
* @param args the main method's arguments.
*/
public static void main(String[] args) {
Logging.logThreads(true);
// Start with pre-loader.
EditorFxApplication.launch(args, EditorFxPreloader.class);
}

View file

@ -36,6 +36,7 @@ import org.lecturestudio.core.app.configuration.Configuration;
import org.lecturestudio.core.app.configuration.ConfigurationService;
import org.lecturestudio.core.app.configuration.JsonConfigurationService;
import org.lecturestudio.core.app.dictionary.Dictionary;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.bus.AudioBus;
import org.lecturestudio.core.bus.ApplicationBus;
import org.lecturestudio.core.bus.EventBus;
@ -49,6 +50,7 @@ import org.lecturestudio.core.util.DirUtils;
import org.lecturestudio.editor.api.config.DefaultConfiguration;
import org.lecturestudio.editor.api.config.EditorConfiguration;
import org.lecturestudio.editor.api.context.EditorContext;
import org.lecturestudio.media.webrtc.WebRtcAudioSystemProvider;
import org.lecturestudio.swing.DefaultRenderContext;
import org.apache.logging.log4j.LogManager;
@ -66,6 +68,7 @@ public class ApplicationModule extends AbstractModule {
@Override
protected void configure() {
bind(ToolController.class).asEagerSingleton();
bind(AudioSystemProvider.class).to(WebRtcAudioSystemProvider.class);
}
@Provides

View file

@ -67,6 +67,7 @@ public class ViewModule extends AbstractModule {
bind(ReplacePageView.class).to(FxReplacePageView.class);
bind(SettingsView.class).to(FxSettingsView.class);
bind(SlidesView.class).to(FxSlidesView.class);
bind(SoundSettingsView.class).to(FxSoundSettingsView.class);
bind(StartView.class).to(FxStartView.class);
bind(VideoExportView.class).to(FxVideoExportView.class);
bind(VideoExportProgressView.class).to(FxVideoExportProgressView.class);

View file

@ -0,0 +1,72 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.editor.javafx.view;
import java.util.List;
import org.lecturestudio.core.beans.StringProperty;
import org.lecturestudio.core.view.Action;
import org.lecturestudio.editor.api.view.SoundSettingsView;
import org.lecturestudio.javafx.beans.LectStringProperty;
import org.lecturestudio.javafx.util.FxUtils;
import org.lecturestudio.javafx.view.FxmlView;
import javafx.fxml.FXML;
import javafx.scene.control.Button;
import javafx.scene.control.ComboBox;
import javafx.scene.layout.GridPane;
/**
 * FXML-backed implementation of the sound settings view. Exposes the playback
 * device selection and the reset/close actions to the presenter.
 */
@FxmlView(name = "sound-settings", presenter = org.lecturestudio.editor.api.presenter.SoundSettingsPresenter.class)
public class FxSoundSettingsView extends GridPane implements SoundSettingsView {

	/** Combo-box listing the names of the available playback devices. */
	@FXML
	private ComboBox<String> playbackDeviceCombo;

	/** Closes the settings view. */
	@FXML
	private Button closeButton;

	/** Resets the settings to the application defaults. */
	@FXML
	private Button resetButton;


	public FxSoundSettingsView() {
		super();
	}

	@Override
	public void setAudioPlaybackDevice(StringProperty deviceName) {
		// Bidirectional: UI selection updates the config property and vice versa.
		playbackDeviceCombo.valueProperty()
				.bindBidirectional(new LectStringProperty(deviceName));
	}

	@Override
	public void setAudioPlaybackDevices(List<String> devices) {
		// Mutate the item list on the FX application thread.
		FxUtils.invoke(() -> playbackDeviceCombo.getItems().setAll(devices));
	}

	@Override
	public void setOnClose(Action action) {
		FxUtils.bindAction(closeButton, action);
	}

	@Override
	public void setOnReset(Action action) {
		FxUtils.bindAction(resetButton, action);
	}
}

View file

@ -0,0 +1,11 @@
.general-settings {
-fx-hgap: 10;
-fx-vgap: 10;
-fx-padding: 2em;
}
.general-settings > .label {
-fx-padding: 0 0 0.425em 0;
}
.general-settings > .text-small {
-fx-padding: 0.15em 0 0 0;
}

View file

@ -1,11 +1,10 @@
<?xml version="1.0" encoding="UTF-8"?>
<?import javafx.geometry.*?>
<?import javafx.scene.control.*?>
<?import javafx.scene.layout.*?>
<?import org.lecturestudio.javafx.factory.*?>
<fx:root type="GridPane" hgap="10" vgap="10" xmlns="http://javafx.com/javafx/11.0.1" xmlns:fx="http://javafx.com/fxml/1">
<fx:root styleClass="general-settings" type="GridPane" xmlns="http://javafx.com/javafx/11.0.1" xmlns:fx="http://javafx.com/fxml/1">
<columnConstraints>
<ColumnConstraints/>
<ColumnConstraints/>
@ -39,8 +38,4 @@
<Button fx:id="resetButton" text="%button.reset"/>
<Button fx:id="closeButton" text="%button.close"/>
</HBox>
<padding>
<Insets bottom="20" left="20" right="20" top="20"/>
</padding>
</fx:root>

View file

@ -63,6 +63,9 @@
.settings-general-icon {
-fx-icon-content: "M32,376h283.35c6.186-14.112,20.281-24,36.65-24s30.465,9.888,36.65,24H480v32h-91.35c-6.186,14.112-20.281,24-36.65,24 s-30.465-9.888-36.65-24H32 M32,240h91.35c6.186-14.112,20.281-24,36.65-24s30.465,9.888,36.65,24H480v32H196.65c-6.186,14.112-20.281,24-36.65,24 s-30.465-9.888-36.65-24H32 M32,104h283.35c6.186-14.112,20.281-24,36.65-24s30.465,9.888,36.65,24H480v32h-91.35c-6.186,14.112-20.281,24-36.65,24 s-30.465-9.888-36.65-24H32";
}
.settings-sound-icon {
-fx-icon-content: "M 10.025,8 A 4.486,4.486 0 0 1 8.707,11.182 L 8,10.475 A 3.489,3.489 0 0 0 9.025,8 C 9.025,7.034 8.633,6.159 8,5.525 L 8.707,4.818 A 4.486,4.486 0 0 1 10.025,8 Z M 7,4 A 0.5,0.5 0 0 0 6.188,3.61 L 3.825,5.5 H 1.5 A 0.5,0.5 0 0 0 1,6 v 4 a 0.5,0.5 0 0 0 0.5,0.5 h 2.325 l 2.363,1.89 A 0.5,0.5 0 0 0 7,12 Z M 4.312,6.39 6,5.04 v 5.92 L 4.312,9.61 A 0.5,0.5 0 0 0 4,9.5 H 2 v -3 H 4 A 0.5,0.5 0 0 0 4.312,6.39 Z m 5.809,6.206 A 6.48,6.48 0 0 0 12.025,8 6.48,6.48 0 0 0 10.121,3.404 L 9.414,4.111 A 5.483,5.483 0 0 1 11.025,8 5.483,5.483 0 0 1 9.415,11.89 Z m 1.415,1.414 A 8.473,8.473 0 0 0 14.026,8 8.473,8.473 0 0 0 11.536,1.99 L 10.828,2.697 A 7.476,7.476 0 0 1 13.025,8 c 0,2.071 -0.84,3.946 -2.197,5.303 z";
}
.settings-video-icon {
-fx-icon-content: "M10 8v8l5-4-5-4zm9-5H5c-1.1 0-2 .9-2 2v14c0 1.1.9 2 2 2h14c1.1 0 2-.9 2-2V5c0-1.1-.9-2-2-2zm0 16H5V5h14v14z";
}

View file

@ -11,6 +11,12 @@
<SvgIcon styleClass="settings-general-icon" />
</graphic>
</Tab>
<Tab id="sound" text="%settings.sound">
<FxSoundSettingsView />
<graphic>
<SvgIcon styleClass="settings-sound-icon" />
</graphic>
</Tab>
<Tab id="video" text="%settings.video">
<FxVideoSettingsView />
<graphic>

View file

@ -1,2 +1,3 @@
settings.general = Allgemein
settings.sound = Sound
settings.video = Video

View file

@ -1,2 +1,3 @@
settings.general = General
settings.sound = Sound
settings.video = Video

View file

@ -0,0 +1,10 @@
.sound-settings {
-fx-hgap: 10;
-fx-padding: 2em;
}
.sound-settings > .label {
-fx-padding: 0 0 0.425em 0;
}
.sound-settings > .text-small {
-fx-padding: 0.15em 0 0 0;
}

View file

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<?import javafx.scene.control.*?>
<?import javafx.scene.layout.*?>
<fx:root styleClass="sound-settings" type="GridPane" xmlns="http://javafx.com/javafx/11.0.1" xmlns:fx="http://javafx.com/fxml/1">
<columnConstraints>
<ColumnConstraints/>
<ColumnConstraints/>
</columnConstraints>
<rowConstraints>
<RowConstraints/>
<RowConstraints/>
<RowConstraints vgrow="ALWAYS"/>
</rowConstraints>
<Label text="%sound.settings.playback.device" />
<ComboBox fx:id="playbackDeviceCombo" GridPane.rowIndex="1" GridPane.hgrow="ALWAYS" />
<HBox alignment="BOTTOM_RIGHT" spacing="5" GridPane.columnSpan="2" GridPane.rowIndex="2">
<Button fx:id="resetButton" text="%button.reset"/>
<Button fx:id="closeButton" text="%button.close"/>
</HBox>
</fx:root>

View file

@ -0,0 +1 @@
sound.settings.playback.device = Ausgabeger\u00e4t

View file

@ -0,0 +1 @@
sound.settings.playback.device = Output device

View file

@ -53,7 +53,7 @@
<dependency>
<groupId>dev.onvoid.webrtc</groupId>
<artifactId>webrtc-java</artifactId>
<version>0.4.0</version>
<version>0.5.0</version>
</dependency>
<dependency>

View file

@ -1,194 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.util.ArrayList;
import java.util.List;
import org.lecturestudio.avdev.AudioCaptureDevice;
import org.lecturestudio.avdev.AudioOutputStream;
import org.lecturestudio.avdev.AudioSessionListener;
import org.lecturestudio.avdev.AudioSink;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.audio.device.AudioInputDevice;
/**
* AVdev audio capture device implementation.
*
* @author Alex Andres
*/
public class AVdevAudioInputDevice extends AudioInputDevice {

	/** Internal capture device. */
	private final AudioCaptureDevice device;

	/** Internal audio capture stream. Non-null only between open() and close(). */
	private AudioOutputStream stream;

	/** Internal audio sink. */
	private AudioSink sink;

	/** Internal session listener. */
	private AudioSessionListener sessionListener;


	/**
	 * Create a new AVdevAudioInputDevice instance with the specified AVdev
	 * capture device.
	 *
	 * @param device The AVdev capture device.
	 */
	public AVdevAudioInputDevice(AudioCaptureDevice device) {
		this.device = device;
	}

	/**
	 * Set the audio sink that receives the captured audio. Must be set before
	 * calling {@link #open()}.
	 *
	 * @param sink The audio sink.
	 */
	public void setSink(AudioSink sink) {
		this.sink = sink;
	}

	/**
	 * Set the session listener to monitor the audio session state of this
	 * device.
	 *
	 * @param listener The audio session listener.
	 */
	public void setSessionListener(AudioSessionListener listener) {
		this.sessionListener = listener;
	}

	@Override
	public String getName() {
		return device.getName();
	}

	@Override
	protected int readInput(byte[] buffer, int offset, int length) {
		// Not used: captured audio is delivered to the configured sink
		// (passed to createOutputStream() in open()), not pulled through here.
		return 0;
	}

	@Override
	public void setVolume(double volume) {
		super.setVolume(volume);

		// Forward the new volume to the native stream, if one is open.
		if (nonNull(stream)) {
			stream.setVolume((float) volume);
		}
	}

	@Override
	public double getVolume() {
		// Prefer the native stream's volume while capturing.
		if (nonNull(stream)) {
			return stream.getVolume();
		}

		return super.getVolume();
	}

	@Override
	public void open() throws Exception {
		// Dispose any previously created stream before opening a new one.
		if (nonNull(stream)) {
			stream.dispose();
			stream = null;
		}
		if (isNull(sink)) {
			throw new NullPointerException("No audio sink provided.");
		}

		AudioFormat audioFormat = getAudioFormat();

		// Translate the core audio format into the AVdev representation.
		org.lecturestudio.avdev.AudioFormat format = new org.lecturestudio.avdev.AudioFormat(
				org.lecturestudio.avdev.AudioFormat.SampleFormat.S16LE,
				audioFormat.getSampleRate(), audioFormat.getChannels());

		// Carry the current volume/mute state over to the new stream.
		double volume = getVolume();
		boolean mute = isMuted();

		stream = device.createOutputStream(sink);

		if (nonNull(sessionListener)) {
			stream.attachSessionListener(sessionListener);
		}

		stream.setAudioFormat(format);
		stream.setBufferLatency(50);
		stream.setVolume((float) volume);
		stream.setMute(mute);
		stream.open();
	}

	@Override
	public void close() throws Exception {
		if (nonNull(stream)) {
			if (nonNull(sessionListener)) {
				stream.detachSessionListener(sessionListener);
			}

			stream.close();
			// Reset so isOpen() reflects the closed state.
			stream = null;
		}
	}

	@Override
	public void start() throws Exception {
		if (nonNull(stream)) {
			stream.start();
		}
	}

	@Override
	public void stop() throws Exception {
		if (nonNull(stream)) {
			stream.stop();
		}
	}

	@Override
	public boolean isOpen() {
		// The stream exists only between open() and close().
		return nonNull(stream);
	}

	@Override
	public List<AudioFormat> getSupportedFormats() {
		AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
		int channels = 1;

		// Mono S16LE at each sample rate the core declares as supported.
		List<AudioFormat> formats = new ArrayList<>();

		for (int sampleRate : AudioDevice.SUPPORTED_SAMPLE_RATES) {
			formats.add(new AudioFormat(encoding, sampleRate, channels));
		}

		return formats;
	}

	@Override
	public int getBufferSize() {
		// No application-side buffer; buffering is handled by the native stream.
		return 0;
	}
}

View file

@ -1,174 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.util.ArrayList;
import java.util.List;
import org.lecturestudio.avdev.AudioInputStream;
import org.lecturestudio.avdev.AudioPlaybackDevice;
import org.lecturestudio.avdev.AudioSource;
import org.lecturestudio.avdev.StreamListener;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
/**
* AVdev audio playback device implementation.
*
* @author Alex Andres
*/
public class AVdevAudioOutputDevice extends AudioOutputDevice {

	/** Internal playback device. */
	private final AudioPlaybackDevice device;

	/** Internal audio playback stream. Non-null only between open() and close(). */
	private AudioInputStream stream;

	/** Internal audio source. */
	private AudioSource source;

	/** Internal stream listener. */
	private StreamListener streamListener;


	/**
	 * Create a new AVdevAudioOutputDevice instance with the specified AVdev
	 * playback device.
	 *
	 * @param device The AVdev playback device.
	 */
	public AVdevAudioOutputDevice(AudioPlaybackDevice device) {
		this.device = device;
	}

	/**
	 * Set the audio source from which to read the audio samples to be played.
	 * Must be set before calling {@link #open()}.
	 *
	 * @param source The audio source.
	 */
	public void setSource(AudioSource source) {
		this.source = source;
	}

	/**
	 * Set the stream listener to monitor the stream state of this device.
	 *
	 * @param listener The stream listener.
	 */
	public void setStreamListener(StreamListener listener) {
		this.streamListener = listener;
	}

	@Override
	public String getName() {
		return device.getName();
	}

	@Override
	public int writeOutput(byte[] buffer, int offset, int length) {
		// Not used: audio is pulled from the configured source
		// (passed to createInputStream() in open()), not pushed through here.
		return 0;
	}

	@Override
	public void setVolume(double volume) {
		super.setVolume(volume);

		// Forward the new volume to the native stream, if one is open.
		if (nonNull(stream)) {
			stream.setVolume((float) volume);
		}
	}

	@Override
	public void open() throws Exception {
		// Dispose any previously created stream before opening a new one.
		if (nonNull(stream)) {
			stream.dispose();
			stream = null;
		}
		if (isNull(source)) {
			throw new NullPointerException("No audio source provided.");
		}

		AudioFormat audioFormat = getAudioFormat();

		// Translate the core audio format into the AVdev representation.
		org.lecturestudio.avdev.AudioFormat format = new org.lecturestudio.avdev.AudioFormat(
				org.lecturestudio.avdev.AudioFormat.SampleFormat.S16LE,
				audioFormat.getSampleRate(), audioFormat.getChannels());

		stream = device.createInputStream(source);
		stream.setAudioFormat(format);
		stream.setVolume((float) getVolume());

		if (nonNull(streamListener)) {
			stream.attachStreamListener(streamListener);
		}

		stream.open();
	}

	@Override
	public void close() throws Exception {
		if (nonNull(stream)) {
			stream.close();
			// Reset so isOpen() reflects the closed state
			// (mirrors AVdevAudioInputDevice.close()).
			stream = null;
		}
	}

	@Override
	public void start() throws Exception {
		if (nonNull(stream)) {
			stream.start();
		}
	}

	@Override
	public void stop() throws Exception {
		if (nonNull(stream)) {
			stream.stop();
		}
	}

	@Override
	public boolean isOpen() {
		// Fixed: previously always returned false, even while a stream was
		// open. Consistent with AVdevAudioInputDevice.isOpen().
		return nonNull(stream);
	}

	@Override
	public List<AudioFormat> getSupportedFormats() {
		AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
		int channels = 1;

		// Mono S16LE at each sample rate the core declares as supported.
		List<AudioFormat> formats = new ArrayList<>();

		for (int sampleRate : AudioDevice.SUPPORTED_SAMPLE_RATES) {
			formats.add(new AudioFormat(encoding, sampleRate, channels));
		}

		return formats;
	}

	@Override
	public int getBufferSize() {
		// No application-side buffer; buffering is handled by the native stream.
		return 0;
	}
}

View file

@ -1,190 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import java.awt.color.ColorSpace;
import java.awt.image.BufferedImage;
import java.awt.image.ColorModel;
import java.awt.image.ComponentColorModel;
import java.awt.image.DataBuffer;
import java.awt.image.DataBufferByte;
import java.awt.image.Raster;
import java.awt.image.WritableRaster;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import org.lecturestudio.avdev.PictureFormat;
import org.lecturestudio.avdev.VideoCaptureDevice;
import org.lecturestudio.avdev.VideoOutputStream;
import org.lecturestudio.avdev.VideoSink;
import org.lecturestudio.core.camera.AbstractCamera;
import org.lecturestudio.core.camera.CameraException;
import org.lecturestudio.core.camera.CameraFormat;
/**
* AVdev camera implementation.
*
* @author Alex Andres
*/
public class AVdevCamera extends AbstractCamera {

	/** The camera device. */
	private final VideoCaptureDevice device;

	/** The camera image output stream. */
	private VideoOutputStream stream;

	/* The buffered image receiving the captured frames. */
	private BufferedImage image;

	/* The temporary image buffer backing the BufferedImage's raster. */
	private byte[] imageBuffer;


	/**
	 * Create a AVdevCamera with the specified capture device.
	 *
	 * @param device The camera device.
	 */
	public AVdevCamera(VideoCaptureDevice device) {
		super();

		this.device = device;
	}

	@Override
	public String getName() {
		return device.getName();
	}

	@Override
	public String getDeviceDescriptor() {
		return device.getDescriptor();
	}

	@Override
	public void open() throws CameraException {
		if (!open.get()) {
			int width = getFormat().getWidth();
			int height = getFormat().getHeight();

			// Keep RGB format, since pixel format conversion is done by AVdev.
			PictureFormat format = new PictureFormat(PictureFormat.PixelFormat.RGB24, width, height);

			try {
				// Allocate the frame buffer before the stream starts writing.
				createBufferedImage(format.getWidth(), format.getHeight());

				device.setPictureFormat(format);
				device.setFrameRate((float) getFormat().getFrameRate());

				stream = device.createOutputStream(new PictureSink());
				stream.open();
				stream.start();
			}
			catch (Exception e) {
				throw new CameraException(e.getMessage(), e.getCause());
			}

			open.set(true);
		}
	}

	@Override
	public void close() throws CameraException {
		// compareAndSet ensures only one caller performs the shutdown.
		if (open.compareAndSet(true, false) && stream != null) {
			try {
				stream.stop();
				stream.close();
			}
			catch (Exception e) {
				throw new CameraException(e.getMessage(), e.getCause());
			}
		}
	}

	@Override
	public CameraFormat[] getSupportedFormats() {
		// Lazily query and cache the formats supported by the device.
		if (formats == null) {
			formats = getCameraFormats();
		}

		return formats;
	}

	/**
	 * Queries the device's picture formats and maps them to camera formats.
	 * Formats wider than 1920 pixels are skipped.
	 */
	private CameraFormat[] getCameraFormats() {
		Set<CameraFormat> set = new HashSet<>();

		List<PictureFormat> formats = device.getPictureFormats();

		if (formats != null && !formats.isEmpty()) {
			for (PictureFormat format : formats) {
				if (format.getWidth() > 1920) {
					continue;
				}

				// NOTE(review): frame rate is fixed at 30 fps for every
				// format — confirm the device does not report its own rates.
				set.add(new CameraFormat(format.getWidth(), format.getHeight(), 30));
			}
		}

		return set.toArray(new CameraFormat[0]);
	}

	/**
	 * (Re-)creates the BufferedImage backing store for incoming frames and
	 * keeps a direct reference to its pixel array for fast frame copying.
	 *
	 * @param width  The image width in pixels.
	 * @param height The image height in pixels.
	 */
	private void createBufferedImage(int width, int height) {
		// Release the previous image before allocating a new one.
		if (image != null) {
			image.flush();
			image = null;
			imageBuffer = null;
		}

		int bytesPerPixel = 3;
		int bufferSize = width * height * bytesPerPixel;

		DataBufferByte dataBuffer = new DataBufferByte(bufferSize);

		// Band offsets {2, 1, 0}: component bytes are stored in reversed
		// order within each pixel — presumably blue-first delivery from the
		// native side; TODO confirm against the AVdev RGB24 byte layout.
		WritableRaster raster = Raster.createInterleavedRaster(dataBuffer,
				width,
				height,
				width * bytesPerPixel,
				bytesPerPixel,
				new int[] { 2, 1, 0 },
				null);

		ColorModel colorModel = new ComponentColorModel(ColorSpace.getInstance(ColorSpace.CS_sRGB),
				new int[] { 8, 8, 8 },
				false,
				false,
				ComponentColorModel.OPAQUE, DataBuffer.TYPE_BYTE);

		image = new BufferedImage(colorModel, raster, false, null);
		imageBuffer = ((DataBufferByte) image.getRaster().getDataBuffer()).getData();
	}


	/**
	 * Video sink that copies each delivered frame into the image buffer.
	 */
	private class PictureSink implements VideoSink {

		@Override
		public void write(byte[] data, int length) {
			// Copy pixels.
			System.arraycopy(data, 0, imageBuffer, 0, imageBuffer.length);
		}
	}
}

View file

@ -1,65 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import org.lecturestudio.avdev.VideoCaptureDevice;
import org.lecturestudio.avdev.VideoDeviceManager;
import org.lecturestudio.core.camera.Camera;
import org.lecturestudio.core.camera.CameraDriver;
import java.util.ArrayList;
import java.util.List;
import java.util.TreeSet;
/**
* AVdev camera driver implementation.
*
* @author Alex Andres
*/
public class AVdevDriver implements CameraDriver {

	/** Tracks all known cameras across calls so instances stay consistent. */
	private final TreeSet<Camera> cameras = new TreeSet<>();


	@Override
	public Camera[] getCameras() {
		VideoDeviceManager manager = VideoDeviceManager.getInstance();
		List<VideoCaptureDevice> devices = manager.getVideoCaptureDevices();

		// Wrap every currently connected capture device.
		List<Camera> connected = new ArrayList<>();

		if (devices != null) {
			for (VideoCaptureDevice device : devices) {
				connected.add(new AVdevCamera(device));
			}
		}

		if (cameras.size() > connected.size()) {
			// A camera was unplugged; intersect to drop stale entries.
			cameras.retainAll(connected);
		}
		else {
			cameras.addAll(connected);
		}

		return cameras.toArray(new Camera[0]);
	}
}

View file

@ -1,123 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.lecturestudio.avdev.AudioCaptureDevice;
import org.lecturestudio.avdev.AudioDeviceManager;
import org.lecturestudio.avdev.AudioPlaybackDevice;
import org.lecturestudio.avdev.Device;
import org.lecturestudio.avdev.HotplugListener;
import org.lecturestudio.core.audio.bus.AudioBus;
import org.lecturestudio.core.audio.bus.event.AudioDeviceHotplugEvent;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.system.AbstractSoundSystemProvider;
/**
* AVdev sound system provider implementation.
*
* @author Alex Andres
*/
public class AVdevProvider extends AbstractSoundSystemProvider {

	/**
	 * Create a new AVdevProvider instance.
	 */
	public AVdevProvider() {
		AudioDeviceManager manager = AudioDeviceManager.getInstance();

		// Forward native device hot-plug events onto the application audio bus.
		if (manager != null) {
			HotplugListener listener = new HotplugListener() {

				@Override
				public void deviceDisconnected(Device device) {
					AudioBus.post(new AudioDeviceHotplugEvent(device.getName(),
							AudioDeviceHotplugEvent.Type.Disconnected));
				}

				@Override
				public void deviceConnected(Device device) {
					AudioBus.post(new AudioDeviceHotplugEvent(device.getName(),
							AudioDeviceHotplugEvent.Type.Connected));
				}
			};

			manager.attachHotplugListener(listener);
		}
	}

	@Override
	public AudioInputDevice getDefaultInputDevice() {
		AudioDeviceManager manager = AudioDeviceManager.getInstance();
		AudioCaptureDevice device = manager.getDefaultAudioCaptureDevice();

		return new AVdevAudioInputDevice(device);
	}

	@Override
	public AudioOutputDevice getDefaultOutputDevice() {
		AudioDeviceManager manager = AudioDeviceManager.getInstance();
		AudioPlaybackDevice device = manager.getDefaultAudioPlaybackDevice();

		return new AVdevAudioOutputDevice(device);
	}

	@Override
	public AudioInputDevice[] getInputDevices() {
		List<AudioInputDevice> inputDevices = new ArrayList<>();

		AudioDeviceManager manager = AudioDeviceManager.getInstance();
		List<AudioCaptureDevice> devices = manager.getAudioCaptureDevices();

		for (AudioCaptureDevice dev : devices) {
			// De-duplicate capture devices by name.
			Optional<AudioInputDevice> result = inputDevices.stream()
					.filter(device -> device.getName().equals(dev.getName()))
					.findAny();

			if (result.isEmpty()) {
				inputDevices.add(new AVdevAudioInputDevice(dev));
			}
		}

		return inputDevices.toArray(new AudioInputDevice[0]);
	}

	@Override
	public AudioOutputDevice[] getOutputDevices() {
		List<AudioOutputDevice> outputDevices = new ArrayList<>();

		AudioDeviceManager manager = AudioDeviceManager.getInstance();
		List<AudioPlaybackDevice> devices = manager.getAudioPlaybackDevices();

		// NOTE(review): unlike getInputDevices(), no de-duplication by name is
		// performed here — confirm whether that asymmetry is intentional.
		for (AudioPlaybackDevice dev : devices) {
			outputDevices.add(new AVdevAudioOutputDevice(dev));
		}

		return outputDevices.toArray(new AudioOutputDevice[0]);
	}

	@Override
	public String getProviderName() {
		return "AVdev";
	}
}

View file

@ -1,272 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.io.IOException;
import org.lecturestudio.avdev.StreamListener;
import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioPlaybackProgressListener;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.Player;
import org.lecturestudio.core.audio.SyncState;
import org.lecturestudio.core.audio.source.AudioSource;
import org.lecturestudio.core.model.Time;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* AVdev audio player implementation.
*
* @author Alex Andres
*/
public class AvdevAudioPlayer extends ExecutableBase implements Player {
private static final Logger LOG = LogManager.getLogger(AvdevAudioPlayer.class);
/** The audio playback device. */
private final AVdevAudioOutputDevice playbackDevice;
/** The audio source. */
private final AudioSource audioSource;
/** The playback progress listener. */
private AudioPlaybackProgressListener progressListener;
/** The player state listener. */
private ExecutableStateListener stateListener;
/** The sync state that is shared with other media players. */
private final SyncState syncState;
/** The audio source size. */
private final long inputSize;
/** The current audio source reading position. */
private long inputPos;
/**
* Create an AvdevAudioPlayer with the specified playback device and source.
* The sync state is shared with other media players to keep different media
* sources in sync while playing.
*
* @param device The audio playback device.
* @param source The audio source.
* @param syncState The shared sync state.
*
* @throws Exception If the audio player failed to initialize.
*/
public AvdevAudioPlayer(AVdevAudioOutputDevice device, AudioSource source, SyncState syncState) throws Exception {
if (isNull(device)) {
throw new NullPointerException("Missing audio playback device.");
}
if (isNull(source)) {
throw new NullPointerException("Missing audio source.");
}
this.playbackDevice = device;
this.audioSource = source;
this.syncState = syncState;
this.inputSize = source.getInputSize();
}
@Override
public void setVolume(float volume) {
if (volume < 0 || volume > 1) {
return;
}
playbackDevice.setVolume(volume);
}
@Override
public void setProgressListener(AudioPlaybackProgressListener listener) {
this.progressListener = listener;
}
@Override
public void setStateListener(ExecutableStateListener listener) {
this.stateListener = listener;
}
@Override
public void seek(int time) throws Exception {
AudioFormat format = audioSource.getAudioFormat();
float bytesPerSecond = AudioUtils.getBytesPerSecond(format);
int skipBytes = Math.round(bytesPerSecond * time / 1000F);
audioSource.reset();
audioSource.skip(skipBytes);
inputPos = skipBytes;
syncState.setAudioTime((long) (inputPos / (bytesPerSecond / 1000f)));
}
@Override
protected void initInternal() throws ExecutableException {
try {
audioSource.reset();
// Calculate bytes per millisecond.
float bpms = AudioUtils.getBytesPerSecond(audioSource.getAudioFormat()) / 1000f;
playbackDevice.setStreamListener(new StreamStateListener());
playbackDevice.setSource(new org.lecturestudio.avdev.AudioSource() {
int bytesRead = 0;
final Time progress = new Time(0);
final Time duration = new Time((long) (inputSize / bpms));
@Override
public int read(byte[] data, int offset, int length) throws IOException {
bytesRead = audioSource.read(data, 0, length);
inputPos += bytesRead;
if (nonNull(syncState)) {
syncState.setAudioTime((long) (inputPos / bpms));
}
if (bytesRead > 0 && inputSize > 0) {
if (nonNull(progressListener) && started()) {
progress.setMillis((long) (inputPos / bpms));
progressListener.onAudioProgress(progress, duration);
}
}
return bytesRead;
}
});
playbackDevice.setAudioFormat(audioSource.getAudioFormat());
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void startInternal() throws ExecutableException {
try {
if (!playbackDevice.isOpen()) {
playbackDevice.open();
}
playbackDevice.start();
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void stopInternal() throws ExecutableException {
try {
playbackDevice.stop();
audioSource.reset();
syncState.reset();
inputPos = 0;
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void suspendInternal() throws ExecutableException {
try {
playbackDevice.stop();
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
@Override
protected void destroyInternal() throws ExecutableException {
try {
audioSource.close();
}
catch (IOException e) {
throw new ExecutableException(e);
}
if (playbackDevice.isOpen()) {
try {
playbackDevice.close();
}
catch (Exception e) {
throw new ExecutableException(e);
}
}
}
@Override
protected void fireStateChanged() {
if (nonNull(stateListener)) {
stateListener.onExecutableStateChange(getPreviousState(), getState());
}
}
private class StreamStateListener implements StreamListener {
@Override
public void streamOpened() {
}
@Override
public void streamClosed() {
}
@Override
public void streamStarted() {
}
@Override
public void streamStopped() {
}
@Override
public void streamEnded() {
try {
stop();
}
catch (ExecutableException e) {
LOG.error("Stop audio player failed.", e);
}
}
}
}

View file

@ -1,208 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.io.IOException;
import org.lecturestudio.avdev.AudioSource;
import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.audio.AudioPlaybackProgressListener;
import org.lecturestudio.core.audio.Player;
import org.lecturestudio.core.audio.io.AudioPlaybackBuffer;
import org.lecturestudio.core.io.PlaybackData;
import org.lecturestudio.core.net.Synchronizer;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* Extended AVdev audio player implementation.
*
* @author Alex Andres
*/
public class AvdevAudioPlayerExt extends ExecutableBase implements Player {

	private static final Logger LOG = LogManager.getLogger(AvdevAudioPlayerExt.class);

	/** The audio playback device. */
	private final AVdevAudioOutputDevice playbackDevice;

	/** The audio source. */
	private final AudioPlaybackBuffer audioSource;

	/** The player state listener. */
	private ExecutableStateListener stateListener;


	/**
	 * Create an AvdevAudioPlayerExt with the specified playback device and
	 * audio source.
	 *
	 * @param device The audio playback device.
	 * @param source The audio source.
	 */
	public AvdevAudioPlayerExt(AVdevAudioOutputDevice device, AudioPlaybackBuffer source) {
		if (isNull(device)) {
			throw new NullPointerException("Missing audio playback device.");
		}
		if (isNull(source)) {
			throw new NullPointerException("Missing audio source.");
		}

		this.playbackDevice = device;
		this.audioSource = source;
	}

	@Override
	public void setVolume(float volume) {
		// Silently ignore values outside the valid range [0, 1].
		if (volume < 0 || volume > 1) {
			return;
		}

		playbackDevice.setVolume(volume);
	}

	@Override
	public void seek(int time) {
		audioSource.skip(time);
	}

	@Override
	public void setProgressListener(AudioPlaybackProgressListener listener) {
		// Progress reporting is not supported by this player.
	}

	@Override
	public void setStateListener(ExecutableStateListener listener) {
		this.stateListener = listener;
	}

	@Override
	protected void initInternal() throws ExecutableException {
		audioSource.reset();

		try {
			playbackDevice.setSource(new AudioSource() {

				@Override
				public int read(byte[] data, int offset, int length) throws IOException {
					// Track per-call; a stale field value must not be reported
					// when no samples are available.
					int bytesRead = 0;

					try {
						PlaybackData<byte[]> samples = audioSource.take();

						if (nonNull(samples)) {
							byte[] buffer = samples.getData();

							Synchronizer.setAudioTime(samples.getTimestamp());

							if (buffer.length != length) {
								LOG.warn("Buffer sizes differ - required: " + length + ", playback buffer: " + buffer.length);
							}

							// Copy at the requested offset and never report
							// more bytes than were actually written to 'data'.
							bytesRead = Math.min(buffer.length, length);

							System.arraycopy(buffer, 0, data, offset, bytesRead);
						}
					}
					catch (Exception e) {
						LOG.error("Read playback buffer failed.", e);

						try {
							stop();
						}
						catch (ExecutableException ex) {
							throw new IOException(ex);
						}
					}

					return bytesRead;
				}
			});
			playbackDevice.setAudioFormat(audioSource.getAudioFormat());
		}
		catch (Exception e) {
			throw new ExecutableException(e);
		}
	}

	@Override
	protected void startInternal() throws ExecutableException {
		try {
			// The device may already be open when resuming from a suspend.
			if (!playbackDevice.isOpen()) {
				playbackDevice.open();
			}

			playbackDevice.start();
		}
		catch (Exception e) {
			throw new ExecutableException(e);
		}
	}

	@Override
	protected void suspendInternal() throws ExecutableException {
		try {
			playbackDevice.stop();
		}
		catch (Exception e) {
			throw new ExecutableException(e);
		}
	}

	@Override
	protected void stopInternal() throws ExecutableException {
		try {
			playbackDevice.stop();
			audioSource.reset();
		}
		catch (Exception e) {
			throw new ExecutableException(e);
		}
	}

	@Override
	protected void destroyInternal() throws ExecutableException {
		audioSource.reset();

		if (playbackDevice.isOpen()) {
			try {
				playbackDevice.close();
			}
			catch (Exception e) {
				throw new ExecutableException(e);
			}
		}
	}

	@Override
	protected void fireStateChanged() {
		if (nonNull(stateListener)) {
			stateListener.onExecutableStateChange(getPreviousState(), getState());
		}
	}
}

View file

@ -1,152 +0,0 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.avdev;
import org.lecturestudio.avdev.AudioSink;
import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.audio.AudioFormat;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
/**
* Default AVdevAudioRecorder implementation. The AVdev implementation has an
* advantage over the default Java implementation, multiple AVdev recorder
* instances can be used at the same time recording with different audio
* formats.
*
* @author Alex Andres
*/
public class AvdevAudioRecorder {

	private static final Logger LOG = LogManager.getLogger(AvdevAudioRecorder.class);

	/** The audio capture device used for recording. */
	private final AVdevAudioInputDevice device;

	/** Tracks whether the recorder is stopped, started or suspended. */
	private ExecutableState state = ExecutableState.Stopped;


	/**
	 * Create an AvdevAudioRecorder with the specified audio capture device.
	 *
	 * @param device The audio capture device.
	 */
	public AvdevAudioRecorder(AVdevAudioInputDevice device) {
		this.device = device;
	}

	/**
	 * Set the audio sink that will receive the captured audio samples.
	 *
	 * @param sink The audio sink to set.
	 */
	public void setSink(AudioSink sink) {
		device.setSink(sink);
	}

	/**
	 * Set the audio format with which the capture device should capture audio.
	 *
	 * @param format The audio format for the capture device.
	 */
	public void setAudioFormat(AudioFormat format) {
		device.setAudioFormat(format);
	}

	/**
	 * Set the recording audio volume. The value must be in the range of [0,1].
	 *
	 * @param volume The recording audio volume.
	 */
	public void setAudioVolume(double volume) {
		device.setVolume(volume);
	}

	/**
	 * Start capturing audio. Has no effect if the recorder is already running.
	 */
	public void start() {
		if (state == ExecutableState.Started) {
			// Already recording.
			return;
		}

		try {
			// The device is only opened when coming from the stopped state;
			// resuming from a pause keeps the device open.
			if (state == ExecutableState.Stopped) {
				device.open();
			}

			device.start();

			state = ExecutableState.Started;
		}
		catch (Exception e) {
			LOG.error("Start audio recorder failed", e);
		}
	}

	/**
	 * Pause audio capturing. The audio sink will not receive any audio data
	 * until {@link #start()} is called again.
	 */
	public void pause() {
		if (state != ExecutableState.Started) {
			// Nothing to pause.
			return;
		}

		try {
			device.stop();

			state = ExecutableState.Suspended;
		}
		catch (Exception e) {
			LOG.error("Pause audio recorder failed", e);
		}
	}

	/**
	 * Stop audio capturing and release all assigned device resources.
	 */
	public void stop() {
		if (state == ExecutableState.Stopped) {
			// Already stopped.
			return;
		}

		try {
			// Only a running device needs to be stopped before closing; a
			// paused device has already been stopped.
			if (state == ExecutableState.Started) {
				device.stop();
			}

			device.close();

			state = ExecutableState.Stopped;
		}
		catch (Exception e) {
			LOG.error("Stop audio recorder failed", e);
		}
	}
}

View file

@ -27,7 +27,6 @@ import org.lecturestudio.core.camera.Camera;
import org.lecturestudio.core.camera.CameraDiscovery;
import org.lecturestudio.core.camera.CameraDriver;
import org.lecturestudio.core.camera.CameraException;
import org.lecturestudio.media.avdev.AVdevDriver;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@ -67,7 +66,7 @@ public final class CameraService {
*/
public static CameraService get() {
if (instance == null) {
instance = new CameraService(new AVdevDriver());
instance = new CameraService(null);
}
return instance;

View file

@ -18,6 +18,7 @@
package org.lecturestudio.media.net.client;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
@ -29,22 +30,19 @@ import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioPlayerExt;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.Player;
import org.lecturestudio.core.audio.codec.AudioCodecProvider;
import org.lecturestudio.core.audio.codec.AudioDecoder;
import org.lecturestudio.core.audio.codec.AudioDecoderListener;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.io.AudioPlaybackBuffer;
import org.lecturestudio.core.audio.source.AudioSource;
import org.lecturestudio.core.io.PlaybackData;
import org.lecturestudio.core.net.MediaType;
import org.lecturestudio.core.net.rtp.RtpDepacketizer;
import org.lecturestudio.core.net.rtp.RtpPacket;
import org.lecturestudio.core.net.rtp.RtpReceiveBuffer;
import org.lecturestudio.core.net.rtp.RtpReceiveBufferNode;
import org.lecturestudio.media.avdev.AVdevAudioOutputDevice;
import org.lecturestudio.media.avdev.AvdevAudioPlayerExt;
import org.lecturestudio.core.audio.AudioPlayer;
import org.lecturestudio.media.webrtc.WebRtcAudioPlayer;
/**
* The {@code RtpAudioClient} receives the audio stream from a server. This
@ -73,7 +71,7 @@ public class RtpAudioClient extends ExecutableBase implements MediaStreamClient<
private final RtpDepacketizer depacketizer;
/** The audio player. */
private Player audioPlayer;
private AudioPlayer audioPlayer;
/** The receive buffer to re-order packets. */
private RtpReceiveBuffer receiveBuffer;
@ -101,7 +99,7 @@ public class RtpAudioClient extends ExecutableBase implements MediaStreamClient<
return;
}
audioPlayer.setVolume(volume);
audioPlayer.setAudioVolume(volume);
}
@Override
@ -160,26 +158,54 @@ public class RtpAudioClient extends ExecutableBase implements MediaStreamClient<
playbackBuffer = null;
}
private void initAudioPlayer() throws ExecutableException {
String providerName = audioConfig.getSoundSystem();
private void initAudioPlayer() {
String outputDeviceName = audioConfig.getPlaybackDeviceName();
AudioOutputDevice outputDevice = AudioUtils.getAudioOutputDevice(providerName, outputDeviceName);
playbackBuffer = new AudioPlaybackBuffer();
playbackBuffer.setAudioFormat(audioFormat);
try {
if (providerName.equals("AVdev")) {
audioPlayer = new AvdevAudioPlayerExt((AVdevAudioOutputDevice) outputDevice, playbackBuffer);
audioPlayer = new WebRtcAudioPlayer();
audioPlayer.setAudioSource(new AudioSource() {
@Override
public int read(byte[] data, int offset, int length)
throws IOException {
return 0;
}
else {
audioPlayer = new AudioPlayerExt(outputDevice, playbackBuffer);
@Override
public void close() throws IOException {
}
}
catch (Exception e) {
throw new ExecutableException(e);
}
@Override
public void reset() throws IOException {
playbackBuffer.reset();
}
@Override
public long skip(long n) throws IOException {
return 0;
}
@Override
public int seekMs(int timeMs) throws IOException {
playbackBuffer.skip(timeMs);
return timeMs;
}
@Override
public long getInputSize() throws IOException {
return 0;
}
@Override
public AudioFormat getAudioFormat() {
return playbackBuffer.getAudioFormat();
}
});
audioPlayer.setAudioVolume(1.0);
audioPlayer.setAudioDeviceName(outputDeviceName);
}

View file

@ -27,9 +27,7 @@ import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.media.avdev.AVdevAudioInputDevice;
import org.lecturestudio.media.avdev.AvdevAudioRecorder;
import org.lecturestudio.core.audio.AudioRecorder;
import org.lecturestudio.core.audio.RingBuffer;
import org.lecturestudio.core.audio.bus.AudioBus;
import org.lecturestudio.core.audio.bus.event.AudioVolumeEvent;
@ -40,6 +38,7 @@ import org.lecturestudio.core.audio.codec.AudioEncoderListener;
import org.lecturestudio.core.net.Sync;
import org.lecturestudio.core.net.rtp.RtpPacket;
import org.lecturestudio.core.net.rtp.RtpPacketizer;
import org.lecturestudio.media.webrtc.WebRtcAudioRecorder;
import org.lecturestudio.media.config.AudioStreamConfig;
import org.lecturestudio.web.api.connector.client.ClientConnector;
@ -68,7 +67,7 @@ public class RtpAudioServer extends ExecutableBase {
/** The audio reader reads recorded audio from buffer and calls the encoder. */
private AudioReader audioReader;
private AvdevAudioRecorder audioRecorder;
private AudioRecorder audioRecorder;
private final AudioStreamConfig audioStreamConfig;
@ -110,18 +109,13 @@ public class RtpAudioServer extends ExecutableBase {
AudioCodecProvider codecProvider = AudioCodecLoader.getInstance().getProvider(audioStreamConfig.codec);
AudioFormat audioFormat = audioStreamConfig.format;
AudioInputDevice inputDevice = AudioUtils.getAudioInputDevice(audioStreamConfig.system, audioStreamConfig.captureDeviceName);
if (inputDevice == null) {
throw new NullPointerException("Could not get audio capture device");
}
ringBuffer = new RingBuffer(1024 * 1024);
audioRecorder = new AvdevAudioRecorder((AVdevAudioInputDevice) inputDevice);
audioRecorder.setAudioFormat(audioFormat);
audioRecorder = new WebRtcAudioRecorder();
audioRecorder.setAudioDeviceName(audioStreamConfig.captureDeviceName);
audioRecorder.setAudioVolume(1);
audioRecorder.setSink((data, length) -> ringBuffer.write(data, 0, length));
audioRecorder.setAudioSink(ringBuffer);
encoder = codecProvider.getAudioEncoder();
encoder.addListener(new EncoderListener(codecProvider.getRtpPacketizer()));

View file

@ -27,13 +27,9 @@ import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioPlayer;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.media.avdev.AvdevAudioPlayer;
import org.lecturestudio.core.audio.Player;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.SyncState;
import org.lecturestudio.core.audio.bus.AudioBus;
import org.lecturestudio.media.avdev.AVdevAudioOutputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.source.AudioInputStreamSource;
import org.lecturestudio.core.bus.ApplicationBus;
import org.lecturestudio.core.controller.ToolController;
@ -68,6 +64,8 @@ public class RecordingPlayer extends ExecutableBase {
private final AudioConfiguration audioConfig;
private final AudioSystemProvider audioSystemProvider;
private final SyncState syncState = new SyncState();
private RandomAccessAudioStream audioStream;
@ -78,7 +76,7 @@ public class RecordingPlayer extends ExecutableBase {
private EventExecutor actionExecutor;
private Player audioPlayer;
private AudioPlayer audioPlayer;
private ToolController toolController;
@ -91,9 +89,12 @@ public class RecordingPlayer extends ExecutableBase {
private MediaPlayerProgressEvent progressEvent;
public RecordingPlayer(ApplicationContext context, AudioConfiguration audioConfig) {
public RecordingPlayer(ApplicationContext context,
AudioConfiguration audioConfig,
AudioSystemProvider audioSystemProvider) {
this.context = context;
this.audioConfig = audioConfig;
this.audioSystemProvider = audioSystemProvider;
}
public void setRecording(Recording recording) {
@ -156,10 +157,10 @@ public class RecordingPlayer extends ExecutableBase {
@Override
protected void stopInternal() throws ExecutableException {
if (audioPlayer.getState() == ExecutableState.Suspended || audioPlayer.getState() == ExecutableState.Started) {
if (audioPlayer.suspended() || audioPlayer.started()) {
audioPlayer.stop();
}
if (actionExecutor.getState() == ExecutableState.Suspended || actionExecutor.getState() == ExecutableState.Started) {
if (actionExecutor.suspended() || actionExecutor.started()) {
actionExecutor.stop();
}
@ -169,10 +170,10 @@ public class RecordingPlayer extends ExecutableBase {
@Override
protected void suspendInternal() throws ExecutableException {
if (audioPlayer.getState() == ExecutableState.Started) {
if (audioPlayer.started()) {
audioPlayer.suspend();
}
if (actionExecutor.getState() == ExecutableState.Started) {
if (actionExecutor.started()) {
actionExecutor.suspend();
}
}
@ -194,16 +195,12 @@ public class RecordingPlayer extends ExecutableBase {
return duration;
}
public Recording getRecordingFile() {
return recording;
}
public void setVolume(float volume) {
if (isNull(audioPlayer)) {
throw new NullPointerException("Audio player not initialized");
}
audioPlayer.setVolume(volume);
audioPlayer.setAudioVolume(volume);
}
public void selectNextPage() throws Exception {
@ -309,21 +306,18 @@ public class RecordingPlayer extends ExecutableBase {
audioStream.setAudioFormat(targetFormat);
AudioInputStreamSource audioSource = new AudioInputStreamSource(audioStream, targetFormat);
String providerName = audioConfig.getSoundSystem();
String outputDeviceName = audioConfig.getPlaybackDeviceName();
AudioOutputDevice outputDevice = AudioUtils.getAudioOutputDevice(providerName, outputDeviceName);
if (providerName.equals("AVdev")) {
audioPlayer = new AvdevAudioPlayer((AVdevAudioOutputDevice) outputDevice, audioSource, syncState);
if (isNull(outputDeviceName)) {
outputDeviceName = audioSystemProvider.getDefaultPlaybackDevice().getName();
}
else {
audioPlayer = new AudioPlayer(outputDevice, audioSource, syncState);
}
audioPlayer.setProgressListener(this::onAudioPlaybackProgress);
audioPlayer.setStateListener(this::onAudioStateChange);
audioPlayer = audioSystemProvider.createAudioPlayer();
audioPlayer.setAudioVolume(1.0);
audioPlayer.setAudioDeviceName(outputDeviceName);
audioPlayer.setAudioSource(audioSource);
audioPlayer.setAudioProgressListener(this::onAudioPlaybackProgress);
audioPlayer.addStateListener(this::onAudioStateChange);
audioPlayer.init();
}
@ -347,7 +341,9 @@ public class RecordingPlayer extends ExecutableBase {
int pageNumber = document.getCurrentPageNumber() + 1;
int pageCount = document.getPageCount();
syncState.setAudioTime(progress.getMillis());
progressEvent.setCurrentTime(progress);
progressEvent.setTotalTime(this.duration);
progressEvent.setPageNumber(pageNumber);

View file

@ -31,6 +31,7 @@ import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.ExecutableStateListener;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.model.Page;
import org.lecturestudio.core.model.Time;
import org.lecturestudio.core.recording.Recording;
@ -42,6 +43,8 @@ public class RecordingPlaybackService extends ExecutableBase {
private final static Logger LOG = LogManager.getLogger(RecordingPlaybackService.class);
private final AudioSystemProvider audioSystemProvider;
private final PlaybackContext context;
private final ExecutableStateListener playbackStateListener = (oldState, newState) -> {
@ -59,7 +62,8 @@ public class RecordingPlaybackService extends ExecutableBase {
@Inject
RecordingPlaybackService(ApplicationContext context) {
RecordingPlaybackService(ApplicationContext context, AudioSystemProvider audioSystemProvider) {
this.audioSystemProvider = audioSystemProvider;
this.context = (PlaybackContext) context;
this.context.primarySelectionProperty().addListener((o, oldValue, newValue) -> {
if (initialized() || suspended()) {
@ -82,7 +86,9 @@ public class RecordingPlaybackService extends ExecutableBase {
closeRecording();
}
recordingPlayer = new RecordingPlayer(context, context.getConfiguration().getAudioConfig());
recordingPlayer = new RecordingPlayer(context,
context.getConfiguration().getAudioConfig(),
audioSystemProvider);
recordingPlayer.setRecording(recording);
try {

View file

@ -45,7 +45,7 @@ public class AVDefaults {
};
public static Integer[] SAMPLE_RATES = {
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 96000
};
public static Integer[] OPUS_SAMPLE_RATES = {

View file

@ -0,0 +1,108 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import static java.util.Objects.nonNull;
import dev.onvoid.webrtc.media.audio.AudioProcessingConfig;
import dev.onvoid.webrtc.media.audio.AudioProcessingConfig.NoiseSuppression;
import org.lecturestudio.core.audio.AudioProcessingSettings;
import org.lecturestudio.core.audio.AudioProcessingSettings.NoiseSuppressionLevel;
import org.lecturestudio.core.beans.Converter;
/**
* WebRTC {@code AudioProcessingConfig} to {@code AudioProcessingSettings}
* converter.
*
* @author Alex Andres
*/
public class AudioProcessingConfigConverter implements
		Converter<AudioProcessingConfig, AudioProcessingSettings> {

	/** Shared stateless converter instance. */
	public static final AudioProcessingConfigConverter INSTANCE = new AudioProcessingConfigConverter();


	@Override
	public AudioProcessingSettings to(AudioProcessingConfig config) {
		AudioProcessingSettings result = new AudioProcessingSettings();

		// Copy the per-feature enabled flags one-to-one.
		result.setEchoCancellerEnabled(config.echoCanceller.enabled);
		result.setGainControlEnabled(config.gainControl.enabled);
		result.setHighpassFilterEnabled(config.highPassFilter.enabled);
		result.setNoiseSuppressionEnabled(config.noiseSuppression.enabled);
		result.setLevelEstimationEnabled(config.levelEstimation.enabled);
		result.setVoiceDetectionEnabled(config.voiceDetection.enabled);

		NoiseSuppression.Level level = config.noiseSuppression.level;

		if (nonNull(level)) {
			result.setNoiseSuppressionLevel(getNsLevel(level));
		}

		return result;
	}

	@Override
	public AudioProcessingConfig from(AudioProcessingSettings settings) {
		AudioProcessingConfig result = new AudioProcessingConfig();

		result.echoCanceller.enabled = settings.isEchoCancellerEnabled();
		result.echoCanceller.enforceHighPassFiltering = false;
		result.gainControl.enabled = settings.isGainControlEnabled();
		result.highPassFilter.enabled = settings.isHighpassFilterEnabled();
		result.noiseSuppression.enabled = settings.isNoiseSuppressionEnabled();
		// The residual echo detector and transient suppression piggyback on
		// the echo canceller and noise suppression settings respectively.
		result.residualEchoDetector.enabled = settings.isEchoCancellerEnabled();
		result.transientSuppression.enabled = settings.isNoiseSuppressionEnabled();
		result.levelEstimation.enabled = settings.isLevelEstimationEnabled();
		result.voiceDetection.enabled = settings.isVoiceDetectionEnabled();

		NoiseSuppressionLevel level = settings.getNoiseSuppressionLevel();

		if (nonNull(level)) {
			result.noiseSuppression.level = getNativeNsLevel(level);
		}

		return result;
	}

	/**
	 * Map a native WebRTC noise suppression level to the application setting.
	 * Unmapped levels fall back to {@code MODERATE}.
	 */
	private NoiseSuppressionLevel getNsLevel(NoiseSuppression.Level nsLevel) {
		if (nsLevel == NoiseSuppression.Level.LOW) {
			return NoiseSuppressionLevel.LOW;
		}
		if (nsLevel == NoiseSuppression.Level.HIGH) {
			return NoiseSuppressionLevel.HIGH;
		}
		if (nsLevel == NoiseSuppression.Level.VERY_HIGH) {
			return NoiseSuppressionLevel.VERY_HIGH;
		}
		return NoiseSuppressionLevel.MODERATE;
	}

	/**
	 * Map an application noise suppression level to the native WebRTC level.
	 * Unmapped levels fall back to {@code MODERATE}.
	 */
	private NoiseSuppression.Level getNativeNsLevel(NoiseSuppressionLevel nsLevel) {
		if (nsLevel == NoiseSuppressionLevel.LOW) {
			return NoiseSuppression.Level.LOW;
		}
		if (nsLevel == NoiseSuppressionLevel.HIGH) {
			return NoiseSuppression.Level.HIGH;
		}
		if (nsLevel == NoiseSuppressionLevel.VERY_HIGH) {
			return NoiseSuppression.Level.VERY_HIGH;
		}
		return NoiseSuppression.Level.MODERATE;
	}
}

View file

@ -0,0 +1,50 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import dev.onvoid.webrtc.media.audio.AudioProcessingStats;
import org.lecturestudio.core.beans.Converter;
/**
* WebRTC {@code AudioProcessingStats} converter.
*
* @author Alex Andres
*/
public class AudioProcessingStatsConverter implements
		Converter<AudioProcessingStats, org.lecturestudio.core.audio.AudioProcessingStats> {

	/** Shared stateless converter instance. */
	public static final AudioProcessingStatsConverter INSTANCE = new AudioProcessingStatsConverter();


	@Override
	public org.lecturestudio.core.audio.AudioProcessingStats to(AudioProcessingStats stats) {
		org.lecturestudio.core.audio.AudioProcessingStats converted =
				new org.lecturestudio.core.audio.AudioProcessingStats();

		// Straight field-by-field copy of the native stats.
		converted.delayMs = stats.delayMs;
		converted.outputRmsDbfs = stats.outputRmsDbfs;
		converted.voiceDetected = stats.voiceDetected;

		return converted;
	}

	@Override
	public AudioProcessingStats from(org.lecturestudio.core.audio.AudioProcessingStats stats) {
		// Conversion back to the native WebRTC stats is not supported.
		return null;
	}
}

View file

@ -0,0 +1,77 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import java.io.IOException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.sink.AudioSink;
/**
 * An {@code AudioSinkNode} adapts an {@code AudioSink} to the
 * {@code WebRtcAudioSinkNode} interface, forwarding all received audio data
 * to the wrapped sink.
 *
 * @author Alex Andres
 */
public class AudioSinkNode implements WebRtcAudioSinkNode {

	/** The wrapped audio sink that receives all audio data. */
	private final AudioSink sink;


	/**
	 * Creates a new {@code AudioSinkNode} that delegates to the provided
	 * audio sink.
	 *
	 * @param sink The audio sink to write received audio data to.
	 */
	public AudioSinkNode(AudioSink sink) {
		this.sink = sink;
	}

	@Override
	public void initialize() {
		try {
			sink.open();
		}
		catch (IOException cause) {
			throw new RuntimeException(cause);
		}
	}

	@Override
	public void destroy() {
		try {
			sink.close();
		}
		catch (IOException cause) {
			throw new RuntimeException(cause);
		}
	}

	@Override
	public void setAudioSinkFormat(AudioFormat format) {
		// Intentionally empty: the wrapped sink's format is fixed elsewhere.
	}

	@Override
	public void onData(byte[] audioSamples, int nSamples, int nBytesPerSample,
			int nChannels, int samplesPerSec, int totalDelayMS,
			int clockDrift) {
		// NOTE(review): writes the whole buffer, ignoring nSamples /
		// nBytesPerSample — assumes the buffer holds exactly one frame.
		// Confirm against the native callback contract.
		final int length = audioSamples.length;

		try {
			sink.write(audioSamples, 0, length);
		}
		catch (IOException cause) {
			throw new RuntimeException(cause);
		}
	}
}

View file

@ -1,5 +1,5 @@
/*
* Copyright (C) 2020 TU Darmstadt, Department of Computer Science,
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
@ -16,21 +16,23 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.core.audio.codec.ffmpeg;
import org.lecturestudio.core.net.rtp.RtpDepacketizer;
import org.lecturestudio.core.net.rtp.RtpPacket;
package org.lecturestudio.media.webrtc;
/**
* FFmpeg RTP depacketizer implementation.
* A generic interface for representing an audio processing graph.
*
* @author Alex Andres
*/
public class FFmpegRtpDepacketizer implements RtpDepacketizer {
public interface WebRtcAudioNode {
@Override
public byte[] processPacket(RtpPacket packet) {
return packet.getPayload();
}
/**
* Initialize this node by, e.g. allocating resources.
*/
void initialize();
/**
* Destroys this node by releasing all allocated resources.
*/
void destroy();
}

View file

@ -0,0 +1,297 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import static java.util.Objects.requireNonNull;
import dev.onvoid.webrtc.media.Device;
import dev.onvoid.webrtc.media.MediaDevices;
import dev.onvoid.webrtc.media.audio.AudioConverter;
import dev.onvoid.webrtc.media.audio.AudioDevice;
import dev.onvoid.webrtc.media.audio.AudioDeviceModule;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioPlaybackProgressListener;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.source.AudioSource;
import org.lecturestudio.core.model.Time;
import org.lecturestudio.core.audio.AudioPlayer;
/**
* AudioPlayer implementation based on the WebRTC audio stack. This player can
* utilize WebRTC audio processing filters, such as HighPassFilter,
* NoiseSuppression, EchoCancellation, VoiceDetection, etc.
*
* @author Alex Andres
*/
public class WebRtcAudioPlayer extends ExecutableBase implements AudioPlayer {

	// True while samples are being read from the source. Reset exactly once
	// (compareAndSet) when the source is drained, so that playback is not
	// stopped twice.
	private final AtomicBoolean reading = new AtomicBoolean();

	// Native WebRTC audio device module that drives the playout device.
	private AudioDeviceModule deviceModule;

	// The selected playback (render) device.
	private AudioDevice playbackDevice;

	// The audio source providing the samples to play.
	private AudioSource source;

	// Converts frames from the source format to the device format. Created
	// lazily in the playout callback, where the device format is first known.
	private AudioConverter audioConverter;

	/** The playback progress listener. */
	private AudioPlaybackProgressListener progressListener;

	// Current playback position, updated on each playout callback.
	private Time progress = new Time();

	// Total duration of the source, derived from its size and byte rate.
	private Time duration;

	// Playback volume in the range [0, 1], mapped onto the device's
	// min/max speaker volume range.
	private double volume;

	// Holds one 10 ms frame read from the source per playout callback.
	private byte[] buffer;

	/** The audio source size. */
	private long inputSize;

	/** The current audio source reading position. */
	private long inputPos;


	@Override
	public void setAudioProgressListener(AudioPlaybackProgressListener listener) {
		this.progressListener = listener;
	}

	@Override
	public void seek(int timeMs) throws Exception {
		// The source returns the new byte position for the given time.
		inputPos = source.seekMs(timeMs);
	}

	@Override
	public void setAudioDeviceName(String deviceName) {
		// Resolve the device by name among the available render devices;
		// null if no device with that name exists.
		playbackDevice = getDeviceByName(MediaDevices.getAudioRenderDevices(),
				deviceName);
	}

	@Override
	public void setAudioSource(AudioSource source) {
		this.source = source;

		try {
			// Cache the total size; used to compute duration and progress.
			this.inputSize = source.getInputSize();
		}
		catch (IOException e) {
			throw new RuntimeException(e);
		}
	}

	@Override
	public void setAudioVolume(double volume) {
		this.volume = volume;

		// Apply immediately only while playing; otherwise the value is
		// picked up in startInternal().
		if (started()) {
			setAudioModuleVolume();
		}
	}

	/**
	 * Validates the configuration, computes frame sizes and duration, and
	 * wires an audio source callback into a fresh device module.
	 *
	 * @throws ExecutableException If the source or device module could not
	 *                             be initialized.
	 */
	@Override
	protected void initInternal() throws ExecutableException {
		requireNonNull(source, "Audio source must be set");
		requireNonNull(source.getAudioFormat(), "Audio format must be set");
		requireNonNull(playbackDevice, "Audio playback device must be set");

		AudioFormat format = source.getAudioFormat();
		// Calculate bytes per millisecond.
		float bytesPerMs = AudioUtils.getBytesPerSecond(format) / 1000f;

		progress = new Time(0);
		duration = new Time((long) (inputSize / bytesPerMs));

		// WebRTC processes audio in 10 ms frames: samples = sampleRate / 100.
		final int nSamplesIn = format.getSampleRate() / 100;
		// 2 bytes per sample (16-bit PCM) times the number of channels.
		final int nBytesIn = nSamplesIn * format.getChannels() * 2;

		buffer = new byte[nBytesIn];

		try {
			source.reset();

			deviceModule = new AudioDeviceModule();
			deviceModule.setPlayoutDevice(playbackDevice);
			deviceModule.setAudioSource(new dev.onvoid.webrtc.media.audio.AudioSource() {

				int bytesRead = 0;

				// NOTE(review): never assigned after initialization, so the
				// error check below is currently inert — confirm intent.
				int processResult;

				@Override
				public int onPlaybackData(byte[] audioSamples, int nSamples,
						int nBytesPerSample, int nChannels, int samplesPerSec) {
					// Create the converter lazily: the device format
					// (samplesPerSec, nChannels) is only known here.
					if (isNull(audioConverter)) {
						audioConverter = new AudioConverter(
								format.getSampleRate(), format.getChannels(),
								samplesPerSec, nChannels);
					}

					try {
						bytesRead = source.read(buffer, 0, nBytesIn);
						inputPos += bytesRead;

						// Audio conversion.
						audioConverter.convert(buffer, audioSamples);
					}
					catch (IOException e) {
						panic(e, "Read audio samples failed");
						return -1;
					}

					if (processResult != 0) {
						panic(new IllegalAccessError("Error code: " + processResult),
								"Process audio samples failed");
						return -1;
					}

					if (bytesRead > 0) {
						updateProgress();
					}
					else if (reading.compareAndSet(true, false)) {
						// Source drained: stop playback exactly once.
						stopPlayback();
					}

					return bytesRead;
				}
			});
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}
	}

	/**
	 * Suspends playback by stopping playout; the source position is kept,
	 * so playback can resume where it left off.
	 *
	 * @throws ExecutableException If playout could not be stopped.
	 */
	@Override
	protected void suspendInternal() throws ExecutableException {
		try {
			deviceModule.stopPlayout();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}
	}

	/**
	 * Starts (or resumes) playout on the device module and applies the
	 * current volume.
	 *
	 * @throws ExecutableException If playout could not be started.
	 */
	@Override
	protected void startInternal() throws ExecutableException {
		reading.set(true);

		try {
			deviceModule.initPlayout();

			setAudioModuleVolume();

			deviceModule.startPlayout();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}
	}

	/**
	 * Stops playout and rewinds the source to the beginning.
	 *
	 * @throws ExecutableException If playout could not be stopped or the
	 *                             source could not be reset.
	 */
	@Override
	protected void stopInternal() throws ExecutableException {
		try {
			deviceModule.stopPlayout();

			source.reset();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}

		inputPos = 0;
	}

	/**
	 * Releases the native device module and closes the audio source.
	 *
	 * @throws ExecutableException If disposing the module or closing the
	 *                             source failed.
	 */
	@Override
	protected void destroyInternal() throws ExecutableException {
		try {
			deviceModule.dispose();
			deviceModule = null;
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}

		try {
			source.close();
		}
		catch (IOException e) {
			throw new ExecutableException(e);
		}
	}

	// Maps the normalized volume [0, 1] linearly onto the device module's
	// speaker volume range.
	private void setAudioModuleVolume() {
		int maxVolume = deviceModule.getMaxSpeakerVolume();
		int minVolume = deviceModule.getMinSpeakerVolume();
		int v = (int) (minVolume + (maxVolume - minVolume) * volume);

		deviceModule.setSpeakerVolume(v);
	}

	// Logs the error and stops playback; used from the playout callback.
	private void panic(Throwable error, String description) {
		logException(error, description);

		stopPlayback();
	}

	// Notifies the progress listener with the position derived from the
	// bytes read so far, but only while actually playing.
	private void updateProgress() {
		if (nonNull(progressListener) && started()) {
			progress.setMillis((long) (inputPos / (double) inputSize * duration.getMillis()));

			progressListener.onAudioProgress(progress, duration);
		}
	}

	// Stops playback asynchronously — presumably to avoid calling stop()
	// from within the native playout callback thread; confirm.
	private void stopPlayback() {
		CompletableFuture.runAsync(() -> {
			try {
				stop();
			}
			catch (ExecutableException e) {
				logException(e, "Stop playback failed");
			}
		});
	}

	/**
	 * Searches the provided list for a device with the provided name.
	 *
	 * @param devices The device list in which to search for the device.
	 * @param name    The name of the device to search for.
	 * @param <T>     The device type.
	 *
	 * @return The device with the specified name or {@code null} if not found.
	 */
	private <T extends Device> T getDeviceByName(List<T> devices, String name) {
		return devices.stream()
				.filter(device -> device.getName().equals(name))
				.findFirst()
				.orElse(null);
	}
}

View file

@ -0,0 +1,121 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import static java.util.Objects.requireNonNull;
import dev.onvoid.webrtc.media.audio.AudioProcessing;
import dev.onvoid.webrtc.media.audio.AudioProcessingConfig;
import dev.onvoid.webrtc.media.audio.AudioProcessingStreamConfig;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioProcessingStats;
/**
 * Audio graph node that runs audio through the native WebRTC
 * {@code AudioProcessing} pipeline (e.g. high-pass filter, noise
 * suppression, voice detection) and forwards the processed frames to the
 * configured sink node.
 */
public class WebRtcAudioProcessingNode implements WebRtcAudioSinkNode, WebRtcAudioSourceNode {

	// Native WebRTC processing pipeline; created in initialize().
	private AudioProcessing audioProcessing;

	// Processing configuration; must be set before initialize().
	private AudioProcessingConfig config;

	// Input stream config; created lazily on the first onData() call,
	// once the incoming sample rate and channel count are known.
	private AudioProcessingStreamConfig streamConfigIn;

	// Output stream config, derived from the sink format in initialize().
	private AudioProcessingStreamConfig streamConfigOut;

	// Empty fallback statistics, returned while no native pipeline exists.
	private AudioProcessingStats stats;

	// The format in which processed audio is delivered to the sink node.
	private AudioFormat sinkFormat;

	// Downstream node receiving the processed audio.
	private WebRtcAudioSinkNode sinkNode;

	// Holds one processed 10 ms output frame.
	private byte[] buffer;


	/**
	 * Allocates the output buffer for one 10 ms frame, creates and
	 * configures the native processing pipeline, and initializes the
	 * downstream sink node.
	 */
	@Override
	public void initialize() {
		requireNonNull(sinkFormat, "AudioFormat must be set");
		requireNonNull(config, "AudioProcessingConfig must be set");

		// 16-bit PCM: 2 bytes per sample per channel.
		int nBytesPerSampleOut = 2 * sinkFormat.getChannels();
		int nSamplesOut = sinkFormat.getSampleRate() / 100; // 10 ms frame
		int nBytesOut = nSamplesOut * nBytesPerSampleOut;

		buffer = new byte[nBytesOut];

		streamConfigOut = new AudioProcessingStreamConfig(
				sinkFormat.getSampleRate(), sinkFormat.getChannels());

		audioProcessing = new AudioProcessing();
		audioProcessing.applyConfig(config);

		stats = new AudioProcessingStats();

		sinkNode.initialize();
	}

	/**
	 * Disposes the native processing pipeline (if any) and destroys the
	 * downstream sink node.
	 */
	@Override
	public void destroy() {
		if (nonNull(audioProcessing)) {
			audioProcessing.dispose();
		}

		sinkNode.destroy();
	}

	@Override
	public void setAudioSinkFormat(AudioFormat format) {
		this.sinkFormat = format;
	}

	@Override
	public void setAudioSinkNode(WebRtcAudioSinkNode node) {
		this.sinkNode = node;
	}

	/**
	 * Processes one incoming frame through the native pipeline and passes
	 * the processed frame on to the sink node in the sink format.
	 */
	@Override
	public void onData(byte[] audioSamples, int nSamples, int nBytesPerSample,
			int nChannels, int samplesPerSec, int totalDelayMS,
			int clockDrift) {
		// Create the input stream config lazily; the incoming format is
		// only known once the first frame arrives.
		if (isNull(streamConfigIn)) {
			streamConfigIn = new AudioProcessingStreamConfig(samplesPerSec,
					nChannels);
		}

		audioProcessing.processStream(audioSamples, streamConfigIn,
				streamConfigOut, buffer);

		// NOTE(review): forwards the *input* nSamples while reporting the
		// sink's rate/channels — if input and sink sample rates differ,
		// this sample count would not match the output frame; confirm.
		sinkNode.onData(buffer, nSamples, sinkFormat.getChannels() * 2,
				sinkFormat.getChannels(), sinkFormat.getSampleRate(),
				totalDelayMS, clockDrift);
	}

	/**
	 * Returns the current statistics of the native processing pipeline, or
	 * the empty fallback statistics if the pipeline has not been created.
	 *
	 * @return The current audio processing statistics.
	 */
	public AudioProcessingStats getAudioProcessingStats() {
		if (nonNull(audioProcessing)) {
			return AudioProcessingStatsConverter.INSTANCE.to(
					audioProcessing.getStatistics());
		}
		return stats;
	}

	/**
	 * Sets the processing configuration to apply in {@link #initialize()}.
	 *
	 * @param config The audio processing configuration.
	 */
	public void setAudioProcessingConfig(AudioProcessingConfig config) {
		this.config = config;
	}
}

View file

@ -0,0 +1,212 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import static java.util.Objects.nonNull;
import static java.util.Objects.requireNonNull;
import dev.onvoid.webrtc.media.Device;
import dev.onvoid.webrtc.media.MediaDevices;
import dev.onvoid.webrtc.media.audio.AudioDevice;
import dev.onvoid.webrtc.media.audio.AudioDeviceModule;
import dev.onvoid.webrtc.media.audio.AudioProcessingConfig;
import java.util.List;
import org.lecturestudio.core.ExecutableBase;
import org.lecturestudio.core.ExecutableException;
import org.lecturestudio.core.audio.AudioProcessingSettings;
import org.lecturestudio.core.audio.AudioProcessingStats;
import org.lecturestudio.core.audio.sink.AudioSink;
import org.lecturestudio.core.audio.AudioRecorder;
/**
* AudioRecorder implementation based on the WebRTC audio stack. This recorder
* utilizes WebRTC audio processing filters, such as HighPassFilter,
* NoiseSuppression, EchoCancellation, VoiceDetection, etc.
*
* @author Alex Andres
*/
public class WebRtcAudioRecorder extends ExecutableBase implements AudioRecorder {

	// Native WebRTC audio device module driving the capture device.
	private AudioDeviceModule deviceModule;

	// The selected recording (capture) device.
	private AudioDevice captureDevice;

	// Optional processing settings; when set, captured audio is routed
	// through a WebRtcAudioProcessingNode before reaching the sink.
	private AudioProcessingSettings processingSettings;

	// The sink receiving the (possibly processed) captured audio.
	private AudioSink sink;

	// Head of the sink-node chain; either a processing node wrapping the
	// sink, or the sink adapter directly.
	private WebRtcAudioSinkNode sinkNode;

	// Recording volume in the range [0, 1], mapped onto the device's
	// min/max microphone volume range.
	private double volume;


	@Override
	public void setAudioDeviceName(String deviceName) {
		// Resolve the device by name among the available capture devices;
		// null if no device with that name exists.
		captureDevice = getDeviceByName(MediaDevices.getAudioCaptureDevices(),
				deviceName);
	}

	@Override
	public void setAudioSink(AudioSink sink) {
		this.sink = sink;
	}

	@Override
	public void setAudioVolume(double volume) {
		this.volume = volume;

		// Apply immediately only while recording; otherwise the value is
		// picked up in startInternal().
		if (started()) {
			setAudioModuleVolume();
		}
	}

	/**
	 * Returns the audio processing statistics, available only while
	 * recording with a processing node in the chain.
	 *
	 * @return The current statistics, or {@code null} if not recording or
	 *         no processing is configured.
	 */
	@Override
	public AudioProcessingStats getAudioProcessingStats() {
		if (started() && sinkNode instanceof WebRtcAudioProcessingNode) {
			WebRtcAudioProcessingNode processingNode = (WebRtcAudioProcessingNode) sinkNode;

			return processingNode.getAudioProcessingStats();
		}
		return null;
	}

	@Override
	public void setAudioProcessingSettings(AudioProcessingSettings settings) {
		this.processingSettings = settings;
	}

	/**
	 * Validates the configuration, builds the sink-node chain (optionally
	 * with audio processing) and wires it into a fresh device module.
	 *
	 * @throws ExecutableException If the sink-node chain could not be
	 *                             initialized.
	 */
	@Override
	protected void initInternal() throws ExecutableException {
		requireNonNull(sink, "Audio sink must be set");
		requireNonNull(sink.getAudioFormat(), "Audio format must be set");
		requireNonNull(captureDevice, "Audio recording device must be set");

		if (nonNull(processingSettings)) {
			// Route captured audio through the processing pipeline first.
			WebRtcAudioProcessingNode processingNode = new WebRtcAudioProcessingNode();
			processingNode.setAudioSinkFormat(sink.getAudioFormat());
			processingNode.setAudioProcessingConfig(getAudioProcessingConfig());
			processingNode.setAudioSinkNode(new AudioSinkNode(sink));

			sinkNode = processingNode;
		}
		else {
			sinkNode = new AudioSinkNode(sink);
		}

		try {
			sinkNode.initialize();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}

		deviceModule = new AudioDeviceModule();
		deviceModule.setRecordingDevice(captureDevice);
		deviceModule.setAudioSink((audioSamples, nSamples, nBytesPerSample,
				nChannels, samplesPerSec, totalDelayMS, clockDrift) -> {
			try {
				sinkNode.onData(audioSamples, nSamples, nBytesPerSample,
						nChannels, samplesPerSec, totalDelayMS, clockDrift);
			}
			catch (Throwable e) {
				// A failing sink aborts recording instead of propagating
				// into the native capture callback.
				logException(e, "Write audio to sink failed");

				deviceModule.stopRecording();
			}
		});
	}

	/**
	 * Suspends recording; the device module is kept, so recording can be
	 * resumed via {@code startInternal()}.
	 *
	 * @throws ExecutableException If recording could not be stopped.
	 */
	@Override
	protected void suspendInternal() throws ExecutableException {
		try {
			deviceModule.stopRecording();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}
	}

	/**
	 * Starts (or resumes) recording on the device module and applies the
	 * current volume.
	 *
	 * @throws ExecutableException If recording could not be started.
	 */
	@Override
	protected void startInternal() throws ExecutableException {
		try {
			deviceModule.initRecording();

			setAudioModuleVolume();

			deviceModule.startRecording();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}
	}

	/**
	 * Stops recording, disposes the native device module and tears down
	 * the sink-node chain. Note: disposal happens here rather than in
	 * {@code destroyInternal()}.
	 *
	 * @throws ExecutableException If stopping or tearing down failed.
	 */
	@Override
	protected void stopInternal() throws ExecutableException {
		try {
			deviceModule.stopRecording();
			deviceModule.dispose();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}

		try {
			sinkNode.destroy();
		}
		catch (Throwable e) {
			throw new ExecutableException(e);
		}
	}

	@Override
	protected void destroyInternal() throws ExecutableException {
		// The module was already disposed in stopInternal(); drop the
		// reference only.
		deviceModule = null;
	}

	// Maps the normalized volume [0, 1] linearly onto the device module's
	// microphone volume range.
	private void setAudioModuleVolume() {
		int maxVolume = deviceModule.getMaxMicrophoneVolume();
		int minVolume = deviceModule.getMinMicrophoneVolume();
		int v = (int) (minVolume + (maxVolume - minVolume) * volume);

		deviceModule.setMicrophoneVolume(v);
	}

	// Converts the application-level processing settings into the native
	// WebRTC processing configuration.
	private AudioProcessingConfig getAudioProcessingConfig() {
		return AudioProcessingConfigConverter.INSTANCE.from(processingSettings);
	}

	/**
	 * Searches the provided list for a device with the provided name.
	 *
	 * @param devices The device list in which to search for the device.
	 * @param name    The name of the device to search for.
	 * @param <T>     The device type.
	 *
	 * @return The device with the specified name or {@code null} if not found.
	 */
	private <T extends Device> T getDeviceByName(List<T> devices, String name) {
		return devices.stream()
				.filter(device -> device.getName().equals(name))
				.findFirst()
				.orElse(null);
	}
}

View file

@ -0,0 +1,36 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import org.lecturestudio.core.audio.AudioFormat;
/**
 * A {@code WebRtcAudioSinkNode} is the receiving end of an audio processing
 * graph; it consumes audio data delivered by a {@code WebRtcAudioSourceNode}.
 *
 * @author Alex Andres
 */
public interface WebRtcAudioSinkNode extends WebRtcAudioNode {

	/**
	 * Sets the audio format in which this node is expected to deliver audio
	 * data downstream.
	 *
	 * @param format The audio format for delivered audio data.
	 */
	void setAudioSinkFormat(AudioFormat format);

	/**
	 * Called for each captured audio frame.
	 *
	 * @param audioSamples    The buffer containing the audio samples.
	 * @param nSamples        The number of samples in the frame — presumably
	 *                        per channel; confirm against the native callback.
	 * @param nBytesPerSample The number of bytes per sample.
	 * @param nChannels       The number of audio channels.
	 * @param samplesPerSec   The sample rate in samples per second.
	 * @param totalDelayMS    The total delay in milliseconds.
	 * @param clockDrift      The clock drift.
	 */
	void onData(byte[] audioSamples, int nSamples, int nBytesPerSample,
			int nChannels, int samplesPerSec, int totalDelayMS, int clockDrift);
}

View file

@ -0,0 +1,25 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
/**
 * A {@code WebRtcAudioSourceNode} is the producing end of an audio processing
 * graph; it delivers audio data to a configured {@code WebRtcAudioSinkNode}.
 */
public interface WebRtcAudioSourceNode extends WebRtcAudioNode {

	/**
	 * Sets the sink node to which this source delivers audio data.
	 *
	 * @param node The receiving audio sink node.
	 */
	void setAudioSinkNode(WebRtcAudioSinkNode node);
}

View file

@ -0,0 +1,99 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.media.webrtc;
import dev.onvoid.webrtc.media.Device;
import dev.onvoid.webrtc.media.MediaDevices;
import java.util.List;
import org.lecturestudio.core.audio.AudioPlayer;
import org.lecturestudio.core.audio.AudioRecorder;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.device.AudioDevice;
/**
* {@code AudioSystemProvider} implementation based on the native WebRTC audio
* module implementation.
*
* @author Alex Andres
*/
public class WebRtcAudioSystemProvider implements AudioSystemProvider {

	/**
	 * Returns the first available audio capture device as the default
	 * recording device.
	 *
	 * @return The default recording device, or {@code null} if none exists.
	 */
	@Override
	public AudioDevice getDefaultRecordingDevice() {
		return firstDevice(MediaDevices.getAudioCaptureDevices());
	}

	/**
	 * Returns the first available audio render device as the default
	 * playback device.
	 *
	 * @return The default playback device, or {@code null} if none exists.
	 */
	@Override
	public AudioDevice getDefaultPlaybackDevice() {
		return firstDevice(MediaDevices.getAudioRenderDevices());
	}

	@Override
	public AudioDevice[] getRecordingDevices() {
		return getDevices(MediaDevices.getAudioCaptureDevices());
	}

	@Override
	public AudioDevice[] getPlaybackDevices() {
		return getDevices(MediaDevices.getAudioRenderDevices());
	}

	@Override
	public AudioPlayer createAudioPlayer() {
		return new WebRtcAudioPlayer();
	}

	@Override
	public AudioRecorder createAudioRecorder() {
		return new WebRtcAudioRecorder();
	}

	@Override
	public String getProviderName() {
		return "WebRTC Audio";
	}

	/**
	 * Wraps the first device in the list, or returns {@code null} if the
	 * list is empty.
	 *
	 * @param devices The native device list.
	 *
	 * @return The wrapped first device or {@code null}.
	 */
	private AudioDevice firstDevice(List<? extends Device> devices) {
		return devices.isEmpty() ? null : new AudioDevice(devices.get(0).getName());
	}

	/**
	 * Wraps each native device in the list into an application-level
	 * {@code AudioDevice}.
	 *
	 * @param devices The native device list.
	 *
	 * @return The wrapped devices; empty if the list is empty.
	 */
	private AudioDevice[] getDevices(List<? extends Device> devices) {
		return devices.stream()
				.map(device -> new AudioDevice(device.getName()))
				.toArray(AudioDevice[]::new);
	}
}

View file

@ -1 +0,0 @@
org.lecturestudio.media.avdev.AVdevProvider

View file

@ -81,7 +81,6 @@ public class DefaultConfiguration extends PlayerConfiguration {
getToolConfig().getPresetColors().addAll(new ArrayList<>(6));
getAudioConfig().setSoundSystem("Java Sound");
getAudioConfig().setPlaybackVolume(1);
}

View file

@ -25,6 +25,8 @@ import java.util.Locale;
import org.lecturestudio.broadcast.config.BroadcastProfile;
import org.lecturestudio.core.app.Theme;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioProcessingSettings;
import org.lecturestudio.core.audio.AudioProcessingSettings.NoiseSuppressionLevel;
import org.lecturestudio.core.geometry.Dimension2D;
import org.lecturestudio.core.geometry.Position;
import org.lecturestudio.core.graphics.Color;
@ -122,12 +124,17 @@ public class DefaultConfiguration extends PresenterConfiguration {
getNetworkConfig().getBroadcastProfiles().add(localProfile);
getNetworkConfig().getBroadcastProfiles().add(etitProfile);
AudioProcessingSettings processingSettings = new AudioProcessingSettings();
processingSettings.setHighpassFilterEnabled(true);
processingSettings.setNoiseSuppressionEnabled(true);
processingSettings.setNoiseSuppressionLevel(NoiseSuppressionLevel.MODERATE);
getAudioConfig().setRecordingFormat(new AudioFormat(AudioFormat.Encoding.S16LE, 44100, 1));
getAudioConfig().setSoundSystem("AVdev");
getAudioConfig().setRecordingPath(System.getProperty("user.home"));
getAudioConfig().setRecordingProcessingSettings(processingSettings);
getAudioConfig().setDefaultRecordingVolume(1.0f);
getAudioConfig().setMasterRecordingVolume(1.0f);
getStreamConfig().setAudioCodec("OPUS");
getStreamConfig().setAudioFormat(new AudioFormat(AudioFormat.Encoding.S16LE, 24000, 1));
getStreamConfig().getCameraCodecConfig().setBitRate(200);

View file

@ -18,19 +18,15 @@
package org.lecturestudio.presenter.api.presenter;
import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.io.File;
import java.util.List;
import javax.inject.Inject;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.presenter.Presenter;
import org.lecturestudio.core.view.DirectoryChooserView;
import org.lecturestudio.core.view.ViewContextFactory;
@ -57,43 +53,15 @@ public class RecordSettingsPresenter extends Presenter<RecordSettingsView> {
public void initialize() {
PresenterConfiguration config = (PresenterConfiguration) context.getConfiguration();
String soundSystemName = audioConfig.getSoundSystem();
String inputDeviceName = audioConfig.getCaptureDeviceName();
loadAudioFormats(soundSystemName, inputDeviceName);
view.setNotifyToRecord(config.notifyToRecordProperty());
view.setConfirmStopRecording(config.confirmStopRecordingProperty());
view.setPageRecordingTimeout(config.pageRecordingTimeoutProperty());
view.setRecordingAudioFormats(AudioUtils.getAudioFormats());
view.setRecordingAudioFormat(audioConfig.recordingFormatProperty());
view.setRecordingPath(audioConfig.recordingPathProperty());
view.setOnSelectRecordingPath(this::selectRecordingPath);
view.setOnReset(this::reset);
audioConfig.captureDeviceNameProperty().addListener((observable, oldDevice, newDevice) -> {
loadAudioFormats(soundSystemName, newDevice);
});
}
private void loadAudioFormats(String providerName, String deviceName) {
if (!AudioUtils.hasCaptureDevice(providerName, deviceName)) {
// Select default device.
AudioInputDevice[] devices = AudioUtils.getAudioCaptureDevices(providerName);
deviceName = (devices.length > 0) ? devices[0].getName() : null;
}
if (isNull(deviceName)) {
view.setRecordingAudioFormats(List.of());
return;
}
AudioInputDevice captureDevice = AudioUtils.getAudioInputDevice(providerName, deviceName);
if (nonNull(captureDevice)) {
List<AudioFormat> formats = captureDevice.getSupportedFormats();
view.setRecordingAudioFormats(formats);
}
}
private void selectRecordingPath() {

View file

@ -22,31 +22,29 @@ import static java.util.Objects.isNull;
import static java.util.Objects.nonNull;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.Arrays;
import javax.inject.Inject;
import org.lecturestudio.avdev.AudioSink;
import org.lecturestudio.core.Executable;
import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.SyncState;
import org.lecturestudio.core.audio.bus.event.AudioSignalEvent;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.source.AudioSource;
import org.lecturestudio.core.audio.AudioProcessingSettings;
import org.lecturestudio.core.audio.AudioProcessingSettings.NoiseSuppressionLevel;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.audio.sink.AudioSink;
import org.lecturestudio.core.audio.sink.ByteArrayAudioSink;
import org.lecturestudio.core.audio.source.ByteArrayAudioSource;
import org.lecturestudio.core.beans.BooleanProperty;
import org.lecturestudio.core.presenter.Presenter;
import org.lecturestudio.core.util.MapChangeListener;
import org.lecturestudio.core.util.ObservableMap;
import org.lecturestudio.media.avdev.AVdevAudioInputDevice;
import org.lecturestudio.media.avdev.AVdevAudioOutputDevice;
import org.lecturestudio.media.avdev.AvdevAudioPlayer;
import org.lecturestudio.core.audio.AudioPlayer;
import org.lecturestudio.core.audio.AudioRecorder;
import org.lecturestudio.presenter.api.config.DefaultConfiguration;
import org.lecturestudio.presenter.api.config.PresenterConfiguration;
import org.lecturestudio.presenter.api.presenter.command.AdjustAudioCaptureLevelCommand;
import org.lecturestudio.presenter.api.view.SoundSettingsView;
@ -54,17 +52,15 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
private final AudioConfiguration audioConfig;
private final String soundSystem;
private final AudioSystemProvider audioSystemProvider;
private AvdevAudioPlayer audioPlayer;
private AudioPlayer audioPlayer;
private AVdevAudioInputDevice levelDevice;
private AudioRecorder levelRecorder;
private AVdevAudioInputDevice testDevice;
private AudioRecorder testRecorder;
private ByteArrayInputStream testPlaybackStream;
private ByteArrayOutputStream testCaptureStream;
private AudioSink testAudioSink;
private BooleanProperty testCapture;
@ -78,19 +74,16 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
@Inject
SoundSettingsPresenter(ApplicationContext context, SoundSettingsView view) {
SoundSettingsPresenter(ApplicationContext context, SoundSettingsView view,
AudioSystemProvider audioSystemProvider) {
super(context, view);
PresenterConfiguration config = (PresenterConfiguration) context.getConfiguration();
audioConfig = config.getAudioConfig();
soundSystem = audioConfig.getSoundSystem();
this.audioConfig = context.getConfiguration().getAudioConfig();
this.audioSystemProvider = audioSystemProvider;
}
@Override
public void initialize() {
setAudioDevices(soundSystem);
testCapture = new BooleanProperty();
testPlayback = new BooleanProperty();
captureEnabled = new BooleanProperty(true);
@ -104,14 +97,36 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
playCaptureTest(newValue);
}
catch (Exception e) {
handleException(e, "Test playback failed", "microphone.settings.test.playback.error");
handleException(e, "Test playback failed",
"microphone.settings.test.playback.error",
"microphone.settings.test.playback.error.message");
}
});
if (isNull(audioConfig.getCaptureDeviceName())) {
setDefaultRecordingDevice();
}
if (isNull(audioConfig.getPlaybackDeviceName())) {
setDefaultPlaybackDevice();
}
if (isNull(audioConfig.getRecordingProcessingSettings())) {
AudioProcessingSettings processingSettings = new AudioProcessingSettings();
processingSettings.setHighpassFilterEnabled(true);
processingSettings.setNoiseSuppressionEnabled(true);
processingSettings.setNoiseSuppressionLevel(NoiseSuppressionLevel.MODERATE);
audioConfig.setRecordingProcessingSettings(processingSettings);
}
view.setAudioCaptureDevices(audioSystemProvider.getRecordingDevices());
view.setAudioPlaybackDevices(audioSystemProvider.getPlaybackDevices());
view.setAudioCaptureDevice(audioConfig.captureDeviceNameProperty());
view.setAudioPlaybackDevice(audioConfig.playbackDeviceNameProperty());
view.bindAudioCaptureLevel(audioConfig.recordingMasterVolumeProperty());
view.setOnViewVisible(this::captureAudioLevel);
view.setAudioCaptureNoiseSuppressionLevel(
audioConfig.getRecordingProcessingSettings()
.noiseSuppressionLevelProperty());
view.setOnViewVisible(this::onViewVisible);
view.setOnAdjustAudioCaptureLevel(this::adjustAudioCaptureLevel);
view.bindTestCaptureEnabled(captureEnabled);
view.bindTestPlaybackEnabled(playbackEnabled);
@ -120,20 +135,13 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
view.setOnReset(this::reset);
view.setOnClose(this::close);
if (isNull(audioConfig.getCaptureDeviceName())) {
setDefaultCaptureDevice();
}
if (isNull(audioConfig.getPlaybackDeviceName())) {
setDefaultPlaybackDevice();
}
if (AudioUtils.getAudioCaptureDevices(soundSystem).length < 1) {
if (audioSystemProvider.getRecordingDevices().length < 1) {
view.setViewEnabled(false);
}
audioConfig.captureDeviceNameProperty().addListener((observable, oldDevice, newDevice) -> {
if (isNull(newDevice)) {
setDefaultCaptureDevice();
setDefaultRecordingDevice();
}
else if (newDevice.equals(oldDevice)) {
return;
@ -152,21 +160,17 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
playbackDeviceChanged(newDevice);
});
audioConfig.soundSystemProperty().addListener((observable, oldSystem, newSystem) -> {
setAudioDevices(newSystem);
});
audioConfig.recordingMasterVolumeProperty().addListener((observable, oldValue, newValue) -> {
String deviceName = audioConfig.getCaptureDeviceName();
if (nonNull(deviceName)) {
audioConfig.setRecordingVolume(deviceName, newValue);
}
if (nonNull(levelDevice)) {
levelDevice.setVolume(newValue.doubleValue());
if (nonNull(levelRecorder)) {
levelRecorder.setAudioVolume(newValue.doubleValue());
}
if (nonNull(testDevice)) {
testDevice.setVolume(newValue.doubleValue());
if (nonNull(testRecorder)) {
testRecorder.setAudioVolume(newValue.doubleValue());
}
});
@ -174,8 +178,8 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
@Override
public void mapChanged(ObservableMap<String, Double> map) {
Double deviceVolume = nonNull(levelDevice) ?
map.get(levelDevice.getName()) :
Double deviceVolume = nonNull(levelRecorder) ?
map.get(audioConfig.getCaptureDeviceName()) :
null;
if (nonNull(deviceVolume)) {
@ -188,19 +192,20 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
private void adjustAudioCaptureLevel() {
context.getEventBus().post(new AdjustAudioCaptureLevelCommand(() -> {
// When determining a new microphone level, do it with the maximum volume.
if (nonNull(levelDevice)) {
levelDevice.setVolume(1);
if (nonNull(levelRecorder)) {
levelRecorder.setAudioVolume(1);
}
}, () -> {
// Reset capture volume, when canceled.
if (nonNull(levelDevice)) {
Double devVolume = audioConfig.getRecordingVolume(levelDevice.getName());
if (nonNull(levelRecorder)) {
Double devVolume = audioConfig.getRecordingVolume(audioConfig
.getCaptureDeviceName());
if (isNull(devVolume)) {
devVolume = (double) audioConfig.getMasterRecordingVolume();
}
levelDevice.setVolume(devVolume);
levelRecorder.setAudioVolume(devVolume);
}
}));
}
@ -209,56 +214,39 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
playbackEnabled.set(!capture);
if (capture) {
testCaptureStream = new ByteArrayOutputStream();
testDevice = createCaptureDevice();
testAudioSink = new ByteArrayAudioSink();
testAudioSink.setAudioFormat(audioConfig.getRecordingFormat());
if (isNull(testDevice)) {
logException(new NullPointerException(), "Create audio capture device failed");
return;
}
testRecorder = createAudioRecorder();
testRecorder.setAudioSink(testAudioSink);
testRecorder.setAudioProcessingSettings(
audioConfig.getRecordingProcessingSettings());
testDevice.setSink((data, length) -> testCaptureStream.write(data, 0, length));
startDevice(testDevice);
startAudioExecutable(testRecorder);
}
else {
stopDevice(testDevice);
stopAudioExecutable(testRecorder);
}
}
private void playCaptureTest(boolean play) throws Exception {
private void playCaptureTest(boolean play) {
captureEnabled.set(!play);
if (play) {
testPlaybackStream = new ByteArrayInputStream(testCaptureStream.toByteArray());
if (isNull(audioPlayer)) {
AVdevAudioOutputDevice device = createPlaybackDevice();
if (nonNull(device)) {
audioPlayer = new AvdevAudioPlayer(device,
new TestAudioSource(), new SyncState());
}
else {
showError("microphone.settings.test.playback.error",
"microphone.settings.test.playback.error.message");
}
audioPlayer = createAudioPlayer();
}
audioPlayer.start();
startAudioExecutable(audioPlayer);
}
else {
audioPlayer.stop();
stopAudioExecutable(audioPlayer);
audioPlayer = null;
}
}
private void setAudioDevices(String soundSystem) {
view.setAudioCaptureDevices(AudioUtils.getAudioCaptureDevices(soundSystem));
view.setAudioPlaybackDevices(AudioUtils.getAudioPlaybackDevices(soundSystem));
}
private void captureAudioLevel(boolean capture) {
private void onViewVisible(boolean capture) {
if (captureAudio == capture) {
return;
}
@ -266,6 +254,15 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
captureAudio = capture;
if (capture) {
if (!hasDevice(audioSystemProvider.getRecordingDevices(),
audioConfig.getCaptureDeviceName())) {
setDefaultRecordingDevice();
}
if (!hasDevice(audioSystemProvider.getPlaybackDevices(),
audioConfig.getPlaybackDeviceName())) {
setDefaultPlaybackDevice();
}
startAudioLevelCapture();
}
else {
@ -273,12 +270,12 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
}
}
private void setDefaultCaptureDevice() {
AudioInputDevice[] captureDevices = AudioUtils.getAudioCaptureDevices(soundSystem);
private void setDefaultRecordingDevice() {
AudioDevice captureDevice = audioSystemProvider.getDefaultRecordingDevice();
// Select first available capture device.
if (captureDevices.length > 0) {
audioConfig.setCaptureDeviceName(captureDevices[0].getName());
if (nonNull(captureDevice)) {
audioConfig.setCaptureDeviceName(captureDevice.getName());
}
else {
view.setViewEnabled(false);
@ -286,30 +283,28 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
}
private void setDefaultPlaybackDevice() {
AudioOutputDevice[] playbackDevices = AudioUtils.getAudioPlaybackDevices(soundSystem);
AudioDevice playbackDevice = audioSystemProvider.getDefaultPlaybackDevice();
// Select first available playback device.
if (playbackDevices.length > 0) {
audioConfig.setPlaybackDeviceName(playbackDevices[0].getName());
if (nonNull(playbackDevice)) {
audioConfig.setPlaybackDeviceName(playbackDevice.getName());
}
}
private void reset() {
DefaultConfiguration defaultConfig = new DefaultConfiguration();
AudioConfiguration defaultAudioConfig = defaultConfig.getAudioConfig();
AudioProcessingSettings defaultProcSettings = defaultAudioConfig.getRecordingProcessingSettings();
audioConfig.setSoundSystem(defaultConfig.getAudioConfig().getSoundSystem());
audioConfig.setCaptureDeviceName(defaultConfig.getAudioConfig().getCaptureDeviceName());
audioConfig.setPlaybackDeviceName(defaultConfig.getAudioConfig().getPlaybackDeviceName());
audioConfig.setDefaultRecordingVolume(defaultConfig.getAudioConfig().getDefaultRecordingVolume());
audioConfig.setMasterRecordingVolume(defaultConfig.getAudioConfig().getMasterRecordingVolume());
audioConfig.getRecordingProcessingSettings().setNoiseSuppressionLevel(defaultProcSettings.getNoiseSuppressionLevel());
audioConfig.setCaptureDeviceName(defaultAudioConfig.getCaptureDeviceName());
audioConfig.setPlaybackDeviceName(defaultAudioConfig.getPlaybackDeviceName());
audioConfig.setDefaultRecordingVolume(defaultAudioConfig.getDefaultRecordingVolume());
audioConfig.setMasterRecordingVolume(defaultAudioConfig.getMasterRecordingVolume());
audioConfig.getRecordingVolumes().clear();
}
private void recordingDeviceChanged(String name) {
if (nonNull(levelDevice) && levelDevice.getName().equals(name)) {
return;
}
Double deviceVolume = audioConfig.getRecordingVolume(name);
if (nonNull(deviceVolume)) {
@ -325,24 +320,34 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
}
private void startAudioLevelCapture() {
levelDevice = createCaptureDevice();
if (isNull(levelDevice)) {
logException(new NullPointerException(), "Create audio capture device failed");
return;
}
levelDevice.setSink(new AudioSink() {
levelRecorder = createAudioRecorder();
levelRecorder.setAudioSink(new AudioSink() {
@Override
public void write(byte[] data, int length) {
double level = getSignalPowerLevel(data);
public void open() {}
@Override
public void reset() {}
@Override
public void close() {}
@Override
public int write(byte[] data, int offset, int length) {
double level = getSignalPowerLevel(data);
view.setAudioCaptureLevel(level);
context.getAudioBus().post(new AudioSignalEvent(level));
return 0;
}
@Override
public AudioFormat getAudioFormat() {
return audioConfig.getRecordingFormat();
}
@Override
public void setAudioFormat(AudioFormat format) {}
private double getSignalPowerLevel(byte[] buffer) {
int max = 0;
@ -356,58 +361,42 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
}
});
startDevice(levelDevice);
startAudioExecutable(levelRecorder);
}
private void stopAudioLevelCapture() {
if (nonNull(levelDevice) && levelDevice.isOpen()) {
stopDevice(levelDevice);
if (nonNull(levelRecorder) && levelRecorder.started()) {
stopAudioExecutable(levelRecorder);
}
if (nonNull(testDevice) && testDevice.isOpen()) {
if (nonNull(testRecorder) && testRecorder.started()) {
// This will update the view and the model.
testCapture.set(false);
}
}
private void startDevice(AVdevAudioInputDevice device) {
private void startAudioExecutable(Executable executable) {
try {
device.open();
device.start();
executable.start();
}
catch (Exception e) {
logException(e, "Start audio capture device failed");
logException(e, "Start audio executable failed");
}
}
private void stopDevice(AVdevAudioInputDevice device) {
try {
device.stop();
device.close();
}
catch (Exception e) {
logException(e, "Stop audio capture device failed");
private void stopAudioExecutable(Executable executable) {
if (executable.started() || executable.suspended()) {
try {
executable.stop();
executable.destroy();
}
catch (Exception e) {
logException(e, "Stop audio executable failed");
}
}
}
private AVdevAudioInputDevice createCaptureDevice() {
private AudioRecorder createAudioRecorder() {
String inputDeviceName = audioConfig.getCaptureDeviceName();
if (!soundSystem.equals("AVdev")) {
return null;
}
if (!AudioUtils.hasCaptureDevice(soundSystem, inputDeviceName)) {
// Select default device.
AudioInputDevice[] devices = AudioUtils.getAudioCaptureDevices(soundSystem);
inputDeviceName = (devices.length > 0) ? devices[0].getName() : null;
}
if (isNull(inputDeviceName)) {
return null;
}
AudioFormat format = audioConfig.getRecordingFormat();
AudioInputDevice inputDevice = AudioUtils.getAudioInputDevice(soundSystem, inputDeviceName);
double volume = audioConfig.getMasterRecordingVolume();
Double devVolume = audioConfig.getRecordingVolume(inputDeviceName);
@ -415,70 +404,36 @@ public class SoundSettingsPresenter extends Presenter<SoundSettingsView> {
volume = devVolume;
}
AVdevAudioInputDevice device = (AVdevAudioInputDevice) inputDevice;
device.setAudioFormat(format);
device.setMute(false);
device.setVolume(volume);
AudioRecorder recorder = audioSystemProvider.createAudioRecorder();
recorder.setAudioDeviceName(inputDeviceName);
recorder.setAudioVolume(volume);
return device;
return recorder;
}
private AVdevAudioOutputDevice createPlaybackDevice() {
String playbackDeviceName = audioConfig.getPlaybackDeviceName();
private AudioPlayer createAudioPlayer() {
ByteArrayAudioSink sink = (ByteArrayAudioSink) testAudioSink;
ByteArrayInputStream inputStream = new ByteArrayInputStream(
sink.toByteArray());
if (!soundSystem.equals("AVdev")) {
return null;
}
if (!AudioUtils.hasPlaybackDevice(soundSystem, playbackDeviceName)) {
// Select default device.
AudioOutputDevice[] devices = AudioUtils.getAudioPlaybackDevices(soundSystem);
ByteArrayAudioSource source = new ByteArrayAudioSource(inputStream,
audioConfig.getRecordingFormat());
playbackDeviceName = (devices.length > 0) ? devices[0].getName() : null;
}
if (isNull(playbackDeviceName)) {
return null;
}
return (AVdevAudioOutputDevice) AudioUtils.getAudioOutputDevice(soundSystem, playbackDeviceName);
}
private class TestAudioSource implements AudioSource {
@Override
public int read(byte[] data, int offset, int length) {
int read = testPlaybackStream.read(data, offset, length);
if (read < 0) {
CompletableFuture.runAsync(() -> testPlayback.set(false));
AudioPlayer player = audioSystemProvider.createAudioPlayer();
player.setAudioDeviceName(audioConfig.getPlaybackDeviceName());
player.setAudioVolume(1.0);
player.setAudioSource(source);
player.addStateListener((oldState, newState) -> {
if (newState == ExecutableState.Stopped) {
testPlayback.set(false);
}
});
return read;
}
return player;
}
@Override
public void close() throws IOException {
testPlaybackStream.close();
}
@Override
public void reset() {
testPlaybackStream.reset();
}
@Override
public long skip(long n) {
return testPlaybackStream.skip(n);
}
@Override
public long getInputSize() {
return testPlaybackStream.available();
}
@Override
public AudioFormat getAudioFormat() {
return audioConfig.getRecordingFormat();
}
private boolean hasDevice(AudioDevice[] devices, String name) {
return Arrays.stream(devices)
.anyMatch(device -> device.getName().equals(name));
}
}

View file

@ -29,7 +29,7 @@ import javax.inject.Named;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.beans.ChangeListener;
import org.lecturestudio.core.camera.Camera;
import org.lecturestudio.core.presenter.Presenter;
@ -48,6 +48,8 @@ import org.lecturestudio.web.api.stream.service.StreamProviderService;
public class StartStreamPresenter extends Presenter<StartStreamView> {
private final AudioSystemProvider audioSystemProvider;
private final CameraService camService;
private ChangeListener<String> camListener;
@ -67,9 +69,10 @@ public class StartStreamPresenter extends Presenter<StartStreamView> {
@Inject
StartStreamPresenter(ApplicationContext context, StartStreamView view,
CameraService camService) {
AudioSystemProvider audioSystemProvider, CameraService camService) {
super(context, view);
this.audioSystemProvider = audioSystemProvider;
this.camService = camService;
}
@ -93,7 +96,6 @@ public class StartStreamPresenter extends Presenter<StartStreamView> {
if (nonNull(courses)) {
AudioConfiguration audioConfig = config.getAudioConfig();
String soundSystem = audioConfig.getSoundSystem();
Course selectedCourse = pContext.getCourse();
if (isNull(selectedCourse) && !courses.isEmpty()) {
@ -124,9 +126,9 @@ public class StartStreamPresenter extends Presenter<StartStreamView> {
view.setEnableMicrophone(config.getStreamConfig().enableMicrophoneProperty());
view.setEnableCamera(config.getStreamConfig().enableCameraProperty());
view.setEnableMessenger(streamContext.enableMessengerProperty());
view.setAudioCaptureDevices(AudioUtils.getAudioCaptureDevices(soundSystem));
view.setAudioCaptureDevices(audioSystemProvider.getRecordingDevices());
view.setAudioCaptureDevice(audioConfig.captureDeviceNameProperty());
view.setAudioPlaybackDevices(AudioUtils.getAudioPlaybackDevices(soundSystem));
view.setAudioPlaybackDevices(audioSystemProvider.getPlaybackDevices());
view.setAudioPlaybackDevice(audioConfig.playbackDeviceNameProperty());
view.setCameraNames(camService.getCameraNames());
view.setCameraName(streamConfig.cameraNameProperty());

View file

@ -25,6 +25,7 @@ import com.google.common.eventbus.Subscribe;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
@ -44,10 +45,11 @@ import org.lecturestudio.core.ExecutableState;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.AudioDeviceNotConnectedException;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.bus.AudioBus;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.sink.AudioSink;
import org.lecturestudio.core.audio.sink.ProxyAudioSink;
import org.lecturestudio.core.audio.sink.WavFileSink;
import org.lecturestudio.core.bus.ApplicationBus;
import org.lecturestudio.core.bus.event.DocumentEvent;
@ -69,8 +71,7 @@ import org.lecturestudio.core.recording.action.StaticShapeAction;
import org.lecturestudio.core.service.DocumentService;
import org.lecturestudio.core.util.FileUtils;
import org.lecturestudio.core.util.ProgressCallback;
import org.lecturestudio.media.avdev.AVdevAudioInputDevice;
import org.lecturestudio.media.avdev.AvdevAudioRecorder;
import org.lecturestudio.core.audio.AudioRecorder;
import org.lecturestudio.presenter.api.event.RecordingStateEvent;
public class FileLectureRecorder extends LectureRecorder {
@ -85,6 +86,8 @@ public class FileLectureRecorder extends LectureRecorder {
private final RecordingBackup backup;
private final AudioSystemProvider audioSystemProvider;
private final AudioConfiguration audioConfig;
private final DocumentService documentService;
@ -93,7 +96,7 @@ public class FileLectureRecorder extends LectureRecorder {
private IdleTimer idleTimer;
private AvdevAudioRecorder audioRecorder;
private AudioRecorder audioRecorder;
private AudioSink audioSink;
@ -106,7 +109,10 @@ public class FileLectureRecorder extends LectureRecorder {
private int pageRecordingTimeout = 2000;
public FileLectureRecorder(DocumentService documentService, AudioConfiguration audioConfig, String recDir) throws IOException {
public FileLectureRecorder(AudioSystemProvider audioSystemProvider,
DocumentService documentService, AudioConfiguration audioConfig,
String recDir) throws IOException {
this.audioSystemProvider = audioSystemProvider;
this.documentService = documentService;
this.audioConfig = audioConfig;
this.backup = new RecordingBackup(recDir);
@ -254,9 +260,12 @@ public class FileLectureRecorder extends LectureRecorder {
ExecutableState prevState = getPreviousState();
if (prevState == ExecutableState.Initialized || prevState == ExecutableState.Stopped) {
if (!AudioUtils.hasCaptureDevice(audioConfig.getSoundSystem(), deviceName)) {
throw new AudioDeviceNotConnectedException("Audio device %s is not connected", deviceName, deviceName);
if (prevState == ExecutableState.Initialized ||
prevState == ExecutableState.Stopped) {
if (!hasRecordingDevice(deviceName)) {
throw new AudioDeviceNotConnectedException(
"Audio device %s is not connected", deviceName,
deviceName);
}
clear();
@ -267,7 +276,6 @@ public class FileLectureRecorder extends LectureRecorder {
try {
audioSink = new WavFileSink(new File(backup.getAudioFile()));
audioSink.setAudioFormat(audioFormat);
audioSink.open();
}
catch (IOException e) {
throw new ExecutableException("Could not create audio sink.", e);
@ -281,20 +289,27 @@ public class FileLectureRecorder extends LectureRecorder {
}
if (nonNull(audioRecorder)) {
audioRecorder.stop();
audioRecorder.destroy();
}
AudioInputDevice inputDevice = AudioUtils.getAudioInputDevice(audioConfig.getSoundSystem(), deviceName);
Double deviceVolume = audioConfig.getRecordingVolume(deviceName);
double masterVolume = audioConfig.getMasterRecordingVolume();
double volume = nonNull(deviceVolume) ? deviceVolume : masterVolume;
audioRecorder = new AvdevAudioRecorder((AVdevAudioInputDevice) inputDevice);
audioRecorder.setAudioFormat(audioFormat);
audioRecorder = audioSystemProvider.createAudioRecorder();
audioRecorder.setAudioProcessingSettings(
audioConfig.getRecordingProcessingSettings());
audioRecorder.setAudioDeviceName(deviceName);
audioRecorder.setAudioVolume(volume);
audioRecorder.setSink((data, length) -> {
audioSink.write(data, 0, length);
bytesConsumed += length;
audioRecorder.setAudioSink(new ProxyAudioSink(audioSink) {
@Override
public int write(byte[] data, int offset, int length)
throws IOException {
bytesConsumed += length;
return super.write(data, offset, length);
}
});
audioRecorder.start();
@ -319,20 +334,12 @@ public class FileLectureRecorder extends LectureRecorder {
}
@Override
protected void stopInternal() {
protected void stopInternal() throws ExecutableException {
AudioBus.unregister(this);
if (nonNull(audioRecorder)) {
audioRecorder.stop();
}
if (nonNull(audioSink)) {
try {
audioSink.close();
}
catch (IOException e) {
LOG.error("Close audio sink failed", e);
}
}
backup.close();
@ -340,9 +347,9 @@ public class FileLectureRecorder extends LectureRecorder {
}
@Override
protected void suspendInternal() {
protected void suspendInternal() throws ExecutableException {
if (getPreviousState() == ExecutableState.Started) {
audioRecorder.pause();
audioRecorder.suspend();
pendingActions.setPendingPage(getLastRecordedPage());
}
@ -507,6 +514,15 @@ public class FileLectureRecorder extends LectureRecorder {
return page.equals(getLastRecordedPage());
}
private boolean hasRecordingDevice(String deviceName) {
if (isNull(deviceName)) {
return false;
}
return Arrays.stream(audioSystemProvider.getRecordingDevices())
.anyMatch(device -> device.getName().equals(deviceName));
}
private void runIdleTimer() {
// Ignore all previous tasks.
if (nonNull(idleTimer)) {

View file

@ -195,7 +195,6 @@ public class ChannelStreamService extends ExecutableBase {
audioStreamConfig.codec = config.getStreamConfig().getAudioCodec();
audioStreamConfig.format = config.getStreamConfig().getAudioFormat();
audioStreamConfig.captureDeviceName = config.getAudioConfig().getCaptureDeviceName();
audioStreamConfig.system = config.getAudioConfig().getSoundSystem();
return audioStreamConfig;
}

View file

@ -18,10 +18,11 @@
package org.lecturestudio.presenter.api.view;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.AudioProcessingSettings.NoiseSuppressionLevel;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.beans.BooleanProperty;
import org.lecturestudio.core.beans.FloatProperty;
import org.lecturestudio.core.beans.ObjectProperty;
import org.lecturestudio.core.beans.StringProperty;
import org.lecturestudio.core.view.Action;
import org.lecturestudio.core.view.ConsumerAction;
@ -32,11 +33,13 @@ public interface SoundSettingsView extends SettingsBaseView {
void setAudioCaptureDevice(StringProperty captureDeviceName);
void setAudioCaptureDevices(AudioInputDevice[] captureDevices);
void setAudioCaptureDevices(AudioDevice[] captureDevices);
void setAudioPlaybackDevice(StringProperty playbackDeviceName);
void setAudioPlaybackDevices(AudioOutputDevice[] playbackDevices);
void setAudioPlaybackDevices(AudioDevice[] playbackDevices);
void setAudioCaptureNoiseSuppressionLevel(ObjectProperty<NoiseSuppressionLevel> level);
void setAudioCaptureLevel(double value);

View file

@ -20,8 +20,7 @@ package org.lecturestudio.presenter.api.view;
import java.util.List;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.beans.BooleanProperty;
import org.lecturestudio.core.beans.ObjectProperty;
import org.lecturestudio.core.beans.StringProperty;
@ -39,11 +38,11 @@ public interface StartStreamView extends View {
void setAudioCaptureDevice(StringProperty captureDeviceName);
void setAudioCaptureDevices(AudioInputDevice[] captureDevices);
void setAudioCaptureDevices(AudioDevice[] captureDevices);
void setAudioPlaybackDevice(StringProperty playbackDeviceName);
void setAudioPlaybackDevices(AudioOutputDevice[] playbackDevices);
void setAudioPlaybackDevices(AudioDevice[] playbackDevices);
void setCameraName(StringProperty cameraName);

View file

@ -42,7 +42,6 @@ class AdjustAudioCaptureLevelPresenterTest extends PresenterTest {
void setup() {
AudioConfiguration audioConfig = context.getConfiguration().getAudioConfig();
audioConfig.setCaptureDeviceName("dummy");
audioConfig.setSoundSystem("dummy");
view = new AdjustAudioCaptureLevelMockView();

View file

@ -74,13 +74,12 @@ class MainPresenterTest extends PresenterTest {
void setup() throws IOException {
AudioConfiguration audioConfig = context.getConfiguration().getAudioConfig();
audioConfig.setCaptureDeviceName("dummy");
audioConfig.setSoundSystem("dummy");
documentService = context.getDocumentService();
bookmarkService = context.getBookmarkService();
FileLectureRecorder recorder = new FileLectureRecorder(documentService, audioConfig, context.getRecordingDirectory());
FileLectureRecorder recorder = new FileLectureRecorder(audioSystemProvider, documentService, audioConfig, context.getRecordingDirectory());
recordingService = new RecordingService(context, recorder);

View file

@ -36,6 +36,7 @@ import org.lecturestudio.core.app.AppDataLocator;
import org.lecturestudio.core.app.ApplicationContext;
import org.lecturestudio.core.app.configuration.Configuration;
import org.lecturestudio.core.app.dictionary.Dictionary;
import org.lecturestudio.core.audio.AudioSystemProvider;
import org.lecturestudio.core.bus.EventBus;
import org.lecturestudio.core.model.Document;
import org.lecturestudio.core.presenter.NotificationPresenter;
@ -53,6 +54,7 @@ import org.lecturestudio.presenter.api.context.PresenterContext;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.lecturestudio.presenter.audio.DummyAudioSystemProvider;
abstract class PresenterTest {
@ -60,6 +62,8 @@ abstract class PresenterTest {
PresenterContext context;
AudioSystemProvider audioSystemProvider;
ViewContextFactory viewFactory;
AtomicReference<NotificationMockView> notifyViewRef;
@ -122,6 +126,8 @@ abstract class PresenterTest {
context.getDocumentService().selectDocument(document);
viewFactory = new ViewContextMockFactory();
audioSystemProvider = new DummyAudioSystemProvider();
}
@AfterEach

View file

@ -50,11 +50,10 @@ class QuitRecordingPresenterTest extends PresenterTest {
void setup() throws IOException {
AudioConfiguration audioConfig = context.getConfiguration().getAudioConfig();
audioConfig.setCaptureDeviceName("dummy");
audioConfig.setSoundSystem("dummy");
DocumentService documentService = context.getDocumentService();
FileLectureRecorder recorder = new FileLectureRecorder(documentService, audioConfig, context.getRecordingDirectory());
FileLectureRecorder recorder = new FileLectureRecorder(audioSystemProvider, documentService, audioConfig, context.getRecordingDirectory());
recordingService = new RecordingService(context, recorder);

View file

@ -48,7 +48,6 @@ class RecordSettingsPresenterTest extends PresenterTest {
void setup() {
AudioConfiguration config = context.getConfiguration().getAudioConfig();
config.setCaptureDeviceName("dummy");
config.setSoundSystem("dummy");
config.setRecordingPath(config.getRecordingPath());
config.setRecordingFormat(new AudioFormat(AudioFormat.Encoding.S32LE, 32000, 4));
}

View file

@ -54,10 +54,9 @@ class RestoreRecordingPresenterTest extends PresenterTest {
void setUp() throws IOException, ExecutableException {
AudioConfiguration audioConfig = context.getConfiguration().getAudioConfig();
audioConfig.setCaptureDeviceName("dummy");
audioConfig.setSoundSystem("dummy");
DocumentService documentService = context.getDocumentService();
FileLectureRecorder recorder = new FileLectureRecorder(documentService, audioConfig, context.getRecordingDirectory());
FileLectureRecorder recorder = new FileLectureRecorder(audioSystemProvider, documentService, audioConfig, context.getRecordingDirectory());
recordingService = new RecordingService(context, recorder);
recordingService.start();

View file

@ -74,11 +74,10 @@ class SaveRecordingPresenterTest extends PresenterTest {
AudioConfiguration audioConfig = context.getConfiguration().getAudioConfig();
audioConfig.setCaptureDeviceName("dummy");
audioConfig.setSoundSystem("dummy");
documentService = context.getDocumentService();
FileLectureRecorder recorder = new FileLectureRecorder(documentService, audioConfig, context.getRecordingDirectory());
FileLectureRecorder recorder = new FileLectureRecorder(audioSystemProvider, documentService, audioConfig, context.getRecordingDirectory());
recordingService = new RecordingService(context, recorder);

View file

@ -22,10 +22,11 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import org.lecturestudio.core.app.configuration.AudioConfiguration;
import org.lecturestudio.core.audio.device.AudioInputDevice;
import org.lecturestudio.core.audio.device.AudioOutputDevice;
import org.lecturestudio.core.audio.AudioProcessingSettings.NoiseSuppressionLevel;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.core.beans.BooleanProperty;
import org.lecturestudio.core.beans.FloatProperty;
import org.lecturestudio.core.beans.ObjectProperty;
import org.lecturestudio.core.beans.StringProperty;
import org.lecturestudio.core.view.Action;
import org.lecturestudio.core.view.ConsumerAction;
@ -44,11 +45,10 @@ class SoundSettingsPresenterTest extends PresenterTest {
void setup() {
AudioConfiguration config = context.getConfiguration().getAudioConfig();
config.setCaptureDeviceName("dummy");
config.setSoundSystem("dummy");
view = new SoundSettingsMockView();
SoundSettingsPresenter presenter = new SoundSettingsPresenter(context, view);
SoundSettingsPresenter presenter = new SoundSettingsPresenter(context, view, audioSystemProvider);
presenter.initialize();
}
@ -59,7 +59,6 @@ class SoundSettingsPresenterTest extends PresenterTest {
AudioConfiguration config = context.getConfiguration().getAudioConfig();
AudioConfiguration defaultConfig = new DefaultConfiguration().getAudioConfig();
assertEquals(defaultConfig.getSoundSystem(), config.getSoundSystem());
assertEquals(defaultConfig.getCaptureDeviceName(), config.getCaptureDeviceName());
assertEquals(defaultConfig.getDefaultRecordingVolume(), config.getDefaultRecordingVolume());
assertEquals(defaultConfig.getRecordingVolumes(), config.getRecordingVolumes());
@ -83,7 +82,7 @@ class SoundSettingsPresenterTest extends PresenterTest {
}
@Override
public void setAudioCaptureDevices(AudioInputDevice[] captureDevices) {
public void setAudioCaptureDevices(AudioDevice[] captureDevices) {
}
@ -93,8 +92,13 @@ class SoundSettingsPresenterTest extends PresenterTest {
}
@Override
public void setAudioPlaybackDevices(
AudioOutputDevice[] playbackDevices) {
public void setAudioPlaybackDevices(AudioDevice[] playbackDevices) {
}
@Override
public void setAudioCaptureNoiseSuppressionLevel(
ObjectProperty<NoiseSuppressionLevel> level) {
}

View file

@ -45,7 +45,6 @@ import org.lecturestudio.core.app.configuration.Configuration;
import org.lecturestudio.core.app.dictionary.Dictionary;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.AudioFormat.Encoding;
import org.lecturestudio.core.audio.AudioUtils;
import org.lecturestudio.core.audio.bus.AudioBus;
import org.lecturestudio.core.bus.ApplicationBus;
import org.lecturestudio.core.bus.EventBus;
@ -55,6 +54,7 @@ import org.lecturestudio.core.recording.DocumentRecorder;
import org.lecturestudio.core.service.DocumentService;
import org.lecturestudio.presenter.api.config.DefaultConfiguration;
import org.lecturestudio.presenter.api.context.PresenterContext;
import org.lecturestudio.presenter.audio.DummyAudioSystemProvider;
public class BasicRecordingTest {
@ -132,11 +132,9 @@ public class BasicRecordingTest {
};
int pageRecTimeout = 0;
String soundSystem = "Dummy";
AudioConfiguration audioConfig = context.getConfiguration().getAudioConfig();
audioConfig.setSoundSystem(soundSystem);
audioConfig.setCaptureDeviceName(AudioUtils.getDefaultAudioCaptureDevice(soundSystem).getName());
audioConfig.setCaptureDeviceName("dummy");
subscriber = new PageEventSubscriber();
@ -144,7 +142,9 @@ public class BasicRecordingTest {
documentService = context.getDocumentService();
recorder = new FileLectureRecorder(documentService, audioConfig, context.getRecordingDirectory());
DummyAudioSystemProvider audioSystemProvider = new DummyAudioSystemProvider();
recorder = new FileLectureRecorder(audioSystemProvider, documentService, audioConfig, context.getRecordingDirectory());
recorder.setAudioFormat(new AudioFormat(Encoding.S16LE, 44100, 1));
recorder.setPageRecordingTimeout(pageRecTimeout);

View file

@ -1,95 +0,0 @@
/*
* Copyright (C) 2021 TU Darmstadt, Department of Computer Science,
* Embedded Systems and Applications Group.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.lecturestudio.presenter.api.recording;
import java.util.ArrayList;
import java.util.List;
import org.lecturestudio.avdev.AudioCaptureDevice;
import org.lecturestudio.core.audio.AudioFormat;
import org.lecturestudio.core.audio.device.AudioDevice;
import org.lecturestudio.media.avdev.AVdevAudioInputDevice;
/**
 * Dummy audio capture device implementation.
 * <p>
 * A no-op capture device that reports itself as always open, never produces
 * audio data ({@code readInput} always returns 0), and advertises S16LE mono
 * formats for every supported sample rate. Useful in tests that need a
 * capture device without real audio hardware.
 *
 * @author Alex Andres
 */
public class DummyAudioInputDevice extends AVdevAudioInputDevice {
/**
 * Create a new DummyAudioInputDevice instance with the specified AVdev
 * capture device.
 *
 * @param device The AVdev capture device.
 */
public DummyAudioInputDevice(AudioCaptureDevice device) {
super(device);
}
/** Returns the fixed dummy device name. */
@Override
public String getName() {
return "DummyCapture";
}
/** Never produces audio data; always reports zero bytes read. */
@Override
protected int readInput(byte[] buffer, int offset, int length) {
return 0;
}
/** No-op: there is no real device to open. */
@Override
public void open() {
}
/** No-op: there is no real device to close. */
@Override
public void close() throws Exception {
}
/** No-op: there is no real capture stream to start. */
@Override
public void start() throws Exception {
}
/** No-op: there is no real capture stream to stop. */
@Override
public void stop() {
}
/** Always reports the device as open. */
@Override
public boolean isOpen() {
return true;
}
/**
 * Advertises one S16LE mono format per sample rate listed in
 * {@code AudioDevice.SUPPORTED_SAMPLE_RATES}.
 */
@Override
public List<AudioFormat> getSupportedFormats() {
AudioFormat.Encoding encoding = AudioFormat.Encoding.S16LE;
int channels = 1;
List<AudioFormat> formats = new ArrayList<>();
for (int sampleRate : AudioDevice.SUPPORTED_SAMPLE_RATES) {
formats.add(new AudioFormat(encoding, sampleRate, channels));
}
return formats;
}
/** Reports a zero-sized buffer, consistent with producing no data. */
@Override
public int getBufferSize() {
return 0;
}
}

Some files were not shown because too many files have changed in this diff Show more