diff --git a/.github/workflows/publish-package.yaml b/.github/workflows/publish-package.yaml
index a016c37..b70f834 100644
--- a/.github/workflows/publish-package.yaml
+++ b/.github/workflows/publish-package.yaml
@@ -20,6 +20,7 @@ jobs:
           NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - run: yarn
       - run: yarn build
+      - run: yarn build plugin
       - run: yarn npm publish
         env:
           NODE_AUTH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.yarn/install-state.gz b/.yarn/install-state.gz
index 1df1540..ce373cc 100644
Binary files a/.yarn/install-state.gz and b/.yarn/install-state.gz differ
diff --git a/android/src/main/AndroidManifest.xml b/android/src/main/AndroidManifest.xml
index bdae66c..0c245a9 100644
--- a/android/src/main/AndroidManifest.xml
+++ b/android/src/main/AndroidManifest.xml
@@ -1,2 +1,4 @@
-<manifest xmlns:android="http://schemas.android.com/apk/res/android" />
+<manifest xmlns:android="http://schemas.android.com/apk/res/android">
+  <uses-permission android:name="android.permission.RECORD_AUDIO" />
+</manifest>
diff --git a/android/src/main/java/expo/modules/audiostream/AudioDataEncoder.kt b/android/src/main/java/expo/modules/audiostream/AudioDataEncoder.kt
new file mode 100644
index 0000000..11434ab
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/AudioDataEncoder.kt
@@ -0,0 +1,9 @@
+package expo.modules.audiostream
+
+import android.util.Base64
+
+class AudioDataEncoder {
+    fun encodeToBase64(rawData: ByteArray): String {
+        return Base64.encodeToString(rawData, Base64.NO_WRAP)
+    }
+}
\ No newline at end of file
diff --git a/android/src/main/java/expo/modules/audiostream/AudioFileHandler.kt b/android/src/main/java/expo/modules/audiostream/AudioFileHandler.kt
new file mode 100644
index 0000000..0fe2e1c
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/AudioFileHandler.kt
@@ -0,0 +1,92 @@
+package expo.modules.audiostream
+
+import java.io.File
+import java.io.IOException
+import java.io.OutputStream
+import java.io.RandomAccessFile
+
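+/**
+ * Writes the 44-byte RIFF/WAVE header with placeholder size fields; the real
+ * sizes are patched in by [updateWavHeader] once recording has finished.
+ */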
+class AudioFileHandler(private val filesDir: File) {
+    // Method to write WAV file header
+    fun writeWavHeader(out: OutputStream, sampleRateInHz: Int, channels: Int, bitDepth: Int) {
+        val header = ByteArray(44)
+        val byteRate = sampleRateInHz * channels * bitDepth / 8
+        val blockAlign = channels * bitDepth / 8
+
+        // RIFF/WAVE header
+        "RIFF".toByteArray().copyInto(header, 0)
+        // (file size - 8) to be updated later
+        header[4] = 0 // Placeholder
+        header[5] = 0 // Placeholder
+        header[6] = 0 // Placeholder
+        header[7] = 0 // Placeholder
+        "WAVE".toByteArray().copyInto(header, 8)
+        "fmt ".toByteArray().copyInto(header, 12)
+
+        // 16 for PCM
+        header[16] = 16
+        header[17] = 0
+        header[18] = 0
+        header[19] = 0
+
+        // PCM format ID
+        header[20] = 1 // Audio format 1 for PCM (not compressed)
+        header[21] = 0
+
+        // Number of channels
+        header[22] = (channels and 0xff).toByte()
+        header[23] = (channels shr 8 and 0xff).toByte()
+
+        // Sample rate
+        header[24] = (sampleRateInHz and 0xff).toByte()
+        header[25] = (sampleRateInHz shr 8 and 0xff).toByte()
+        header[26] = (sampleRateInHz shr 16 and 0xff).toByte()
+        header[27] = (sampleRateInHz shr 24 and 0xff).toByte()
+
+        // Byte rate
+        header[28] = (byteRate and 0xff).toByte()
+        header[29] = (byteRate shr 8 and 0xff).toByte()
+        header[30] = (byteRate shr 16 and 0xff).toByte()
+        header[31] = (byteRate shr 24 and 0xff).toByte()
+
+        // Block align
+        header[32] = (blockAlign and 0xff).toByte()
+        header[33] = (blockAlign shr 8 and 0xff).toByte()
+
+        // Bits per sample
+        header[34] = (bitDepth and 0xff).toByte()
+        header[35] = (bitDepth shr 8 and 0xff).toByte()
+
+        // Data chunk
+        "data".toByteArray().copyInto(header, 36)
+        // Data size to be updated later
+        header[40] = 0 // Placeholder
+        header[41] = 0 // Placeholder
+        header[42] = 0 // Placeholder
+        header[43] = 0 // Placeholder
+
+        out.write(header, 0, 44)
+    }
+
+    fun updateWavHeader(file: File) {
+        try {
+            RandomAccessFile(file, "rw").use { raf ->
+                val fileSize = raf.length()
+                val dataSize = fileSize - 44 // Subtract the header size
+
+                raf.seek(4) // Write correct file size, excluding the first 8 bytes of the RIFF header
+                raf.writeInt(Integer.reverseBytes((dataSize + 36).toInt()))
+
+                raf.seek(40) // Go to the data size position
+                raf.writeInt(Integer.reverseBytes(dataSize.toInt())) // Write the size of the data segment
+            }
+        } catch (e: IOException) {
+            println("Could not update WAV header: ${e.message}")
+        }
+    }
+
+    fun clearAudioStorage() {
+        filesDir.listFiles()?.forEach {
+            it.delete()
+        }
+    }
+}
\ No newline at end of file
diff --git a/android/src/main/java/expo/modules/audiostream/AudioPlaybackManager.kt b/android/src/main/java/expo/modules/audiostream/AudioPlaybackManager.kt
new file mode 100644
index 0000000..3061a3d
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/AudioPlaybackManager.kt
@@ -0,0 +1,387 @@
+package expo.modules.audiostream
+
+import android.media.AudioAttributes
+import android.media.AudioFormat
+import android.media.AudioTrack
+import android.util.Base64
+import android.util.Log
+import expo.modules.kotlin.Promise
+import kotlinx.coroutines.CoroutineScope
+import kotlinx.coroutines.Dispatchers
+import kotlinx.coroutines.Job
+import kotlinx.coroutines.SupervisorJob
+import kotlinx.coroutines.cancel
+import kotlinx.coroutines.cancelAndJoin
+import kotlinx.coroutines.channels.Channel
+import kotlinx.coroutines.flow.consumeAsFlow
+import kotlinx.coroutines.launch
+import kotlinx.coroutines.suspendCancellableCoroutine
+import kotlinx.coroutines.withContext
+import java.nio.ByteBuffer
+import java.nio.ByteOrder
+import kotlin.coroutines.cancellation.CancellationException
+import kotlin.math.max
+import kotlin.math.min
+
+data class ChunkData(val chunk: String, val turnId: String, val promise: Promise) // contains the base64 chunk
+data class AudioChunk(
+    val audioData: FloatArray,
+    val promise: Promise,
+    val turnId: String,
+    var isPromiseSettled: Boolean = false
+) // contains the decoded base64 chunk
+
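+/**
+ * Streams base64-encoded PCM chunks through a shared AudioTrack. Each chunk
+ * carries a turnId, and only chunks matching the current turn are decoded and
+ * played, so callers can drop a stale turn's queued audio by switching turns.
+ */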
+class AudioPlaybackManager {
+    private lateinit var processingChannel: Channel<ChunkData>
+    private lateinit var playbackChannel: Channel<AudioChunk>
+
+    private val coroutineScope = CoroutineScope(Dispatchers.Default + SupervisorJob())
+
+    private var processingJob: Job? = null
+    private var currentPlaybackJob: Job? = null
+
+    private lateinit var audioTrack: AudioTrack
+    private var isPlaying = false
+    private var isMuted = false
+    private var currentTurnId: String? = null
+
+    init {
+        initializeAudioTrack()
+        initializeChannels()
+    }
+
+    fun playAudio(chunk: String, turnId: String, promise: Promise) {
+        coroutineScope.launch {
+            if (processingChannel.isClosedForSend || playbackChannel.isClosedForSend) {
+                Log.d("ExpoPlayStreamModule", "Re-initializing channels")
+                initializeChannels()
+            }
+            Log.d("ExpoPlayStreamModule", "PlayAudio input $turnId and current id $currentTurnId")
+            currentTurnId = turnId
+            isMuted = false
+            processingChannel.send(ChunkData(chunk, turnId, promise))
+            ensureProcessingLoopStarted()
+        }
+    }
+
+    fun setCurrentTurnId(turnId: String) {
+        currentTurnId = turnId
+    }
+
+    private fun initializeChannels() {
+        // Re-create a channel only if it was never initialized or has been closed
+        if (!::processingChannel.isInitialized || processingChannel.isClosedForSend) {
+            processingChannel = Channel<ChunkData>(Channel.UNLIMITED)
+        }
+        if (!::playbackChannel.isInitialized || playbackChannel.isClosedForSend) {
+            playbackChannel = Channel<AudioChunk>(Channel.UNLIMITED)
+        }
+    }
+
+    fun runOnDispose() {
+        stopPlayback()
+        processingChannel.close()
+        stopProcessingLoop()
+        coroutineScope.cancel()
+    }
+
+    fun stopProcessingLoop() {
+        processingJob?.cancel()
+        processingJob = null
+    }
+
+    private fun ensureProcessingLoopStarted() {
+        if (processingJob == null || processingJob?.isActive != true) {
+            startProcessingLoop()
+        }
+    }
+
+    private fun startProcessingLoop() {
+        processingJob =
+            coroutineScope.launch {
+                for (chunkData in processingChannel) {
+                    Log.d("ExpoPlayStreamModule", "Received TurnId ${chunkData.turnId} and current id $currentTurnId and playback is Muted $isMuted")
+                    if (chunkData.turnId == currentTurnId) {
+                        processAndEnqueueChunk(chunkData)
+                    }
+
+                    if (processingChannel.isEmpty && !isPlaying && playbackChannel.isEmpty) {
+                        break // Stop the loop if there's no more work to do
+                    }
+                }
+                Log.d("ExpoPlayStreamModule", "Clear Processing JOB")
+                processingJob = null
+            }
+    }
+
+    private suspend fun processAndEnqueueChunk(chunkData: ChunkData) {
+        try {
+            val decodedBytes = Base64.decode(chunkData.chunk, Base64.DEFAULT)
+            val audioDataWithoutRIFF = removeRIFFHeaderIfNeeded(decodedBytes)
+            val audioData = convertPCMDataToFloatArray(audioDataWithoutRIFF)
+
+            playbackChannel.send(
+                AudioChunk(
+                    audioData,
+                    chunkData.promise,
+                    chunkData.turnId
+                )
+            )
+
+            if (!isPlaying) {
+                Log.d("ExpoPlayStreamModule", "Start Playback")
+                startPlayback()
+            }
+        } catch (e: Exception) {
+            chunkData.promise.reject("ERR_PROCESSING_AUDIO", e.message, e)
+        }
+    }
+
+    fun setVolume(volume: Double, promise: Promise) {
+        val clampedVolume = max(0.0, min(volume, 100.0)) / 100.0
+        try {
+            audioTrack.setVolume(clampedVolume.toFloat())
+            promise.resolve(null)
+        } catch (e: Exception) {
+            promise.reject("ERR_SET_VOLUME", e.message, e)
+        }
+    }
+
+    fun mutePlayback(promise: Promise) {
+        Log.d("ExpoPlayStreamModule", "Mute Playback")
+        isMuted = true
+        promise.resolve(null)
+    }
+
+    fun pausePlayback(promise: Promise? = null, isFlushAudioTrack: Boolean = false) {
+        try {
+            audioTrack.pause()
+            if (isFlushAudioTrack) {
+                audioTrack.flush()
+            }
+            isPlaying = false
+            currentPlaybackJob?.cancel()
+            promise?.resolve(null)
+        } catch (e: Exception) {
+            promise?.reject("ERR_PAUSE_PLAYBACK", e.message, e)
+        }
+    }
+
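+    // Note: starting playback also (re)starts the playback and processing
+    // loops, so this doubles as the resume path after pausePlayback().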
+    fun startPlayback(promise: Promise? = null) {
+        try {
+            if (!isPlaying) {
+                audioTrack.play()
+                isPlaying = true
+                Log.d("ExpoPlayStreamModule", "Starting Playback Loop")
+                startPlaybackLoop()
+                Log.d("ExpoPlayStreamModule", "Ensure processing Loop Started in startPlayback")
+                ensureProcessingLoopStarted()
+            }
+            promise?.resolve(null)
+        } catch (e: Exception) {
+            promise?.reject("ERR_START_PLAYBACK", e.message, e)
+        }
+    }
+
+    fun stopPlayback(promise: Promise? = null) {
+        Log.d("ExpoPlayStreamModule", "Stopping playback")
+        if (!isPlaying || playbackChannel.isEmpty) {
+            promise?.resolve(null)
+            Log.d("ExpoPlayStreamModule", "Nothing is played return")
+            return
+        }
+        isPlaying = false
+        coroutineScope.launch {
+            try {
+                Log.d("ExpoPlayStreamModule", "Stopping audioTrack")
+                audioTrack.stop()
+                Log.d("ExpoPlayStreamModule", "Flushing audioTrack")
+                audioTrack.flush()
+                // Safely cancel jobs
+                if (currentPlaybackJob != null) {
+                    Log.d("ExpoPlayStreamModule", "Cancelling currentPlaybackJob")
+                    currentPlaybackJob?.cancelAndJoin()
+                    currentPlaybackJob = null
+                }
+
+                if (processingJob != null) {
+                    Log.d("ExpoPlayStreamModule", "Cancelling processingJob")
+                    processingJob?.cancelAndJoin()
+                    processingJob = null
+                }
+
+                // Resolve remaining promises in playbackChannel
+                Log.d("ExpoPlayStreamModule", "Resolving remaining promises in playbackChannel")
+                for (chunk in playbackChannel) {
+                    Log.d("ExpoPlayStreamModule", "New chunk $chunk")
+                    if (!chunk.isPromiseSettled) {
+                        chunk.isPromiseSettled = true
+                        chunk.promise.resolve(null)
+                    }
+                }
+
+                Log.d("ExpoPlayStreamModule", "Closing the channels")
+                if (!processingChannel.isClosedForSend) {
+                    Log.d("ExpoPlayStreamModule", "Closing processingChannel")
+                    processingChannel.close()
+                } else {
+                    Log.d("ExpoPlayStreamModule", "Processing channel is already closed")
+                }
+
+                Log.d("ExpoPlayStreamModule", "Checking if playbackChannel is closed")
+                if (!playbackChannel.isClosedForSend) {
+                    Log.d("ExpoPlayStreamModule", "Closing playbackChannel")
+                    playbackChannel.close()
+                } else {
+                    Log.d("ExpoPlayStreamModule", "Playback channel is already closed")
+                }
+
+                Log.d("ExpoPlayStreamModule", "Stopped")
+                promise?.resolve(null)
+            } catch (e: CancellationException) {
+                Log.d("ExpoPlayStreamModule", "Stop playback was cancelled: ${e.message}")
+                promise?.resolve(null)
+            } catch (e: Exception) {
+                Log.d("ExpoPlayStreamModule", "Error in stopPlayback: ${e.message}")
+                promise?.reject("ERR_STOP_PLAYBACK", e.message, e)
+            }
+        }
+    }
+
+    private fun initializeAudioTrack() {
+        val audioFormat =
+            AudioFormat.Builder()
+                .setSampleRate(16000)
+                .setEncoding(AudioFormat.ENCODING_PCM_FLOAT)
+                .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
+                .build()
+
+        val minBufferSize =
+            AudioTrack.getMinBufferSize(
+                16000,
+                AudioFormat.CHANNEL_OUT_MONO,
+                AudioFormat.ENCODING_PCM_FLOAT
+            )
+
+        audioTrack =
+            AudioTrack.Builder()
+                .setAudioAttributes(
+                    AudioAttributes.Builder()
+                        .setUsage(AudioAttributes.USAGE_MEDIA)
+                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
+                        .build()
+                )
+                .setAudioFormat(audioFormat)
+                .setBufferSizeInBytes(minBufferSize * 2)
+                .setTransferMode(AudioTrack.MODE_STREAM)
+                .build()
+    }
+
+    private fun startPlaybackLoop() {
+        currentPlaybackJob =
+            coroutineScope.launch {
+                playbackChannel.consumeAsFlow().collect { chunk ->
+                    if (isPlaying) {
+                        Log.d("ExpoPlayStreamModule", "Playing chunk : $chunk")
+                        if (currentTurnId == chunk.turnId) {
+                            playChunk(chunk)
+                        }
+                    } else {
+                        // If not playing, we should resolve the promise to avoid leaks
+                        chunk.promise.resolve(null)
+                    }
+                }
+            }
+    }
+
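+    // Writes one chunk with WRITE_BLOCKING and suspends until the AudioTrack
+    // marker reaches the chunk's last frame, so queued chunks play back to back.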
+    private suspend fun playChunk(chunk: AudioChunk) {
+        withContext(Dispatchers.IO) {
+            try {
+                val chunkSize = chunk.audioData.size
+
+                suspendCancellableCoroutine<Unit> { continuation ->
+                    val listener =
+                        object : AudioTrack.OnPlaybackPositionUpdateListener {
+                            override fun onMarkerReached(track: AudioTrack) {
+                                audioTrack.setPlaybackPositionUpdateListener(null)
+                                if (!chunk.isPromiseSettled) {
+                                    chunk.isPromiseSettled = true
+                                    chunk.promise.resolve(null)
+                                }
+                                continuation.resumeWith(Result.success(Unit))
+                            }
+
+                            override fun onPeriodicNotification(track: AudioTrack) {}
+                        }
+
+                    audioTrack.setPlaybackPositionUpdateListener(listener)
+                    audioTrack.setNotificationMarkerPosition(chunkSize)
+                    val written =
+                        audioTrack.write(
+                            chunk.audioData,
+                            0,
+                            chunkSize,
+                            AudioTrack.WRITE_BLOCKING
+                        )
+
+                    Log.d("ExpoPlayStreamModule", "Chunk played : $written")
+                    if (written != chunkSize) {
+                        audioTrack.setPlaybackPositionUpdateListener(null)
+                        val error = Exception("Failed to write entire audio chunk")
+                        if (!chunk.isPromiseSettled) {
+                            chunk.isPromiseSettled = true
+                            // chunk.promise.reject("ERR_PLAYBACK", error.message, error)
+                            chunk.promise.resolve(null)
+                        }
+                        continuation.resumeWith(Result.failure(error))
+                    }
+
+                    continuation.invokeOnCancellation {
+                        audioTrack.setPlaybackPositionUpdateListener(null)
+                        if (!chunk.isPromiseSettled) {
+                            chunk.isPromiseSettled = true
+                            chunk.promise.reject(
+                                "ERR_PLAYBACK_CANCELLED",
+                                "Playback was cancelled",
+                                null
+                            )
+                        }
+                    }
+                }
+            } catch (e: Exception) {
+                if (!chunk.isPromiseSettled) {
+                    chunk.isPromiseSettled = true
+                    chunk.promise.reject("ERR_PLAYBACK", e.message, e)
+                }
+            }
+        }
+    }
+
+    private fun convertPCMDataToFloatArray(pcmData: ByteArray): FloatArray {
+        val shortBuffer = ByteBuffer.wrap(pcmData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer()
+        val shortArray = ShortArray(shortBuffer.remaining())
+        shortBuffer.get(shortArray)
+        return FloatArray(shortArray.size) { index -> shortArray[index] / 32768.0f }
+    }
+
+    private fun removeRIFFHeaderIfNeeded(audioData: ByteArray): ByteArray {
+        val headerSize = 44
+        val riffHeader = "RIFF".toByteArray(Charsets.US_ASCII)
+
+        return if (audioData.size > headerSize && audioData.startsWith(riffHeader)) {
+            audioData.copyOfRange(headerSize, audioData.size)
+        } else {
+            audioData
+        }
+    }
+
+    private fun ByteArray.startsWith(prefix: ByteArray): Boolean {
+        if (this.size < prefix.size) return false
+        return prefix.contentEquals(this.sliceArray(prefix.indices))
+    }
+}
\ No newline at end of file
diff --git a/android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt b/android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt
new file mode 100644
index 0000000..63214a9
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/AudioRecorderManager.kt
@@ -0,0 +1,528 @@
+package expo.modules.audiostream
+
+import android.media.AudioFormat
+import android.media.AudioRecord
+import android.media.MediaRecorder
+import android.os.Build
+import android.os.Bundle
+import android.os.Handler
+import android.os.Looper
+import android.os.SystemClock
+import android.util.Log
+import androidx.annotation.RequiresApi
+import androidx.core.os.bundleOf
+import expo.modules.kotlin.Promise
+import java.io.ByteArrayOutputStream
+import java.io.File
+import java.io.FileOutputStream
+import java.io.IOException
+import java.util.concurrent.atomic.AtomicBoolean
+
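+/**
+ * Captures microphone PCM with AudioRecord on a dedicated thread, appends it
+ * to a WAV file, and emits base64-encoded chunks to JS via [EventSender].
+ */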
+class AudioRecorderManager(
+    private val filesDir: File,
+    private val permissionUtils: PermissionUtils,
+    private val audioDataEncoder: AudioDataEncoder,
+    private val eventSender: EventSender
+) {
+    private var audioRecord: AudioRecord? = null
+    private var bufferSizeInBytes = 0
+    private val isRecording = AtomicBoolean(false)
+    private val isPaused = AtomicBoolean(false)
+    private var streamUuid: String? = null
+    private var audioFile: File? = null
+    private var recordingThread: Thread? = null
+    private var recordingStartTime: Long = 0
+    private var totalRecordedTime: Long = 0
+    private var totalDataSize = 0
+    private var interval = 1000L // Emit data every 1000 milliseconds (1 second)
+    private var lastEmitTime = SystemClock.elapsedRealtime()
+    private var lastPauseTime = 0L
+    private var pausedDuration = 0L
+    private var lastEmittedSize = 0L
+    private val mainHandler = Handler(Looper.getMainLooper())
+    private val audioRecordLock = Any()
+    private var audioFileHandler: AudioFileHandler = AudioFileHandler(filesDir)
+
+    private lateinit var recordingConfig: RecordingConfig
+    private var mimeType = "audio/wav"
+    private var audioFormat: Int = AudioFormat.ENCODING_PCM_16BIT
+
+    @RequiresApi(Build.VERSION_CODES.R)
+    fun startRecording(options: Map<String, Any?>, promise: Promise) {
+        if (!permissionUtils.checkRecordingPermission()) {
+            promise.reject("PERMISSION_DENIED", "Recording permission has not been granted", null)
+            return
+        }
+
+        if (isRecording.get() && !isPaused.get()) {
+            promise.reject("ALREADY_RECORDING", "Recording is already in progress", null)
+            return
+        }
+
+        // Initialize the recording configuration
+        var tempRecordingConfig = RecordingConfig(
+            sampleRate = (options["sampleRate"] as? Number)?.toInt() ?: Constants.DEFAULT_SAMPLE_RATE,
+            channels = (options["channels"] as? Number)?.toInt() ?: 1,
+            encoding = options["encoding"] as? String ?: "pcm_16bit",
+            interval = (options["interval"] as? Number)?.toLong() ?: Constants.DEFAULT_INTERVAL,
+            pointsPerSecond = (options["pointsPerSecond"] as? Number)?.toDouble() ?: 20.0
+        )
+        Log.d(Constants.TAG, "Initial recording configuration: $tempRecordingConfig")
+
+        // Validate sample rate and channels
+        if (tempRecordingConfig.sampleRate !in listOf(16000, 44100, 48000)) {
+            promise.reject(
+                "INVALID_SAMPLE_RATE",
+                "Sample rate must be one of 16000, 44100, or 48000 Hz",
+                null
+            )
+            return
+        }
+        if (tempRecordingConfig.channels !in 1..2) {
+            promise.reject(
+                "INVALID_CHANNELS",
+                "Channels must be either 1 (Mono) or 2 (Stereo)",
+                null
+            )
+            return
+        }
+
+        // Set encoding and file extension
+        var fileExtension = "wav"
+        audioFormat = when (tempRecordingConfig.encoding) {
+            "pcm_8bit" -> {
+                fileExtension = "wav"
+                mimeType = "audio/wav"
+                AudioFormat.ENCODING_PCM_8BIT
+            }
+            "pcm_16bit" -> {
+                fileExtension = "wav"
+                mimeType = "audio/wav"
+                AudioFormat.ENCODING_PCM_16BIT
+            }
+            "pcm_32bit" -> {
+                fileExtension = "wav"
+                mimeType = "audio/wav"
+                AudioFormat.ENCODING_PCM_FLOAT
+            }
+            "opus" -> {
+                if (Build.VERSION.SDK_INT < Build.VERSION_CODES.Q) {
+                    promise.reject(
+                        "UNSUPPORTED_FORMAT",
+                        "Opus encoding not supported on this Android version.",
+                        null
+                    )
+                    return
+                }
+                fileExtension = "opus"
+                mimeType = "audio/opus"
+                AudioFormat.ENCODING_OPUS
+            }
+            "aac_lc" -> {
+                fileExtension = "aac"
+                mimeType = "audio/aac"
+                AudioFormat.ENCODING_AAC_LC
+            }
+            else -> {
+                fileExtension = "wav"
+                mimeType = "audio/wav"
+                AudioFormat.ENCODING_DEFAULT
+            }
+        }
+
+        // Check if selected audio format is supported; fall back to 16-bit PCM once
+        if (!isAudioFormatSupported(tempRecordingConfig.sampleRate, tempRecordingConfig.channels, audioFormat)) {
+            Log.e(Constants.TAG, "Selected audio format not supported, falling back to 16-bit PCM")
+            audioFormat = AudioFormat.ENCODING_PCM_16BIT
+            if (!isAudioFormatSupported(tempRecordingConfig.sampleRate, tempRecordingConfig.channels, audioFormat)) {
+                promise.reject("INITIALIZATION_FAILED", "Failed to initialize audio recorder with any supported format", null)
+                return
+            }
+            tempRecordingConfig = tempRecordingConfig.copy(encoding = "pcm_16bit")
+        }
+
+        // Update recordingConfig with potentially new encoding
+        recordingConfig = tempRecordingConfig
+
+        // Recalculate bufferSizeInBytes if the format has changed
+        bufferSizeInBytes = AudioRecord.getMinBufferSize(
+            recordingConfig.sampleRate,
+            if (recordingConfig.channels == 1) AudioFormat.CHANNEL_IN_MONO else AudioFormat.CHANNEL_IN_STEREO,
+            audioFormat
+        )
+
+        if (bufferSizeInBytes == AudioRecord.ERROR || bufferSizeInBytes == AudioRecord.ERROR_BAD_VALUE || bufferSizeInBytes < 0) {
+            Log.e(Constants.TAG, "Failed to get minimum buffer size, falling back to default buffer size.")
+            bufferSizeInBytes = 4096 // Default buffer size in bytes
+        }
+
+        Log.d(Constants.TAG, "AudioFormat: $audioFormat, BufferSize: $bufferSizeInBytes")
+
+        // Initialize the AudioRecord if it's a new recording or if it's not currently paused
+        if (audioRecord == null || !isPaused.get()) {
+            audioRecord = AudioRecord(
+                MediaRecorder.AudioSource.MIC,
+                recordingConfig.sampleRate,
+                if (recordingConfig.channels == 1) AudioFormat.CHANNEL_IN_MONO else AudioFormat.CHANNEL_IN_STEREO,
+                audioFormat,
+                bufferSizeInBytes
+            )
+            if (audioRecord?.state != AudioRecord.STATE_INITIALIZED) {
+                promise.reject(
+                    "INITIALIZATION_FAILED",
+                    "Failed to initialize the audio recorder",
+                    null
+                )
+                return
+            }
+        }
+
+        streamUuid = java.util.UUID.randomUUID().toString()
+        audioFile = File(filesDir, "audio_${streamUuid}.${fileExtension}")
+
+        try {
+            FileOutputStream(audioFile, true).use { fos ->
+                audioFileHandler.writeWavHeader(fos, recordingConfig.sampleRate, recordingConfig.channels, when (recordingConfig.encoding) {
+                    "pcm_8bit" -> 8
+                    "pcm_16bit" -> 16
+                    "pcm_32bit" -> 32
+                    else -> 16 // Default to 16 if the encoding is not recognized
+                })
+            }
+        } catch (e: IOException) {
+            promise.reject("FILE_CREATION_FAILED", "Failed to create the audio file", e)
+            return
+        }
+
+        val wasPaused = isPaused.get()
+        audioRecord?.startRecording()
+        isPaused.set(false)
+        isRecording.set(true)
+
+        if (!wasPaused) {
+            recordingStartTime = System.currentTimeMillis() // Only reset start time if it's not a resume
+        }
+
+        recordingThread = Thread { recordingProcess() }.apply { start() }
+
+        val result = bundleOf(
+            "fileUri" to audioFile?.toURI().toString(),
+            "channels" to recordingConfig.channels,
+            "bitDepth" to when (recordingConfig.encoding) {
+                "pcm_8bit" -> 8
+                "pcm_16bit" -> 16
+                "pcm_32bit" -> 32
+                else -> 16 // Default to 16 if the encoding is not recognized
+            },
+            "sampleRate" to recordingConfig.sampleRate,
+            "mimeType" to mimeType
+        )
+        promise.resolve(result)
+    }
+
+    private fun isAudioFormatSupported(sampleRate: Int, channels: Int, format: Int): Boolean {
+        if (!permissionUtils.checkRecordingPermission()) {
+            throw SecurityException("Recording permission has not been granted")
+        }
+
+        val channelConfig = if (channels == 1) AudioFormat.CHANNEL_IN_MONO else AudioFormat.CHANNEL_IN_STEREO
+        val bufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, format)
+
+        if (bufferSize <= 0) {
+            return false
+        }
+
+        val audioRecord = AudioRecord(
+            MediaRecorder.AudioSource.MIC,
+            sampleRate,
+            channelConfig,
+            format,
+            bufferSize
+        )
+
+        val isSupported = audioRecord.state == AudioRecord.STATE_INITIALIZED
+        if (isSupported) {
+            val testBuffer = ByteArray(bufferSize)
+            audioRecord.startRecording()
+            val testRead = audioRecord.read(testBuffer, 0, bufferSize)
+            audioRecord.stop()
+            if (testRead < 0) {
+                audioRecord.release()
+                return false
+            }
+        }
+
+        audioRecord.release()
+        return isSupported
+    }
+
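+    // Drains one final buffer from AudioRecord so the last partial interval is
+    // emitted, then stops and releases the recorder and resolves with metadata.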
+    fun stopRecording(promise: Promise) {
+        synchronized(audioRecordLock) {
+            if (!isRecording.get()) {
+                Log.e(Constants.TAG, "Recording is not active")
+                promise.reject("NOT_RECORDING", "Recording is not active", null)
+                return
+            }
+
+            try {
+                val audioData = ByteArray(bufferSizeInBytes)
+                val bytesRead = audioRecord?.read(audioData, 0, bufferSizeInBytes) ?: -1
+                Log.d(Constants.TAG, "Last Read $bytesRead bytes")
+                if (bytesRead > 0) {
+                    emitAudioData(audioData, bytesRead)
+                }
+
+                Log.d(Constants.TAG, "Stopping recording state = ${audioRecord?.state}")
+                if (audioRecord != null && audioRecord!!.state == AudioRecord.STATE_INITIALIZED) {
+                    Log.d(Constants.TAG, "Stopping AudioRecord")
+                    audioRecord!!.stop()
+                }
+            } catch (e: IllegalStateException) {
+                Log.e(Constants.TAG, "Error reading from AudioRecord", e)
+            } finally {
+                audioRecord?.release()
+            }
+
+            try {
+                val fileSize = audioFile?.length() ?: 0
+                val dataFileSize = fileSize - 44 // Subtract header size
+                val byteRate = recordingConfig.sampleRate * recordingConfig.channels * when (recordingConfig.encoding) {
+                    "pcm_8bit" -> 1
+                    "pcm_16bit" -> 2
+                    "pcm_32bit" -> 4
+                    else -> 2 // Default to 2 bytes per sample if the encoding is not recognized
+                }
+                // Calculate duration based on the data size and byte rate
+                val duration = if (byteRate > 0) (dataFileSize * 1000 / byteRate) else 0
+
+                // Create result bundle
+                val result = bundleOf(
+                    "fileUri" to audioFile?.toURI().toString(),
+                    "filename" to audioFile?.name,
+                    "durationMs" to duration,
+                    "channels" to recordingConfig.channels,
+                    "bitDepth" to when (recordingConfig.encoding) {
+                        "pcm_8bit" -> 8
+                        "pcm_16bit" -> 16
+                        "pcm_32bit" -> 32
+                        else -> 16 // Default to 16 if the encoding is not recognized
+                    },
+                    "sampleRate" to recordingConfig.sampleRate,
+                    "size" to fileSize,
+                    "mimeType" to mimeType
+                )
+                promise.resolve(result)
+
+                // Reset the timing variables
+                isRecording.set(false)
+                isPaused.set(false)
+                totalRecordedTime = 0
+                pausedDuration = 0
+            } catch (e: Exception) {
+                Log.d(Constants.TAG, "Failed to stop recording", e)
+                promise.reject("STOP_FAILED", "Failed to stop recording", e)
+            } finally {
+                audioRecord = null
+            }
+        }
+    }
+
+    fun pauseRecording(promise: Promise) {
+        if (isRecording.get() && !isPaused.get()) {
+            audioRecord?.stop()
+            lastPauseTime = System.currentTimeMillis() // Record the time when the recording was paused
+            isPaused.set(true)
+            promise.resolve("Recording paused")
+        } else {
+            promise.reject(
+                "NOT_RECORDING_OR_ALREADY_PAUSED",
+                "Recording is either not active or already paused",
+                null
+            )
+        }
+    }
+
+    fun resumeRecording(promise: Promise) {
+        if (isRecording.get() && !isPaused.get()) {
+            promise.reject("NOT_PAUSED", "Recording is not paused", null)
+            return
+        } else if (audioRecord == null) {
+            promise.reject("NOT_RECORDING", "Recording is not active", null)
+            return
+        }
+
+        // Calculate the duration the recording was paused
+        pausedDuration += System.currentTimeMillis() - lastPauseTime
+        isPaused.set(false)
+        audioRecord?.startRecording()
+        promise.resolve("Recording resumed")
+    }
+
+    fun getStatus(): Bundle {
+        synchronized(audioRecordLock) {
+            if (!isRecording.get()) {
+                Log.d(Constants.TAG, "Not recording --- skip status with default values")
+
+                return bundleOf(
+                    "isRecording" to false,
+                    "isPaused" to false,
+                    "mimeType" to mimeType,
+                    "size" to 0,
+                    "interval" to interval,
+                )
+            }
+
+            // audioFile may be null if the recording has not produced a file yet
+            val fileSize = audioFile?.length() ?: 0
+
+            val duration = when (mimeType) {
+                "audio/wav" -> {
+                    val dataFileSize = fileSize - Constants.WAV_HEADER_SIZE // WAV header is always 44 bytes
+                    val byteRate = recordingConfig.sampleRate * recordingConfig.channels * (if (recordingConfig.encoding == "pcm_8bit") 8 else 16) / 8
+                    if (byteRate > 0) dataFileSize * 1000 / byteRate else 0
+                }
+                "audio/opus", "audio/aac" -> getCompressedAudioDuration(audioFile)
+                else -> 0
+            }
+            return bundleOf(
+                "durationMs" to duration,
+                "isRecording" to isRecording.get(),
+                "isPaused" to isPaused.get(),
+                "mimeType" to mimeType,
+                "size" to totalDataSize,
+                "interval" to recordingConfig.interval
+            )
+        }
+    }
+
+    fun listAudioFiles(promise: Promise) {
+        val fileList =
+            filesDir.list()?.filter { it.endsWith(".wav") }?.map { File(filesDir, it).absolutePath }
+                ?: listOf()
+        promise.resolve(fileList)
+    }
+
+    fun clearAudioStorage(promise: Promise) {
+        audioFileHandler.clearAudioStorage()
+        promise.resolve(null)
+    }
+
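+    // Runs on the recording thread: drains AudioRecord into the WAV file and
+    // accumulates bytes until `interval` elapses, then emits them to JS.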
+    private fun recordingProcess() {
+        Log.i(Constants.TAG, "Starting recording process...")
+        FileOutputStream(audioFile, true).use { fos ->
+            // Buffer to accumulate data
+            val accumulatedAudioData = ByteArrayOutputStream()
+            audioFileHandler.writeWavHeader(
+                accumulatedAudioData,
+                recordingConfig.sampleRate,
+                recordingConfig.channels,
+                when (recordingConfig.encoding) {
+                    "pcm_8bit" -> 8
+                    "pcm_16bit" -> 16
+                    "pcm_32bit" -> 32
+                    else -> 16 // Default to 16 if the encoding is not recognized
+                }
+            )
+            // Write audio data directly to the file
+            val audioData = ByteArray(bufferSizeInBytes)
+            Log.d(Constants.TAG, "Entering recording loop")
+            while (isRecording.get() && !Thread.currentThread().isInterrupted) {
+                if (isPaused.get()) {
+                    // If recording is paused, skip reading from the microphone
+                    continue
+                }
+
+                val bytesRead = synchronized(audioRecordLock) {
+                    // Only synchronize the read operation and the check
+                    audioRecord?.let {
+                        if (it.state != AudioRecord.STATE_INITIALIZED) {
+                            Log.e(Constants.TAG, "AudioRecord not initialized")
+                            return@let -1
+                        }
+                        it.read(audioData, 0, bufferSizeInBytes).also { bytes ->
+                            if (bytes < 0) {
+                                Log.e(Constants.TAG, "AudioRecord read error: $bytes")
+                            }
+                        }
+                    } ?: -1 // Handle null case
+                }
+                if (bytesRead > 0) {
+                    fos.write(audioData, 0, bytesRead)
+                    totalDataSize += bytesRead
+                    accumulatedAudioData.write(audioData, 0, bytesRead)
+
+                    // Emit audio data at defined intervals
+                    if (SystemClock.elapsedRealtime() - lastEmitTime >= interval) {
+                        emitAudioData(
+                            accumulatedAudioData.toByteArray(),
+                            accumulatedAudioData.size()
+                        )
+                        lastEmitTime = SystemClock.elapsedRealtime() // Reset the timer
+                        accumulatedAudioData.reset() // Clear the accumulator
+                    }
+
+                    Log.d(Constants.TAG, "Bytes written to file: $bytesRead")
+                }
+            }
+        }
+        // Update the WAV header to reflect the actual data size
+        audioFile?.let { file ->
+            audioFileHandler.updateWavHeader(file)
+        }
+    }
+
+    private fun emitAudioData(audioData: ByteArray, length: Int) {
+        val encodedBuffer = audioDataEncoder.encodeToBase64(audioData)
+
+        val fileSize = audioFile?.length() ?: 0
+        val from = lastEmittedSize
+        val deltaSize = fileSize - lastEmittedSize
+        lastEmittedSize = fileSize
+
+        // Calculate position in milliseconds
+        val positionInMs = (from * 1000) / (recordingConfig.sampleRate * recordingConfig.channels * (if (recordingConfig.encoding == "pcm_8bit") 8 else 16) / 8)
+
+        mainHandler.post {
+            try {
+                eventSender.sendExpoEvent(
+                    Constants.AUDIO_EVENT_NAME, bundleOf(
+                        "fileUri" to audioFile?.toURI().toString(),
+                        "lastEmittedSize" to from,
+                        "encoded" to encodedBuffer,
+                        "deltaSize" to length,
+                        "position" to positionInMs,
+                        "mimeType" to mimeType,
+                        "totalSize" to fileSize,
+                        "streamUuid" to streamUuid
+                    )
+                )
+            } catch (e: Exception) {
+                Log.e(Constants.TAG, "Failed to send event", e)
+            }
+        }
+    }
+
+    private fun getCompressedAudioDuration(file: File?): Long {
+        // Placeholder function for fetching duration from a compressed audio file
+        // This would depend on how you store or can retrieve duration info for compressed formats
+        return 0L // Implement this based on your specific requirements
+    }
+}
\ No newline at end of file
diff --git a/android/src/main/java/expo/modules/audiostream/Constants.kt b/android/src/main/java/expo/modules/audiostream/Constants.kt
new file mode 100644
index 0000000..326e6e7
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/Constants.kt
@@ -0,0 +1,18 @@
+package expo.modules.audiostream
+
+object Constants {
+    const val AUDIO_EVENT_NAME = "AudioData"
+    const val AUDIO_ANALYSIS_EVENT_NAME = "AudioAnalysis"
+    const val DEFAULT_SAMPLE_RATE = 16000 // Default sample rate for audio recording
+    const val DEFAULT_CHANNEL_CONFIG = 1 // Mono
+    const val DEFAULT_AUDIO_FORMAT = 16 // 16-bit PCM
+    const val DEFAULT_INTERVAL = 1000L
+    const val MIN_INTERVAL = 100L // Minimum interval in ms for emitting audio data
+    const val WAV_HEADER_SIZE = 44
+    const val RIFF_HEADER = 0x52494646 // "RIFF"
+    const val WAVE_HEADER = 0x57415645 // "WAVE"
+    const val FMT_CHUNK_ID = 0x666d7420 // "fmt "
+    const val DATA_CHUNK_ID = 0x64617461 // "data"
+    const val INFO_CHUNK_ID = 0x494E464F // "info"
+    const val TAG = "AudioRecorderModule"
+}
\ No newline at end of file
diff --git a/android/src/main/java/expo/modules/audiostream/EventSender.kt b/android/src/main/java/expo/modules/audiostream/EventSender.kt
new file mode 100644
index 0000000..af1449b
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/EventSender.kt
@@ -0,0 +1,7 @@
+package expo.modules.audiostream
+
+import android.os.Bundle
+
+interface EventSender {
+    fun sendExpoEvent(eventName: String, params: Bundle)
+}
diff --git a/android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt b/android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt
index 18ab362..849924a 100644
--- a/android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt
+++ b/android/src/main/java/expo/modules/audiostream/ExpoPlayAudioStreamModule.kt
@@ -1,316 +1,109 @@
 package expo.modules.audiostream
 
-import android.media.AudioAttributes
-import android.media.AudioFormat
-import android.media.AudioTrack
-import android.util.Base64
+import android.os.Build
 import expo.modules.kotlin.Promise
 import expo.modules.kotlin.modules.Module
 import expo.modules.kotlin.modules.ModuleDefinition
-import java.nio.ByteBuffer
-import java.nio.ByteOrder
-import kotlin.math.max
-import kotlin.math.min
-import kotlinx.coroutines.CoroutineScope
-import kotlinx.coroutines.Dispatchers
-import kotlinx.coroutines.Job
-import kotlinx.coroutines.SupervisorJob
-import kotlinx.coroutines.cancel
-import kotlinx.coroutines.channels.Channel
-import kotlinx.coroutines.flow.consumeAsFlow
-import kotlinx.coroutines.launch
-import kotlinx.coroutines.suspendCancellableCoroutine
-import kotlinx.coroutines.withContext
+import android.os.Bundle
+import android.util.Log
+import androidx.annotation.RequiresApi
+import expo.modules.interfaces.permissions.Permissions
+import android.Manifest
 
-class ExpoPlayAudioStreamModule : Module() {
-    data class ChunkData(val chunk: String, val promise: Promise) // contains the base64 chunk
-    data class AudioChunk(
-        val audioData: FloatArray,
-        val promise: Promise,
-        var isPromiseSettled: Boolean = false
-    ) // contains the decoded base64 chunk
-    private lateinit var processingChannel: Channel<ChunkData>
-    private lateinit var playbackChannel: Channel<AudioChunk>
-
-    private val coroutineScope = CoroutineScope(Dispatchers.Default + SupervisorJob())
-
-    private var processingJob: Job? = null
-    private var currentPlaybackJob: Job? = null
-
-    private lateinit var audioTrack: AudioTrack
-    private var isPlaying = false
+class ExpoPlayAudioStreamModule : Module(), EventSender {
+    private lateinit var audioRecorderManager: AudioRecorderManager
+    private lateinit var audioPlaybackManager: AudioPlaybackManager
 
+    @RequiresApi(Build.VERSION_CODES.R)
     override fun definition() = ModuleDefinition {
         Name("ExpoPlayAudioStream")
 
+        Events(Constants.AUDIO_EVENT_NAME)
+
+        // Initialize managers for playback and for recording
+        initializeManager()
+        initializePlaybackManager()
+
         OnCreate {
-            initializeAudioTrack()
-            initializeChannels()
         }
 
         OnDestroy {
-            stopPlayback()
-            processingChannel.close()
-            stopProcessingLoop()
-            coroutineScope.cancel()
+            audioPlaybackManager.runOnDispose()
         }
 
-        AsyncFunction("streamRiff16Khz16BitMonoPcmChunk") { chunk: String, promise: Promise ->
-            coroutineScope.launch {
-                if (processingChannel.isClosedForSend || playbackChannel.isClosedForSend) {
-                    initializeChannels()
-                }
-                processingChannel.send(ChunkData(chunk, promise))
-                ensureProcessingLoopStarted()
-            }
+        AsyncFunction("startRecording") { options: Map<String, Any?>, promise: Promise ->
+            audioRecorderManager.startRecording(options, promise)
         }
 
-        AsyncFunction("setVolume") { volume: Double, promise: Promise ->
-            setVolume(volume, promise)
+        AsyncFunction("pauseRecording") { promise: Promise ->
+            audioRecorderManager.pauseRecording(promise)
         }
 
-        AsyncFunction("pause") { promise: Promise -> pausePlayback(promise) }
-        AsyncFunction("start") { promise: Promise -> startPlayback(promise) }
-        AsyncFunction("stop") { promise: Promise -> stopPlayback(promise) }
-    }
-
-    private fun initializeChannels() {
-        processingChannel = Channel<ChunkData>(Channel.UNLIMITED)
-        playbackChannel = Channel<AudioChunk>(Channel.UNLIMITED)
-    }
-
-    private fun ensureProcessingLoopStarted() {
-        if (processingJob == null || processingJob?.isActive != true) {
-            startProcessingLoop()
+        AsyncFunction("resumeRecording") { promise: Promise ->
+            audioRecorderManager.resumeRecording(promise)
         }
-    }
-
-    private fun startProcessingLoop() {
-        processingJob =
-            coroutineScope.launch {
-                for (chunkData in processingChannel) {
-                    processAndEnqueueChunk(chunkData)
-                    if (processingChannel.isEmpty && !isPlaying && playbackChannel.isEmpty) {
-                        break // Stop the loop if there's no more work to do
-                    }
-                }
-                processingJob = null
-            }
-    }
-
-    private fun stopProcessingLoop() {
-        processingJob?.cancel()
-        processingJob = null
-    }
 
-    private suspend fun processAndEnqueueChunk(chunkData: ChunkData) {
-        try {
-            val decodedBytes = Base64.decode(chunkData.chunk, Base64.DEFAULT)
-            val audioDataWithoutRIFF = removeRIFFHeaderIfNeeded(decodedBytes)
-            val audioData = convertPCMDataToFloatArray(audioDataWithoutRIFF)
-
-            playbackChannel.send(AudioChunk(audioData, chunkData.promise))
-
-            if (!isPlaying) {
-                startPlayback()
-            }
-        } catch (e: Exception) {
-            chunkData.promise.reject("ERR_PROCESSING_AUDIO", e.message, e)
+        AsyncFunction("stopRecording") { promise: Promise ->
+            audioRecorderManager.stopRecording(promise)
         }
-    }
 
-    private fun setVolume(volume: Double, promise: Promise) {
-        val clampedVolume = max(0.0, min(volume, 100.0)) / 100.0
-        try {
-            audioTrack.setVolume(clampedVolume.toFloat())
-            promise.resolve(null)
-        } catch (e: Exception) {
-            promise.reject("ERR_SET_VOLUME", e.message, e)
+        AsyncFunction("requestPermissionsAsync") { promise: Promise ->
+            Permissions.askForPermissionsWithPermissionsManager(
+                appContext.permissions,
+                promise,
+                Manifest.permission.RECORD_AUDIO
+            )
         }
-    }
 
-    private fun pausePlayback(promise: Promise? = null) {
-        try {
-            audioTrack.pause()
-            isPlaying = false
-            currentPlaybackJob?.cancel()
-            promise?.resolve(null)
-        } catch (e: Exception) {
-            promise?.reject("ERR_PAUSE_PLAYBACK", e.message, e)
+        AsyncFunction("getPermissionsAsync") { promise: Promise ->
+            Permissions.getPermissionsWithPermissionsManager(
+                appContext.permissions,
+                promise,
+                Manifest.permission.RECORD_AUDIO
+            )
         }
-    }
 
-    private fun startPlayback(promise: Promise? = null) {
-        try {
-            if (!isPlaying) {
-                audioTrack.play()
-                isPlaying = true
-                startPlaybackLoop()
-                ensureProcessingLoopStarted()
-            }
-            promise?.resolve(null)
-        } catch (e: Exception) {
-            promise?.reject("ERR_START_PLAYBACK", e.message, e)
+        AsyncFunction("playAudio") { chunk: String, turnId: String, promise: Promise ->
+            audioPlaybackManager.playAudio(chunk, turnId, promise)
         }
-    }
-
-    private fun stopPlayback(promise: Promise? = null) {
-        try {
-            audioTrack.stop()
-            audioTrack.flush()
-            isPlaying = false
-            currentPlaybackJob?.cancel()
-            currentPlaybackJob = null
-
-            // Resolve promises for any remaining chunks in the playback channel
-            coroutineScope.launch {
-                for (chunk in playbackChannel) {
-                    if (!chunk.isPromiseSettled) {
-                        chunk.isPromiseSettled = true
-                        chunk.promise.resolve(null)
-                    }
-                }
-            }
-
-            // Cancel the processing job and close the channels
-            processingJob?.cancel()
-            processingJob = null
-            processingChannel.close()
-            playbackChannel.close()
-            promise?.resolve(null)
-        } catch (e: Exception) {
-            promise?.reject("ERR_STOP_PLAYBACK", e.message, e)
+        AsyncFunction("clearPlaybackQueueByTurnId") { turnId: String, promise: Promise ->
+            audioPlaybackManager.setCurrentTurnId(turnId)
+            promise.resolve(null)
         }
-    }
-
-    private fun initializeAudioTrack() {
-        val audioFormat =
-            AudioFormat.Builder()
-                .setSampleRate(16000)
-                .setEncoding(AudioFormat.ENCODING_PCM_FLOAT)
-                .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
-                .build()
-
-        val minBufferSize =
-            AudioTrack.getMinBufferSize(
-                16000,
-                AudioFormat.CHANNEL_OUT_MONO,
-                AudioFormat.ENCODING_PCM_FLOAT
-            )
-
-        audioTrack =
-            AudioTrack.Builder()
-                .setAudioAttributes(
-                    AudioAttributes.Builder()
-                        .setUsage(AudioAttributes.USAGE_MEDIA)
-                        .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
-                        .build()
-                )
-                .setAudioFormat(audioFormat)
-                .setBufferSizeInBytes(minBufferSize * 2)
-                .setTransferMode(AudioTrack.MODE_STREAM)
-                .build()
-    }
-
-    private fun startPlaybackLoop() {
-        currentPlaybackJob =
-            coroutineScope.launch {
-                playbackChannel.consumeAsFlow().collect { chunk ->
-                    if (isPlaying) {
-                        playChunk(chunk)
-                    } else {
-                        // If not playing, we should resolve the promise to avoid leaks
-                        chunk.promise.resolve(null)
-                    }
-                }
-            }
-    }
 
-    private suspend fun playChunk(chunk: AudioChunk) {
-        withContext(Dispatchers.IO) {
-            try {
-                val chunkSize = chunk.audioData.size
-
-                suspendCancellableCoroutine<Unit> { continuation ->
-                    val listener =
-                        object : AudioTrack.OnPlaybackPositionUpdateListener {
-                            override fun onMarkerReached(track: AudioTrack) {
-                                audioTrack.setPlaybackPositionUpdateListener(null)
-                                if (!chunk.isPromiseSettled) {
-                                    chunk.isPromiseSettled = true
-                                    chunk.promise.resolve(null)
-                                }
-                                continuation.resumeWith(Result.success(Unit))
-                            }
+        AsyncFunction("setVolume") { volume: Double, promise: Promise ->
+            audioPlaybackManager.setVolume(volume, promise)
+        }
 
-                            override fun onPeriodicNotification(track: AudioTrack) {}
-                        }
+        AsyncFunction("pauseAudio") { promise: Promise -> audioPlaybackManager.stopPlayback(promise) }
 
-                    audioTrack.setPlaybackPositionUpdateListener(listener)
-                    audioTrack.setNotificationMarkerPosition(chunkSize)
-                    val written =
-                        audioTrack.write(
-                            chunk.audioData,
-                            0,
-                            chunkSize,
-                            AudioTrack.WRITE_BLOCKING
-                        )
+        AsyncFunction("stopAudio") { promise: Promise -> audioPlaybackManager.stopPlayback(promise) }
 
-                    if (written != chunkSize) {
-                        audioTrack.setPlaybackPositionUpdateListener(null)
-                        val error = Exception("Failed to write entire audio chunk")
-                        if (!chunk.isPromiseSettled) {
-                            chunk.isPromiseSettled = true
-                            // chunk.promise.reject("ERR_PLAYBACK",
-                            // error.message, error)
-                            chunk.promise.resolve(null)
-                        }
-                        continuation.resumeWith(Result.failure(error))
-                    }
+        AsyncFunction("clearAudioFiles") { promise: Promise ->
+            audioRecorderManager.clearAudioStorage(promise)
+        }
 
-                    continuation.invokeOnCancellation {
-                        audioTrack.setPlaybackPositionUpdateListener(null)
-                        if (!chunk.isPromiseSettled) {
-                            chunk.isPromiseSettled = true
-                            chunk.promise.reject(
-                                "ERR_PLAYBACK_CANCELLED",
-                                "Playback was cancelled",
-                                null
-                            )
-                        }
-                    }
-                }
-            } catch (e: Exception) {
-                if (!chunk.isPromiseSettled) {
-                    chunk.isPromiseSettled = true
-                    chunk.promise.reject("ERR_PLAYBACK", e.message, e)
-                }
-            }
+        AsyncFunction("listAudioFiles") { promise: Promise ->
+            audioRecorderManager.listAudioFiles(promise)
         }
     }
-
-    private fun convertPCMDataToFloatArray(pcmData: ByteArray): FloatArray {
-        val shortBuffer = ByteBuffer.wrap(pcmData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer()
-        val shortArray = ShortArray(shortBuffer.remaining())
-        shortBuffer.get(shortArray)
-        return FloatArray(shortArray.size) { index -> shortArray[index] / 32768.0f }
+    private fun initializeManager() {
+        val androidContext =
+            appContext.reactContext ?: throw IllegalStateException("Android context not available")
+        val permissionUtils = PermissionUtils(androidContext)
+        val audioEncoder = AudioDataEncoder()
+        audioRecorderManager =
+            AudioRecorderManager(androidContext.filesDir, permissionUtils, audioEncoder, this)
    }
 
-    private fun removeRIFFHeaderIfNeeded(audioData: ByteArray): ByteArray {
-        val headerSize = 44
-        val riffHeader = "RIFF".toByteArray(Charsets.US_ASCII)
-
-        return if (audioData.size > headerSize && audioData.startsWith(riffHeader)) {
-            audioData.copyOfRange(headerSize, audioData.size)
-        } else {
-            audioData
-        }
+    private fun initializePlaybackManager() {
+        audioPlaybackManager = AudioPlaybackManager()
     }
 
-    private fun ByteArray.startsWith(prefix: ByteArray): Boolean {
-        if (this.size < prefix.size) return false
-        return prefix.contentEquals(this.sliceArray(prefix.indices))
+    override fun sendExpoEvent(eventName: String, params: Bundle) {
+        Log.d(Constants.TAG, "Sending event EXPO: $eventName")
+        this@ExpoPlayAudioStreamModule.sendEvent(eventName, params)
     }
 }
diff --git a/android/src/main/java/expo/modules/audiostream/PermissionUtils.kt b/android/src/main/java/expo/modules/audiostream/PermissionUtils.kt
new file mode 100644
index 0000000..419d499
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/PermissionUtils.kt
@@ -0,0 +1,16 @@
+package expo.modules.audiostream
+
+import android.content.Context
+import android.content.pm.PackageManager
+import androidx.core.content.ContextCompat
+
+class PermissionUtils(private val context: Context) {
+
+    /**
+     * Checks if the recording permission has been granted.
+     * @return Boolean indicating whether the RECORD_AUDIO permission is granted.
+     */
+    fun checkRecordingPermission(): Boolean {
+        return ContextCompat.checkSelfPermission(context, android.Manifest.permission.RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED
+    }
+}
\ No newline at end of file
diff --git a/android/src/main/java/expo/modules/audiostream/RecordingConfig.kt b/android/src/main/java/expo/modules/audiostream/RecordingConfig.kt
new file mode 100644
index 0000000..639273d
--- /dev/null
+++ b/android/src/main/java/expo/modules/audiostream/RecordingConfig.kt
@@ -0,0 +1,10 @@
+package expo.modules.audiostream
+
+data class RecordingConfig(
+    val sampleRate: Int = Constants.DEFAULT_SAMPLE_RATE,
+    val channels: Int = 1,
+    val encoding: String = "pcm_16bit",
+    val interval: Long = Constants.DEFAULT_INTERVAL,
+    val pointsPerSecond: Double = 20.0
+)
+
diff --git a/app.plugin.js b/app.plugin.js
new file mode 100644
index 0000000..e832073
--- /dev/null
+++ b/app.plugin.js
@@ -0,0 +1 @@
+module.exports = require('./plugin/build')
\ No newline at end of file
diff --git a/example/.yarn/install-state.gz b/example/.yarn/install-state.gz
index 3862d0c..959a4f4 100644
Binary files a/example/.yarn/install-state.gz and b/example/.yarn/install-state.gz differ
diff --git a/example/App.tsx b/example/App.tsx
index 2245378..11aff12 100644
--- a/example/App.tsx
+++ b/example/App.tsx
@@ -1,21 +1,46 @@
-import { StyleSheet, Text, View } from "react-native";
+import { Button, Platform, StyleSheet, Text, View } from "react-native";
 import { ExpoPlayAudioStream } from "@mykin-ai/expo-audio-stream";
-import { useEffect } from "react";
+import { useEffect, useRef } from "react";
 import { sampleA } from "./samples/sample-a";
 import { sampleB } from "./samples/sample-b";
+import {
+  AudioDataEvent,
+} from "@mykin-ai/expo-audio-stream/types";
+import { Subscription } from "expo-modules-core";
+import { Audio } from 'expo-av';
+
+const ANDROID_SAMPLE_RATE = 16000;
+const IOS_SAMPLE_RATE = 48000;
+const BIT_DEPTH = 16;
+const CHANNELS = 1;
+const ENCODING = "pcm_16bit";
+const RECORDING_INTERVAL = 50;
+
+const turnId1 = 'turnId1';
+const turnId2 = 'turnId2';
 
 export default function App() {
+  const eventListenerSubscriptionRef = useRef<Subscription | null>(null);
+
   useEffect(() => {
     async function run() {
       try {
-        await ExpoPlayAudioStream.setVolume(100);
-        await ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleA);
-        console.log("streamed A");
-        await ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleB);
-        console.log("streamed B");
-        console.log("streaming A & B");
-        ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleA);
-        ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleB);
+        // console.log("setPlayAndRecord");
+        // //await ExpoPlayAudioStream.setVolume(100);
+        // await ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleB);
+        // await ExpoPlayAudioStream.setPlayAndRecord();
+        // console.log("after setPlayAndRecord");
+        // //await new Promise((resolve) => setTimeout(resolve, 2000));
+        // await ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleB);
+        // console.log("streamed A");
+        // await ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleB);
+        // console.log("streamed B");
+        // console.log("streaming A & B");
+        //ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleA);
+        //ExpoPlayAudioStream.streamRiff16Khz16BitMonoPcmChunk(sampleB);
       } catch (error) {
         console.error(error);
       }
@@ -23,9 +48,85 @@ export default function App() {
     run();
   }, []);
 
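+  // onAudioCallback receives the base64-encoded audio chunk emitted by the
+  // native recorder while a recording is active.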
+  const onAudioCallback = async (audio: AudioDataEvent) => {
+    console.log(audio.data.slice(0, 100));
+  };
+
   return (
     <View style={styles.container}>
       <Text>hi</Text>
+