musikr.pipeline: parallelize cache reads
Parent: 9f68f59504
Commit: 6bad9e719d
2 changed files with 31 additions and 15 deletions
File 1 of 2:

@@ -15,7 +15,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <https://www.gnu.org/licenses/>.
  */
 
 package org.oxycblt.musikr.pipeline
 
 import android.content.Context
@@ -23,6 +23,8 @@ import kotlinx.coroutines.Dispatchers
 import kotlinx.coroutines.channels.Channel
 import kotlinx.coroutines.flow.Flow
 import kotlinx.coroutines.flow.buffer
+import kotlinx.coroutines.flow.flatMapMerge
+import kotlinx.coroutines.flow.flattenMerge
 import kotlinx.coroutines.flow.flowOn
 import kotlinx.coroutines.flow.map
 import kotlinx.coroutines.flow.mapNotNull
@@ -51,7 +53,8 @@ internal interface ExtractStep {
                 MetadataExtractor.from(context),
                 TagParser.new(),
                 storage.cache,
-                storage.storedCovers)
+                storage.storedCovers
+            )
     }
 }
 
@@ -73,10 +76,17 @@ private class ExtractStepImpl(
         val audioNodes = filterFlow.right
         val playlistNodes = filterFlow.left.map { ExtractedMusic.Playlist(it) }
 
+        val distributedAudioNodes = audioNodes.distribute(8)
         val cacheResults =
-            audioNodes
-                .map { wrap(it) { file -> cache.read(file, storedCovers)} }
-                .flowOn(Dispatchers.IO)
+            distributedAudioNodes.flows
+                .map { flow ->
+                    flow.map {
+                        wrap(it) { file -> cache.read(file, storedCovers) }
+                    }
+                        .flowOn(Dispatchers.IO)
+                        .buffer(Channel.UNLIMITED)
+                }
+                .flattenMerge()
                 .buffer(Channel.UNLIMITED)
         val cacheFlow =
             cacheResults.divert {
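This hunk is the core of the change: instead of one flow reading cache entries sequentially on Dispatchers.IO, the audio nodes are round-robined across eight channel-backed flows, each flow does its reads on Dispatchers.IO, and flattenMerge() fans the results back into a single flow. A minimal standalone sketch of that fan-out/fan-in shape (not musikr code; slowRead, the value range, and the names are placeholders):

    import kotlinx.coroutines.Dispatchers
    import kotlinx.coroutines.channels.Channel
    import kotlinx.coroutines.delay
    import kotlinx.coroutines.flow.*
    import kotlinx.coroutines.runBlocking

    // Stand-in for an expensive per-item read (e.g. a cache lookup).
    private suspend fun slowRead(id: Int): String {
        delay(50)
        return "item-$id"
    }

    @OptIn(kotlinx.coroutines.FlowPreview::class)
    fun main() = runBlocking {
        val n = 8
        // Fan out: round-robin upstream values into n unlimited channels.
        val channels = List(n) { Channel<Int>(Channel.UNLIMITED) }
        val manager =
            flow<Nothing> {
                (1..32).asFlow().withIndex().collect { (i, v) -> channels[i % n].send(v) }
                channels.forEach { it.close() }
            }
        // Fan in: each channel becomes its own flow doing work on Dispatchers.IO,
        // and flattenMerge() runs those n flows concurrently.
        val results =
            channels
                .asFlow()
                .map { ch ->
                    ch.receiveAsFlow().map { slowRead(it) }.flowOn(Dispatchers.IO).buffer(Channel.UNLIMITED)
                }
                .flattenMerge()
        // The manager flow must run alongside the results or the channels stay empty.
        merge(manager, results.map { println(it) }).collect()
    }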
@@ -104,13 +114,13 @@ private class ExtractStepImpl(
 
         val metadata =
             fds.mapNotNull { fileWith ->
                     wrap(fileWith.file) { _ ->
                         metadataExtractor
                             .extract(fileWith.with)
                             ?.let { FileWith(fileWith.file, it) }
                             .also { withContext(Dispatchers.IO) { fileWith.with.close() } }
-                }
                     }
+                }
                 .flowOn(Dispatchers.IO)
             // Covers are pretty big, so cap the amount of parsed metadata in-memory to at most
             // 8 to minimize GCs.
@@ -135,7 +145,13 @@ private class ExtractStepImpl(
                 .flowOn(Dispatchers.IO)
 
         return merge(
-            filterFlow.manager, cacheFlow.manager, cachedSongs, writtenSongs, playlistNodes)
+            filterFlow.manager,
+            distributedAudioNodes.manager,
+            cacheFlow.manager,
+            cachedSongs,
+            writtenSongs,
+            playlistNodes
+        )
     }
 
 private data class FileWith<T>(val file: DeviceFile, val with: T)
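Note the extra distributedAudioNodes.manager argument in the final merge(): distribute() only pumps values into its channels while its manager flow is being collected (the doc comment in the second file says the split flows "will simply block" without it), so dropping it would leave every split flow empty and the step would hang. A hedged way to see the same requirement outside the pipeline, assuming the distribute()/DistributedFlow definitions from the second file below are accessible; everything else here is made up for illustration:

    import kotlinx.coroutines.flow.*
    import kotlinx.coroutines.runBlocking
    import kotlinx.coroutines.withTimeoutOrNull

    @OptIn(kotlinx.coroutines.FlowPreview::class)
    fun main() = runBlocking {
        // Forgetting the manager: nothing is ever sent into the channels, so this times out.
        val broken = (1..10).asFlow().distribute(2)
        println(withTimeoutOrNull(200) { broken.flows.flattenMerge().toList() }) // null

        // Merging the manager in, as ExtractStepImpl.process() does, completes normally.
        val ok = (1..10).asFlow().distribute(2)
        println(merge(ok.manager, ok.flows.flattenMerge()).toList()) // all ten values (order may vary)
    }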
File 2 of 2:

@@ -59,7 +59,7 @@ internal inline fun <T, L, R> Flow<T>.divert(
     return DivertedFlow(managedFlow, leftChannel.receiveAsFlow(), rightChannel.receiveAsFlow())
 }
 
-internal class DistributedFlow<T>(val manager: Flow<Nothing>, val flows: Array<Flow<T>>)
+internal class DistributedFlow<T>(val manager: Flow<Nothing>, val flows: Flow<Flow<T>>)
 
 /**
  * Equally "distributes" the values of some flow across n new flows.
@@ -68,7 +68,7 @@ internal class DistributedFlow<T>(val manager: Flow<Nothing>, val flows: Array<Flow<T>>)
  * order to function. Without this, all of the newly split flows will simply block.
  */
 internal fun <T> Flow<T>.distribute(n: Int): DistributedFlow<T> {
-    val posChannels = Array(n) { Channel<T>(Channel.UNLIMITED) }
+    val posChannels = List(n) { Channel<T>(Channel.UNLIMITED) }
     val managerFlow =
         flow<Nothing> {
             withIndex().collect {
@@ -79,6 +79,6 @@ internal fun <T> Flow<T>.distribute(n: Int): DistributedFlow<T> {
                 channel.close()
             }
         }
-    val hotFlows = posChannels.map { it.receiveAsFlow() }.toTypedArray()
+    val hotFlows = posChannels.asFlow().map { it.receiveAsFlow() }
     return DistributedFlow(managerFlow, hotFlows)
 }
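The signature change from Array<Flow<T>> to Flow<Flow<T>> (and Array(n)/toTypedArray() to List(n)/asFlow()) is what lets the caller feed the split flows straight into flattenMerge(). A hedged sketch of the difference on the caller's side, assuming the DistributedFlow/distribute() definitions above and the usual kotlinx.coroutines.flow imports; fanIn() is an illustrative name, not musikr API:

    @OptIn(kotlinx.coroutines.FlowPreview::class)
    internal fun <T> fanIn(distributed: DistributedFlow<T>): Flow<T> =
        // Old shape: the Array had to be lifted back into a flow first, e.g.
        //     merge(distributed.manager, distributed.flows.asFlow().flattenMerge())
        // New shape: flows is already a Flow<Flow<T>>, so it composes directly.
        merge(distributed.manager, distributed.flows.flattenMerge())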