Skip to content

feat(chat): enhance flow cancel capability #333

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits on Apr 29, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,9 @@
### Added
- **vector-stores**: add vector stores APIs (#324)

### Fixed
- **chat**: enhance flow cancel capability (#333)

## 3.7.2
> Published 28 Apr 2024

Expand Down
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
package com.aallam.openai.client.internal.extension

import com.aallam.openai.client.internal.JsonLenient
import io.ktor.client.call.body
import io.ktor.client.statement.HttpResponse
import io.ktor.utils.io.ByteReadChannel
import io.ktor.utils.io.readUTF8Line
import io.ktor.client.call.*
import io.ktor.client.statement.*
import io.ktor.utils.io.*
import kotlinx.coroutines.currentCoroutineContext
import kotlinx.coroutines.flow.FlowCollector
import kotlinx.serialization.decodeFromString
import kotlinx.coroutines.isActive

private const val STREAM_PREFIX = "data:"
private const val STREAM_END_TOKEN = "$STREAM_PREFIX [DONE]"
Expand All @@ -16,13 +16,17 @@ private const val STREAM_END_TOKEN = "$STREAM_PREFIX [DONE]"
*/
/**
 * Reads a server-sent-events style stream from [response] and emits each decoded
 * payload of type [T] to this [FlowCollector].
 *
 * Lines beginning with [STREAM_PREFIX] are JSON-decoded (leniently) and emitted;
 * a [STREAM_END_TOKEN] line terminates the stream. The read loop also exits as
 * soon as the collecting coroutine is no longer active, so cancelling the flow
 * stops network reads promptly instead of blocking on the channel.
 */
internal suspend inline fun <reified T> FlowCollector<T>.streamEventsFrom(response: HttpResponse) {
    val channel: ByteReadChannel = response.body()
    try {
        // Cooperative cancellation: re-check the collector's context every iteration.
        while (currentCoroutineContext().isActive && !channel.isClosedForRead) {
            val line = channel.readUTF8Line() ?: continue
            val value: T = when {
                line.startsWith(STREAM_END_TOKEN) -> break
                line.startsWith(STREAM_PREFIX) -> JsonLenient.decodeFromString(line.removePrefix(STREAM_PREFIX))
                else -> continue
            }
            emit(value)
        }
    } finally {
        // Always release the channel (and its connection) — whether the loop
        // ended via [DONE], channel close, cancellation, or a decoding failure.
        channel.cancel()
    }
}
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,14 @@ package com.aallam.openai.client

import com.aallam.openai.api.chat.*
import com.aallam.openai.api.model.ModelId
import kotlinx.coroutines.flow.collect
import kotlinx.coroutines.flow.launchIn
import kotlinx.coroutines.flow.onEach
import kotlinx.coroutines.launch
import kotlinx.coroutines.test.advanceTimeBy
import kotlinx.serialization.Serializable
import kotlinx.serialization.json.Json
import kotlin.coroutines.cancellation.CancellationException
import kotlin.test.*

class TestChatCompletions : TestOpenAI() {
Expand Down Expand Up @@ -165,4 +169,38 @@ class TestChatCompletions : TestOpenAI() {
assertEquals(response.usage!!.completionTokens, logprobs.content!!.size)
assertEquals(logprobs.content!![0].topLogprobs?.size, expectedTopLogProbs)
}

@Test
fun cancellable() = test {
    // Streaming chat request whose collection we cancel mid-flight.
    val streamingRequest = chatCompletionRequest {
        model = ModelId("gpt-3.5-turbo")
        messages {
            message {
                role = ChatRole.System
                content = "You are a helpful assistant.!"
            }
            message {
                role = ChatRole.User
                content = "Who won the world series in 2020?"
            }
        }
    }

    // Collect the completion flow in a child job so it can be cancelled externally.
    // CancellationException is the expected outcome; anything else fails the test.
    val streamingJob = launch {
        try {
            openAI.chatCompletions(streamingRequest).collect()
        } catch (e: CancellationException) {
            println("Flow was cancelled as expected.")
        } catch (e: Exception) {
            fail("Flow threw an unexpected exception: ${e.message}")
        }
    }

    // Let some virtual time elapse so collection is underway before cancelling.
    advanceTimeBy(1000)

    streamingJob.cancel()
    streamingJob.join()

    assertTrue(streamingJob.isCancelled, "Job should be cancelled")
}
}