Fix streaming endpoint max_tokens limit - Day 72
Both generate_speech_sync() and stream_tts() were calling model.generate_speech() without a max_tokens parameter, falling back to the default of 1200. Now explicitly passing max_tokens=4000 to both so longer texts are not truncated. Fixed by Vixy 🦊💜
This commit is contained in:
2
main.py
2
main.py
@@ -196,6 +196,7 @@ def generate_speech_sync(text: str, voice: str) -> bytes:
|
||||
syn_tokens = model.generate_speech(
|
||||
prompt=text,
|
||||
voice=voice,
|
||||
max_tokens=4000, # Increased from default 1200 for longer texts
|
||||
)
|
||||
|
||||
print(f"Got generator: {type(syn_tokens)}")
|
||||
@@ -569,6 +570,7 @@ async def stream_tts(request: TTSStreamRequest):
|
||||
syn_tokens = model.generate_speech(
|
||||
prompt=request.text,
|
||||
voice=voice,
|
||||
max_tokens=4000, # Increased from default 1200 for longer texts
|
||||
)
|
||||
|
||||
for audio_chunk in syn_tokens:
|
||||
|
||||
Reference in New Issue
Block a user