class HttpxBackend(OpenAIBackendMixin):
    SERIALIZER = HttpxSerializer
    DEFAULT_SRC = "https://api.reka.ai/v1/chat"
    DEFAULT_MODEL = "reka-core-20240501"

    def __init__(
        self, api_key=None, endpoint: str | None = None, serializer: Serializer | None = None, **kwargs
    ) -> None:
        """Initializes the HttpxBackend. Defaults to the API key from the environment and the default endpoint.

        Args:
            api_key (Optional[str]): The API key for the remote service.
            endpoint (str): The base URL for the chat completions API.
            serializer (Optional[Serializer]): The serializer used to serialize messages.
            **kwargs: Additional default arguments forwarded with every request.
        """
        self.base_url = endpoint or self.DEFAULT_SRC
        self.api_key = api_key or os.getenv("MBODI_API_KEY")
        self.headers = {"X-Api-Key": self.api_key, "Content-Type": "application/json"}
        self.serialized = serializer or self.SERIALIZER
        self.kwargs = kwargs
    def predict(self, messages: List[Message], model: str | None = None, **kwargs) -> str:
        """Sends the serialized messages to the completion endpoint and returns the full response text."""
        model = model or self.DEFAULT_MODEL
        # Pull the timeout out of kwargs so it is not sent as part of the request body.
        timeout = kwargs.pop("timeout", 60)
        data = {
            "messages": [self.serialized(msg).serialize() for msg in messages],
            "model": model,
            "stream": False,
            **kwargs,
        }
        with httpx.Client() as client:
            response = client.post(self.base_url, headers=self.headers, json=data, timeout=timeout)
            if response.status_code == 200:
                return self.serialized.extract_response(response.json())
            response.raise_for_status()
            return response.text
    def stream(self, messages: List[Message], model: str | None = None, **kwargs) -> Generator[str, None, None]:
        """Streams completion chunks synchronously."""
        yield from self._stream_completion(messages, model, **kwargs)

    async def astream(self, messages: List[Message], model: str | None = None, **kwargs) -> AsyncGenerator[str, None]:
        """Streams completion chunks asynchronously."""
        async for chunk in self._astream_completion(messages, model, **kwargs):
            yield chunk
    def _stream_completion(
        self,
        messages: List[Message],
        model: str | None = None,
        **kwargs,
    ) -> Generator[str, None, None]:
        model = model or self.DEFAULT_MODEL
        # Pull the timeout out of kwargs so it is not sent as part of the request body.
        timeout = kwargs.pop("timeout", 60)
        data = {
            "messages": [self.serialized(msg).serialize() for msg in messages],
            "model": model,
            "stream": True,
            **kwargs,
        }
        with (
            httpx.Client(follow_redirects=True) as client,
            client.stream("POST", self.base_url, headers=self.headers, json=data, timeout=timeout) as stream,
        ):
            for chunk in stream.iter_text():
                yield self.serialized.extract_stream(chunk)
    async def _acreate_completion(self, messages: List[Message], model: str | None = None, **kwargs) -> str:
        model = model or self.DEFAULT_MODEL
        # Pull the timeout out of kwargs so it is not sent as part of the request body.
        timeout = kwargs.pop("timeout", 60)
        data = {
            "messages": [self.serialized(msg).serialize() for msg in messages],
            "model": model,
            "stream": False,
            **kwargs,
        }
        async with httpx.AsyncClient() as client:
            response = await client.post(self.base_url, headers=self.headers, json=data, timeout=timeout)
            if response.status_code == 200:
                return self.serialized.extract_response(response.json())
            response.raise_for_status()
            return response.text
    async def _astream_completion(
        self, messages: List[Message], model: str | None = None, **kwargs
    ) -> AsyncGenerator[str, None]:
        model = model or self.DEFAULT_MODEL
        # Pull the timeout out of kwargs so it is not sent as part of the request body.
        timeout = kwargs.pop("timeout", 60)
        data = {
            "messages": [self.serialized(msg).serialize() for msg in messages],
            "model": model,
            "stream": True,
            **kwargs,
        }
        async with httpx.AsyncClient(follow_redirects=True) as client:
            async with client.stream(
                "POST", self.base_url, headers=self.headers, json=data, timeout=timeout
            ) as response:
                async for chunk in response.aiter_text():
                    yield self.serialized.extract_stream(chunk)
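
# A minimal usage sketch (an illustrative addition, not part of the original class):
# it assumes MBODI_API_KEY is set in the environment and that Message accepts
# role/content keyword arguments.
if __name__ == "__main__":
    backend = HttpxBackend()
    prompt = [Message(role="user", content="Briefly introduce yourself.")]
    # Blocking call: returns the full completion as a single string.
    print(backend.predict(prompt))
    # Streaming call: prints chunks as they arrive.
    for chunk in backend.stream(prompt):
        print(chunk, end="", flush=True)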