Chat Examples
Basic example showing synchronous usage of the AIME API client interface.
synchronous_example.py
import json
from aime_api_client_interface import ModelAPI


def main():
    """Run a blocking chat request against the AIME API and print the reply."""
    # Demo account credentials as published with the AIME API examples.
    model_api = ModelAPI('https://api.aime.info', 'llama3_chat', 'apiexample@aime.info', '181e35ac-7b7d-4bfe-9f12-153757ec3952')
    model_api.do_api_login()

    # Previous conversation turns; the endpoint expects them as a JSON string.
    chat_context = [
        {"role": "user", "content": "Hi! How are you?"},
        {"role": "assistant", "content": "I'm doing well, thank you! How can I help you today?"}
    ]

    params = {
        "prompt_input": "Tell me a joke",
        "chat_context": json.dumps(chat_context),
        "top_k": 40,
        "top_p": 0.9,
        "temperature": 0.8,
        "max_gen_tokens": 1000
    }

    # Blocks until the job is finished and the final result is available.
    result = model_api.do_api_request(params)
    print("Synchronous result:", result)


if __name__ == "__main__":
    main()
Example showing asynchronous usage with callbacks for progress monitoring.
async_callbacks_example.py
import asyncio
import json
from aime_api_client_interface import ModelAPI


def result_callback(result):
    """Called once with the final job result."""
    print("Result callback:", result)


def progress_callback(progress_info, progress_data):
    """Called repeatedly with intermediate progress updates."""
    print(f"Progress: {progress_info} - {progress_data}")


def progress_error_callback(error_description):
    """Called if the server reports an error while the job runs."""
    print("Error:", error_description)


async def main():
    """Run an async chat request, monitored through synchronous callbacks."""
    # Demo account credentials as published with the AIME API examples.
    model_api = ModelAPI('https://api.aime.info', 'llama3_chat', 'apiexample@aime.info', '181e35ac-7b7d-4bfe-9f12-153757ec3952')
    await model_api.do_api_login_async()

    # Previous conversation turns; the endpoint expects them as a JSON string.
    chat_context = [
        {"role": "user", "content": "Hi! How are you?"},
        {"role": "assistant", "content": "I'm doing well, thank you! How can I help you today?"}
    ]

    params = {
        "prompt_input": "What is the capital of Germany?",
        "chat_context": json.dumps(chat_context),
        "top_k": 40,
        "top_p": 0.9,
        "temperature": 0.8,
        "max_gen_tokens": 1000
    }

    result = await model_api.do_api_request_async(
        params,
        result_callback,
        progress_callback,
        progress_error_callback
    )

    print("Async with sync callbacks result:", result)
    # Release the underlying HTTP session.
    await model_api.close_session()


if __name__ == "__main__":
    asyncio.run(main())
Example showing asynchronous usage with asynchronous callbacks for progress monitoring.
async_callbacks_async_example.py
import asyncio
import json
from aime_api_client_interface import ModelAPI


async def result_callback(result):
    """Async callback invoked once with the final job result."""
    print("Async result callback:", result)


async def progress_callback(progress_info, progress_data):
    """Async callback invoked repeatedly with intermediate progress updates."""
    print(f"Async progress: {progress_info} - {progress_data}")


async def progress_error_callback(error_description):
    """Async callback invoked if the server reports an error."""
    print("Async error:", error_description)


async def main():
    """Run an async chat request, monitored through asynchronous callbacks."""
    # Demo account credentials as published with the AIME API examples.
    model_api = ModelAPI('https://api.aime.info', 'llama3_chat', 'apiexample@aime.info', '181e35ac-7b7d-4bfe-9f12-153757ec3952')
    await model_api.do_api_login_async()

    # Previous conversation turns; the endpoint expects them as a JSON string.
    chat_context = [
        {"role": "user", "content": "Hi! How are you?"},
        {"role": "assistant", "content": "I'm doing well, thank you! How can I help you today?"}
    ]

    params = {
        "prompt_input": "What is the capital of Germany?",
        "chat_context": json.dumps(chat_context),
        "top_k": 40,
        "top_p": 0.9,
        "temperature": 0.8,
        "max_gen_tokens": 1000
    }

    result = await model_api.do_api_request_async(
        params,
        result_callback,
        progress_callback,
        progress_error_callback
    )

    print("Async with async callbacks result:", result)
    # Release the underlying HTTP session.
    await model_api.close_session()


if __name__ == "__main__":
    asyncio.run(main())
Example showing asynchronous generator usage for streaming responses.
async_generator_example.py
import asyncio
import json
from aime_api_client_interface import ModelAPI


async def main():
    """Stream chat progress from the AIME API via an async generator."""
    # Demo account credentials as published with the AIME API examples.
    model_api = ModelAPI('https://api.aime.info', 'llama3_chat', 'apiexample@aime.info', '181e35ac-7b7d-4bfe-9f12-153757ec3952')
    await model_api.do_api_login_async()

    # Previous conversation turns; the endpoint expects them as a JSON string.
    chat_context = [
        {"role": "user", "content": "Hi! How are you?"},
        {"role": "assistant", "content": "I'm doing well, thank you! How can I help you today?"}
    ]

    params = {
        "prompt_input": "What is the capital of Germany?",
        "chat_context": json.dumps(chat_context),
        "top_k": 40,
        "top_p": 0.9,
        "temperature": 0.8,
        "max_gen_tokens": 1000
    }

    output_generator = model_api.get_api_request_generator(params)

    try:
        # Each yielded item is either a (info, data) pair or a bare value.
        async for progress in output_generator:
            if isinstance(progress, tuple) and len(progress) == 2:
                progress_info, progress_data = progress
                print(f"Progress: {progress_info} - {progress_data}")
            else:
                print(f"Progress: {progress}")
    except Exception as e:
        print(f"Error occurred: {e}")
    finally:
        # Always release the underlying HTTP session, even on error.
        await model_api.close_session()


if __name__ == "__main__":
    asyncio.run(main())
Image Generation Example
Example showing how to generate images using the AIME API client interface.
image_generation_example.py
import base64
from pathlib import Path
from aime_api_client_interface import do_api_request


def generate_image():
    """Request an image from the AIME API and save the result(s) next to this script.

    Returns:
        dict: the raw API response, so callers can inspect job metadata.
    """
    # Image generation parameters for the flux-dev endpoint.
    params = {
        'prompt': 'Astronaut on Mars holding a banner which states "AIME is happy to serve your model" during sunset sitting on a giant yellow rubber duck',
        'seed': -1,  # -1 lets the server pick a random seed
        'height': 1024,
        'width': 1024,
        'steps': 50,
        'guidance': 3.5,
        'image2image_strength': 0.8,
        'provide_progress_images': 'none',
        'wait_for_result': True  # block until the final images are ready
    }

    # Call the AIME API synchronously (demo credentials from the examples).
    final = do_api_request(
        'https://api.aime.info',
        'flux-dev',
        params,
        user='apiexample@aime.info',
        key='181e35ac-7b7d-4bfe-9f12-153757ec3952'
    )

    # Images may arrive at the top level or nested under 'job_result'.
    images = final.get('images') or final.get('job_result', {}).get('images', [])
    if not images:
        print("No images returned by the API.")
        return final

    for i, img_b64 in enumerate(images):
        # Strip a possible data-URL header ("data:image/png;base64,...").
        _, img_data = img_b64.split(',', 1) if ',' in img_b64 else (None, img_b64)
        img_bytes = base64.b64decode(img_data)
        filename = Path(__file__).parent / f'image_{i}.png'
        filename.write_bytes(img_bytes)
        # Bug fix: the original printed a literal "(unknown)" placeholder
        # instead of the path the image was actually saved to.
        print(f"Saved image to: {filename}")

    print(f"\nImage generation complete. {len(images)} image(s) saved.")
    return final


if __name__ == "__main__":
    generate_image()
Text-to-Speech Generation Example
Example showing synchronous text-to-speech generation using the AIME API client interface.
tts_sync_example.py
import base64
from aime_api_client_interface import ModelAPI


def save_audio(audio_base64: str, output_filename: str = "output.wav"):
    """Decode base64-encoded audio and write it to *output_filename*."""
    audio_data = base64.b64decode(audio_base64)
    with open(output_filename, "wb") as f:
        f.write(audio_data)
    print(f"Saved audio to: {output_filename}")


def progress_callback(progress_info, progress_data):
    """Print progress updates while the TTS job runs."""
    if progress_info:
        print(f"Progress: {progress_info}%")
    if progress_data:
        print(f"Progress data: {progress_data}")


def main():
    """Run a blocking text-to-speech request and save the resulting audio."""
    # Demo account credentials as published with the AIME API examples.
    model_api = ModelAPI('https://api.aime.info', 'tts_tortoise', 'apiexample@aime.info', '181e35ac-7b7d-4bfe-9f12-153757ec3952')

    model_api.do_api_login()

    params = {
        # Fix: corrected "a example" to "an example" in the synthesized text.
        "text": "Hello! This is an example of text to speech.",
        "language": "eng",
        "voice": "emma",
    }

    result = model_api.do_api_request(
        params,
        progress_callback=progress_callback
    )

    # Only save when the job returned an audio payload.
    if result and 'audio' in result:
        save_audio(result['audio'])


if __name__ == "__main__":
    main()
Example showing asynchronous text-to-speech generation using the AIME API client interface.
tts_async_example.py
import asyncio
import base64
from aime_api_client_interface import ModelAPI


def save_audio(audio_base64: str, output_filename: str = "output.wav"):
    """Decode base64-encoded audio and write it to *output_filename*."""
    audio_data = base64.b64decode(audio_base64)
    with open(output_filename, "wb") as f:
        f.write(audio_data)
    print(f"Saved audio to: {output_filename}")


def progress_callback(progress_info, progress_data):
    """Print progress updates while the TTS job runs."""
    if progress_info:
        print(f"Progress: {progress_info}")
    if progress_data:
        print(f"Progress data: {progress_data}")


async def main():
    """Run an asynchronous text-to-speech request and save the audio result."""
    # Demo account credentials as published with the AIME API examples.
    model_api = ModelAPI('https://api.aime.info', 'tts_tortoise', 'apiexample@aime.info', '181e35ac-7b7d-4bfe-9f12-153757ec3952')

    await model_api.do_api_login_async()

    params = {
        "text": "This is an asynchronous text to speech example.",
        "language": "eng",
        "voice": "emma"
    }

    async def result_callback(result):
        # Persist the audio payload once the job completes.
        if result and 'audio' in result:
            save_audio(result['audio'], "output_async.wav")

    await model_api.do_api_request_async(
        params,
        result_callback=result_callback,
        progress_callback=progress_callback
    )

    # Release the underlying HTTP session.
    await model_api.close_session()


if __name__ == "__main__":
    asyncio.run(main())