
Commit

Merge pull request #12 from KeithLin724/KY-docker-compose-linux
KY-docker-compose-linux
KeithLin724 authored Dec 11, 2023
2 parents a94f965 + 270b865 commit b8f79a4
Showing 21 changed files with 75 additions and 1,341 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -11,4 +11,5 @@ gui_venv_linux
 *.log
 .vscode/
 **.env
-onnx-stable-diffusion-v1-5/
+onnx-stable-diffusion-v1-5/
+test/
3 changes: 2 additions & 1 deletion api/Dockerfile
@@ -47,4 +47,5 @@ COPY . .
 EXPOSE 5000

 # Run the application.
-CMD uvicorn api_server:app --reload --port 5000 --host 0.0.0.0
+# CMD uvicorn api_server:app --reload --port 5000 --host 0.0.0.0
+CMD uvicorn api_server:app --port 5000 --host 0.0.0.0
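
The dropped --reload flag is uvicorn's development auto-reloader, which watches the filesystem and restarts the server on changes; leaving it out is the usual choice for a container image. For reference, a minimal sketch of the programmatic equivalent of the new CMD (assumes only that uvicorn is installed; not part of this commit):

# sketch: programmatic equivalent of the new CMD line (illustration only)
import uvicorn

if __name__ == "__main__":
    uvicorn.run("api_server:app", host="0.0.0.0", port=5000, reload=False)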
9 changes: 7 additions & 2 deletions api/api_server.py
@@ -10,8 +10,6 @@
 # import uvicorn
 import router
 from base import (
-    SERVER_IP,
-    SERVER_PORT,
     SERVER_URL,
     # redis queue
     TASK_IMAGE_QUEUE,
@@ -28,6 +26,7 @@
     server_close,
     monitor_micro_server,
     chat_to_ai_fast_function,
+    cut_prompt_with_fast_function,
 )
 from api_task_func import generate_image_queue
 from contextlib import asynccontextmanager
@@ -104,6 +103,7 @@ async def generate_request_to_micro_service(generate_service: GenerateServiceItem):
         "prompt": generate_service.prompt,
     }

+    # TODO: chat with ai service
     if generate_service.type_service == "chat":
         provider, result = await chat_to_ai_fast_function(
             prompt=generate_service.prompt
@@ -114,6 +114,11 @@
             "message": result,
         }

+    # TODO: cut prompt service
+    elif generate_service.type_service == "cut_prompt":
+        return await cut_prompt_with_fast_function(prompt=generate_service.prompt)
+
+    # TODO: other service
     if generate_service.type_service in monitor_micro_server:
         response = await handle_request_function(generate_service, json_data)
         return response
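
To illustrate the new branch, a client request might look like the sketch below. The "/generate" path and localhost:5000 address are assumptions for illustration (the route decorator sits outside this hunk); only the type_service and prompt fields come from the diff.

# hypothetical client call exercising the new "cut_prompt" branch
# NOTE: the "/generate" path and localhost:5000 address are assumptions
import httpx

resp = httpx.post(
    "http://localhost:5000/generate",
    json={"type_service": "cut_prompt", "prompt": "a cat sitting on a sofa"},
)
print(resp.json())  # expected: one {"state", "provider", "response"} dict per provider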
19 changes: 19 additions & 0 deletions api/base/__init__.py
@@ -70,6 +70,25 @@ async def chat_to_ai_fast_function(prompt: str):
     return provider, reply_message


+async def cut_prompt_with_fast_function(prompt: str):
+    """Cut `prompt` using the text generator model.
+
+    Parameters
+    ----------
+    prompt : str
+        The prompt text to cut down.
+
+    Returns
+    -------
+    The result of `text_generator_model.cut_prompt(prompt=prompt)`: one entry
+    per provider, describing that provider's state and response.
+    """
+    return await text_generator_model.cut_prompt(prompt=prompt)
+
+
 from .message_item import (
     GenerateImageItem,
     ChatItem,
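
A minimal usage sketch of the new wrapper (assumes it runs from the api/ directory, so the base package and its module-level text_generator_model resolve, as they do for api_server.py):

# sketch: calling the new wrapper directly from the api/ package
import asyncio
from base import cut_prompt_with_fast_function

results = asyncio.run(cut_prompt_with_fast_function(prompt="a long, rambling prompt"))
for entry in results:
    print(entry["state"], entry["provider"])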
45 changes: 45 additions & 0 deletions api/model/text_generator_g4f.py
@@ -22,9 +22,16 @@ def __init__(self) -> None:
             g4f.Provider.ChatBase,
             g4f.Provider.GptGo,
             g4f.Provider.You,
+            g4f.Provider.Raycast,
             # g4f.Provider.Yqcloud,
         ]

+        self._cut_prompt_provider = [
+            g4f.Provider.You,
+            # g4f.Provider.GptGo,
+            g4f.Provider.GeekGpt,
+        ]
+
     async def run_provider(self, provider: g4f.Provider.BaseProvider, prompt: str):
         """
         The function `run_provider` takes a provider and a prompt as input, and uses the provider to
@@ -142,3 +149,41 @@ async def generate(self, prompt):

         provider, msg = await self.get_generate(prompt=prompt)
         return provider, msg

+    async def cut_prompt(self, prompt: str) -> list[dict]:
+        """Run `prompt` through every cut-prompt provider concurrently.
+
+        Parameters
+        ----------
+        prompt : str
+            The text to send to each provider for cutting.
+
+        Returns
+        -------
+        list[dict]
+            One dictionary per provider with three keys: "state" (the outcome
+            of the provider call), "provider" (the provider that handled it),
+            and "response" (that provider's reply).
+        """
+        pending_tasks = [
+            self.run_provider(
+                provider=provider,
+                prompt=prompt,
+            )
+            for provider in self._cut_prompt_provider
+        ]
+
+        result = await asyncio.gather(*pending_tasks)
+
+        return [
+            {
+                "state": state,
+                "provider": provider,
+                "response": response,
+            }
+            for state, provider, response in result
+        ]
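
cut_prompt fans one run_provider coroutine per entry in self._cut_prompt_provider out through asyncio.gather, then reshapes the resulting (state, provider, response) tuples into dictionaries. A self-contained sketch of the same pattern, with a stand-in coroutine in place of a real g4f provider call:

# standalone sketch of the fan-out pattern used by cut_prompt
# (fake_provider_call stands in for run_provider; no provider is contacted)
import asyncio

async def fake_provider_call(name: str, prompt: str):
    await asyncio.sleep(0)  # stands in for the network round trip
    return True, name, f"cut version of {prompt!r}"

async def main():
    tasks = [fake_provider_call(n, "a long, rambling prompt") for n in ("You", "GeekGpt")]
    results = await asyncio.gather(*tasks)  # results keep input order
    return [
        {"state": s, "provider": p, "response": r}
        for s, p, r in results
    ]

print(asyncio.run(main()))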
Binary file removed test/old/cat.jpg
36 changes: 0 additions & 36 deletions test/old/main.py

This file was deleted.

36 changes: 0 additions & 36 deletions test/old/task_id_to_rq.py

This file was deleted.

Binary file removed test/old/techno.wav
30 changes: 0 additions & 30 deletions test/old/test.py

This file was deleted.

17 changes: 0 additions & 17 deletions test/old/test_2.py

This file was deleted.

43 changes: 0 additions & 43 deletions test/old/test_async.py

This file was deleted.

50 changes: 0 additions & 50 deletions test/old/test_async_2.py

This file was deleted.

38 changes: 0 additions & 38 deletions test/old/test_async_apscheduler.py

This file was deleted.

