Adapter
adapter.adapter
adapter.api
- gptcache.adapter.api.get(prompt: str, **kwargs) → Any
Search API: search the cached data according to the prompt. Make sure the pre_embedding_func param is get_prompt when initializing the cache.
Example
    from gptcache import cache
    from gptcache.adapter.api import put, get
    from gptcache.processor.pre import get_prompt

    cache.init(pre_embedding_func=get_prompt)
    put("hello", "foo")
    print(get("hello"))
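On a miss, get simply returns no cached data. A minimal sketch of hit-versus-miss behavior (the assumption that a miss returns None is illustrative of the default behavior, not stated above):

    from gptcache import cache
    from gptcache.adapter.api import put, get
    from gptcache.processor.pre import get_prompt

    cache.init(pre_embedding_func=get_prompt)
    put("hello", "foo")

    print(get("hello"))    # "foo" -- served from the cache
    print(get("goodbye"))  # None  -- nothing stored for this prompt (assumed miss behavior)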
- gptcache.adapter.api.put(prompt: str, data: Any, **kwargs) → None
Save API: save the question-answer pair information to GPTCache. Make sure the pre_embedding_func param is get_prompt when initializing the cache.
Example
    from gptcache import cache
    from gptcache.adapter.api import put
    from gptcache.processor.pre import get_prompt

    cache.init(pre_embedding_func=get_prompt)
    put("hello", "foo")
adapter.diffusers#
- class gptcache.adapter.diffusers.StableDiffusionPipeline(*args, **kwargs)
Diffusers StableDiffusionPipeline wrapper.
Example
    import torch
    from diffusers import DPMSolverMultistepScheduler

    from gptcache import cache
    from gptcache.processor.pre import get_prompt
    from gptcache.adapter.diffusers import StableDiffusionPipeline

    # init gptcache
    cache.init(pre_embedding_func=get_prompt)

    # run the pipeline with gptcache
    model_id = "stabilityai/stable-diffusion-2-1"
    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
    pipe = pipe.to("cuda")

    prompt = "a photo of an astronaut riding a horse on mars"
    image = pipe(prompt=prompt).images[0]
adapter.openai#
- class gptcache.adapter.openai.Audio(id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int] = None, api_base=None, engine=None, **params)
OpenAI Audio wrapper.
Example
    from gptcache import cache
    from gptcache.processor.pre import get_file_bytes

    # init gptcache
    cache.init(pre_embedding_func=get_file_bytes)
    cache.set_openai_key()

    from gptcache.adapter import openai

    # run the audio transcription model with gptcache
    audio_file = open("/path/to/audio.mp3", "rb")
    transcript = openai.Audio.transcribe("whisper-1", audio_file)

    # run the audio translation model with gptcache
    audio_file = open("/path/to/audio.mp3", "rb")
    translation = openai.Audio.translate("whisper-1", audio_file)
- class gptcache.adapter.openai.ChatCompletion(engine: Optional[str] = None, **kwargs)
OpenAI ChatCompletion wrapper.
Example
    from gptcache import cache
    from gptcache.processor.pre import get_prompt

    # init gptcache
    cache.init(pre_embedding_func=get_prompt)
    cache.set_openai_key()

    from gptcache.adapter import openai

    # run the ChatCompletion model with gptcache
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": "what's github"}],
    )
    response_content = response["choices"][0]["message"]["content"]
- classmethod create(*args, **kwargs)
Creates a new chat completion for the provided messages and parameters.
See https://platform.openai.com/docs/api-reference/chat-completions/create for a list of valid parameters.
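Because create routes the request through GPTCache, repeating the same question should be answered from the cache rather than by calling the OpenAI API again. A sketch of that round trip (the speedup on the second call is the expected behavior, not a guarantee; timings vary):

    import time

    from gptcache import cache
    from gptcache.processor.pre import get_prompt

    cache.init(pre_embedding_func=get_prompt)
    cache.set_openai_key()

    from gptcache.adapter import openai

    for _ in range(2):
        start = time.time()
        response = openai.ChatCompletion.create(
            model="gpt-3.5-turbo",
            messages=[{"role": "user", "content": "what's github"}],
        )
        # the second iteration should hit the cache and return much faster
        print(round(time.time() - start, 2), response["choices"][0]["message"]["content"])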
- class gptcache.adapter.openai.Completion(engine: Optional[str] = None, **kwargs)
OpenAI Completion wrapper.
Example
    from gptcache import cache
    from gptcache.processor.pre import get_prompt

    # init gptcache
    cache.init(pre_embedding_func=get_prompt)
    cache.set_openai_key()

    from gptcache.adapter import openai

    # run the Completion model with gptcache
    response = openai.Completion.create(model="text-davinci-003", prompt="Hello world.")
    response_text = response["choices"][0]["text"]
- classmethod create(*args, **kwargs)
Creates a new completion for the provided prompt and parameters.
See https://platform.openai.com/docs/api-reference/completions/create for a list of valid parameters.
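Since create accepts *args and **kwargs, standard OpenAI completion parameters pass through to the underlying API when the request is not served from the cache. A minimal sketch (the parameters shown are ordinary OpenAI options, not adapter-specific ones):

    from gptcache import cache
    from gptcache.processor.pre import get_prompt

    cache.init(pre_embedding_func=get_prompt)
    cache.set_openai_key()

    from gptcache.adapter import openai

    # extra OpenAI parameters such as temperature and max_tokens
    # are forwarded unchanged when the request reaches the API
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt="Hello world.",
        temperature=0.0,
        max_tokens=32,
    )
    print(response["choices"][0]["text"])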
- class gptcache.adapter.openai.Image(id=None, api_key=None, api_version=None, api_type=None, organization=None, response_ms: Optional[int] = None, api_base=None, engine=None, **params)
OpenAI Image wrapper.
Example
    from gptcache import cache
    from gptcache.processor.pre import get_prompt

    # init gptcache
    cache.init(pre_embedding_func=get_prompt)
    cache.set_openai_key()

    from gptcache.adapter import openai

    # run the image generation model with gptcache
    response = openai.Image.create(
        prompt="a white siamese cat",
        n=1,
        size="256x256",
    )
    response_url = response["data"][0]["url"]