@ -125,6 +125,7 @@ DALL·E knows a lot about everything, so the deeper your knowledge of the requis
Pay careful attention to the words that you use in the optimized prompt, the first words will be the strongest features visible in the image when DALL-E generates the image. Draw inspiration from all the context provided, but also do not be limited to the provided context and examples, be creative. Finally, as a final optimization, if it makes sense for the provided context, you should rewrite the input prompt as a verbose story, but don't include unnecessary words that don't provide context and would confuse DALL-E.
Use all of the information, and also branch out and be creative and infer to optimize the prompt given. Try to make each optimized prompt at maximum 75 words, and try your best to have at least 30 words. Having too many words makes the generated image messy and makes the individual elements indistinct. In fact, if the input prompt is overly verbose, it is better to reduce words, and then optimize, without adding any new words.
Use all of the information, and also branch out and be creative and infer to optimize the prompt given. Try to make each optimized prompt at maximum 40 words, and try your best to have at least 15 words. Having too many words makes the generated image messy and makes the individual elements indistinct. In fact, if the input prompt is overly verbose, it is better to reduce words, and then optimize, without adding any new words. Moreover, do not add extra words to an already suitable prompt. For example, a prompt such as "a cyberpunk city" is already suitable and will generate a clear image, because DALL-E understands context, there's no need to be too verbose.
self._temp=0.6# Higher value means more random, lower value means more likely to be a coherent sentence
self._top_p=0.9# 1 is equivalent to greedy sampling, 0.1 means that the model will only consider the top 10% of the probability distribution
self._max_tokens=4000# The maximum number of tokens the model can generate
self._presence_penalty=0# Penalize new tokens based on whether they appear in the text so far
self._presence_penalty=(
0# Penalize new tokens based on whether they appear in the text so far
)
self._frequency_penalty=0# Penalize new tokens based on their existing frequency in the text so far. (Higher frequency = lower probability of being chosen.)
self._best_of=1# Number of responses to compare the loglikelihoods of
self._prompt_min_length=12
@ -28,7 +31,7 @@ class Model:
self.usage_service=usage_service
self.DAVINCI_ROLES=["admin","Admin","GPT","gpt"]
openai.api_key=os.getenv('OPENAI_TOKEN')
openai.api_key=os.getenv("OPENAI_TOKEN")
# Use the @property and @setter decorators for all the self fields to provide value checking
@ -57,7 +60,9 @@ class Model:
@model.setter
def model(self, model):
    """Validate and store the completion model name.

    Raises:
        ValueError: if *model* is not one of the supported Models
            constants (text-davinci-003 / text-curie-001).
    """
    supported = [Models.DAVINCI, Models.CURIE]
    if model not in supported:
        raise ValueError(
            "Invalid model, must be text-davinci-003 or text-curie-001"
        )
    self._model = model
@property
@ -70,7 +75,9 @@ class Model:
ifvalue<1:
raiseValueError("Max conversation length must be greater than 1")
ifvalue>30:
raiseValueError("Max conversation length must be less than 30, this will start using credits quick.")
raiseValueError(
"Max conversation length must be less than 30, this will start using credits quick."
)
self._max_conversation_length=value
@property
@ -98,7 +105,10 @@ class Model:
def temp(self, value):
    """Coerce *value* to float and store it as the sampling temperature.

    Raises:
        ValueError: if the coerced value is below 0 or above 1.
    """
    value = float(value)
    # NOTE: the message says "greater than 0 and less than 1", but the
    # endpoints 0 and 1 are accepted by this check.
    if value < 0 or value > 1:
        raise ValueError(
            f"temperature must be greater than 0 and less than 1, it is currently {value}"
        )
    self._temp = value
@ -110,7 +120,10 @@ class Model:
def top_p(self, value):
    """Coerce *value* to float and store it as the nucleus-sampling cutoff.

    Raises:
        ValueError: if the coerced value is below 0 or above 1.
    """
    value = float(value)
    # NOTE: the message says "greater than 0 and less than 1", but the
    # endpoints 0 and 1 are accepted by this check.
    if value < 0 or value > 1:
        raise ValueError(
            f"top_p must be greater than 0 and less than 1, it is currently {value}"
        )
    self._top_p = value
@property
@ -121,7 +134,10 @@ class Model:
def max_tokens(self, value):
    """Coerce *value* to int and store it as the generation token limit.

    Raises:
        ValueError: if the coerced value is below 15 or above 4096.
    """
    value = int(value)
    if value < 15 or value > 4096:
        raise ValueError(
            f"max_tokens must be greater than 15 and less than 4096, it is currently {value}"
        )
    self._max_tokens = value
@property
@ -131,7 +147,9 @@ class Model:
@presence_penalty.setter
def presence_penalty(self, value):
    """Store the presence penalty, rejecting negative values.

    Only the check coerces with int(); the raw *value* is stored as-is.

    Raises:
        ValueError: if int(value) is negative.
    """
    coerced = int(value)
    if coerced < 0:
        raise ValueError(
            f"presence_penalty must be greater than 0, it is currently {value}"
        )
    self._presence_penalty = value
@property
@ -141,7 +159,10 @@ class Model:
@frequency_penalty.setter
def frequency_penalty(self, value):
    """Store the frequency penalty, rejecting negative values.

    Only the check coerces with int(); the raw *value* is stored as-is.

    Raises:
        ValueError: if int(value) is negative.
    """
    coerced = int(value)
    if coerced < 0:
        raise ValueError(
            f"frequency_penalty must be greater than 0, it is currently {value}"
        )
    self._frequency_penalty = value
@property
@ -153,7 +174,9 @@ class Model:
value=int(value)
ifvalue<1orvalue>3:
raiseValueError(
"best_of must be greater than 0 and ideally less than 3 to save tokens, it is currently "+str(value))
"best_of must be greater than 0 and ideally less than 3 to save tokens, it is currently "
+str(value)
)
self._best_of=value
@property
@ -165,14 +188,28 @@ class Model:
value=int(value)
ifvalue<10orvalue>4096:
raiseValueError(
"prompt_min_length must be greater than 10 and less than 4096, it is currently "+str(value))
"prompt_min_length must be greater than 10 and less than 4096, it is currently "
+str(value)
)
self._prompt_min_length=value
defsend_request(self,prompt,message):
defsend_request(
self,
prompt,
message,
temp_override=None,
top_p_override=None,
best_of_override=None,
frequency_penalty_override=None,
presence_penalty_override=None,
max_tokens_override=None,
):
# Validate that all the parameters are in a good state before we send the request
iflen(prompt)<self.prompt_min_length:
raiseValueError("Prompt must be greater than 25 characters, it is currently "+str(len(prompt)))
raiseValueError(
"Prompt must be greater than 25 characters, it is currently "