 from langchain.memory import ConversationBufferMemory
 from langchain.prompts import PromptTemplate
 from langchain.vectorstores import FAISS
-from tenacity import retry, stop_after_attempt
 
 openai.api_key = os.getenv('OPENAI_API_KEY')
 
@@ -87,22 +86,44 @@ def __init__(self, docs):
         self.qa_chain = None
         self._llm = None
 
-        self.prompt_template = """
-        Only answer what is asked. Answer step-by-step.
-        If the content has sections, please summarize them in order and present them in a bulleted format.
-        Utilize line breaks for better readability.
-        For example, sequentially summarize the introduction, methods, results, and so on.
-
-        {context}
-
-        Question: {question}
-        """
-
+        self.prompt_template = (
+            "Only answer what is asked. Answer step-by-step.\n"
+            "If the content has sections, please summarize them "
+            "in order and present them in a bulleted format.\n"
+            "Utilize line breaks for better readability.\n"
+            "For example, sequentially summarize the "
+            "introduction, methods, results, and so on.\n"
+            "Please use Python's newline symbols appropriately to "
+            "enhance the readability of the response, "
+            "but don't use two newline symbols consecutively.\n\n"
+            "{context}\n\n"
+            "Question: {question}\n"
+        )
         self.prompt = PromptTemplate(
             template=self.prompt_template,
             input_variables=['context', 'question']
         )
 
+        self.refine_prompt_template = (
+            "The original question is as follows: {question}\n"
+            "We have provided an existing answer: {existing_answer}\n"
+            "We have the opportunity to refine the existing answer "
+            "(only if needed) with some more context below.\n"
+            "------------\n"
+            "{context_str}\n"
+            "------------\n"
+            "Given the new context, refine the original answer to better "
+            "answer the question. "
+            "If the context isn't useful, return the original answer.\n"
+            "Please use Python's newline symbols "
+            "appropriately to enhance the readability of the response, "
+            "but don't use two newline symbols consecutively.\n"
+        )
+        self.refine_prompt = PromptTemplate(
+            template=self.refine_prompt_template,
+            input_variables=['question', 'existing_answer', 'context_str']
+        )
+
     @property
     def llm(self):
         return self._llm
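For context on how the two templates interact: in a refine-style chain, the question prompt answers from the first retrieved chunk, and the refine prompt is then applied once per remaining chunk, carrying the running answer forward. A rough sketch of one refine step, written as if inside the class above, with invented placeholder values rather than real chain output:

# Invented placeholder values; this only illustrates which variables the
# refine template consumes on a single refine step.
step_prompt = self.refine_prompt.format(
    question='What datasets were used?',
    existing_answer='- The paper evaluates on dataset A.',
    context_str='Section 4 describes experiments on datasets A and B.',
)
# step_prompt is the string sent to the LLM for this step; the model's reply
# becomes existing_answer for the next chunk.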
@@ -143,7 +164,8 @@ def create_qa_chain(
         self._helper_prompt(chain_type)
         chain_type_kwargs = {
             'question_prompt': self.prompt,
-            'verbose': True
+            'verbose': True,
+            'refine_prompt': self.refine_prompt
         }
 
         db = self._embeddings()
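The rest of create_qa_chain falls outside this hunk, but with the classic langchain 0.0.x API that the imports suggest, chain_type_kwargs would typically be forwarded to a refine documents chain via RetrievalQA. A hedged sketch of that continuation, written as if inside the method; chain_type='refine' is assumed, since question_prompt and refine_prompt are only accepted by the refine chain:

# Hypothetical continuation, not the PR's actual code.
from langchain.chains import RetrievalQA

self.qa_chain = RetrievalQA.from_chain_type(
    llm=self._llm,                        # set elsewhere via the llm property
    chain_type='refine',                  # required for refine_prompt to take effect
    retriever=db.as_retriever(),          # db is the FAISS store from self._embeddings()
    chain_type_kwargs=chain_type_kwargs,  # question_prompt, refine_prompt, verbose
)

Note that these kwargs match only the refine chain's signature; other chain types such as 'stuff' take a single prompt kwarg instead, so the chain type must be chosen consistently with them.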