
Feature #3584 » rag with groq.txt

Ram Kordale, 06/04/2024 03:52 AM

 
!pip install -qU langchain_experimental langchain_community langchain fastembed pypdf langchain-groq chromadb
%pip install --upgrade --quiet langchain langchain-community
%pip install langchain_openai

from langchain_community.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
loader = PyPDFLoader("hesc103.pdf")
documents = loader.load()

text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=100,
    length_function=len,
    is_separator_regex=False
)
naive_chunks = text_splitter.split_documents(documents)
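
# Illustrative sanity check: confirm the split produced chunks and preview
# one before building the index.
print(f"{len(documents)} pages -> {len(naive_chunks)} chunks")
print(naive_chunks[0].page_content[:200])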

from langchain_community.embeddings.fastembed import FastEmbedEmbeddings
embed_model = FastEmbedEmbeddings(model_name="BAAI/bge-base-en-v1.5")
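
# Illustrative check: embed a short query to confirm the model downloads and
# loads; the vector length should be 768 for bge-base-en-v1.5.
sample_vector = embed_model.embed_query("test sentence")
print(len(sample_vector))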

from google.colab import userdata
from langchain_groq import ChatGroq

# Read the Groq API key from Colab's secret store (use whatever secret name
# you saved it under) instead of hardcoding it in the notebook.
groq_api_key = userdata.get("GROQ_API_KEY")

from langchain_community.vectorstores import Chroma
naive_chunk_vectorstore = Chroma.from_documents(naive_chunks, embedding=embed_model)
# Expose the vector store as a retriever so it can feed the RAG chain below.
naive_chunk_retriever = naive_chunk_vectorstore.as_retriever()
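
# Illustrative retrieval check (substitute a question relevant to hesc103.pdf):
# fetch the most similar chunks and preview the top hit.
hits = naive_chunk_vectorstore.similarity_search("your question here", k=2)
print(hits[0].page_content[:200])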

from langchain_core.prompts import ChatPromptTemplate

rag_template = """\
Answer the question based only on the following context:
{context}

Question:
{question}
"""
rag_prompt = ChatPromptTemplate.from_template(rag_template)
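
# Illustrative: render the template with dummy values to see the exact text
# the model will receive.
print(rag_prompt.format(context="(retrieved chunks)", question="(user question)"))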

chat_model = ChatGroq(
    temperature=0,
    model_name="mixtral-8x7b-32768",
    api_key=groq_api_key,
)

from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser

def format_docs(docs):
    # Join retrieved Documents into plain text so {context} isn't a list repr.
    return "\n\n".join(doc.page_content for doc in docs)

naive_rag_chain = (
    {"context": naive_chunk_retriever | format_docs, "question": RunnablePassthrough()}
    | rag_prompt
    | chat_model
    | StrOutputParser()
)
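
# Illustrative single-shot query: RunnablePassthrough forwards the raw question
# to the prompt while the retriever fills in {context}.
print(naive_rag_chain.invoke("your question here"))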

# ConversationChain expects a plain LLM, not an LCEL pipeline, so wrapping
# naive_rag_chain in it fails validation; invoke the RAG chain directly instead.
while True:
    user_input = input("You: ")

    if user_input.lower() == "exit":
        break

    response = naive_rag_chain.invoke(user_input)
    print("AI:", response)