Reference: https://velog.io/@judy_choi/LLaMA3-을-이용한-RAG-구축-Ollama-사용법-정리
pip install langchain langchain-community langchain-core
pip install sentence-transformers
pip install pymupdf
pip install chromadb
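Ollama itself must also be installed and running before ChatOllama can connect to it, with a model pulled locally (the llama3 tag below follows the post title; substitute whichever model you actually use):

ollama pull llama3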
import os
import warnings
warnings.filterwarnings("ignore")
from langchain_community.document_loaders import PyMuPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_community.chat_models import ChatOllama
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
# Load the source PDF (replace {document_name} with the path to your file)
loader = PyMuPDFLoader("{document_name}")
pages = loader.load()
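load() returns one Document per PDF page, so a quick check (optional) confirms the file was read:

print(len(pages))                    # number of pages loaded
print(pages[0].page_content[:200])   # preview of the first page's text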
## Split the text into 500-character chunks, with a 50-character overlap between consecutive chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=500,
    chunk_overlap=50,
)
docs = text_splitter.split_documents(pages)
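Each resulting chunk is itself a Document that keeps the source page in its metadata; inspecting a few values (optional) verifies the split:

print(len(docs))                  # total number of chunks
print(docs[0].metadata)           # source file and page number
print(len(docs[0].page_content))  # at most 500 characters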
embeddings = HuggingFaceEmbeddings(
    model_name='BAAI/bge-m3',
    model_kwargs={'device': 'cpu'},
    # model_kwargs={'device': 'cuda'},  # requires a CUDA-enabled PyTorch build
    encode_kwargs={'normalize_embeddings': True},
)
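As an optional sanity check, embedding a short test string confirms the model loaded; BAAI/bge-m3 produces 1024-dimensional vectors:

vec = embeddings.embed_query("test sentence")
print(len(vec))  # 1024 for BAAI/bge-m3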
# Set up the vector store path
## Creates a 'vectorstore' directory under the current path
vectorstore_path = 'vectorstore'
os.makedirs(vectorstore_path, exist_ok=True)

# Create the vector store and save it to disk
# (building it once with persist_directory avoids embedding the documents twice)
vectorstore = Chroma.from_documents(docs, embeddings, persist_directory=vectorstore_path)

# Flush the vector store data to disk
# (with chromadb >= 0.4, persistence is automatic and this call is unnecessary)
vectorstore.persist()
print("Vectorstore created and persisted")