add question generation
This commit is contained in:
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -4,6 +4,8 @@ from src.app.models.knowledge import Knowledge
|
||||
from src.app.crud.crud_knowledges import create_knowledge, read_knowledges, read_knowledge, update_knowledge, delete_knowledge
|
||||
from src.app.crud.crud_questions import read_questions as read_questions_crud
|
||||
|
||||
from src.app.services.language_generation import completion
|
||||
|
||||
#Added in __init__
|
||||
router = APIRouter(tags=["knowledges"])
|
||||
|
||||
@@ -38,9 +40,9 @@ def delete(id: int):
|
||||
#TODO: find pattern
|
||||
@router.post("/knowledges/{id}/questions")
def create_questions(id: int):
    """Generate questions for the knowledge item with the given id.

    Fetches the knowledge record and runs it through the language-model
    `completion` service.

    Returns whatever `completion` produces (a dict of generated questions
    and segmented sentences — see the service module).
    """
    # SLM generation from the stored knowledge content.
    question = completion(read_knowledge(id))
    # TODO: persist the generated questions (create_question) once implemented.
    # Fix: the original returned True here, making `return question`
    # unreachable dead code; return the generated payload instead.
    return question
|
||||
|
||||
@router.get("/knowledges/{id}/questions")
|
||||
def read_questions(id: int):
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
import os
#import secrets

# Fix: `from dotenv import load_dotenv` appeared twice; deduplicated and
# imports regrouped stdlib / third-party per convention.
from dotenv import load_dotenv
from sqlmodel import Session, SQLModel, create_engine

# Load DATABASE_URI (and any other settings) from a local .env file.
load_dotenv()

# May be None if the variable is unset — TODO confirm a fallback is handled
# where the engine is created (not visible in this fragment).
database_uri=os.environ.get("DATABASE_URI")

# NOTE(review): check_same_thread is a SQLite-specific connect argument,
# allowing the connection to be used across threads — presumably because
# the web framework serves requests from multiple threads; verify.
connect_args = {"check_same_thread": False}
|
||||
|
||||
Binary file not shown.
47
server/src/app/services/language_generation.py
Normal file
47
server/src/app/services/language_generation.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import os
|
||||
import spacy
|
||||
|
||||
from openai import OpenAI
|
||||
from pydantic import BaseModel
|
||||
from src.app.models.knowledge import Knowledge
|
||||
|
||||
# Endpoint URL and model name of the OpenAI-compatible language-model
# server, read from the environment; either may be None if unset.
language_model_api=os.environ.get("LANGUAGE_MODEL_API")
model_name=os.environ.get("LANGUAGE_MODEL_NAME")

# OpenAI-compatible client pointed at the configured server; the api_key
# is a placeholder string since the target server requires no key.
client = OpenAI(
base_url=language_model_api,
api_key = "sk-no-key-required"
)

# French spaCy pipeline, loaded once at import time; used by `completion`
# for sentence segmentation of the model output.
nlp = spacy.load("fr_core_news_sm")
|
||||
|
||||
def completion(knowledge: Knowledge):
    """Generate questions from a knowledge item's text content.

    Prompts the language model (in French) to produce 3 questions from
    `knowledge.content`, segments the raw response into sentences with the
    module-level spaCy pipeline, and keeps the sentences that contain a
    question mark (truncated at the last "?").

    Returns a dict:
        {"questions": [...interrogative sentences...],
         "sentence":  [...all segmented sentences of the raw response...]}
    """
    context = "Texte : ```" + knowledge.content + "```"
    instruction = "A partir du texte génère 3 questions :"
    prompt = context + "\n" + instruction

    # SLM processing via the OpenAI-compatible Responses API.
    response = client.responses.create(
        model=model_name,
        input=[
            {"role": "system", "content": "Question Generation"},
            {"role": "user", "content": prompt}],
    )
    # NOTE(review): assumes the first output item's first content part is
    # the text answer — confirm against the server's response shape.
    text_response = response.output[0].content[0].text

    # Sentence segmentation (French pipeline loaded at module level).
    doc = nlp(text_response)
    sents = [sentence.text for sentence in doc.sents]

    # Interrogative-sentence detection: keep each sentence up to and
    # including its last "?". Fix: the original tested `index_mark > 0`,
    # which dropped a sentence whose only "?" is at index 0 (rfind returns
    # 0 there, not the -1 "absent" sentinel).
    questions = []
    for sent in sents:
        index_mark = sent.rfind("?")
        if index_mark != -1:
            questions.append(sent[:index_mark + 1])

    return {"questions": questions, "sentence": sents}
|
||||
|
||||
Reference in New Issue
Block a user