Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 0 additions & 11 deletions .env-example

This file was deleted.

17 changes: 17 additions & 0 deletions docs/Aprendizado_megale.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@


Site para fazer grafos

https://www.mermaidchart.com/app/projects/abe4786a-4c2c-42ac-bebe-267dd17151d5/diagrams/95bf5c1d-8b17-4fe9-8296-2596c97d712d/version/v0.1/edit

Colar nesse link o código do grafo, e você verá o resultado como imagem.

build.langchain.com (esse é muito bom)

https://whimsical.com/Ck8Sb5D8k4DzM8d4WUtVAy



---


2 changes: 1 addition & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@ authors = [{ name = "Luiz Otávio" }]
requires-python = ">=3.13"
dependencies = [
"aiosqlite>=0.21.0",
"langchain>=1.0.8",
"langchain[google-genai,ollama]>=1.0.8",
"langchain-core>=1.0.7",
"langchain-google-genai>=3.1.0",
"langchain-ollama>=1.0.0",
Expand Down
5 changes: 4 additions & 1 deletion src/examples/ex001/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,9 @@
# `ChatOpenAI` do pacote `langchain_openai` isso deixaria o meu código um pouco
# mais acoplado, já que eu precisaria fazer isso sempre que mudasse de modelo.
# Pense nele como um atalho para criar um novo Chat Model.

from dotenv import load_dotenv
import os
from langchain.chat_models import init_chat_model

################################################################################
Expand Down Expand Up @@ -39,7 +42,7 @@

# Vou usar Ollama neste exemplo

llm = init_chat_model("ollama:gpt-oss:20b")
llm = init_chat_model("google_genai:gemini-2.5-flash", api_key=os.getenv("GOOGLE_API_KEY")) # passa explicitamente

# A beleza do LangChain é que daqui para baixo, tudo é praticamente igual
# para qualquer modelo.
Expand Down
23 changes: 23 additions & 0 deletions src/examples/ex001/megale01.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# main.py
#
# Minimal example: load credentials from a .env file and query a Gemini chat
# model through LangChain's `init_chat_model` factory.

import os

from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from rich import print

# Load the variables from .env into os.environ.
load_dotenv()

# Confirm the key was loaded WITHOUT leaking the secret to stdout —
# printing API keys to the console is a credential leak (logs, screenshots).
print("GOOGLE_API_KEY =", "<set>" if os.getenv("GOOGLE_API_KEY") else None)

# Now initialize the LLM normally.
llm = init_chat_model(
    "google_genai:gemini-2.5-flash",
    api_key=os.getenv("GOOGLE_API_KEY"),  # passed explicitly
)

response = llm.invoke("Olá, como vai?")
print(response)
69 changes: 69 additions & 0 deletions src/examples/ex002/megale02.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,69 @@
# main.py
#
# Interactive study-guide chatbot: a system prompt shapes the assistant's
# behavior, then a REPL loop keeps the full message history in context so the
# model remembers the conversation.

import os

from dotenv import load_dotenv
from langchain.chat_models import init_chat_model
from langchain.messages import HumanMessage, SystemMessage
from rich import print

# Load the variables from .env into os.environ.
load_dotenv()

# Confirm the key was loaded WITHOUT leaking the secret to stdout —
# printing API keys to the console is a credential leak (logs, screenshots).
print("GOOGLE_API_KEY =", "<set>" if os.getenv("GOOGLE_API_KEY") else None)

# Initialize the LLM.
llm = init_chat_model(
    model="google_genai:gemini-2.5-flash",
    api_key=os.getenv("GOOGLE_API_KEY")
)

# System message (instructs the assistant's behavior).
system_message = SystemMessage(
    content=(
        "Você é um guia de estudos que ajuda estudantes a aprenderem novos tópicos.\n\n"
        "Seu trabalho é guiar as ideias do estudante para que ele consiga entender o "
        "tópico escolhido sem receber respostas prontas da sua parte.\n\n"
        "Evite conversar sobre assuntos paralelos ao tópico escolhido. Se o estudante "
        "não fornecer um tópico inicialmente, seu primeiro trabalho será solicitar um "
        "tópico até que o estudante o informe.\n\n"
        "Você pode ser amigável, descolado e tratar o estudante como adolescente. Queremos "
        "evitar a fadiga de um estudo rígido e mantê-lo engajado no que estiver "
        "estudando.\n\n"
        "As próximas mensagens serão de um estudante."
    )
)

# Human (user) message that opens the conversation.
human_message = HumanMessage(content="Olá, tudo bem?")

# Conversation history sent to the model on every turn.
messages = [system_message, human_message]

# Get the model's first reply.
response = llm.invoke(messages)
print(f"{'AI':-^80}")
print(response.content)

messages.append(response)
while True:
    print(f"{'Human':-^80}")
    user_input = input("Digite sua mensagem: ")

    # Check the quit command BEFORE building a message, so it is never
    # wrapped into a HumanMessage or appended to the history.
    if user_input.lower() in ["exit", "quit", "bye", "q"]:
        break

    messages.append(HumanMessage(content=user_input))
    response = llm.invoke(messages)
    print(f"{'AI':-^80}")
    print(response.content)
    print()
    messages.append(response)

print()
print(f"{'Histórico':-^80}")
print(*[f"{m.type.upper()}\n{m.content}\n\n" for m in messages], sep="", end="")
print()
55 changes: 55 additions & 0 deletions src/examples/ex003/megale03_01.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Minimal LangGraph example: a linear graph (START -> A -> B -> END) where
# each node appends its own name to `nodes_path`, showing how a reducer
# merges partial state updates.

import operator
from typing import Annotated, TypedDict

from langgraph.graph import StateGraph
from rich import print


# The graph state.
class State(TypedDict):
    # `operator.add` (list concatenation) is the correct reducer for a
    # list[str] of node names. The previous `add_messages` reducer is meant
    # for chat histories: it coerces the plain strings "A"/"B" into Message
    # objects instead of keeping them as a simple path trace.
    nodes_path: Annotated[list[str], operator.add]


# 2) Define the nodes. Each one returns a PARTIAL update; the reducer
# concatenates it onto the accumulated state.

def node_a(state: State) -> State:
    output_state: State = {"nodes_path": ["A"]}
    print("> node_a em execução", f"{state=}", f"{output_state=}")
    return output_state


def node_b(state: State) -> State:
    output_state: State = {"nodes_path": ["B"]}
    print("> node_b em execução", f"{state=}", f"{output_state=}")
    return output_state


# Define the graph builder (StateGraph).
builder = StateGraph(State)

builder.add_node("A", node_a)
builder.add_node("B", node_b)

# Connect the edges.
builder.add_edge('__start__', "A")
builder.add_edge("A", "B")
builder.add_edge("B", "__end__")

# Compile the graph.
graph = builder.compile()

# To visualize: print(graph.get_graph().draw_mermaid()) and paste the output
# into a Mermaid renderer (e.g. mermaidchart.com).

# Run the graph and collect the result.
response = graph.invoke({"nodes_path": []})

print()
print(f"{response}")
print()
73 changes: 73 additions & 0 deletions src/examples/ex003/megale03_02.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,73 @@
# LangGraph example with a conditional edge: after node A runs, the router
# `the_conditional` sends the flow to C when current_number >= 50, and to B
# otherwise. Both branches terminate the graph.

from typing import Annotated, TypedDict, Literal
from langgraph.graph import StateGraph, END, START
from rich import print
import operator
from dataclasses import dataclass


# Dataclass-based graph state.
@dataclass
class State:
    # Names of visited nodes; partial updates are merged by concatenation.
    nodes_path: Annotated[list[str], operator.add]
    # Value the router inspects to pick a branch (defaults to 0).
    current_number: int = 0


# Node implementations — each one records its own name in the path and
# carries `current_number` through unchanged.
# NOTE: the local MUST stay named `output_state`: the `f"{output_state=}"`
# debug f-strings embed the variable name in the printed output.

def node_a(state: State) -> State:
    output_state: State = State(nodes_path=["A"], current_number=state.current_number)
    print("> node_a em execução", f"{state=}", f"{output_state=}")
    return output_state


def node_b(state: State) -> State:
    output_state: State = State(nodes_path=["B"], current_number=state.current_number)
    print("> node_b em execução", f"{state=}", f"{output_state=}")
    return output_state


def node_c(state: State) -> State:
    output_state: State = State(nodes_path=["C"], current_number=state.current_number)
    print("> node_c em execução", f"{state=}", f"{output_state=}")
    return output_state


def the_conditional(state: State) -> Literal["B", "C"]:
    """Route to "C" once current_number reaches 50, otherwise to "B"."""
    return "C" if state.current_number >= 50 else "B"


# Assemble the graph: register the three nodes, then wire the edges.
builder = StateGraph(State)
for node_name, node_fn in (("A", node_a), ("B", node_b), ("C", node_c)):
    builder.add_node(node_name, node_fn)

builder.add_edge(START, "A")
builder.add_conditional_edges("A", the_conditional, {"B": "B", "C": "C"})
builder.add_edge("B", END)
builder.add_edge("C", END)

# Compile the graph.
graph = builder.compile()

# Run twice to exercise both branches of the router:
# default current_number=0 -> B, current_number=51 -> C.
for initial_state in (State(nodes_path=[]), State(nodes_path=[], current_number=51)):
    print()
    print(f"{graph.invoke(initial_state)}")
    print()
Loading