
Commit 54c2419

chore(langchain): enable ruff docstring-code-format in langchain_v1 (#32855)
1 parent 35e9d36 commit 54c2419

9 files changed: +49 -38 lines changed
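For context: ruff's `docstring-code-format` option (off by default) tells `ruff format` to format code examples embedded in docstrings (doctests, Markdown-fenced blocks, and reST code blocks) under the same style rules as regular code. That is why every hunk below touches only docstring examples: trailing commas appear, calls that fit within the line limit are collapsed onto one line, two blank lines are inserted around top-level definitions, and quotes are normalized. A minimal before/after sketch of the effect, using a hypothetical function that is not part of this commit:

```python
def greet(name: str) -> str:
    """Greet someone by name.

    Before this commit, an example like the following was left exactly as written:

    .. code-block:: python

        message = greet(
            "world"
        )

    With docstring-code-format enabled, ``ruff format`` rewrites it in place to:

    .. code-block:: python

        message = greet("world")
    """
    return f"Hello, {name}!"
```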

libs/langchain_v1/langchain/_internal/_prompts.py

Lines changed: 5 additions & 6 deletions
@@ -61,6 +61,7 @@ def resolve_prompt(
 def custom_prompt(state, runtime):
     return [{"role": "system", "content": "Custom"}]
 
+
 messages = resolve_prompt(custom_prompt, state, runtime, "content", "default")
 messages = resolve_prompt("Custom system", state, runtime, "content", "default")
 messages = resolve_prompt(None, state, runtime, "content", "Default")
@@ -128,15 +129,13 @@ async def aresolve_prompt(
 async def async_prompt(state, runtime):
     return [{"role": "system", "content": "Async"}]
 
+
 def sync_prompt(state, runtime):
     return [{"role": "system", "content": "Sync"}]
 
-messages = await aresolve_prompt(
-    async_prompt, state, runtime, "content", "default"
-)
-messages = await aresolve_prompt(
-    sync_prompt, state, runtime, "content", "default"
-)
+
+messages = await aresolve_prompt(async_prompt, state, runtime, "content", "default")
+messages = await aresolve_prompt(sync_prompt, state, runtime, "content", "default")
 messages = await aresolve_prompt("Custom", state, runtime, "content", "default")
 ```

libs/langchain_v1/langchain/agents/interrupt.py

Lines changed: 6 additions & 6 deletions
@@ -53,15 +53,15 @@ class HumanInterrupt(TypedDict):
 request = HumanInterrupt(
     action_request=ActionRequest(
         action="run_command",  # The action being requested
-        args={"command": "ls", "args": ["-l"]}  # Arguments for the action
+        args={"command": "ls", "args": ["-l"]},  # Arguments for the action
     ),
     config=HumanInterruptConfig(
-        allow_ignore=True,    # Allow skipping this step
-        allow_respond=True,   # Allow text feedback
-        allow_edit=False,     # Don't allow editing
-        allow_accept=True     # Allow direct acceptance
+        allow_ignore=True,  # Allow skipping this step
+        allow_respond=True,  # Allow text feedback
+        allow_edit=False,  # Don't allow editing
+        allow_accept=True,  # Allow direct acceptance
     ),
-    description="Please review the command before execution"
+    description="Please review the command before execution",
 )
 
 # Send the interrupt request and get the response
 response = interrupt([request])[0]

libs/langchain_v1/langchain/agents/react_agent.py

Lines changed: 3 additions & 0 deletions
@@ -957,14 +957,17 @@ def create_agent(  # noqa: D417
 ```python
 from dataclasses import dataclass
 
+
 @dataclass
 class ModelContext:
     model_name: str = "gpt-3.5-turbo"
 
+
 # Instantiate models globally
 gpt4_model = ChatOpenAI(model="gpt-4")
 gpt35_model = ChatOpenAI(model="gpt-3.5-turbo")
 
+
 def select_model(state: AgentState, runtime: Runtime[ModelContext]) -> ChatOpenAI:
     model_name = runtime.context.model_name
     model = gpt4_model if model_name == "gpt-4" else gpt35_model

libs/langchain_v1/langchain/agents/tool_node.py

Lines changed: 11 additions & 3 deletions
@@ -23,10 +23,12 @@
 from langchain_core.tools import tool
 from langchain.agents import ToolNode
 
+
 @tool
 def my_tool(x: int) -> str:
     return f"Result: {x}"
 
+
 tool_node = ToolNode([my_tool])
 ```
 """
@@ -369,6 +371,7 @@ def context_tool(query: str, state: Annotated[dict, InjectedState]) -> str:
 def handle_errors(e: ValueError) -> str:
     return "Invalid input provided"
 
+
 tool_node = ToolNode([my_tool], handle_tool_errors=handle_errors)
 ```
 """
@@ -887,16 +890,18 @@ def tools_condition(
 from langgraph.agents.tool_node import ToolNode, tools_condition
 from typing_extensions import TypedDict
 
+
 class State(TypedDict):
     messages: list
 
+
 graph = StateGraph(State)
 graph.add_node("llm", call_model)
 graph.add_node("tools", ToolNode([my_tool]))
 graph.add_conditional_edges(
     "llm",
     tools_condition,  # Routes to "tools" or "__end__"
-    {"tools": "tools", "__end__": "__end__"}
+    {"tools": "tools", "__end__": "__end__"},
 )
 ```
 
@@ -956,6 +961,7 @@ class AgentState(TypedDict):
     messages: List[BaseMessage]
     foo: str
 
+
 @tool
 def state_tool(x: int, state: Annotated[dict, InjectedState]) -> str:
     '''Do something with state.'''
@@ -964,11 +970,13 @@ def state_tool(x: int, state: Annotated[dict, InjectedState]) -> str:
     else:
         return "not enough messages"
 
+
 @tool
 def foo_tool(x: int, foo: Annotated[str, InjectedState("foo")]) -> str:
     '''Do something else with state.'''
     return foo + str(x + 1)
 
+
 node = ToolNode([state_tool, foo_tool])
 
 tool_call1 = {"name": "state_tool", "args": {"x": 1}, "id": "1", "type": "tool_call"}
@@ -982,8 +990,8 @@ def foo_tool(x: int, foo: Annotated[str, InjectedState("foo")]) -> str:
 
 ```pycon
 [
-    ToolMessage(content='not enough messages', name='state_tool', tool_call_id='1'),
-    ToolMessage(content='bar2', name='foo_tool', tool_call_id='2')
+    ToolMessage(content="not enough messages", name="state_tool", tool_call_id="1"),
+    ToolMessage(content="bar2", name="foo_tool", tool_call_id="2"),
 ]
 ```

libs/langchain_v1/langchain/chat_models/base.py

Lines changed: 13 additions & 12 deletions
@@ -191,14 +191,12 @@ def init_chat_model(
 configurable_model = init_chat_model(temperature=0)
 
 configurable_model.invoke(
-    "what's your name",
-    config={"configurable": {"model": "gpt-4o"}}
+    "what's your name", config={"configurable": {"model": "gpt-4o"}}
 )
 # GPT-4o response
 
 configurable_model.invoke(
-    "what's your name",
-    config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
+    "what's your name", config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
 )
 # claude-3.5 sonnet response
 
@@ -213,7 +211,7 @@ def init_chat_model(
     "openai:gpt-4o",
     configurable_fields="any",  # this allows us to configure other params like temperature, max_tokens, etc at runtime.
     config_prefix="foo",
-    temperature=0
+    temperature=0,
 )
 
 configurable_model_with_default.invoke("what's your name")
@@ -224,9 +222,9 @@ def init_chat_model(
     config={
         "configurable": {
            "foo_model": "anthropic:claude-3-5-sonnet-latest",
-           "foo_temperature": 0.6
+           "foo_temperature": 0.6,
        }
-    }
+    },
 )
 # Claude-3.5 sonnet response with temperature 0.6
 
@@ -241,31 +239,34 @@ def init_chat_model(
 from langchain.chat_models import init_chat_model
 from pydantic import BaseModel, Field
 
+
 class GetWeather(BaseModel):
     '''Get the current weather in a given location'''
 
     location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
 
+
 class GetPopulation(BaseModel):
     '''Get the current population in a given location'''
 
     location: str = Field(..., description="The city and state, e.g. San Francisco, CA")
 
+
 configurable_model = init_chat_model(
-    "gpt-4o",
-    configurable_fields=("model", "model_provider"),
-    temperature=0
+    "gpt-4o", configurable_fields=("model", "model_provider"), temperature=0
 )
 
-configurable_model_with_tools = configurable_model.bind_tools([GetWeather, GetPopulation])
+configurable_model_with_tools = configurable_model.bind_tools(
+    [GetWeather, GetPopulation]
+)
 configurable_model_with_tools.invoke(
     "Which city is hotter today and which is bigger: LA or NY?"
 )
 # GPT-4o response with tool calls
 
 configurable_model_with_tools.invoke(
     "Which city is hotter today and which is bigger: LA or NY?",
-    config={"configurable": {"model": "claude-3-5-sonnet-latest"}}
+    config={"configurable": {"model": "claude-3-5-sonnet-latest"}},
 )
 # Claude-3.5 sonnet response with tools

libs/langchain_v1/langchain/embeddings/base.py

Lines changed: 2 additions & 8 deletions
@@ -162,17 +162,11 @@ def init_embeddings(
 model.embed_query("Hello, world!")
 
 # Using explicit provider
-model = init_embeddings(
-    model="text-embedding-3-small",
-    provider="openai"
-)
+model = init_embeddings(model="text-embedding-3-small", provider="openai")
 model.embed_documents(["Hello, world!", "Goodbye, world!"])
 
 # With additional parameters
-model = init_embeddings(
-    "openai:text-embedding-3-small",
-    api_key="sk-..."
-)
+model = init_embeddings("openai:text-embedding-3-small", api_key="sk-...")
 
 .. versionadded:: 0.3.9

libs/langchain_v1/langchain/storage/encoder_backed.py

Lines changed: 5 additions & 1 deletion
@@ -22,15 +22,19 @@ class EncoderBackedStore(BaseStore[K, V]):
 
 import json
 
+
 def key_encoder(key: int) -> str:
     return json.dumps(key)
 
+
 def value_serializer(value: float) -> str:
     return json.dumps(value)
 
+
 def value_deserializer(serialized_value: str) -> float:
     return json.loads(serialized_value)
 
+
 # Create an instance of the abstract store
 abstract_store = MyCustomStore()
@@ -39,7 +43,7 @@ def value_deserializer(serialized_value: str) -> float:
     store=abstract_store,
     key_encoder=key_encoder,
     value_serializer=value_serializer,
-    value_deserializer=value_deserializer
+    value_deserializer=value_deserializer,
 )
 
 # Use the encoder-backed store methods

libs/langchain_v1/pyproject.toml

Lines changed: 3 additions & 0 deletions
@@ -102,6 +102,9 @@ skip = ".git,*.pdf,*.svg,*.pdf,*.yaml,*.ipynb,poetry.lock,*.min.js,*.css,package
 ignore-regex = ".*(Stati Uniti|Tense=Pres).*"
 ignore-words-list = "momento,collison,ned,foor,reworkd,parth,whats,aapply,mysogyny,unsecure,damon,crate,aadd,symbl,precesses,accademia,nin"
 
+[tool.ruff.format]
+docstring-code-format = true
+
 [tool.ruff.lint]
 select = [
     "ALL"

libs/langchain_v1/tests/unit_tests/conftest.py

Lines changed: 1 addition & 2 deletions
@@ -33,8 +33,7 @@ def pytest_collection_modifyitems(config: pytest.Config, items: Sequence[pytest.
 .. code-block:: python
 
     @pytest.mark.requires("package1", "package2")
-    def test_something():
-        ...
+    def test_something(): ...
 
 """
 # Mapping from the name of a package to whether it is installed or not.
