The open source
agent harness SDK.
Build an agent harness. Control it end-to-end.
pip install strands-agents
npm install @strands-agents/sdk

from strands import Agent, tool
from strands.hooks import BeforeToolCallEvent
from pathlib import Path
@tool
def save_report(title: str, content: str) -> str:
    """Save a research report to disk as Markdown.

    Args:
        title: Report name; used as the file stem under ``reports/``.
        content: Full Markdown body of the report.

    Returns:
        Confirmation message containing the saved path.
    """
    reports_dir = Path("reports")
    # Create the output directory up front; Path.write_text does not
    # create missing parents and would raise FileNotFoundError.
    reports_dir.mkdir(parents=True, exist_ok=True)
    path = reports_dir / f"{title}.md"
    path.write_text(content)
    return f"Saved {path}"
def require_sources(event: BeforeToolCallEvent):
    """Reject save_report calls whose input lacks a '[source]' citation marker."""
    if event.tool_use["name"] != "save_report":
        return
    payload = str(event.tool_use["input"])
    if "[source]" not in payload:
        event.cancel_tool = "Add source citations."
agent = Agent(
tools=[save_report],
hooks=[require_sources],
)
agent("Research AI agent frameworks")

from strands import Agent, tool
from strands.hooks import BeforeToolCallEvent
from pathlib import Path
@tool
def save_report(title: str, content: str) -> str:
    """Save a research report to disk as Markdown.

    Args:
        title: Report name; used as the file stem under ``reports/``.
        content: Full Markdown body of the report.

    Returns:
        Confirmation message containing the saved path.
    """
    reports_dir = Path("reports")
    # Create the output directory up front; Path.write_text does not
    # create missing parents and would raise FileNotFoundError.
    reports_dir.mkdir(parents=True, exist_ok=True)
    path = reports_dir / f"{title}.md"
    path.write_text(content)
    return f"Saved {path}"
def require_sources(event: BeforeToolCallEvent):
    """Reject save_report calls whose input lacks a '[source]' citation marker."""
    if event.tool_use["name"] != "save_report":
        return
    payload = str(event.tool_use["input"])
    if "[source]" not in payload:
        event.cancel_tool = "Add source citations."
agent = Agent(
tools=[save_report],
hooks=[require_sources],
)
agent("Research AI agent frameworks") import {
Agent, tool, BeforeToolCallEvent
} from '@strands-agents/sdk'
import z from 'zod'
import { writeFileSync } from 'fs'
// Tool the agent can call to persist a research report as Markdown.
const saveReport = tool({
  name: 'save_report',
  description: 'Save a research report.',
  // Zod schema validates and types the model-supplied arguments.
  inputSchema: z.object({
    title: z.string(),
    content: z.string(),
  }),
  // NOTE(review): assumes the reports/ directory already exists — confirm.
  callback: ({ title, content }) => {
    writeFileSync(`reports/${title}.md`, content)
    return `Saved ${title}.md`
  },
})
// Agent wired with the save tool; the hook below enforces citations.
const agent = new Agent({ tools: [saveReport] })

// Before any tool runs, require '[source]' markers in save_report input.
agent.addHook(BeforeToolCallEvent, (event) => {
  const inp = String(event.toolUse.input)
  if (event.toolUse.name === 'save_report') {
    if (!inp.includes('[source]')) {
      // Cancelling returns this message to the model as feedback.
      event.cancel = 'Add source citations.'
    }
  }
})
await agent.invoke('Research AI agent frameworks') import {
Agent, tool, BeforeToolCallEvent
} from '@strands-agents/sdk'
import z from 'zod'
import { writeFileSync } from 'fs'
// Tool the agent can call to persist a research report as Markdown.
const saveReport = tool({
  name: 'save_report',
  description: 'Save a research report.',
  // Zod schema validates and types the model-supplied arguments.
  inputSchema: z.object({
    title: z.string(),
    content: z.string(),
  }),
  // NOTE(review): assumes the reports/ directory already exists — confirm.
  callback: ({ title, content }) => {
    writeFileSync(`reports/${title}.md`, content)
    return `Saved ${title}.md`
  },
})
// Agent wired with the save tool; the hook below enforces citations.
const agent = new Agent({ tools: [saveReport] })

// Before any tool runs, require '[source]' markers in save_report input.
agent.addHook(BeforeToolCallEvent, (event) => {
  const inp = String(event.toolUse.input)
  if (event.toolUse.name === 'save_report') {
    if (!inp.includes('[source]')) {
      // Cancelling returns this message to the model as feedback.
      event.cancel = 'Add source citations.'
    }
  }
})
await agent.invoke('Research AI agent frameworks')

Build your way
Any model, any cloud. You get context management, execution limits, and observability before you write a line of config. Swap backends when you scale. Your code stays the same.
from strands import Agent, tool
@tool
def search_logs(query: str, hours: int = 24) -> list:
    """Search application logs for entries matching a keyword.

    Args:
        query: Keyword to search for.
        hours: Look-back window in hours (default 24).

    Returns:
        Matching log entries from the log API.
    """
    results = log_api.search(query, hours)
    return results
agent = Agent(
tools=[search_logs],
)
agent("Find all timeout errors from the last 6 hours") from strands import Agent, tool
@tool
def search_logs(query: str, hours: int = 24) -> list:
    """Search application logs for entries matching a keyword.

    Args:
        query: Keyword to search for.
        hours: Look-back window in hours (default 24).

    Returns:
        Matching log entries from the log API.
    """
    results = log_api.search(query, hours)
    return results
agent = Agent(
tools=[search_logs],
)
agent("Find all timeout errors from the last 6 hours") import { Agent, tool } from '@strands-agents/sdk'
import z from 'zod'
const searchLogs = tool({
name: 'search_logs',
description: 'Search logs by keyword.',
inputSchema: z.object({
query: z.string(),
hours: z.number().default(24),
}),
callback: ({ query, hours }) =>
logApi.search(query, hours),
})
const agent = new Agent({ tools: [searchLogs] })
await agent.invoke(
'Find all timeout errors from the last 6 hours'
) import { Agent, tool } from '@strands-agents/sdk'
import z from 'zod'
const searchLogs = tool({
name: 'search_logs',
description: 'Search logs by keyword.',
inputSchema: z.object({
query: z.string(),
hours: z.number().default(24),
}),
callback: ({ query, hours }) =>
logApi.search(query, hours),
})
const agent = new Agent({ tools: [searchLogs] })
await agent.invoke(
'Find all timeout errors from the last 6 hours'
) Progressive complexity. Zero lock-in.
from strands.agent import SummarizingConversationManager
# Same agent, now with summarization.
agent = Agent(
tools=[search_logs],
conversation_manager=SummarizingConversationManager(),
) from strands.agent import SummarizingConversationManager
# Same agent, now with summarization.
agent = Agent(
tools=[search_logs],
conversation_manager=SummarizingConversationManager(),
) import {
SummarizingConversationManager,
} from '@strands-agents/sdk'
// Same agent, now with summarization.
const agent = new Agent({
tools: [searchLogs],
conversationManager:
new SummarizingConversationManager(),
}) import {
SummarizingConversationManager,
} from '@strands-agents/sdk'
// Same agent, now with summarization.
const agent = new Agent({
tools: [searchLogs],
conversationManager:
new SummarizingConversationManager(),
}) Stay in control
Monitor, modify, and debug with hooks. The agent loop traces every decision by default. Hooks let you intercept any step to log it, validate it, or redirect it.
from strands import Agent
from strands.hooks import AfterToolCallEvent
def log_tool_calls(event: AfterToolCallEvent):
"""Log every tool call."""
print(f"Tool: {event.tool_use['name']}")
print(f"Result: {event.result['status']}")
agent = Agent(
tools=[search_logs, query_database],
hooks=[log_tool_calls],
trace_attributes={
"service": "ops-agent",
"env": "production",
},
) from strands import Agent
from strands.hooks import AfterToolCallEvent
def log_tool_calls(event: AfterToolCallEvent):
"""Log every tool call."""
print(f"Tool: {event.tool_use['name']}")
print(f"Result: {event.result['status']}")
agent = Agent(
tools=[search_logs, query_database],
hooks=[log_tool_calls],
trace_attributes={
"service": "ops-agent",
"env": "production",
},
) import {
Agent, AfterToolCallEvent,
} from '@strands-agents/sdk'
const agent = new Agent({
tools: [searchLogs, queryDatabase],
traceAttributes: {
service: 'ops-agent',
env: 'production',
},
})
agent.addHook(AfterToolCallEvent, (event) => {
console.log(`Tool: ${event.toolUse.name}`)
console.log(`Status: ${event.result.status}`)
}) import {
Agent, AfterToolCallEvent,
} from '@strands-agents/sdk'
const agent = new Agent({
tools: [searchLogs, queryDatabase],
traceAttributes: {
service: 'ops-agent',
env: 'production',
},
})
agent.addHook(AfterToolCallEvent, (event) => {
console.log(`Tool: ${event.toolUse.name}`)
console.log(`Status: ${event.result.status}`)
}) Built-in observability.
Deliver outcomes that work
Guardrails catch mistakes before they run.
from strands import Agent
from strands.hooks import BeforeToolCallEvent
WRITE_OPS = ["INSERT", "UPDATE", "DELETE", "DROP"]

def read_only_guard(event: BeforeToolCallEvent):
    """Cancel any query_database call whose SQL contains a write keyword."""
    if event.tool_use["name"] != "query_database":
        return
    # Upper-case once so keyword matching is case-insensitive.
    sql = event.tool_use["input"].get("query", "").upper()
    for keyword in WRITE_OPS:
        if keyword in sql:
            event.cancel_tool = "Read-only access."
            break
agent = Agent(
tools=[query_database],
hooks=[read_only_guard],
) from strands import Agent
from strands.hooks import BeforeToolCallEvent
WRITE_OPS = ["INSERT", "UPDATE", "DELETE", "DROP"]

def read_only_guard(event: BeforeToolCallEvent):
    """Cancel any query_database call whose SQL contains a write keyword."""
    if event.tool_use["name"] != "query_database":
        return
    # Upper-case once so keyword matching is case-insensitive.
    sql = event.tool_use["input"].get("query", "").upper()
    for keyword in WRITE_OPS:
        if keyword in sql:
            event.cancel_tool = "Read-only access."
            break
agent = Agent(
tools=[query_database],
hooks=[read_only_guard],
) Then the harness gives specific feedback: "add a WHERE clause," "check permissions first." The agent corrects itself. You get reliable outcomes without micromanaging every step.
from strands.vended_plugins.steering import (
SteeringHandler, Guide, Proceed,
)
class QueryQualityPolicy(SteeringHandler):
    """Steer the agent toward well-scoped, maintainable SQL queries."""

    async def steer_before_tool(
        self, *, agent, tool_use, **kwargs
    ):
        """Review a pending query_database call and guide or approve it.

        Args:
            agent: The agent issuing the tool call.
            tool_use: Tool-call payload; the SQL lives in input["query"].

        Returns:
            Guide with corrective feedback when the query needs work,
            otherwise Proceed.
        """
        # Upper-case once; every keyword check below is case-insensitive.
        sql = tool_use["input"].get("query", "").upper()
        if "SELECT" in sql and "WHERE" not in sql:
            return Guide(
                reason="Add a WHERE clause and LIMIT."
            )
        # sql is already upper-cased — the original's second .upper() here
        # was redundant.
        if sql.count("JOIN") > 3:
            return Guide(
                reason="4+ joins. Break into smaller queries."
            )
        return Proceed(reason="Query looks good.")
agent = Agent(
tools=[query_database],
plugins=[QueryQualityPolicy()],
) from strands.vended_plugins.steering import (
SteeringHandler, Guide, Proceed,
)
class QueryQualityPolicy(SteeringHandler):
    """Steer the agent toward well-scoped, maintainable SQL queries."""

    async def steer_before_tool(
        self, *, agent, tool_use, **kwargs
    ):
        """Review a pending query_database call and guide or approve it.

        Args:
            agent: The agent issuing the tool call.
            tool_use: Tool-call payload; the SQL lives in input["query"].

        Returns:
            Guide with corrective feedback when the query needs work,
            otherwise Proceed.
        """
        # Upper-case once; every keyword check below is case-insensitive.
        sql = tool_use["input"].get("query", "").upper()
        if "SELECT" in sql and "WHERE" not in sql:
            return Guide(
                reason="Add a WHERE clause and LIMIT."
            )
        # sql is already upper-cased — the original's second .upper() here
        # was redundant.
        if sql.count("JOIN") > 3:
            return Guide(
                reason="4+ joins. Break into smaller queries."
            )
        return Proceed(reason="Query looks good.")
agent = Agent(
tools=[query_database],
plugins=[QueryQualityPolicy()],
) Hard-coded workflows scored 80.8%.
Agents with Strands steering handlers recovered from every mistake.
At Smartsheet, we chose Strands for our next generation of AI capabilities because it provided the perfect balance of enterprise-ready features and development efficiency. Its robust conversation memory and dynamic tool registration systems were crucial for creating a responsive, context-aware intelligent AI assistant. With Strands, we were able to quickly implement a secure and scalable solution, giving us a production-ready foundation to deliver a secure, high-performance, and enterprise-grade AI experience.
Transform traditional error alerts into intelligent incident responses using Amazon Bedrock, RAG with Amazon OpenSearch, Multi-Agent Orchestration with Strands SDK, and Kiro AI IDE - reducing MTTR by 60% without manual coding.
Strands’ SDK and great integration with AWS native services streamlined Landchecker’s development of agents. With easier integration of AgentCore Runtime, Bedrock Guardrails, and built-in support for OpenTelemetry, we could focus on what we do best – developing property information tools and data integrations.
At Swisscom, we need an agentic AI backbone that is both enterprise-ready and future-proof. Strands Agents gives us the best of both worlds: a native fit with our cloud environment, yet fully open source and flexible. That combination allowed us to build proof-of-concepts within just a few weeks and now sets us on the path to scale multi-agent systems with confidence, while keeping our focus on delivering real value to customers and the business.
The advisor is where things get interesting. We use the Strands Agents SDK to define an agent with a tool, a function the model can call during its reasoning loop.
We chose Strands because it’s AWS-native, intuitive, and made agent development accessible across our engineering team. Its abstraction layer and built-in multi-agent patterns (like Agent-as-Tool and Swarm) let us focus on remediation logic instead of infrastructure work. We’ve already built multiple agents, and wiring them together has been seamless. On top of that, we layered our Agentic Remediation™ capability to automate vulnerability fixes and configuration validation/fault correction workflows, coordinating cross-agent remediation with precision
Scaling our global trading platform required reimagining our support capabilities, and Strands Agents was the key to making it happen at enterprise scale. What would traditionally take months of development, Strands allowed us to achieve in just 10 days - delivering a secure, robust, production-ready agentic solution. The results speak for themselves: investigation time dropped on average from 30 minutes to 45 seconds, investigation quality improved by 94%, and we saved $5M in operational costs. Strands didn’t just accelerate our development - it gave us the confidence to explore other agentic AI use cases across our entire business, including launching our Agentic Security Operations Center
Adding bidirectional voice to my existing Strands agent was surprisingly straightforward. BidiAgent handles the WebSocket complexity and interruption logic, my @tool functions carried over unchanged, and the same code deploys to AgentCore without modification. Strands made real-time voice feel like a natural extension, not a separate project.
We see Strands as a great fit to power TeamForm’s next evolution of Agentic AI. Our customers need enterprise-grade security and scalability, which is exactly what Strands delivers. Its seamless integration with AWS and simplicity enables us to focus on innovating our AI capabilities and delivering value to our customers.
For Jit’s infrastructure drift detection agent, we leverage Strands Agents, an open-source framework developed by AWS for building production-ready AI agents. Strands Agents provides several advantages including simplified development, native AWS integration, and built-in security.
As someone who builds agents with LangGraph daily at work, Strands was a genuine surprise. The model-driven approach cut my setup from 40 lines to 3 — and for the 80% case, it just works without sacrificing flexibility.
Strands Agents on Bedrock turns autonomous agents into an enterprise product: governed, observable, and safe by design. Together with Claude models, we analyze live webpages and generate code responsibly - helping customers reduce risk while accelerating delivery. Safety is non-negotiable in offensive security. On Amazon Bedrock, Strands Agents plus Claude let us scale autonomous pen-testing with Bedrock Guardrails - increasing coverage without increasing risk.
The combination of the Strands Agents SDK and Tavily represents a significant advancement in enterprise-grade research agent development. This integration can help organizations build sophisticated, secure, and scalable AI agents while maintaining the highest standards of security and performance. Learn more in this blog.
Strands was used to build a growing set of agents that run a company to do actual tasks.
At Smartsheet, we chose Strands for our next generation of AI capabilities because it provided the perfect balance of enterprise-ready features and development efficiency. Its robust conversation memory and dynamic tool registration systems were crucial for creating a responsive, context-aware intelligent AI assistant. With Strands, we were able to quickly implement a secure and scalable solution, giving us a production-ready foundation to deliver a secure, high-performance, and enterprise-grade AI experience.
Transform traditional error alerts into intelligent incident responses using Amazon Bedrock, RAG with Amazon OpenSearch, Multi-Agent Orchestration with Strands SDK, and Kiro AI IDE - reducing MTTR by 60% without manual coding.
Strands’ SDK and great integration with AWS native services streamlined Landchecker’s development of agents. With easier integration of AgentCore Runtime, Bedrock Guardrails, and built-in support for OpenTelemetry, we could focus on what we do best – developing property information tools and data integrations.
At Swisscom, we need an agentic AI backbone that is both enterprise-ready and future-proof. Strands Agents gives us the best of both worlds: a native fit with our cloud environment, yet fully open source and flexible. That combination allowed us to build proof-of-concepts within just a few weeks and now sets us on the path to scale multi-agent systems with confidence, while keeping our focus on delivering real value to customers and the business.
The advisor is where things get interesting. We use the Strands Agents SDK to define an agent with a tool, a function the model can call during its reasoning loop.
We chose Strands because it’s AWS-native, intuitive, and made agent development accessible across our engineering team. Its abstraction layer and built-in multi-agent patterns (like Agent-as-Tool and Swarm) let us focus on remediation logic instead of infrastructure work. We’ve already built multiple agents, and wiring them together has been seamless. On top of that, we layered our Agentic Remediation™ capability to automate vulnerability fixes and configuration validation/fault correction workflows, coordinating cross-agent remediation with precision
Scaling our global trading platform required reimagining our support capabilities, and Strands Agents was the key to making it happen at enterprise scale. What would traditionally take months of development, Strands allowed us to achieve in just 10 days - delivering a secure, robust, production-ready agentic solution. The results speak for themselves: investigation time dropped on average from 30 minutes to 45 seconds, investigation quality improved by 94%, and we saved $5M in operational costs. Strands didn’t just accelerate our development - it gave us the confidence to explore other agentic AI use cases across our entire business, including launching our Agentic Security Operations Center
Adding bidirectional voice to my existing Strands agent was surprisingly straightforward. BidiAgent handles the WebSocket complexity and interruption logic, my @tool functions carried over unchanged, and the same code deploys to AgentCore without modification. Strands made real-time voice feel like a natural extension, not a separate project.
We see Strands as a great fit to power TeamForm’s next evolution of Agentic AI. Our customers need enterprise-grade security and scalability, which is exactly what Strands delivers. Its seamless integration with AWS and simplicity enables us to focus on innovating our AI capabilities and delivering value to our customers.
For Jit’s infrastructure drift detection agent, we leverage Strands Agents, an open-source framework developed by AWS for building production-ready AI agents. Strands Agents provides several advantages including simplified development, native AWS integration, and built-in security.
As someone who builds agents with LangGraph daily at work, Strands was a genuine surprise. The model-driven approach cut my setup from 40 lines to 3 — and for the 80% case, it just works without sacrificing flexibility.
Strands Agents on Bedrock turns autonomous agents into an enterprise product: governed, observable, and safe by design. Together with Claude models, we analyze live webpages and generate code responsibly - helping customers reduce risk while accelerating delivery. Safety is non-negotiable in offensive security. On Amazon Bedrock, Strands Agents plus Claude let us scale autonomous pen-testing with Bedrock Guardrails - increasing coverage without increasing risk.
The combination of the Strands Agents SDK and Tavily represents a significant advancement in enterprise-grade research agent development. This integration can help organizations build sophisticated, secure, and scalable AI agents while maintaining the highest standards of security and performance. Learn more in this blog.
Strands was used to build a growing set of agents that run a company to do actual tasks.
Automate workflows
Classify, score, and route. One agent, one job. Replace brittle scripts with tools that adapt when your process changes.
from strands import Agent, tool
@tool
def classify_lead(email: str, company: str) -> dict:
    """Look up a company in the CRM and return its lead score and segment.

    Args:
        email: Contact email of the inbound lead.
        company: Company name used for the CRM firmographics lookup.

    Returns:
        Dict with an ICP "score" and an industry "segment".
    """
    profile = crm.lookup(company)
    score = compute_icp_score(profile)
    segment = profile["industry"]
    return {"score": score, "segment": segment}
@tool
def route_to_rep(lead_id: str, region: str) -> str:
    """Assign a lead to the sales rep covering the given region.

    Args:
        lead_id: CRM identifier of the lead to assign.
        region: Sales region used to pick the rep.

    Returns:
        Confirmation message naming the assigned rep.
    """
    assignee = crm.get_rep_for_region(region)
    crm.assign(lead_id, assignee)
    return f"Assigned to {assignee}"
agent = Agent(
tools=[classify_lead, route_to_rep],
)
agent("New lead: jane@acme.com, Acme Corp, US-West") from strands import Agent, tool
@tool
def classify_lead(email: str, company: str) -> dict:
    """Look up a company in the CRM and return its lead score and segment.

    Args:
        email: Contact email of the inbound lead.
        company: Company name used for the CRM firmographics lookup.

    Returns:
        Dict with an ICP "score" and an industry "segment".
    """
    profile = crm.lookup(company)
    score = compute_icp_score(profile)
    segment = profile["industry"]
    return {"score": score, "segment": segment}
@tool
def route_to_rep(lead_id: str, region: str) -> str:
    """Assign a lead to the sales rep covering the given region.

    Args:
        lead_id: CRM identifier of the lead to assign.
        region: Sales region used to pick the rep.

    Returns:
        Confirmation message naming the assigned rep.
    """
    assignee = crm.get_rep_for_region(region)
    crm.assign(lead_id, assignee)
    return f"Assigned to {assignee}"
agent = Agent(
tools=[classify_lead, route_to_rep],
)
agent("New lead: jane@acme.com, Acme Corp, US-West") import { Agent, tool } from '@strands-agents/sdk'
import z from 'zod'
const classifyLead = tool({
name: 'classify_lead',
description: 'Score and classify a lead.',
inputSchema: z.object({
email: z.string(),
company: z.string(),
}),
callback: ({ email, company }) => {
const data = crm.lookup(company)
return {
score: computeIcpScore(data),
segment: data.industry,
}
},
})
const routeToRep = tool({
name: 'route_to_rep',
description: 'Assign a lead to a rep.',
inputSchema: z.object({
leadId: z.string(),
region: z.string(),
}),
callback: ({ leadId, region }) => {
const rep = crm.getRepForRegion(region)
crm.assign(leadId, rep)
return `Assigned to ${rep}`
},
})
const agent = new Agent({
tools: [classifyLead, routeToRep],
})
await agent.invoke(
'New lead: jane@acme.com, Acme Corp, US-West'
) import { Agent, tool } from '@strands-agents/sdk'
import z from 'zod'
const classifyLead = tool({
name: 'classify_lead',
description: 'Score and classify a lead.',
inputSchema: z.object({
email: z.string(),
company: z.string(),
}),
callback: ({ email, company }) => {
const data = crm.lookup(company)
return {
score: computeIcpScore(data),
segment: data.industry,
}
},
})
const routeToRep = tool({
name: 'route_to_rep',
description: 'Assign a lead to a rep.',
inputSchema: z.object({
leadId: z.string(),
region: z.string(),
}),
callback: ({ leadId, region }) => {
const rep = crm.getRepForRegion(region)
crm.assign(leadId, rep)
return `Assigned to ${rep}`
},
})
const agent = new Agent({
tools: [classifyLead, routeToRep],
})
await agent.invoke(
'New lead: jane@acme.com, Acme Corp, US-West'
) Build AI assistants
Ground agents in your knowledge base via MCP. Context management keeps long conversations in bounds. Interrupts pause for human approval before sensitive actions.
from strands import Agent, tool
from strands.tools.mcp import MCPClient
from strands.hooks import BeforeToolCallEvent
from strands.agent import SlidingWindowConversationManager
from mcp import stdio_client, StdioServerParameters
kb = MCPClient(lambda: stdio_client(
StdioServerParameters(command="uvx", args=["kb-server"])
))
@tool
def issue_refund(order_id: str, amount: float) -> str:
    """Refund the given amount against an order via the payments API."""
    result = payments.refund(order_id, amount)
    return result
def approve_refunds(event: BeforeToolCallEvent):
    """Gate issue_refund behind an explicit human-approval interrupt."""
    if event.tool_use["name"] != "issue_refund":
        return
    # Suspend the agent until a human responds to the interrupt.
    decision = event.interrupt(
        "refund_approval", reason=event.tool_use["input"]
    )
    if decision != "APPROVE":
        event.cancel_tool = "Refund not approved."
agent = Agent(
system_prompt="Support assistant. Use the KB. "
"Refunds require approval.",
tools=[kb, issue_refund],
hooks=[approve_refunds],
conversation_manager=SlidingWindowConversationManager(
window_size=20
),
) from strands import Agent, tool
from strands.tools.mcp import MCPClient
from strands.hooks import BeforeToolCallEvent
from strands.agent import SlidingWindowConversationManager
from mcp import stdio_client, StdioServerParameters
kb = MCPClient(lambda: stdio_client(
StdioServerParameters(command="uvx", args=["kb-server"])
))
@tool
def issue_refund(order_id: str, amount: float) -> str:
    """Refund the given amount against an order via the payments API."""
    result = payments.refund(order_id, amount)
    return result
def approve_refunds(event: BeforeToolCallEvent):
    """Gate issue_refund behind an explicit human-approval interrupt."""
    if event.tool_use["name"] != "issue_refund":
        return
    # Suspend the agent until a human responds to the interrupt.
    decision = event.interrupt(
        "refund_approval", reason=event.tool_use["input"]
    )
    if decision != "APPROVE":
        event.cancel_tool = "Refund not approved."
agent = Agent(
system_prompt="Support assistant. Use the KB. "
"Refunds require approval.",
tools=[kb, issue_refund],
hooks=[approve_refunds],
conversation_manager=SlidingWindowConversationManager(
window_size=20
),
) import {
Agent, tool, McpClient,
BeforeToolCallEvent,
SlidingWindowConversationManager,
} from '@strands-agents/sdk'
import { StdioClientTransport } from
'@modelcontextprotocol/sdk/client/stdio.js'
import z from 'zod'
const kb = new McpClient({
transport: new StdioClientTransport({
command: 'npx',
args: ['kb-server'],
}),
})
const issueRefund = tool({
name: 'issue_refund',
description: 'Process a refund.',
inputSchema: z.object({
orderId: z.string(),
amount: z.number(),
}),
callback: ({ orderId, amount }) =>
payments.refund(orderId, amount),
})
const agent = new Agent({
systemPrompt: 'Support assistant. '
+ 'Use KB. Refunds require approval.',
tools: [kb, issueRefund],
conversationManager:
new SlidingWindowConversationManager({
windowSize: 20,
}),
})
// Cancel refunds (interrupt coming soon to TS)
agent.addHook(BeforeToolCallEvent, (event) => {
if (event.toolUse.name === 'issue_refund') {
event.cancel = 'Refund approval required.'
}
}) import {
Agent, tool, McpClient,
BeforeToolCallEvent,
SlidingWindowConversationManager,
} from '@strands-agents/sdk'
import { StdioClientTransport } from
'@modelcontextprotocol/sdk/client/stdio.js'
import z from 'zod'
const kb = new McpClient({
transport: new StdioClientTransport({
command: 'npx',
args: ['kb-server'],
}),
})
const issueRefund = tool({
name: 'issue_refund',
description: 'Process a refund.',
inputSchema: z.object({
orderId: z.string(),
amount: z.number(),
}),
callback: ({ orderId, amount }) =>
payments.refund(orderId, amount),
})
const agent = new Agent({
systemPrompt: 'Support assistant. '
+ 'Use KB. Refunds require approval.',
tools: [kb, issueRefund],
conversationManager:
new SlidingWindowConversationManager({
windowSize: 20,
}),
})
// Cancel refunds (interrupt coming soon to TS)
agent.addHook(BeforeToolCallEvent, (event) => {
if (event.toolUse.name === 'issue_refund') {
event.cancel = 'Refund approval required.'
}
}) Build research agents
Wake up to a briefing instead of a to-do list. Structured output keeps results typed and predictable.
from pydantic import BaseModel, Field
from strands import Agent
from strands_tools import http_request
from pathlib import Path
class Briefing(BaseModel):
    """Structured-output schema for a daily research briefing."""

    # One-line summary used as the briefing title.
    headline: str = Field(description="One-line summary")
    # Bullet points covering the key developments found.
    developments: list[str] = Field(
        description="Key developments"
    )
    # URLs the agent consulted while researching.
    sources: list[str] = Field(
        description="URLs consulted"
    )
agent = Agent(
system_prompt="Research assistant. Search the web, "
"find developments from the last 24 hours, "
"and produce a briefing with citations.",
tools=[http_request],
)
result = agent(
"What happened in AI agent frameworks yesterday?",
structured_output_model=Briefing,
)
briefing = result.structured_output
Path("briefings/daily.md").write_text(
f"# {briefing.headline}\n\n"
+ "\n".join(f"- {d}" for d in briefing.developments)
) from pydantic import BaseModel, Field
from strands import Agent
from strands_tools import http_request
from pathlib import Path
class Briefing(BaseModel):
    """Structured-output schema for a daily research briefing."""

    # One-line summary used as the briefing title.
    headline: str = Field(description="One-line summary")
    # Bullet points covering the key developments found.
    developments: list[str] = Field(
        description="Key developments"
    )
    # URLs the agent consulted while researching.
    sources: list[str] = Field(
        description="URLs consulted"
    )
agent = Agent(
system_prompt="Research assistant. Search the web, "
"find developments from the last 24 hours, "
"and produce a briefing with citations.",
tools=[http_request],
)
result = agent(
"What happened in AI agent frameworks yesterday?",
structured_output_model=Briefing,
)
briefing = result.structured_output
Path("briefings/daily.md").write_text(
f"# {briefing.headline}\n\n"
+ "\n".join(f"- {d}" for d in briefing.developments)
) import { Agent } from '@strands-agents/sdk'
import { httpRequest } from '@strands-agents/tools'
import z from 'zod'
import { writeFileSync } from 'fs'
const BriefingSchema = z.object({
headline: z.string().describe('Summary'),
developments: z.array(z.string())
.describe('Key developments'),
sources: z.array(z.string())
.describe('URLs consulted'),
})
const agent = new Agent({
systemPrompt: 'Research assistant. '
+ 'Search the web. Cite sources.',
tools: [httpRequest],
})
const result = await agent.invoke(
'AI agent frameworks: what happened yesterday?',
{ structuredOutputSchema: BriefingSchema },
)
const briefing = result.structuredOutput
writeFileSync('briefings/daily.md',
`# ${briefing.headline}\n\n`
+ briefing.developments
.map((d: string) => `- ${d}`)
.join('\n')
) import { Agent } from '@strands-agents/sdk'
import { httpRequest } from '@strands-agents/tools'
import z from 'zod'
import { writeFileSync } from 'fs'
const BriefingSchema = z.object({
headline: z.string().describe('Summary'),
developments: z.array(z.string())
.describe('Key developments'),
sources: z.array(z.string())
.describe('URLs consulted'),
})
const agent = new Agent({
systemPrompt: 'Research assistant. '
+ 'Search the web. Cite sources.',
tools: [httpRequest],
})
const result = await agent.invoke(
'AI agent frameworks: what happened yesterday?',
{ structuredOutputSchema: BriefingSchema },
)
const briefing = result.structuredOutput
writeFileSync('briefings/daily.md',
`# ${briefing.headline}\n\n`
+ briefing.developments
.map((d: string) => `- ${d}`)
.join('\n')
)