<?xml version="1.0" encoding="UTF-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>AgentsGuardrails Blog</title>
    <link>https://agentsguardrails.com/blog</link>
    <description>AI agent security, guardrails, compliance, and monitoring insights from the AgentsGuardrails team.</description>
    <language>en-us</language>
    <lastBuildDate>Sun, 15 Mar 2026 00:00:00 GMT</lastBuildDate>
    <atom:link href="https://agentsguardrails.com/feed.xml" rel="self" type="application/rss+xml" />

    <item>
      <title>Understanding Prompt Injection Attacks on AI Agents</title>
      <link>https://agentsguardrails.com/ai-threats/prompt-injection</link>
      <description>A comprehensive guide to prompt injection attacks: how they work, real-world examples, detection methods, and prevention strategies for AI agent deployments.</description>
      <pubDate>Sun, 15 Mar 2026 00:00:00 GMT</pubDate>
      <guid>https://agentsguardrails.com/ai-threats/prompt-injection</guid>
    </item>

    <item>
      <title>What Are AI Agent Guardrails? A Complete Guide</title>
      <link>https://agentsguardrails.com/definitions/ai-agent-guardrails</link>
      <description>Learn what AI agent guardrails are, why they matter, and how to implement them. Covers PII detection, budget controls, topic restrictions, and compliance automation.</description>
      <pubDate>Thu, 15 Jan 2026 00:00:00 GMT</pubDate>
      <guid>https://agentsguardrails.com/definitions/ai-agent-guardrails</guid>
    </item>

    <item>
      <title>AI Agent Security: Protecting Autonomous Systems</title>
      <link>https://agentsguardrails.com/definitions/ai-agent-security</link>
      <description>Comprehensive overview of AI agent security challenges, from prompt injection to data exfiltration. Learn how to build secure agent architectures.</description>
      <pubDate>Thu, 15 Jan 2026 00:00:00 GMT</pubDate>
      <guid>https://agentsguardrails.com/definitions/ai-agent-security</guid>
    </item>

    <item>
      <title>Prompt Injection Examples Dataset for Security Research</title>
      <link>https://agentsguardrails.com/datasets/ai-prompt-injection-examples</link>
      <description>Curated dataset of prompt injection techniques, attack patterns, and AI agent security incidents for security testing and guardrail development.</description>
      <pubDate>Thu, 15 Jan 2026 00:00:00 GMT</pubDate>
      <guid>https://agentsguardrails.com/datasets/ai-prompt-injection-examples</guid>
    </item>
  </channel>
</rss>
