<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>InfoQ</title>
    <link>https://www.infoq.com</link>
    <description>InfoQ feed</description>
    <item>
      <title>Article: The Mathematics of Backlogs: Capacity Planning for Queue Recovery</title>
      <link>https://www.infoq.com/articles/capacity-planning-queue-recovery/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/articles/capacity-planning-queue-recovery/en/headerimage/The-Mathematics-of-Backlogs-Capacity-Planning-for-Queue-Recovery-header-1778227922596.jpg"/&gt;&lt;p&gt;Backlogs in distributed systems are arithmetic problems, not mysteries. This article provides practical formulas for calculating backlog drain time, sizing consumer headroom, and setting auto-scaling triggers. It covers key failure modes — retry amplification, metastable states, and cascading pipeline bottlenecks — plus when to shed load instead of draining.&lt;/p&gt; &lt;i&gt;By Rajesh Kumar Pandey&lt;/i&gt;</description>
      <category>Queue</category>
      <category>Load Testing</category>
      <category>Failure</category>
      <category>Capacity Planning</category>
      <category>DevOps</category>
      <category>Architecture &amp; Design</category>
      <category>article</category>
      <pubDate>Wed, 13 May 2026 09:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/articles/capacity-planning-queue-recovery/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Rajesh Kumar Pandey</dc:creator>
      <dc:date>2026-05-13T09:00:00Z</dc:date>
      <dc:identifier>/articles/capacity-planning-queue-recovery/en</dc:identifier>
    </item>
    <item>
      <title>Grafana's Pyroscope 2.0 Makes Continuous Profiling Practical at Scale</title>
      <link>https://www.infoq.com/news/2026/05/pyroscope-2-profiling/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/pyroscope-2-profiling/en/headerimage/generatedHeaderImage-1778539051167.jpg"/&gt;&lt;p&gt;Grafana Labs has launched Pyroscope 2.0, a rearchitected open-source continuous profiling database. This version improves storage costs, query performance, and operational complexity. Key changes include single write paths for profiles, stateless query processing, and enhanced capabilities for profiling data. It supports the OpenTelemetry Protocol, aligning with current trends in observability.&lt;/p&gt; &lt;i&gt;By Matt Saunders&lt;/i&gt;</description>
      <category>Observability</category>
      <category>Grafana</category>
      <category>DevOps</category>
      <category>news</category>
      <pubDate>Wed, 13 May 2026 08:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/pyroscope-2-profiling/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Matt Saunders</dc:creator>
      <dc:date>2026-05-13T08:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/pyroscope-2-profiling/en</dc:identifier>
    </item>
    <item>
      <title>AWS WorkSpaces Now Lets AI Agents Operate Legacy Desktop Applications Without APIs</title>
      <link>https://www.infoq.com/news/2026/05/aws-workspaces-ai-agents/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/aws-workspaces-ai-agents/en/headerimage/generatedHeaderImage-1778485554177.jpg"/&gt;&lt;p&gt;AWS announced that Amazon WorkSpaces can now serve as managed virtual desktops for AI agents in public preview. Agents authenticate through IAM and operate legacy applications via computer vision and input simulation without APIs. Reflex benchmarks show vision agents consume 45x more tokens than API agents.&lt;/p&gt; &lt;i&gt;By Steef-Jan Wiggers&lt;/i&gt;</description>
      <category>Automation</category>
      <category>AWS</category>
      <category>Cloud Architecture</category>
      <category>Agents</category>
      <category>AI Architecture</category>
      <category>Amazon</category>
      <category>Cloud</category>
      <category>Amazon Web Services</category>
      <category>DevOps</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>Development</category>
      <category>Architecture &amp; Design</category>
      <category>news</category>
      <pubDate>Wed, 13 May 2026 07:31:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/aws-workspaces-ai-agents/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Steef-Jan Wiggers</dc:creator>
      <dc:date>2026-05-13T07:31:00Z</dc:date>
      <dc:identifier>/news/2026/05/aws-workspaces-ai-agents/en</dc:identifier>
    </item>
    <item>
      <title>Presentation: Beyond Coding: How Senior ICs Grow Influence and Drive Impact</title>
      <link>https://www.infoq.com/presentations/lessons-building-engineering-team/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/presentations/lessons-building-engineering-team/en/mediumimage/medium-1778064119173.jpg"/&gt;&lt;p&gt;Netflix’s Kasia Trapszo discusses the transition from writing code to scaling organizations. She shares lessons on building trust through technical clarity, aligning teams to solve the "right" problems, and using intentional documentation to scale your judgment. Learn how to move beyond individual output to create a lasting architectural legacy that empowers others to make better decisions.&lt;/p&gt; &lt;i&gt;By Kasia Trapszo&lt;/i&gt;</description>
      <category>Team Leader</category>
      <category>Transcripts</category>
      <category>Staff Plus</category>
      <category>Team Performance</category>
      <category>QCon San Francisco 2025</category>
      <category>Culture &amp; Methods</category>
      <category>presentation</category>
      <pubDate>Tue, 12 May 2026 13:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/presentations/lessons-building-engineering-team/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Kasia Trapszo</dc:creator>
      <dc:date>2026-05-12T13:00:00Z</dc:date>
      <dc:identifier>/presentations/lessons-building-engineering-team/en</dc:identifier>
    </item>
    <item>
      <title>GitHub Expands Secret Scanning with General Availability of MCP Server Integration</title>
      <link>https://www.infoq.com/news/2026/05/github-mcp-secret-scanning/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/github-mcp-secret-scanning/en/headerimage/generatedHeaderImage-1778422946373.jpg"/&gt;&lt;p&gt;GitHub has announced the general availability of secret scanning support through its MCP Server, extending automated credential detection and remediation capabilities into AI-assisted and agent-driven development workflows.&lt;/p&gt; &lt;i&gt;By Craig Risi&lt;/i&gt;</description>
      <category>github</category>
      <category>Model Context Protocol (MCP)</category>
      <category>Application Security</category>
      <category>DevOps</category>
      <category>news</category>
      <pubDate>Tue, 12 May 2026 12:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/github-mcp-secret-scanning/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Craig Risi</dc:creator>
      <dc:date>2026-05-12T12:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/github-mcp-secret-scanning/en</dc:identifier>
    </item>
    <item>
      <title>AdonisJS v7 Ships End-to-End Type Safety, Reworked Starter Kits and Zero-Config OpenTelemetry</title>
      <link>https://www.infoq.com/news/2026/05/adonis-v7-opentelemetry/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/adonis-v7-opentelemetry/en/headerimage/generatedHeaderImage-1778574705638.jpg"/&gt;&lt;p&gt;AdonisJS version 7 introduces end-to-end type safety and reworked starter kits, alongside improved documentation. The release includes 45+ updated packages and three new ones, covering OpenTelemetry and typed content. It requires Node.js 24, allowing the use of native APIs. The framework emphasizes a convention-over-configuration approach while offering tools for routing, ORM, and authentication.&lt;/p&gt; &lt;i&gt;By Daniel Curtis&lt;/i&gt;</description>
      <category>Web Development</category>
      <category>Node.js</category>
      <category>TypeScript</category>
      <category>OpenTelemetry</category>
      <category>Development</category>
      <category>news</category>
      <pubDate>Tue, 12 May 2026 11:26:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/adonis-v7-opentelemetry/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Daniel Curtis</dc:creator>
      <dc:date>2026-05-12T11:26:00Z</dc:date>
      <dc:identifier>/news/2026/05/adonis-v7-opentelemetry/en</dc:identifier>
    </item>
    <item>
      <title>Article: Time-Series Storage: Design Choices That Shape Cost and Performance</title>
      <link>https://www.infoq.com/articles/time-series-storage-design/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/articles/time-series-storage-design/en/headerimage/Time-Series-Storage-Design-Choices-That-Shape-Cost-and-Performance-header-1778155792101.jpg"/&gt;&lt;p&gt;Every time-series database makes a set of storage design decisions: how to lay out rows, when to compress, what to partition on. These decisions determine cost and query performance more than the choice of database itself. This article works through those fundamentals from first principles, using widely available tools like PostgreSQL and Apache Parquet to make each trade-off measurable.&lt;/p&gt; &lt;i&gt;By Nirmesh Khandelwal&lt;/i&gt;</description>
      <category>Big Data</category>
      <category>Time Series Data</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>article</category>
      <pubDate>Tue, 12 May 2026 09:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/articles/time-series-storage-design/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Nirmesh Khandelwal</dc:creator>
      <dc:date>2026-05-12T09:00:00Z</dc:date>
      <dc:identifier>/articles/time-series-storage-design/en</dc:identifier>
    </item>
    <item>
      <title>Copy Fail and Dirty Frag: Linux Page-Cache Exploits Target Every Major Distribution</title>
      <link>https://www.infoq.com/news/2026/05/copy-fail-dirty-frag-linux/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/copy-fail-dirty-frag-linux/en/headerimage/generatedHeaderImage-1778536323907.jpg"/&gt;&lt;p&gt;Two recent Linux kernel vulnerabilities have been disclosed: Copy Fail (CVE-2026-31431) on April 29, 2026, and Dirty Frag (CVE-2026-43284 and CVE-2026-43500) on May 7, 2026. Both allow local users to gain root access, affecting multiple Linux distributions. These vulnerabilities exploit flaws in the page cache via different subsystems, necessitating immediate patching by affected organizations.&lt;/p&gt; &lt;i&gt;By Matt Saunders&lt;/i&gt;</description>
      <category>Linux</category>
      <category>Security Vulnerabilities</category>
      <category>DevOps</category>
      <category>news</category>
      <pubDate>Tue, 12 May 2026 08:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/copy-fail-dirty-frag-linux/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Matt Saunders</dc:creator>
      <dc:date>2026-05-12T08:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/copy-fail-dirty-frag-linux/en</dc:identifier>
    </item>
    <item>
      <title>Cangjie, a New Open-Source Compiled Language with Native Effect Handlers and Algebraic Data Types</title>
      <link>https://www.infoq.com/news/2026/05/cangjie-effect-handlers-adt/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/cangjie-effect-handlers-adt/en/headerimage/generatedHeaderImage-1778450390348.jpg"/&gt;&lt;p&gt;Prof. Dan Ghica, who leads the Programming Languages Lab at Huawei’s Edinburgh Research Centre, recently presented Cangjie (CJ), a new application development language that features algebraic data types and effect handlers. The open-sourced language is positioned as a counterpart to Java, Kotlin, or Swift. Cangjie is taught by 80+ universities in China.&lt;/p&gt; &lt;i&gt;By Bruno Couriol&lt;/i&gt;</description>
      <category>Web Development</category>
      <category>Programming Languages</category>
      <category>TypeScript</category>
      <category>Rust</category>
      <category>Development</category>
      <category>news</category>
      <pubDate>Mon, 11 May 2026 21:56:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/cangjie-effect-handlers-adt/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Bruno Couriol</dc:creator>
      <dc:date>2026-05-11T21:56:00Z</dc:date>
      <dc:identifier>/news/2026/05/cangjie-effect-handlers-adt/en</dc:identifier>
    </item>
    <item>
      <title>Coder Agents Enable Running AI Coding Workflows on Self-Hosted Infrastructure</title>
      <link>https://www.infoq.com/news/2026/05/coder-agents-self-hosted-ai/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/coder-agents-self-hosted-ai/en/headerimage/coder-agents-self-hosted-ai-1778516884639.jpeg"/&gt;&lt;p&gt;Coder Agents is a model-agnostic platform designed to let organizations run AI coding agents on their own infrastructure, rather than relying on cloud-based services. This allows teams to maintain full control over code, data, and execution environments.&lt;/p&gt; &lt;i&gt;By Sergio De Simone&lt;/i&gt;</description>
      <category>Large language models</category>
      <category>Agents</category>
      <category>AI Coding</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>news</category>
      <pubDate>Mon, 11 May 2026 17:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/coder-agents-self-hosted-ai/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Sergio De Simone</dc:creator>
      <dc:date>2026-05-11T17:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/coder-agents-self-hosted-ai/en</dc:identifier>
    </item>
    <item>
      <title>Netflix Serves 84% of Query Results from Cache with Interval-Aware Caching in Apache Druid</title>
      <link>https://www.infoq.com/news/2026/05/netflix-druid-interval-cache/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/netflix-druid-interval-cache/en/headerimage/generatedHeaderImage-1777092326529.jpg"/&gt;&lt;p&gt;Netflix improves Apache Druid performance with interval-aware caching, serving 84% of analytics results from cache and reducing query load by 33%. The system decomposes rolling window queries into reusable time segments, enabling partial cache reuse and recomputation only for recent data. At scale, it reduces scan volume, improves P90 latency, and optimizes real-time analytics workloads.&lt;/p&gt; &lt;i&gt;By Leela Kumili&lt;/i&gt;</description>
      <category>Data Analytics</category>
      <category>Observability</category>
      <category>Caching</category>
      <category>Distributed Systems</category>
      <category>Optimization</category>
      <category>Apache</category>
      <category>Time Series Data</category>
      <category>Development</category>
      <category>Architecture &amp; Design</category>
      <category>news</category>
      <pubDate>Mon, 11 May 2026 14:36:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/netflix-druid-interval-cache/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Leela Kumili</dc:creator>
      <dc:date>2026-05-11T14:36:00Z</dc:date>
      <dc:identifier>/news/2026/05/netflix-druid-interval-cache/en</dc:identifier>
    </item>
    <item>
      <title>Presentation: Evolution of a Backend for a Streaming Application</title>
      <link>https://www.infoq.com/presentations/streaming-application-aws-infrastructure/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/presentations/streaming-application-aws-infrastructure/en/mediumimage/medium-1778061840987.jpg"/&gt;&lt;p&gt;Daniele Frasca explains the architectural evolution of Joyn, a German streaming giant. He discusses moving from fragile single-node setups to resilient serverless architectures using AWS. He shares insights on the Hub and Spoke pattern for data consistency, cell-based isolation to reduce blast radius, and cost-optimization strategies for achieving affordable multi-region active-active setups.&lt;/p&gt; &lt;i&gt;By Daniele Frasca&lt;/i&gt;</description>
      <category>Case Study</category>
      <category>AWS</category>
      <category>Transcripts</category>
      <category>Cloud</category>
      <category>Infrastructure</category>
      <category>InfoQ Dev Summit Munich 2025</category>
      <category>DevOps</category>
      <category>Architecture &amp; Design</category>
      <category>presentation</category>
      <pubDate>Mon, 11 May 2026 11:45:00 GMT</pubDate>
      <guid>https://www.infoq.com/presentations/streaming-application-aws-infrastructure/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Daniele Frasca</dc:creator>
      <dc:date>2026-05-11T11:45:00Z</dc:date>
      <dc:identifier>/presentations/streaming-application-aws-infrastructure/en</dc:identifier>
    </item>
    <item>
      <title>Article: Local-First AI Inference: A Cloud Architecture Pattern for Cost-Effective Document Processing</title>
      <link>https://www.infoq.com/articles/local-first-ai-inference-cloud/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/articles/local-first-ai-inference-cloud/en/headerimage/Local-First-AI-Inference-A-Cloud-Architecture-Pattern-for-Cost-Effective-Document-Processing-header-1778141518292.jpg"/&gt;&lt;p&gt;The Local-First AI Inference pattern routes 70–80% of documents to deterministic local extraction at zero API cost, reserving Azure OpenAI calls for edge cases and flagging low-confidence results for human review. Deployed on 4,700 engineering drawing PDFs, it cut API costs by 75% and processing time by 55%, while bounding errors through a human review tier.&lt;/p&gt; &lt;i&gt;By Obinna Iheanachor&lt;/i&gt;</description>
      <category>GPT-4</category>
      <category>Microsoft Azure</category>
      <category>Generative AI</category>
      <category>Model Inference</category>
      <category>Observability</category>
      <category>Azure</category>
      <category>Artificial Intelligence</category>
      <category>Cloud</category>
      <category>Cost Optimization</category>
      <category>DevOps</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>Development</category>
      <category>article</category>
      <pubDate>Mon, 11 May 2026 11:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/articles/local-first-ai-inference-cloud/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Obinna Iheanachor</dc:creator>
      <dc:date>2026-05-11T11:00:00Z</dc:date>
      <dc:identifier>/articles/local-first-ai-inference-cloud/en</dc:identifier>
    </item>
    <item>
      <title>Podcast: From Java EE to Quarkus and LLMs: Adam Bien’s Playbook for Boring, Future‑Proof Systems</title>
      <link>https://www.infoq.com/podcasts/java-ee-quarkus-llm/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/podcasts/java-ee-quarkus-llm/en/smallimage/the-infoq-podcast-logo-thumbnail-1777449793047.jpg"/&gt;&lt;p&gt;Adam Bien, an independent consultant and pioneer of zero dependencies in the enterprise world of Java, highlights the benefits of consistently using standards, regardless of whether they involve Java or existing patterns. He argues that by doing so, he managed to future-proof the systems he built, preparing them for the cloud era and even for the AI-Native era.&lt;/p&gt; &lt;i&gt;By Adam Bien&lt;/i&gt;</description>
      <category>Java</category>
      <category>Cloud</category>
      <category>The InfoQ Podcast</category>
      <category>Code Generation</category>
      <category>Development</category>
      <category>podcast</category>
      <pubDate>Mon, 11 May 2026 11:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/podcasts/java-ee-quarkus-llm/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Adam Bien</dc:creator>
      <dc:date>2026-05-11T11:00:00Z</dc:date>
      <dc:identifier>/podcasts/java-ee-quarkus-llm/en</dc:identifier>
    </item>
    <item>
      <title>New DORA Report Claims Strong Engineering Foundations Drive AI Return on Investment</title>
      <link>https://www.infoq.com/news/2026/05/dora-roi-ai-assisted-dev-report/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/dora-roi-ai-assisted-dev-report/en/headerimage/generatedHeaderImage-1778428945703.jpg"/&gt;&lt;p&gt;Google Cloud's DORA team released a report detailing a framework for assessing the ROI of AI in software development. It emphasizes that successful AI implementation depends on organizational systems rather than just tools. The report introduces a J-Curve model for value realization. It also discusses the importance of workforce retention and process redesign for achieving long-term gains.&lt;/p&gt; &lt;i&gt;By Matt Saunders&lt;/i&gt;</description>
      <category>ROI</category>
      <category>AI Assisted Coding</category>
      <category>DevOps</category>
      <category>news</category>
      <pubDate>Mon, 11 May 2026 08:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/dora-roi-ai-assisted-dev-report/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Matt Saunders</dc:creator>
      <dc:date>2026-05-11T08:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/dora-roi-ai-assisted-dev-report/en</dc:identifier>
    </item>
  </channel>
</rss>
