<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>InfoQ</title>
    <link>https://www.infoq.com</link>
    <description>InfoQ feed</description>
    <item>
      <title>Google Announces GKE Agent Sandbox and Hypercluster at Next '26, Positioning Kubernetes as AI Agent</title>
      <link>https://www.infoq.com/news/2026/05/gke-agent-sandbox-hypercluster/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/gke-agent-sandbox-hypercluster/en/headerimage/generatedHeaderImage-1777875585040.jpg"/&gt;&lt;p&gt;Google announced GKE Agent Sandbox and hypercluster at Cloud Next '26. Agent Sandbox uses gVisor kernel isolation for secure agent code execution at 300 sandboxes per second, built as an open-source Kubernetes SIG Apps subproject. It is currently the only native agent sandbox among the three major hyperscalers. Hypercluster manages a million chips from a single control plane.&lt;/p&gt; &lt;i&gt;By Steef-Jan Wiggers&lt;/i&gt;</description>
      <category>AI Architecture</category>
      <category>Google Cloud</category>
      <category>Cloud</category>
      <category>Containers</category>
      <category>Cloud Native Computing Foundation</category>
      <category>Google Cloud Platform</category>
      <category>DevOps</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>Architecture &amp; Design</category>
      <category>Development</category>
      <category>news</category>
      <pubDate>Thu, 07 May 2026 10:06:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/gke-agent-sandbox-hypercluster/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Steef-Jan Wiggers</dc:creator>
      <dc:date>2026-05-07T10:06:00Z</dc:date>
      <dc:identifier>/news/2026/05/gke-agent-sandbox-hypercluster/en</dc:identifier>
    </item>
    <item>
      <title>Leading Open Source Author Calls for Verification over Trust in Software Supply Chains</title>
      <link>https://www.infoq.com/news/2026/05/stenberg-curl-verification-trust/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/stenberg-curl-verification-trust/en/headerimage/generatedHeaderImage-1777409230642.jpg"/&gt;&lt;p&gt;In a blog post published in March 2026, Daniel Stenberg, creator and lead developer of curl, makes the case that the software industry's default position of trusting well-known components is no longer adequate. Stenberg argues that users and organisations should actively verify the software they consume, and he uses curl's own practices as a concrete example of how that can be done.&lt;/p&gt; &lt;i&gt;By Matt Saunders&lt;/i&gt;</description>
      <category>Software Supply Chain</category>
      <category>Verification</category>
      <category>Dependency Management</category>
      <category>DevOps</category>
      <category>Culture &amp; Methods</category>
      <category>news</category>
      <pubDate>Thu, 07 May 2026 07:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/stenberg-curl-verification-trust/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Matt Saunders</dc:creator>
      <dc:date>2026-05-07T07:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/stenberg-curl-verification-trust/en</dc:identifier>
    </item>
    <item>
      <title>LinkedIn Consolidates Hiring Data Pipelines to Power AI Driven Talent Systems</title>
      <link>https://www.infoq.com/news/2026/05/linkedin-unified-hiring-platform/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/linkedin-unified-hiring-platform/en/headerimage/generatedHeaderImage-1776925266106.jpg"/&gt;&lt;p&gt;LinkedIn introduced a unified integrations platform to standardize and reconcile hiring data across systems. The platform reduces onboarding time by 72%, improves data consistency and completeness, and enables scalable AI-driven hiring features through standardized schemas, orchestration workflows, and centralized data processing.&lt;/p&gt; &lt;i&gt;By Leela Kumili&lt;/i&gt;</description>
      <category>Platforms</category>
      <category>Hiring</category>
      <category>Evolutionary Architecture</category>
      <category>Unification</category>
      <category>Integration</category>
      <category>Data Pipelines</category>
      <category>Data Analytics</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>Development</category>
      <category>Architecture &amp; Design</category>
      <category>news</category>
      <pubDate>Wed, 06 May 2026 14:15:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/linkedin-unified-hiring-platform/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Leela Kumili</dc:creator>
      <dc:date>2026-05-06T14:15:00Z</dc:date>
      <dc:identifier>/news/2026/05/linkedin-unified-hiring-platform/en</dc:identifier>
    </item>
    <item>
      <title>Presentation: AI-First Software Delivery: Balancing Innovation with Proven Practices</title>
      <link>https://www.infoq.com/presentations/ai-first-practices/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/presentations/ai-first-practices/en/mediumimage/medium-1777371216610.jpeg"/&gt;&lt;p&gt;Wes Reisz discusses the shift toward AI-first software delivery, emphasizing that agentic workflows are not one-size-fits-all.  He explains a strategic two-by-two model based on code longevity and automated verification to decide between supervised and unsupervised agents.  He shares the RIPER-5 framework - Research, Innovate, Plan, Execute, Review - to amplify engineering discipline.&lt;/p&gt; &lt;i&gt;By Wes Reisz&lt;/i&gt;</description>
      <category>Artificial Intelligence</category>
      <category>Best Practices</category>
      <category>QCon AI 2025</category>
      <category>Transcripts</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>presentation</category>
      <pubDate>Wed, 06 May 2026 11:12:00 GMT</pubDate>
      <guid>https://www.infoq.com/presentations/ai-first-practices/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Wes Reisz</dc:creator>
      <dc:date>2026-05-06T11:12:00Z</dc:date>
      <dc:identifier>/presentations/ai-first-practices/en</dc:identifier>
    </item>
    <item>
      <title>Attacker Bought 30 WordPress Plugins on Flippa and Backdoored All of Them</title>
      <link>https://www.infoq.com/news/2026/05/wordpress-plugins-supply-chain/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/wordpress-plugins-supply-chain/en/headerimage/generatedHeaderImage-1777874069748.jpg"/&gt;&lt;p&gt;An attacker purchased 30+ WordPress plugins on Flippa for six figures, planted a PHP deserialization backdoor in the first commit, and waited eight months before activating it across 400,000 installations. The attack used Ethereum smart contracts to resolve C2. WordPress.org has no mechanism for reviewing plugin ownership transfers, a gap that npm and PyPI addressed years ago.&lt;/p&gt; &lt;i&gt;By Steef-Jan Wiggers&lt;/i&gt;</description>
      <category>Security Vulnerabilities</category>
      <category>Application Security</category>
      <category>Software Supply Chain</category>
      <category>Dependency Management</category>
      <category>Development</category>
      <category>Architecture &amp; Design</category>
      <category>news</category>
      <pubDate>Wed, 06 May 2026 10:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/wordpress-plugins-supply-chain/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Steef-Jan Wiggers</dc:creator>
      <dc:date>2026-05-06T10:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/wordpress-plugins-supply-chain/en</dc:identifier>
    </item>
    <item>
      <title>Google's New TPU Generation is Specifically Designed for Agents and SOTA Model Training</title>
      <link>https://www.infoq.com/news/2026/05/google-8th-tpu-generation/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/google-8th-tpu-generation/en/headerimage/google-8th-gen-tpus-1778060595193.jpeg"/&gt;&lt;p&gt;Google has unvelied a new generation of Tensor Processing Units (TPUs), featuring two specialized chips designed to accelerate model training and agent workflows, which require continuous, multi-step reasoning, and action loops distributed across multiple models. The new TPUs deliver better performance, memory, and energy efficiency, the company says.&lt;/p&gt; &lt;i&gt;By Sergio De Simone&lt;/i&gt;</description>
      <category>Agents</category>
      <category>GPU</category>
      <category>Large language models</category>
      <category>Google</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>Development</category>
      <category>news</category>
      <pubDate>Wed, 06 May 2026 10:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/google-8th-tpu-generation/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Sergio De Simone</dc:creator>
      <dc:date>2026-05-06T10:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/google-8th-tpu-generation/en</dc:identifier>
    </item>
    <item>
      <title>Article: Beyond the Benchmark: A Metrics-Driven Approach to Sustained iOS Performance on Real Devices</title>
      <link>https://www.infoq.com/articles/metrics-driven-approach-ios-performance/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/articles/metrics-driven-approach-ios-performance/en/headerimage/metrics-driven-approach-ios-performance-header-1777624958302.jpg"/&gt;&lt;p&gt;iOS performance engineering often defaults to a mental model where performance is a property of a component. Performance is instead an emergent behavior of the interaction between application code, device hardware, OS resource management, network conditions, and user behavior patterns over time. This article gives a direct, first-party path to capturing performance issues using Xcode Instruments.&lt;/p&gt; &lt;i&gt;By Vasuki Uday Kiran Vudathala&lt;/i&gt;</description>
      <category>User Experience</category>
      <category>Xcode</category>
      <category>Mobile</category>
      <category>iOS</category>
      <category>Performance</category>
      <category>Development</category>
      <category>article</category>
      <pubDate>Wed, 06 May 2026 09:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/articles/metrics-driven-approach-ios-performance/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Vasuki Uday Kiran Vudathala</dc:creator>
      <dc:date>2026-05-06T09:00:00Z</dc:date>
      <dc:identifier>/articles/metrics-driven-approach-ios-performance/en</dc:identifier>
    </item>
    <item>
      <title>Grafana's Kubernetes Monitoring Helm Chart v4 Brings Multiple Fixes</title>
      <link>https://www.infoq.com/news/2026/05/kubernetes-monitoring-helm/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/kubernetes-monitoring-helm/en/headerimage/generatedHeaderImage-1777406799196.jpg"/&gt;&lt;p&gt;Grafana Labs has released version 4 of its Kubernetes Monitoring Helm chart, describing it as the most significant update the chart has received since its introduction. The release, announced in April 2026 by Pete Wall and Beverly Buchanan, addresses a range of configuration problems that had accumulated as users scaled to larger and more complex deployments.&lt;/p&gt; &lt;i&gt;By Matt Saunders&lt;/i&gt;</description>
      <category>Kubernetes</category>
      <category>Monitoring</category>
      <category>helm</category>
      <category>DevOps</category>
      <category>news</category>
      <pubDate>Wed, 06 May 2026 07:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/kubernetes-monitoring-helm/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Matt Saunders</dc:creator>
      <dc:date>2026-05-06T07:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/kubernetes-monitoring-helm/en</dc:identifier>
    </item>
    <item>
      <title>Inside Claude Code Auto Mode: Anthropic’s Autonomous Coding System with Human Approval Gates</title>
      <link>https://www.infoq.com/news/2026/05/anthropic-claude-code-auto-mode/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/anthropic-claude-code-auto-mode/en/headerimage/generatedHeaderImage-1777787075311.jpg"/&gt;&lt;p&gt;Anthropic has introduced auto mode in Claude Code, enabling multi-step software development workflows with reduced manual intervention. The feature combines automated execution with layered safety mechanisms, including input filtering, action evaluation, and two-stage classification, while maintaining human approval checkpoints for sensitive operations.&lt;/p&gt; &lt;i&gt;By Leela Kumili&lt;/i&gt;</description>
      <category>autonomous</category>
      <category>AI Architecture</category>
      <category>AI Development</category>
      <category>Developer Experience</category>
      <category>Anthropic</category>
      <category>AI Assisted Coding</category>
      <category>Claude</category>
      <category>AI Coding</category>
      <category>Orchestration</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>Development</category>
      <category>Architecture &amp; Design</category>
      <category>news</category>
      <pubDate>Tue, 05 May 2026 14:38:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/anthropic-claude-code-auto-mode/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Leela Kumili</dc:creator>
      <dc:date>2026-05-05T14:38:00Z</dc:date>
      <dc:identifier>/news/2026/05/anthropic-claude-code-auto-mode/en</dc:identifier>
    </item>
    <item>
      <title>Presentation: How Netflix Shapes our Fleet for Efficiency and Reliability</title>
      <link>https://www.infoq.com/presentations/strategy-workload-hardware/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/presentations/strategy-workload-hardware/en/mediumimage/medium-1777370214319.jpg"/&gt;&lt;p&gt;The speakers explain the inherent tension between service efficiency and reliability at Netflix's global scale. They share a mental model for "risk-adjusted net value," moving beyond simple CPU utilization to focus on capacity buffers. They discuss hardware shaping, proactive traffic steering, and reactive levers like "hammers" and prioritized load shedding to protect critical playback.&lt;/p&gt; &lt;i&gt;By Joseph Lynch, Argha C&lt;/i&gt;</description>
      <category>Resilience</category>
      <category>QCon San Francisco 2025</category>
      <category>Hardware</category>
      <category>Case Study</category>
      <category>Transcripts</category>
      <category>DevOps</category>
      <category>presentation</category>
      <pubDate>Tue, 05 May 2026 14:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/presentations/strategy-workload-hardware/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Joseph Lynch, Argha C</dc:creator>
      <dc:date>2026-05-05T14:00:00Z</dc:date>
      <dc:identifier>/presentations/strategy-workload-hardware/en</dc:identifier>
    </item>
    <item>
      <title>GitHub Enhances CodeQL with Declarative Security Modeling for Faster, More Flexible Analysis</title>
      <link>https://www.infoq.com/news/2026/05/github-codeql-security-modeling/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/github-codeql-security-modeling/en/headerimage/generatedHeaderImage-1777889993931.jpg"/&gt;&lt;p&gt;GitHub has introduced a significant update to its CodeQL engine, enabling developers to define custom sanitizers and validators directly through "models-as-data," a move that simplifies how teams extend security analysis across their codebases.&lt;/p&gt; &lt;i&gt;By Craig Risi&lt;/i&gt;</description>
      <category>github</category>
      <category>CodeQL</category>
      <category>Application Security</category>
      <category>DevOps</category>
      <category>news</category>
      <pubDate>Tue, 05 May 2026 12:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/github-codeql-security-modeling/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Craig Risi</dc:creator>
      <dc:date>2026-05-05T12:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/github-codeql-security-modeling/en</dc:identifier>
    </item>
    <item>
      <title>Mistral Adds Remote Agents and Work Mode to Le Chat</title>
      <link>https://www.infoq.com/news/2026/05/mistral-agents-lechat/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/mistral-agents-lechat/en/headerimage/generatedHeaderImage-1777663082615.jpg"/&gt;&lt;p&gt;Mistral has released Mistral Medium 3.5, a 128-billion parameter model designed to handle instruction following, reasoning, and coding within a single system, and introduced new cloud-based agent capabilities in its Vibe and Le Chat products.&lt;/p&gt; &lt;i&gt;By Daniel Dominguez&lt;/i&gt;</description>
      <category>Artificial Intelligence</category>
      <category>Agents</category>
      <category>Large language models</category>
      <category>Mistral AI</category>
      <category>OpenAI</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>news</category>
      <pubDate>Tue, 05 May 2026 10:08:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/mistral-agents-lechat/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Daniel Dominguez</dc:creator>
      <dc:date>2026-05-05T10:08:00Z</dc:date>
      <dc:identifier>/news/2026/05/mistral-agents-lechat/en</dc:identifier>
    </item>
    <item>
      <title>Article: Three Pillars of Platform Engineering: a Virtuous Cycle</title>
      <link>https://www.infoq.com/articles/platform-reliability-cycle/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/articles/platform-reliability-cycle/en/headerimage/platform-reliability-cycle-header-1777467114542.jpg"/&gt;&lt;p&gt;Platform engineering succeeds when reliability and ergonomics reinforce each other rather than compete. This article explores three foundational pillars: automated reliability, developer ergonomics, and operator ergonomics. Together, they establish a virtuous cycle that strengthens system stability, reduces operational burden, and empowers teams to scale infrastructure with confidence.&lt;/p&gt; &lt;i&gt;By Pratik Agarwal&lt;/i&gt;</description>
      <category>Developer Experience</category>
      <category>Site Reliability Engineering</category>
      <category>Observability</category>
      <category>Distributed Systems</category>
      <category>Platform Engineering</category>
      <category>DevOps</category>
      <category>Architecture &amp; Design</category>
      <category>article</category>
      <pubDate>Tue, 05 May 2026 09:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/articles/platform-reliability-cycle/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Pratik Agarwal</dc:creator>
      <dc:date>2026-05-05T09:00:00Z</dc:date>
      <dc:identifier>/articles/platform-reliability-cycle/en</dc:identifier>
    </item>
    <item>
      <title>Figma Builds In-House Redis Proxy to Hit Six Nines Uptime</title>
      <link>https://www.infoq.com/news/2026/05/figma-redis-figcache/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/figma-redis-figcache/en/headerimage/generatedHeaderImage-1777405837575.jpg"/&gt;&lt;p&gt;Figma has published a detailed account of how it built an in-house Redis proxy service called FigCache, replacing a fragmented caching stack that had become a liability for site availability. The system, described in a post by Kevin Lin, has been in production since the second half of 2025 and has delivered what the company describes as six nines of uptime across its caching layer.&lt;/p&gt; &lt;i&gt;By Matt Saunders&lt;/i&gt;</description>
      <category>Redis</category>
      <category>DevOps</category>
      <category>Architecture &amp; Design</category>
      <category>news</category>
      <pubDate>Tue, 05 May 2026 07:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/figma-redis-figcache/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Matt Saunders</dc:creator>
      <dc:date>2026-05-05T07:00:00Z</dc:date>
      <dc:identifier>/news/2026/05/figma-redis-figcache/en</dc:identifier>
    </item>
    <item>
      <title>Cloudflare Introduces Flagship: an Edge-Native Feature Flag Service Built on OpenFeature</title>
      <link>https://www.infoq.com/news/2026/05/cloudflare-flagship-openfeature/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</link>
      <description>&lt;img src="https://res.infoq.com/news/2026/05/cloudflare-flagship-openfeature/en/headerimage/generatedHeaderImage-1776925782675.jpg"/&gt;&lt;p&gt;Cloudflare recently announced the closed beta of Flagship, a new feature flag service built directly into its global edge platform. The service lets teams control feature rollouts and experiment with changes without redeploying code, while evaluating flags locally in Cloudflare Workers rather than calling external flag services.&lt;/p&gt; &lt;i&gt;By Renato Losio&lt;/i&gt;</description>
      <category>Continuous Delivery</category>
      <category>Cloudflare</category>
      <category>Low Latency</category>
      <category>Edge Computing</category>
      <category>Cloud Native Computing Foundation</category>
      <category>Feature Toggle</category>
      <category>DevOps</category>
      <category>Development</category>
      <category>news</category>
      <pubDate>Tue, 05 May 2026 06:24:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2026/05/cloudflare-flagship-openfeature/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=global</guid>
      <dc:creator>Renato Losio</dc:creator>
      <dc:date>2026-05-05T06:24:00Z</dc:date>
      <dc:identifier>/news/2026/05/cloudflare-flagship-openfeature/en</dc:identifier>
    </item>
  </channel>
</rss>
