<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>InfoQ - Model Inference - News</title>
    <link>https://www.infoq.com</link>
    <description>InfoQ Model Inference News feed</description>
    <item>
      <title>KubeCon NA 2025 - Robert Nishihara on Open Source AI Compute with Kubernetes, Ray, PyTorch, and vLLM</title>
      <link>https://www.infoq.com/news/2025/11/kubecon-open-source-ai-compute/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=Model+Inference-news</link>
      <description>&lt;img src="https://res.infoq.com/news/2025/11/kubecon-open-source-ai-compute/en/headerimage/kubecon-open-source-ai-header-1764314734603.jpg"/&gt;&lt;p&gt;AI workloads are growing more complex in both compute and data requirements, and technologies like Kubernetes and PyTorch can help build production-ready AI systems to support them. Robert Nishihara from Anyscale recently spoke at KubeCon + CloudNativeCon North America 2025 about how an AI compute stack comprising Kubernetes, PyTorch, vLLM, and Ray can support these new AI workloads.&lt;/p&gt; &lt;i&gt;By Srini Penchikala&lt;/i&gt;</description>
      <category>Model Inference</category>
      <category>Containers</category>
      <category>Kubernetes</category>
      <category>Orchestration</category>
      <category>Artificial Intelligence</category>
      <category>Machine Learning</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>news</category>
      <pubDate>Fri, 28 Nov 2025 18:15:00 GMT</pubDate>
      <guid>https://www.infoq.com/news/2025/11/kubecon-open-source-ai-compute/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=Model+Inference-news</guid>
      <dc:creator>Srini Penchikala</dc:creator>
      <dc:date>2025-11-28T18:15:00Z</dc:date>
      <dc:identifier>/news/2025/11/kubecon-open-source-ai-compute/en</dc:identifier>
    </item>
  </channel>
</rss>
