<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>InfoQ - Large Concept Models - Podcasts</title>
    <link>https://www.infoq.com</link>
    <description>Feed of InfoQ podcasts on Large Concept Models</description>
    <item>
      <title>Podcast: Elena Samuylova on Large Language Model (LLM)-Based Application Evaluation and LLM as a Judge</title>
      <link>https://www.infoq.com/podcasts/llm-based-application-evaluation/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=Large+Concept+Models-podcasts</link>
      <description>&lt;img src="https://res.infoq.com/podcasts/llm-based-application-evaluation/en/smallimage/the-infoq-podcast-logo-thumbnail-1759238043300.jpg"/&gt;&lt;p&gt;In this podcast, InfoQ spoke with Elena Samuylova from Evidently AI, on best practices in evaluating Large Language Model (LLM)-based applications. She also discussed the tools for evaluating, testing and monitoring applications powered by AI technologies.&lt;/p&gt; &lt;i&gt;By Elena Samuylova&lt;/i&gt;</description>
      <category>Benchmark</category>
      <category>The InfoQ Podcast</category>
      <category>Agents</category>
      <category>Artificial Intelligence</category>
      <category>Large Concept Models</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>podcast</category>
      <pubDate>Mon, 06 Oct 2025 11:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/podcasts/llm-based-application-evaluation/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=Large+Concept+Models-podcasts</guid>
      <dc:creator>Elena Samuylova</dc:creator>
      <dc:date>2025-10-06T11:00:00Z</dc:date>
      <dc:identifier>/podcasts/llm-based-application-evaluation/en</dc:identifier>
    </item>
  </channel>
</rss>
