<?xml version="1.0" encoding="UTF-8"?>
<rss xmlns:dc="http://purl.org/dc/elements/1.1/" version="2.0">
  <channel>
    <title>InfoQ - Common Vulnerabilities and Exposures - Articles</title>
    <link>https://www.infoq.com</link>
    <description>InfoQ Common Vulnerabilities and Exposures Articles feed</description>
    <item>
      <title>Article: Prompt Injection for Large Language Models</title>
      <link>https://www.infoq.com/articles/large-language-models-prompt-injection-stealing/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=Common+Vulnerabilities+and+Exposures-articles</link>
      <description>&lt;img src="https://res.infoq.com/articles/large-language-models-prompt-injection-stealing/en/headerimage/prompt-injection-llm-header-1737547033103.jpg"/&gt;&lt;p&gt;This article will cover two common attack vectors against large language models and tools based on them: prompt injection and prompt stealing. We will additionally introduce three approaches to make your LLM-based systems and tools less vulnerable to these kinds of attacks (fine-tuning, adversarial detectors, and system prompt hardening) and review their benefits and limitations.&lt;/p&gt; &lt;i&gt;By Georg Dresler&lt;/i&gt;</description>
      <category>Information Security</category>
      <category>Large language models</category>
      <category>InfoQ Dev Summit Munich 2024</category>
      <category>Common Vulnerabilities and Exposures</category>
      <category>Security</category>
      <category>QCon Software Development Conference</category>
      <category>Development</category>
      <category>AI, ML &amp; Data Engineering</category>
      <category>article</category>
      <pubDate>Mon, 03 Feb 2025 11:00:00 GMT</pubDate>
      <guid>https://www.infoq.com/articles/large-language-models-prompt-injection-stealing/?utm_campaign=infoq_content&amp;utm_source=infoq&amp;utm_medium=feed&amp;utm_term=Common+Vulnerabilities+and+Exposures-articles</guid>
      <dc:creator>Georg Dresler</dc:creator>
      <dc:date>2025-02-03T11:00:00Z</dc:date>
      <dc:identifier>/articles/large-language-models-prompt-injection-stealing/en</dc:identifier>
    </item>
  </channel>
</rss>
