<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet href="/stylesheet.xsl" type="text/xsl"?>
<rss version="2.0" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:atom="http://www.w3.org/2005/Atom" xmlns:sy="http://purl.org/rss/1.0/modules/syndication/" xmlns:content="http://purl.org/rss/1.0/modules/content/" xmlns:itunes="http://www.itunes.com/dtds/podcast-1.0.dtd" xmlns:podcast="https://podcastindex.org/namespace/1.0">
  <channel>
    <atom:link rel="self" type="application/rss+xml" href="https://feeds.transistor.fm/machines-meaning" title="MP3 Audio"/>
    <atom:link rel="hub" href="https://pubsubhubbub.appspot.com/"/>
    <podcast:podping usesPodping="true"/>
    <title>Machines &amp; Meaning</title>
    <generator>Transistor (https://transistor.fm)</generator>
    <itunes:new-feed-url>https://feeds.transistor.fm/machines-meaning</itunes:new-feed-url>
    <description>Machines &amp; Meaning examines artificial intelligence through the lens of different philosophers to understand how AI technology shapes human experience. Created for curious, thoughtful people who want to move beyond simplistic "AI is good" or "AI is bad" narratives, each episode takes a key concept from a philosopher and uses it to examine a specific aspect of AI technology and its impact on human life. While the show assumes listeners are familiar with current AI developments, it doesn't require technical knowledge. The series aims to help listeners develop a deeper understanding of how these technologies are changing how we think, behave, and relate to one another by bringing philosophical insights into conversation with modern AI developments.</description>
    <copyright>© 2024 Angel Evan</copyright>
    <podcast:guid>cd0cc57f-7baf-583a-a701-7be36e6b2853</podcast:guid>
    <podcast:locked>yes</podcast:locked>
    <language>en</language>
    <pubDate>Mon, 16 Mar 2026 06:00:13 -0700</pubDate>
    <lastBuildDate>Mon, 16 Mar 2026 06:02:00 -0700</lastBuildDate>
    <link>http://machinesandmeaning.com</link>
    <image>
      <url>https://img.transistorcdn.com/aLQBbVtQx3Lq22_BijDIqVMxnSZHer8ZCW3bEl9GCoY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9kNjhj/ZThlMzIyODU1ZjBm/YjY0MDA4MWUyMzdh/OTM0NC5wbmc.jpg</url>
      <title>Machines &amp; Meaning</title>
      <link>http://machinesandmeaning.com</link>
    </image>
    <itunes:category text="Society &amp; Culture">
      <itunes:category text="Philosophy"/>
    </itunes:category>
    <itunes:category text="Technology"/>
    <itunes:type>episodic</itunes:type>
    <itunes:author>Angel Evan</itunes:author>
    <itunes:image href="https://img.transistorcdn.com/aLQBbVtQx3Lq22_BijDIqVMxnSZHer8ZCW3bEl9GCoY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9kNjhj/ZThlMzIyODU1ZjBm/YjY0MDA4MWUyMzdh/OTM0NC5wbmc.jpg"/>
    <itunes:summary>Machines &amp; Meaning examines artificial intelligence through the lens of different philosophers to understand how AI technology shapes human experience. Created for curious, thoughtful people who want to move beyond simplistic "AI is good" or "AI is bad" narratives, each episode takes a key concept from a philosopher and uses it to examine a specific aspect of AI technology and its impact on human life. While the show assumes listeners are familiar with current AI developments, it doesn't require technical knowledge. The series aims to help listeners develop a deeper understanding of how these technologies are changing how we think, behave, and relate to one another by bringing philosophical insights into conversation with modern AI developments.</itunes:summary>
    <itunes:subtitle>Machines &amp; Meaning examines artificial intelligence through the lens of different philosophers to understand how AI technology shapes human experience.</itunes:subtitle>
    <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
    <itunes:owner>
      <itunes:name>Angel Evan</itunes:name>
    </itunes:owner>
    <itunes:complete>No</itunes:complete>
    <itunes:explicit>No</itunes:explicit>
    <item>
      <title>Ayn Rand and the Dark Side of AI Efficiency</title>
      <itunes:season>2</itunes:season>
      <podcast:season>2</podcast:season>
      <itunes:episode>1</itunes:episode>
      <podcast:episode>1</podcast:episode>
      <itunes:title>Ayn Rand and the Dark Side of AI Efficiency</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">7eef41d6-90f6-4072-a4df-47fdbc7b2d6b</guid>
      <link>https://share.transistor.fm/s/0025eada</link>
      <description>
        <![CDATA[<p>Using Ayn Rand’s philosophy of Objectivism, we examine how AI’s efficiency gains are made possible by ignoring the quiet awareness of claiming skills we don’t fully possess.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using Ayn Rand’s philosophy of Objectivism, we examine how AI’s efficiency gains are made possible by ignoring the quiet awareness of claiming skills we don’t fully possess.</p>]]>
      </content:encoded>
      <pubDate>Mon, 16 Mar 2026 06:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/0025eada/d608233e.mp3" length="15621706" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/Z4zKv3HCzen85Uwxf79VEnOSwLstXqdooskpiP2JDY8/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS81OTM2/YzU5MDNiM2U4MzA5/ZmRkMzQwYzUxNDgz/YWNlZC5wbmc.jpg"/>
      <itunes:duration>975</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using Ayn Rand’s philosophy of Objectivism, we examine how AI’s efficiency gains are made possible by ignoring the quiet awareness of claiming skills we don’t fully possess.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Ibn Khaldun’s Warning: When Tools Become Purposes</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>12</itunes:episode>
      <podcast:episode>12</podcast:episode>
      <itunes:title>Ibn Khaldun’s Warning: When Tools Become Purposes</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">b8cfa9f8-93c0-4f7e-89cf-1895561acb02</guid>
      <link>https://share.transistor.fm/s/99df61d6</link>
      <description>
        <![CDATA[<p>Using Ibn Khaldun’s concept of asabiyyah (ah-sa-BEE-yah), a word derived from Arabic that roughly translates to tribal solidarity or social cohesion, we examine how AI is being rhetorically elevated to the status of collective purpose.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using Ibn Khaldun’s concept of asabiyyah (ah-sa-BEE-yah), a word derived from Arabic that roughly translates to tribal solidarity or social cohesion, we examine how AI is being rhetorically elevated to the status of collective purpose.</p>]]>
      </content:encoded>
      <pubDate>Mon, 22 Dec 2025 13:23:40 -0800</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/99df61d6/5964a8b5.mp3" length="11588832" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/CrnNDoqZoENbC8BfqqgWW4fx_SvF9R0pvJbBm2s_y5Q/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9hZTAy/NDIzNTg2MTczNDVh/ODFlNWU4YWRjMDdi/MTYxYS5wbmc.jpg"/>
      <itunes:duration>721</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using Ibn Khaldun’s concept of asabiyyah (ah-sa-BEE-yah), a word derived from Arabic that roughly translates to tribal solidarity or social cohesion, we examine how AI is being rhetorically elevated to the status of collective purpose.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Credibility Deficits: Miranda Fricker and the Illusion of AI Literacy</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>11</itunes:episode>
      <podcast:episode>11</podcast:episode>
      <itunes:title>Credibility Deficits: Miranda Fricker and the Illusion of AI Literacy</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">a1145738-5b0a-4c44-af5e-85a29c9f9fef</guid>
      <link>https://share.transistor.fm/s/2af52ddf</link>
      <description>
        <![CDATA[<p>Using Miranda Fricker’s concept of testimonial injustice, we examine how AI creates new hierarchies of who gets taken seriously and how the credibility we assign (or don’t) affects people’s lives.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using Miranda Fricker’s concept of testimonial injustice, we examine how AI creates new hierarchies of who gets taken seriously and how the credibility we assign (or don’t) affects people’s lives.</p>]]>
      </content:encoded>
      <pubDate>Tue, 11 Nov 2025 06:21:17 -0800</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/2af52ddf/07d2b759.mp3" length="14590737" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/Me0hCp7pRmvIF_HsBMHkgWoB3NzTr5ORc7L9nbCi0hY/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8zZjE3/NWJlYjQ3NTg1ZTU4/YzljZWRlYmI1NTg5/YTg3NC5qcGc.jpg"/>
      <itunes:duration>911</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using Miranda Fricker’s concept of testimonial injustice, we examine how AI creates new hierarchies of who gets taken seriously and how the credibility we assign (or don’t) affects people’s lives.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>AI’s Aesthetic Trap: Søren Kierkegaard’s Three Spheres of Existence</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>10</itunes:episode>
      <podcast:episode>10</podcast:episode>
      <itunes:title>AI’s Aesthetic Trap: Søren Kierkegaard’s Three Spheres of Existence</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">242e215a-06a4-4451-b183-475b1e3abe8d</guid>
      <link>https://share.transistor.fm/s/8399497b</link>
      <description>
        <![CDATA[<p>Exploring how Kierkegaard’s three spheres of existence reveal why AI might be creating the most sophisticated trap for authentic human development by appearing to create fulfillment while preventing genuine growth.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Exploring how Kierkegaard’s three spheres of existence reveal why AI might be creating the most sophisticated trap for authentic human development by appearing to create fulfillment while preventing genuine growth.</p>]]>
      </content:encoded>
      <pubDate>Mon, 13 Oct 2025 05:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/8399497b/a9bba1c0.mp3" length="14807169" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/q-5lqtpTy8TTo3rkxNryFQpuaERFJE4yob0U4hyhh2s/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9iNWU0/ZTMyMWQyN2Y5MGY1/YTE5NmRiNzEwN2Y1/NzNhMy5wbmc.jpg"/>
      <itunes:duration>923</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Exploring how Kierkegaard’s three spheres of existence reveal why AI might be creating the most sophisticated trap for authentic human development by appearing to create fulfillment while preventing genuine growth.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Hannah Arendt and AI’s Collective Thoughtlessness</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>9</itunes:episode>
      <podcast:episode>9</podcast:episode>
      <itunes:title>Hannah Arendt and AI’s Collective Thoughtlessness</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">ae6a9581-664f-42cc-82f2-82ad84712df6</guid>
      <link>https://share.transistor.fm/s/a22d0fb3</link>
      <description>
        <![CDATA[<p>Exploring how Hannah Arendt’s concept of “thoughtlessness” reveals why AI systems create the perfect conditions for systematic harm that emerge from widespread non-engagement with consequences.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Exploring how Hannah Arendt’s concept of “thoughtlessness” reveals why AI systems create the perfect conditions for systematic harm that emerge from widespread non-engagement with consequences.</p>]]>
      </content:encoded>
      <pubDate>Mon, 08 Sep 2025 03:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/a22d0fb3/8d717ef9.mp3" length="12450097" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/5Sf3klPKbSrSS5we4UhoHWtzgLplDHz1IV7pWP5QoFU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8yZDk5/MDQ4MTBlMGFhMjlk/NDQ5MzYwOGQwMDNj/ZTI5OS5wbmc.jpg"/>
      <itunes:duration>777</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Exploring how Hannah Arendt’s concept of “thoughtlessness” reveals why AI systems create the perfect conditions for systematic harm that emerge from widespread non-engagement with consequences.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Aristotle’s Phronesis and the Wisdom to Judge Ourselves</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>8</itunes:episode>
      <podcast:episode>8</podcast:episode>
      <itunes:title>Aristotle’s Phronesis and the Wisdom to Judge Ourselves</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">d418d3d8-f317-4cb8-9023-852c277fd0e0</guid>
      <link>https://share.transistor.fm/s/bf1ce83e</link>
      <description>
        <![CDATA[<p>Exploring how Aristotle’s concept of practical wisdom reveals the meta-cognitive skills professionals will need to remain valuable in an age when AI can perform most technical tasks.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Exploring how Aristotle’s concept of practical wisdom reveals the meta-cognitive skills professionals will need to remain valuable in an age when AI can perform most technical tasks.</p>]]>
      </content:encoded>
      <pubDate>Mon, 04 Aug 2025 06:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/bf1ce83e/39a9dc8e.mp3" length="13930888" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/0d9YPEzu_HEipjIJqbJl6x65MM0f6ohSMOK33SzmmCk/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS85N2Fh/NGViODljMDc3MjIz/OTg2ZjA1ZGM3N2Q0/Yzk1Zi5wbmc.jpg"/>
      <itunes:duration>868</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Exploring how Aristotle’s concept of practical wisdom reveals the meta-cognitive skills professionals will need to remain valuable in an age when AI can perform most technical tasks.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Permanent Intermediates: Martin Heidegger and AI’s Erosion of Mastery</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>7</itunes:episode>
      <podcast:episode>7</podcast:episode>
      <itunes:title>Permanent Intermediates: Martin Heidegger and AI’s Erosion of Mastery</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">c95ab5d0-a89f-4472-936e-5f931ead6fb5</guid>
      <link>https://share.transistor.fm/s/6b843850</link>
      <description>
        <![CDATA[<p>Exploring how artificial intelligence systematically undermines the conditions necessary for developing human expertise, creating what we might call “permanent intermediates,” people who achieve functional competence but never develop true mastery.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Exploring how artificial intelligence systematically undermines the conditions necessary for developing human expertise, creating what we might call “permanent intermediates,” people who achieve functional competence but never develop true mastery.</p>]]>
      </content:encoded>
      <pubDate>Tue, 01 Jul 2025 07:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/6b843850/598b0e70.mp3" length="12427301" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/8I-2wbDPhitxz-M-twWEsPIsrl1l5gL_pPM06vaEhcw/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS82MWUz/MWE2OTFjNWQxZDYy/NGUwMGU5ZWExZTA3/YjRlNC5qcGc.jpg"/>
      <itunes:duration>772</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Exploring how artificial intelligence systematically undermines the conditions necessary for developing human expertise, creating what we might call “permanent intermediates,” people who achieve functional competence but never develop true mastery.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>The Accountability Threshold: Thomas Aquinas’ Doctrine of Double Effect.</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>6</itunes:episode>
      <podcast:episode>6</podcast:episode>
      <itunes:title>The Accountability Threshold: Thomas Aquinas’ Doctrine of Double Effect.</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">1b2b2214-1207-4e2d-aff6-05e3b488a1de</guid>
      <link>https://share.transistor.fm/s/b49258df</link>
      <description>
        <![CDATA[<p>Exploring how Thomas Aquinas’ Doctrine of Double Effect helps us understand our complex relationship with AI’s unintended consequences. </p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Exploring how Thomas Aquinas’ Doctrine of Double Effect helps us understand our complex relationship with AI’s unintended consequences. </p>]]>
      </content:encoded>
      <pubDate>Sun, 01 Jun 2025 06:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/b49258df/d1b42e96.mp3" length="13866579" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/HqZ6N63wCOZCq5D5H5TGQFrPbw6W2CteHE7sotxTA7Q/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8wNDY0/OTNjNzY0MTIyNGRi/MGU3OGNkNjBlMTMy/M2RlYS5wbmc.jpg"/>
      <itunes:duration>863</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Exploring how Thomas Aquinas’ Doctrine of Double Effect helps us understand our complex relationship with AI’s unintended consequences. </p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Universal Laws: Kant’s Categorical Imperative and AI’s Immutable Rules</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>5</itunes:episode>
      <podcast:episode>5</podcast:episode>
      <itunes:title>Universal Laws: Kant’s Categorical Imperative and AI’s Immutable Rules</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">eac7decb-bfbc-42e2-81b5-7592fc16603b</guid>
      <link>https://share.transistor.fm/s/5f25a84a</link>
      <description>
        <![CDATA[<p>Exploring how Immanuel Kant’s concept of the categorical imperative parallels our current challenge of creating immutable ethical rules for artificial intelligence.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Exploring how Immanuel Kant’s concept of the categorical imperative parallels our current challenge of creating immutable ethical rules for artificial intelligence.</p>]]>
      </content:encoded>
      <pubDate>Thu, 01 May 2025 06:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/5f25a84a/8256edd0.mp3" length="15142946" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/Gl5j1wjNnN9uqda3ZjoL72DaHXkwZPKE7Pb5k5Bujew/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8xMmIy/M2Y0ZGY1OTVjZDQx/MGM0MzEwMTAwYWNk/MGYxMC5wbmc.jpg"/>
      <itunes:duration>945</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Exploring how Immanuel Kant’s concept of the categorical imperative parallels our current challenge of creating immutable ethical rules for artificial intelligence.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>The Detriment of Constructs: Simone de Beauvoir and Our AI Categories</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>4</itunes:episode>
      <podcast:episode>4</podcast:episode>
      <itunes:title>The Detriment of Constructs: Simone de Beauvoir and Our AI Categories</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">8d2a9f64-21da-4fff-9663-e545e36fd077</guid>
      <link>https://share.transistor.fm/s/a0460b67</link>
      <description>
        <![CDATA[<p>Using Simone de Beauvoir’s philosophical framework on categorization, we examine how rigid binary thinking and over-compartmentalization limit our ability to understand and govern A.I.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using Simone de Beauvoir’s philosophical framework on categorization, we examine how rigid binary thinking and over-compartmentalization limit our ability to understand and govern A.I.</p>]]>
      </content:encoded>
      <pubDate>Tue, 01 Apr 2025 06:00:00 -0700</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/a0460b67/fdd84787.mp3" length="14396322" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/kb_iaYr7D9cosZferXN2gjucAKi_Gwh6w2LIINr0sWE/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS85YjMz/NDI0NTBhNzBmYmE3/YjdlNWE2NWQ0MzVk/MTk4OC5qcGc.jpg"/>
      <itunes:duration>898</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using Simone de Beauvoir’s philosophical framework on categorization, we examine how rigid binary thinking and over-compartmentalization limit our ability to understand and govern A.I.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>The Calculation Default: What René Descartes Teaches Us About Reasoning Models</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>3</itunes:episode>
      <podcast:episode>3</podcast:episode>
      <itunes:title>The Calculation Default: What René Descartes Teaches Us About Reasoning Models</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">2ca77c6d-aae0-4980-95b8-4f45f9d84ea1</guid>
      <link>https://share.transistor.fm/s/12f9b519</link>
      <description>
        <![CDATA[<p>Using Descartes’ framework for how we acquire knowledge, we examine what happens when AI reasoning models confront problems where mathematical certainty isn’t enough.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>Using Descartes’ framework for how we acquire knowledge, we examine what happens when AI reasoning models confront problems where mathematical certainty isn’t enough.</p>]]>
      </content:encoded>
      <pubDate>Sat, 01 Mar 2025 19:32:21 -0800</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/12f9b519/cbe62556.mp3" length="14767359" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/xhhFs88TX6TjyUFXxikp0gGaoA0jYdS2Ff5wnNtyQa0/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS81ZTNm/NjlhMDBhODY2Y2U2/OTZhOGI3ODVhNTJm/MmViZS5wbmc.jpg"/>
      <itunes:duration>917</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>Using Descartes’ framework for how we acquire knowledge, we examine what happens when AI reasoning models confront problems where mathematical certainty isn’t enough.</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>Who’s Adapting to Whom? Lewis Mumford’s Warning for Technics.</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>2</itunes:episode>
      <podcast:episode>2</podcast:episode>
      <itunes:title>Who’s Adapting to Whom? Lewis Mumford’s Warning for Technics.</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">70adc20c-49f1-4457-a1c5-f4da63f94993</guid>
      <link>https://share.transistor.fm/s/d0536ab1</link>
      <description>
        <![CDATA[<p>We explore Lewis Mumford’s concept of ‘technics’ to answer an essential question in AI: are we creating technologies that adapt to serve human needs, or are we increasingly adapting ourselves to serve theirs?</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>We explore Lewis Mumford’s concept of ‘technics’ to answer an essential question in AI: are we creating technologies that adapt to serve human needs, or are we increasingly adapting ourselves to serve theirs?</p>]]>
      </content:encoded>
      <pubDate>Sun, 02 Feb 2025 17:51:25 -0800</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/d0536ab1/a544b0fa.mp3" length="11569582" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/kHjmW46RTghrDG6qwQsBd0HJSJXJPSptmKhvO7TH7MU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS9jMDNi/ZWE1ZWM2N2EzNDJj/ZWI1ZWJjN2E5YmY3/ZjE4Ni5qcGc.jpg"/>
      <itunes:duration>719</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>We explore Lewis Mumford’s concept of ‘technics’ to answer an essential question in AI: are we creating technologies that adapt to serve human needs, or are we increasingly adapting ourselves to serve theirs?</p>]]>
      </itunes:summary>
      <itunes:keywords>AI, Ethics, Philosophy, Technology</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
    </item>
    <item>
      <title>The Narrative Machine: LLMs Through the Eyes of Alasdair MacIntyre</title>
      <itunes:season>1</itunes:season>
      <podcast:season>1</podcast:season>
      <itunes:episode>1</itunes:episode>
      <podcast:episode>1</podcast:episode>
      <itunes:title>The Narrative Machine: LLMs Through the Eyes of Alasdair MacIntyre</itunes:title>
      <itunes:episodeType>full</itunes:episodeType>
      <guid isPermaLink="false">00ca54d4-ad00-4406-9a28-3bb332bc26ac</guid>
      <link>https://share.transistor.fm/s/427a0da0</link>
      <description>
        <![CDATA[<p>We explore Alasdair MacIntyre’s concept of narrative fragmentation and whether large language models (LLMs) contribute to it through their underlying architecture.</p>]]>
      </description>
      <content:encoded>
        <![CDATA[<p>We explore Alasdair MacIntyre’s concept of narrative fragmentation and whether large language models (LLMs) contribute to it through their underlying architecture.</p>]]>
      </content:encoded>
      <pubDate>Mon, 30 Dec 2024 18:02:44 -0800</pubDate>
      <author>Angel Evan</author>
      <enclosure url="https://media.transistor.fm/427a0da0/171831c9.mp3" length="11780429" type="audio/mpeg"/>
      <itunes:author>Angel Evan</itunes:author>
      <itunes:image href="https://img.transistorcdn.com/rIVOhP_wMR4ijdTftI_QsGEftLFvY70Uk9KnoXKrSNU/rs:fill:0:0:1/w:1400/h:1400/q:60/mb:500000/aHR0cHM6Ly9pbWct/dXBsb2FkLXByb2R1/Y3Rpb24udHJhbnNp/c3Rvci5mbS8wN2E5/MTUwYmY0NWVkZjhj/OGUyMWYxMTFmMzU4/MDQ0Mi5qcGVn.jpg"/>
      <itunes:duration>735</itunes:duration>
      <itunes:summary>
        <![CDATA[<p>We explore Alasdair MacIntyre’s concept of narrative fragmentation and whether large language models (LLMs) contribute to it through their underlying architecture.</p>]]>
      </itunes:summary>
      <itunes:keywords>LLM, large language models, Alasdair MacIntyre, Ethics, AI, Philosophy, culture</itunes:keywords>
      <itunes:explicit>No</itunes:explicit>
      <podcast:transcript url="https://share.transistor.fm/s/427a0da0/transcription.vtt" type="text/vtt" rel="captions"/>
      <podcast:transcript url="https://share.transistor.fm/s/427a0da0/transcription.srt" type="application/x-subrip" rel="captions"/>
      <podcast:transcript url="https://share.transistor.fm/s/427a0da0/transcription.json" type="application/json" rel="captions"/>
      <podcast:transcript url="https://share.transistor.fm/s/427a0da0/transcription.txt" type="text/plain"/>
      <podcast:transcript url="https://share.transistor.fm/s/427a0da0/transcription" type="text/html"/>
    </item>
  </channel>
</rss>
